From 6161ac35a04b832a751884677865697a26fdf816 Mon Sep 17 00:00:00 2001 From: Gianmaria Del Monte <39946305+gmgigi96@users.noreply.github.com> Date: Mon, 9 Oct 2023 08:58:04 +0200 Subject: [PATCH] CERNBox cleanup (#4212) --- .drone.env | 4 - .drone.star | 242 -- .github/workflows/changelog.yml | 3 +- .github/workflows/compose.yml | 6 +- .github/workflows/docker.yml | 5 +- .github/workflows/lint.yml | 3 +- .github/workflows/release.yml | 3 +- .github/workflows/test.yml | 41 +- .golangci.yaml | 5 + Makefile | 27 +- changelog/unreleased/cbox-cleanup.md | 5 + cmd/reva/import.go | 66 - cmd/reva/main.go | 1 - cmd/reva/share-create.go | 2 +- cmd/revad/main.go | 2 +- cmd/revad/pkg/config/config.go | 34 +- cmd/revad/pkg/config/grpc.go | 12 +- cmd/revad/pkg/config/http.go | 12 +- cmd/revad/runtime/loader.go | 1 - docker/Dockerfile.revad-eos | 10 +- go.mod | 12 +- go.sum | 27 +- internal/grpc/interceptors/log/log.go | 4 + .../grpc/services/appprovider/appprovider.go | 4 +- .../services/authprovider/authprovider.go | 2 +- .../grpc/services/gateway/storageprovider.go | 4 +- .../ocminvitemanager/ocminvitemanager.go | 4 +- .../ocmshareprovider/ocmshareprovider.go | 6 +- .../grpc/services/permissions/permissions.go | 4 +- .../publicshareprovider.go | 6 + .../publicstorageprovider.go | 11 +- .../storageprovider/storageprovider.go | 20 +- .../usershareprovider/usershareprovider.go | 6 + .../strategy/publicshares/publicshares.go | 31 +- internal/http/interceptors/log/log.go | 3 + .../http/services/appprovider/appprovider.go | 4 +- internal/http/services/archiver/handler.go | 8 +- .../http/services/datagateway/datagateway.go | 4 +- .../services/dataprovider/dataprovider.go | 10 +- internal/http/services/ocmd/ocm.go | 2 +- internal/http/services/ocmd/protocols.go | 10 +- internal/http/services/ocmd/shares.go | 22 +- .../http/services/ocmprovider/ocmprovider.go | 24 +- internal/http/services/overleaf/overleaf.go | 10 +- .../http/services/owncloud/ocdav/download.go | 245 ++ 
.../http/services/owncloud/ocdav/ocdav.go | 25 +- .../http/services/owncloud/ocdav/propfind.go | 27 +- internal/http/services/owncloud/ocdav/put.go | 53 + .../services/owncloud/ocs/conversions/main.go | 20 +- .../owncloud/ocs/conversions/permissions.go | 15 +- .../ocs/conversions/permissions_test.go | 13 +- .../services/owncloud/ocs/conversions/role.go | 81 +- .../owncloud/ocs/data/capabilities.go | 192 +- .../http/services/owncloud/ocs/data/config.go | 4 +- .../handlers/apps/sharing/shares/public.go | 92 +- .../handlers/apps/sharing/shares/shares.go | 192 +- .../ocs/handlers/apps/sharing/shares/user.go | 116 +- .../owncloud/ocs/handlers/cloud/user/user.go | 8 +- .../ocs/handlers/cloud/users/users.go | 14 +- .../owncloud/ocs/response/response.go | 14 +- .../services/reverseproxy/reverseproxy.go | 4 +- internal/http/services/sciencemesh/share.go | 8 +- .../services/helloworld/helloworld.go | 4 +- .../services/notifications/notifications.go | 14 +- logs_tests | 324 ++ pkg/app/provider/wopi/wopi.go | 22 +- pkg/auth/manager/impersonator/impersonator.go | 67 - .../manager/impersonator/impersonator_test.go | 52 - pkg/auth/manager/json/json.go | 20 +- pkg/auth/manager/ldap/ldap.go | 1 + pkg/auth/manager/loader/loader.go | 2 - pkg/auth/manager/nextcloud/nextcloud.go | 2 +- pkg/auth/manager/oidc/oidc.go | 22 +- .../manager/owncloudsql/accounts/accounts.go | 161 - .../accounts/accounts_suite_test.go | 31 - .../owncloudsql/accounts/accounts_test.go | 266 -- .../manager/owncloudsql/accounts/test.sqlite | Bin 90112 -> 0 bytes pkg/auth/manager/owncloudsql/owncloudsql.go | 185 -- .../manager/owncloudsql/owncloudsql_test.go | 104 - pkg/auth/scope/resourceinfo.go | 1 + pkg/cbox/group/rest/cache.go | 214 -- pkg/cbox/group/rest/rest.go | 311 -- pkg/cbox/loader/loader.go | 31 - pkg/cbox/share/sql/sql.go | 648 ---- .../storage/eoshomewrapper/eoshomewrapper.go | 114 - pkg/cbox/storage/eoswrapper/eoswrapper.go | 232 -- pkg/cbox/user/rest/cache.go | 211 -- pkg/cbox/user/rest/rest.go | 372 
--- pkg/cbox/utils/conversions.go | 106 +- .../permissions_darwin.go => ctx/pathctx.go} | 28 +- pkg/ctx/userctx.go | 1 + pkg/eosclient/eosbinary/eosbinary.go | 24 +- pkg/eosclient/eosgrpc/eosgrpc.go | 6 +- pkg/eosclient/eosgrpc/eoshttp.go | 10 + .../handler/emailhandler/emailhandler.go | 10 +- .../notificationhelper/notificationhelper.go | 10 +- pkg/notification/template/template.go | 10 +- .../invite/repository/nextcloud/nextcloud.go | 7 +- pkg/ocm/invite/repository/sql/sql.go | 35 +- pkg/ocm/provider/authorizer/mentix/mentix.go | 2 +- .../share/repository/nextcloud/nextcloud.go | 4 +- pkg/{cbox => }/preferences/sql/sql.go | 0 pkg/publicshare/manager/loader/loader.go | 1 + .../manager}/sql/sql.go | 22 +- pkg/share/manager/sql/conversions.go | 262 -- pkg/share/manager/sql/mocks/UserConverter.go | 78 - pkg/share/manager/sql/sql.go | 431 +-- pkg/share/manager/sql/sql_suite_test.go | 31 - pkg/share/manager/sql/sql_test.go | 274 -- pkg/share/manager/sql/test.db | Bin 655360 -> 0 bytes pkg/share/share.go | 5 +- pkg/smtpclient/smtpclient.go | 14 +- pkg/storage/favorite/loader/loader.go | 1 + pkg/{cbox => storage}/favorite/sql/sql.go | 0 pkg/storage/fs/loader/loader.go | 4 - pkg/storage/fs/local/local.go | 4 +- pkg/storage/fs/localhome/localhome.go | 6 +- pkg/storage/fs/ocis/blobstore/blobstore.go | 83 - .../fs/ocis/blobstore/blobstore_suite_test.go | 31 - .../fs/ocis/blobstore/blobstore_test.go | 117 - pkg/storage/fs/ocis/ocis.go | 50 - pkg/storage/fs/ocis/ocis_suite_test.go | 31 - pkg/storage/fs/ocis/ocis_test.go | 60 - pkg/storage/fs/owncloud/owncloud.go | 2395 --------------- pkg/storage/fs/owncloud/owncloud_unix.go | 84 - pkg/storage/fs/owncloud/owncloud_windows.go | 76 - pkg/storage/fs/owncloud/upload.go | 554 ---- .../fs/owncloudsql/filecache/filecache.go | 697 ----- .../filecache/filecache_suite_test.go | 31 - .../owncloudsql/filecache/filecache_test.go | 571 ---- pkg/storage/fs/owncloudsql/filecache/test.db | Bin 655360 -> 0 bytes 
pkg/storage/fs/owncloudsql/owncloudsql.go | 2005 ------------ .../fs/owncloudsql/owncloudsql_unix.go | 84 - .../fs/owncloudsql/owncloudsql_windows.go | 76 - pkg/storage/fs/owncloudsql/upload.go | 492 --- pkg/storage/fs/s3/s3.go | 20 +- pkg/storage/fs/s3ng/blobstore/blobstore.go | 97 - pkg/storage/fs/s3ng/option.go | 64 - pkg/storage/fs/s3ng/option_test.go | 71 - pkg/storage/fs/s3ng/s3ng.go | 53 - pkg/storage/fs/s3ng/s3ng_suite_test.go | 31 - pkg/storage/fs/s3ng/s3ng_test.go | 70 - pkg/storage/migrate/metadata.go | 96 - pkg/storage/migrate/shares.go | 149 - pkg/storage/utils/ace/ace.go | 378 --- pkg/storage/utils/ace/ace_suite_test.go | 31 - pkg/storage/utils/ace/ace_test.go | 229 -- .../utils/decomposedfs/decomposedfs.go | 547 ---- .../decomposedfs_concurrency_test.go | 139 - .../decomposedfs/decomposedfs_suite_test.go | 31 - .../utils/decomposedfs/decomposedfs_test.go | 88 - pkg/storage/utils/decomposedfs/grants.go | 190 -- pkg/storage/utils/decomposedfs/grants_test.go | 164 - pkg/storage/utils/decomposedfs/lookup.go | 227 -- pkg/storage/utils/decomposedfs/lookup_test.go | 118 - pkg/storage/utils/decomposedfs/metadata.go | 203 -- .../decomposedfs/mocks/PermissionsChecker.go | 79 - pkg/storage/utils/decomposedfs/mocks/Tree.go | 291 -- pkg/storage/utils/decomposedfs/node/node.go | 995 ------ .../decomposedfs/node/node_suite_test.go | 31 - .../utils/decomposedfs/node/node_test.go | 195 -- .../utils/decomposedfs/node/node_windows.go | 38 - .../utils/decomposedfs/node/permissions.go | 295 -- .../decomposedfs/node/permissions_unix.go | 37 - .../utils/decomposedfs/options/options.go | 84 - .../options/options_suite_test.go | 31 - .../decomposedfs/options/options_test.go | 59 - pkg/storage/utils/decomposedfs/recycle.go | 344 --- pkg/storage/utils/decomposedfs/revisions.go | 193 -- pkg/storage/utils/decomposedfs/spaces.go | 433 --- .../utils/decomposedfs/testhelpers/helpers.go | 209 -- .../decomposedfs/tree/mocks/Blobstore.go | 83 - 
pkg/storage/utils/decomposedfs/tree/tree.go | 867 ------ .../decomposedfs/tree/tree_suite_test.go | 31 - .../utils/decomposedfs/tree/tree_test.go | 322 -- pkg/storage/utils/decomposedfs/upload.go | 727 ----- pkg/storage/utils/decomposedfs/upload_test.go | 295 -- .../utils/decomposedfs/xattrs/xattrs.go | 104 - pkg/storage/utils/eosfs/eosfs.go | 15 +- pkg/test/vars.go | 2 +- pkg/user/manager/loader/loader.go | 1 - pkg/user/manager/nextcloud/nextcloud.go | 2 +- .../manager/owncloudsql/accounts/accounts.go | 224 -- .../accounts/accounts_suite_test.go | 31 - .../owncloudsql/accounts/accounts_test.go | 456 --- .../manager/owncloudsql/accounts/test.sqlite | Bin 90112 -> 0 bytes pkg/user/manager/owncloudsql/owncloudsql.go | 176 -- .../node/node_unix.go => user/utils.go} | 23 +- pkg/utils/ldap.go | 2 +- tests/acceptance/config/behat-core.yml | 401 +++ .../expected-failures-on-EOS-storage.md | 2675 +++++++++++------ .../expected-failures-on-OCIS-storage.md | 775 ----- .../expected-failures-on-S3NG-storage.md | 775 ----- .../filtered-suites-acceptance-2-EOS | 2 + tests/acceptance/lint-expected-failures.sh | 145 + tests/acceptance/run.sh | 712 +++++ tests/docker/docker-compose.yml | 185 +- tests/docker/eos-storage/Dockerfile | 10 + tests/docker/eos-storage/scripts/eos-run.sh | 42 + tests/docker/eos-storage/sssd/sssd.conf | 25 + .../integration/grpc/storageprovider_test.go | 231 +- .../drone/frontend-global.toml | 114 - .../oc-integration-tests/drone/frontend.toml | 125 - tests/oc-integration-tests/drone/gateway.toml | 83 - .../drone/ldap-users.toml | 71 - tests/oc-integration-tests/drone/ocmd.toml | 30 - .../drone/providers.demo.json | 100 - tests/oc-integration-tests/drone/shares.toml | 20 - .../drone/storage-home-ocis.toml | 47 - .../drone/storage-home-s3ng.toml | 55 - .../drone/storage-local-1.toml | 46 - .../drone/storage-local-2.toml | 46 - .../drone/storage-publiclink.toml | 17 - .../drone/storage-users-ocis.toml | 42 - .../drone/storage-users-s3ng.toml | 49 - 
.../drone/users.demo.json | 38 - tests/oc-integration-tests/drone/users.toml | 21 - tests/ocis | 2 +- tests/revad/storage-home-s3ng.toml | 55 - ...orage-home-ocis.toml => storage-home.toml} | 36 +- tests/revad/storage-local-1.toml | 38 +- tests/revad/storage-local-2.toml | 38 +- tests/revad/storage-publiclink.toml | 2 - tests/revad/storage-users-ocis.toml | 42 - tests/revad/storage-users-s3ng.toml | 49 - tests/revad/storage-users.toml | 49 + tests/revad/users.demo.json | 26 +- 227 files changed, 5100 insertions(+), 25869 deletions(-) delete mode 100644 .drone.env delete mode 100644 .drone.star create mode 100644 changelog/unreleased/cbox-cleanup.md delete mode 100644 cmd/reva/import.go create mode 100644 internal/http/services/owncloud/ocdav/download.go create mode 100644 logs_tests delete mode 100644 pkg/auth/manager/impersonator/impersonator.go delete mode 100644 pkg/auth/manager/impersonator/impersonator_test.go delete mode 100644 pkg/auth/manager/owncloudsql/accounts/accounts.go delete mode 100644 pkg/auth/manager/owncloudsql/accounts/accounts_suite_test.go delete mode 100644 pkg/auth/manager/owncloudsql/accounts/accounts_test.go delete mode 100644 pkg/auth/manager/owncloudsql/accounts/test.sqlite delete mode 100644 pkg/auth/manager/owncloudsql/owncloudsql.go delete mode 100644 pkg/auth/manager/owncloudsql/owncloudsql_test.go delete mode 100644 pkg/cbox/group/rest/cache.go delete mode 100644 pkg/cbox/group/rest/rest.go delete mode 100644 pkg/cbox/loader/loader.go delete mode 100644 pkg/cbox/share/sql/sql.go delete mode 100644 pkg/cbox/storage/eoshomewrapper/eoshomewrapper.go delete mode 100644 pkg/cbox/storage/eoswrapper/eoswrapper.go delete mode 100644 pkg/cbox/user/rest/cache.go delete mode 100644 pkg/cbox/user/rest/rest.go rename pkg/{storage/utils/decomposedfs/node/permissions_darwin.go => ctx/pathctx.go} (59%) rename pkg/{cbox => }/preferences/sql/sql.go (100%) rename pkg/{cbox/publicshare => publicshare/manager}/sql/sql.go (97%) delete mode 100644 
pkg/share/manager/sql/conversions.go delete mode 100644 pkg/share/manager/sql/mocks/UserConverter.go delete mode 100644 pkg/share/manager/sql/sql_suite_test.go delete mode 100644 pkg/share/manager/sql/sql_test.go delete mode 100644 pkg/share/manager/sql/test.db rename pkg/{cbox => storage}/favorite/sql/sql.go (100%) delete mode 100644 pkg/storage/fs/ocis/blobstore/blobstore.go delete mode 100644 pkg/storage/fs/ocis/blobstore/blobstore_suite_test.go delete mode 100644 pkg/storage/fs/ocis/blobstore/blobstore_test.go delete mode 100644 pkg/storage/fs/ocis/ocis.go delete mode 100644 pkg/storage/fs/ocis/ocis_suite_test.go delete mode 100644 pkg/storage/fs/ocis/ocis_test.go delete mode 100644 pkg/storage/fs/owncloud/owncloud.go delete mode 100644 pkg/storage/fs/owncloud/owncloud_unix.go delete mode 100644 pkg/storage/fs/owncloud/owncloud_windows.go delete mode 100644 pkg/storage/fs/owncloud/upload.go delete mode 100644 pkg/storage/fs/owncloudsql/filecache/filecache.go delete mode 100644 pkg/storage/fs/owncloudsql/filecache/filecache_suite_test.go delete mode 100644 pkg/storage/fs/owncloudsql/filecache/filecache_test.go delete mode 100644 pkg/storage/fs/owncloudsql/filecache/test.db delete mode 100644 pkg/storage/fs/owncloudsql/owncloudsql.go delete mode 100644 pkg/storage/fs/owncloudsql/owncloudsql_unix.go delete mode 100644 pkg/storage/fs/owncloudsql/owncloudsql_windows.go delete mode 100644 pkg/storage/fs/owncloudsql/upload.go delete mode 100644 pkg/storage/fs/s3ng/blobstore/blobstore.go delete mode 100644 pkg/storage/fs/s3ng/option.go delete mode 100644 pkg/storage/fs/s3ng/option_test.go delete mode 100644 pkg/storage/fs/s3ng/s3ng.go delete mode 100644 pkg/storage/fs/s3ng/s3ng_suite_test.go delete mode 100644 pkg/storage/fs/s3ng/s3ng_test.go delete mode 100644 pkg/storage/migrate/metadata.go delete mode 100644 pkg/storage/migrate/shares.go delete mode 100644 pkg/storage/utils/ace/ace.go delete mode 100644 pkg/storage/utils/ace/ace_suite_test.go delete mode 100644 
pkg/storage/utils/ace/ace_test.go delete mode 100644 pkg/storage/utils/decomposedfs/decomposedfs.go delete mode 100644 pkg/storage/utils/decomposedfs/decomposedfs_concurrency_test.go delete mode 100644 pkg/storage/utils/decomposedfs/decomposedfs_suite_test.go delete mode 100644 pkg/storage/utils/decomposedfs/decomposedfs_test.go delete mode 100644 pkg/storage/utils/decomposedfs/grants.go delete mode 100644 pkg/storage/utils/decomposedfs/grants_test.go delete mode 100644 pkg/storage/utils/decomposedfs/lookup.go delete mode 100644 pkg/storage/utils/decomposedfs/lookup_test.go delete mode 100644 pkg/storage/utils/decomposedfs/metadata.go delete mode 100644 pkg/storage/utils/decomposedfs/mocks/PermissionsChecker.go delete mode 100644 pkg/storage/utils/decomposedfs/mocks/Tree.go delete mode 100644 pkg/storage/utils/decomposedfs/node/node.go delete mode 100644 pkg/storage/utils/decomposedfs/node/node_suite_test.go delete mode 100644 pkg/storage/utils/decomposedfs/node/node_test.go delete mode 100644 pkg/storage/utils/decomposedfs/node/node_windows.go delete mode 100644 pkg/storage/utils/decomposedfs/node/permissions.go delete mode 100644 pkg/storage/utils/decomposedfs/node/permissions_unix.go delete mode 100644 pkg/storage/utils/decomposedfs/options/options.go delete mode 100644 pkg/storage/utils/decomposedfs/options/options_suite_test.go delete mode 100644 pkg/storage/utils/decomposedfs/options/options_test.go delete mode 100644 pkg/storage/utils/decomposedfs/recycle.go delete mode 100644 pkg/storage/utils/decomposedfs/revisions.go delete mode 100644 pkg/storage/utils/decomposedfs/spaces.go delete mode 100644 pkg/storage/utils/decomposedfs/testhelpers/helpers.go delete mode 100644 pkg/storage/utils/decomposedfs/tree/mocks/Blobstore.go delete mode 100644 pkg/storage/utils/decomposedfs/tree/tree.go delete mode 100644 pkg/storage/utils/decomposedfs/tree/tree_suite_test.go delete mode 100644 pkg/storage/utils/decomposedfs/tree/tree_test.go delete mode 100644 
pkg/storage/utils/decomposedfs/upload.go delete mode 100644 pkg/storage/utils/decomposedfs/upload_test.go delete mode 100644 pkg/storage/utils/decomposedfs/xattrs/xattrs.go delete mode 100644 pkg/user/manager/owncloudsql/accounts/accounts.go delete mode 100644 pkg/user/manager/owncloudsql/accounts/accounts_suite_test.go delete mode 100644 pkg/user/manager/owncloudsql/accounts/accounts_test.go delete mode 100644 pkg/user/manager/owncloudsql/accounts/test.sqlite delete mode 100644 pkg/user/manager/owncloudsql/owncloudsql.go rename pkg/{storage/utils/decomposedfs/node/node_unix.go => user/utils.go} (68%) create mode 100644 tests/acceptance/config/behat-core.yml delete mode 100644 tests/acceptance/expected-failures-on-OCIS-storage.md delete mode 100644 tests/acceptance/expected-failures-on-S3NG-storage.md create mode 100644 tests/acceptance/filtered-suites-acceptance-2-EOS create mode 100755 tests/acceptance/lint-expected-failures.sh create mode 100755 tests/acceptance/run.sh create mode 100644 tests/docker/eos-storage/Dockerfile create mode 100755 tests/docker/eos-storage/scripts/eos-run.sh create mode 100644 tests/docker/eos-storage/sssd/sssd.conf delete mode 100644 tests/oc-integration-tests/drone/frontend-global.toml delete mode 100644 tests/oc-integration-tests/drone/frontend.toml delete mode 100644 tests/oc-integration-tests/drone/gateway.toml delete mode 100644 tests/oc-integration-tests/drone/ldap-users.toml delete mode 100644 tests/oc-integration-tests/drone/ocmd.toml delete mode 100644 tests/oc-integration-tests/drone/providers.demo.json delete mode 100644 tests/oc-integration-tests/drone/shares.toml delete mode 100644 tests/oc-integration-tests/drone/storage-home-ocis.toml delete mode 100644 tests/oc-integration-tests/drone/storage-home-s3ng.toml delete mode 100644 tests/oc-integration-tests/drone/storage-local-1.toml delete mode 100644 tests/oc-integration-tests/drone/storage-local-2.toml delete mode 100644 
tests/oc-integration-tests/drone/storage-publiclink.toml delete mode 100644 tests/oc-integration-tests/drone/storage-users-ocis.toml delete mode 100644 tests/oc-integration-tests/drone/storage-users-s3ng.toml delete mode 100644 tests/oc-integration-tests/drone/users.demo.json delete mode 100644 tests/oc-integration-tests/drone/users.toml delete mode 100644 tests/revad/storage-home-s3ng.toml rename tests/revad/{storage-home-ocis.toml => storage-home.toml} (54%) delete mode 100644 tests/revad/storage-users-ocis.toml delete mode 100644 tests/revad/storage-users-s3ng.toml create mode 100644 tests/revad/storage-users.toml diff --git a/.drone.env b/.drone.env deleted file mode 100644 index c1cf00c824..0000000000 --- a/.drone.env +++ /dev/null @@ -1,4 +0,0 @@ -# The test runner source for API tests -APITESTS_COMMITID=eb1aa4502e084972a6fd7e57112ae81431961374 -APITESTS_BRANCH=master -APITESTS_REPO_GIT_URL=https://github.com/owncloud/ocis.git diff --git a/.drone.star b/.drone.star deleted file mode 100644 index d90a8f9836..0000000000 --- a/.drone.star +++ /dev/null @@ -1,242 +0,0 @@ -OC_CI_GOLANG = "owncloudci/golang:1.19" -OC_CI_ALPINE = "owncloudci/alpine:latest" -OSIXIA_OPEN_LDAP = "osixia/openldap:1.3.0" -OC_CI_PHP = "cs3org/behat:latest" -OC_CI_BAZEL_BUILDIFIER = "owncloudci/bazel-buildifier:latest" - -def makeStep(): - return { - "name": "build", - "image": OC_CI_GOLANG, - "commands": [ - "make revad", - ], - } - -def cloneApiTestReposStep(): - return { - "name": "clone-api-test-repos", - "image": OC_CI_ALPINE, - "commands": [ - "source /drone/src/.drone.env", - "git clone -b master --depth=1 https://github.com/owncloud/testing.git /drone/src/tmp/testing", - "git clone -b $APITESTS_BRANCH --single-branch --no-tags $APITESTS_REPO_GIT_URL /drone/src/tmp/testrunner", - "cd /drone/src/tmp/testrunner", - "git checkout $APITESTS_COMMITID", - ], - } - -# Shared service definitions -def ldapService(): - return { - "name": "ldap", - "image": OSIXIA_OPEN_LDAP, - "pull": 
"always", - "environment": { - "LDAP_DOMAIN": "owncloud.com", - "LDAP_ORGANISATION": "ownCloud", - "LDAP_ADMIN_PASSWORD": "admin", - "LDAP_TLS_VERIFY_CLIENT": "never", - "HOSTNAME": "ldap", - }, - } - -def cephService(): - return { - "name": "ceph", - "image": "ceph/daemon", - "pull": "always", - "environment": { - "CEPH_DAEMON": "demo", - "NETWORK_AUTO_DETECT": "4", - "MON_IP": "0.0.0.0", - "CEPH_PUBLIC_NETWORK": "0.0.0.0/0", - "RGW_CIVETWEB_PORT": "4000 ", - "RGW_NAME": "ceph", - "CEPH_DEMO_UID": "test-user", - "CEPH_DEMO_ACCESS_KEY": "test", - "CEPH_DEMO_SECRET_KEY": "test", - "CEPH_DEMO_BUCKET": "test", - }, - } - -# Pipeline definitions -def main(ctx): - return [ - checkStarlark(), - ocisIntegrationTest(), - ] + s3ngIntegrationTests() - -def ocisIntegrationTest(): - return { - "kind": "pipeline", - "type": "docker", - "name": "ocis-integration-tests-2", - "platform": { - "os": "linux", - "arch": "amd64", - }, - "trigger": { - "event": { - "include": [ - "pull_request", - "tag", - ], - }, - }, - "steps": [ - makeStep(), - { - "name": "revad-services", - "image": OC_CI_GOLANG, - "detach": True, - "commands": [ - "cd /drone/src/tests/oc-integration-tests/drone/", - "/drone/src/cmd/revad/revad -c frontend.toml &", - "/drone/src/cmd/revad/revad -c gateway.toml &", - "/drone/src/cmd/revad/revad -c shares.toml &", - "/drone/src/cmd/revad/revad -c storage-home-ocis.toml &", - "/drone/src/cmd/revad/revad -c storage-users-ocis.toml &", - "/drone/src/cmd/revad/revad -c storage-publiclink.toml &", - "/drone/src/cmd/revad/revad -c ldap-users.toml", - ], - }, - cloneApiTestReposStep(), - { - "name": "APIAcceptanceTestsOcisStorage", - "image": OC_CI_PHP, - "commands": [ - "cd /drone/src/tmp/testrunner", - "make test-acceptance-from-core-api", - ], - "environment": { - "TEST_SERVER_URL": "http://revad-services:20080", - "OCIS_REVA_DATA_ROOT": "/drone/src/tmp/reva/data/", - "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/nodes/root/* 
/drone/src/tmp/reva/data/nodes/*-*-*-* /drone/src/tmp/reva/data/blobs/*", - "STORAGE_DRIVER": "OCIS", - "SKELETON_DIR": "/drone/src/tmp/testing/data/apiSkeleton", - "TEST_WITH_LDAP": "true", - "REVA_LDAP_HOSTNAME": "ldap", - "TEST_REVA": "true", - "SEND_SCENARIO_LINE_REFERENCES": "true", - "BEHAT_FILTER_TAGS": "~@provisioning_api-app-required&&~@skipOnOcis-OCIS-Storage&&~@personalSpace&&~@skipOnGraph&&~@carddav&&~@skipOnReva&&~@skipOnRevaMaster", - "DIVIDE_INTO_NUM_PARTS": 6, - "RUN_PART": 2, - "EXPECTED_FAILURES_FILE": "/drone/src/tests/acceptance/expected-failures-on-OCIS-storage.md", - }, - }, - ], - "services": [ - ldapService(), - ], - } - -def s3ngIntegrationTests(): - parallelRuns = 12 - pipelines = [] - for runPart in range(1, parallelRuns + 1): - if runPart in [9]: - continue - - pipelines.append( - { - "kind": "pipeline", - "type": "docker", - "name": "s3ng-integration-tests-%s" % runPart, - "platform": { - "os": "linux", - "arch": "amd64", - }, - "trigger": { - "event": { - "include": [ - "pull_request", - "tag", - ], - }, - }, - "steps": [ - makeStep(), - { - "name": "revad-services", - "image": OC_CI_GOLANG, - "detach": True, - "commands": [ - "cd /drone/src/tests/oc-integration-tests/drone/", - "/drone/src/cmd/revad/revad -c frontend.toml &", - "/drone/src/cmd/revad/revad -c gateway.toml &", - "/drone/src/cmd/revad/revad -c shares.toml &", - "/drone/src/cmd/revad/revad -c storage-home-s3ng.toml &", - "/drone/src/cmd/revad/revad -c storage-users-s3ng.toml &", - "/drone/src/cmd/revad/revad -c storage-publiclink.toml &", - "/drone/src/cmd/revad/revad -c ldap-users.toml", - ], - }, - cloneApiTestReposStep(), - { - "name": "APIAcceptanceTestsS3ngStorage", - "image": OC_CI_PHP, - "commands": [ - "cd /drone/src/tmp/testrunner", - "make test-acceptance-from-core-api", - ], - "environment": { - "TEST_SERVER_URL": "http://revad-services:20080", - "OCIS_REVA_DATA_ROOT": "/drone/src/tmp/reva/data/", - "DELETE_USER_DATA_CMD": "rm -rf 
/drone/src/tmp/reva/data/nodes/root/* /drone/src/tmp/reva/data/nodes/*-*-*-* /drone/src/tmp/reva/data/blobs/*", - "STORAGE_DRIVER": "S3NG", - "SKELETON_DIR": "/drone/src/tmp/testing/data/apiSkeleton", - "TEST_WITH_LDAP": "true", - "REVA_LDAP_HOSTNAME": "ldap", - "TEST_REVA": "true", - "SEND_SCENARIO_LINE_REFERENCES": "true", - "BEHAT_FILTER_TAGS": "~@provisioning_api-app-required&&~@skipOnOcis-OCIS-Storage&&~@personalSpace&&~@skipOnGraph&&~@carddav&&~@skipOnReva&&~@skipOnRevaMaster", - "DIVIDE_INTO_NUM_PARTS": parallelRuns, - "RUN_PART": runPart, - "EXPECTED_FAILURES_FILE": "/drone/src/tests/acceptance/expected-failures-on-S3NG-storage.md", - }, - }, - ], - "services": [ - ldapService(), - cephService(), - ], - }, - ) - - return pipelines - -def checkStarlark(): - return { - "kind": "pipeline", - "type": "docker", - "name": "check-starlark", - "steps": [ - { - "name": "format-check-starlark", - "image": OC_CI_BAZEL_BUILDIFIER, - "commands": [ - "buildifier --mode=check .drone.star", - ], - }, - { - "name": "show-diff", - "image": OC_CI_BAZEL_BUILDIFIER, - "commands": [ - "buildifier --mode=fix .drone.star", - "git diff", - ], - "when": { - "status": [ - "failure", - ], - }, - }, - ], - "depends_on": [], - "trigger": { - "ref": [ - "refs/pull/**", - ], - }, - } diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 430e75ce74..e4c86969e0 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -20,9 +20,10 @@ jobs: - name: Checkout uses: actions/checkout@v3.1.0 - name: Setup Go environment - uses: actions/setup-go@v3.3.0 + uses: actions/setup-go@v4 with: go-version-file: go.mod + cache: false - name: Check changelog run: make check-changelog env: diff --git a/.github/workflows/compose.yml b/.github/workflows/compose.yml index e3d5cc1f8f..aa01a4c38d 100644 --- a/.github/workflows/compose.yml +++ b/.github/workflows/compose.yml @@ -6,8 +6,9 @@ on: required: true type: string image: - required: true type: 
string + download: + type: boolean submodules: type: boolean parts: @@ -25,10 +26,11 @@ jobs: submodules: ${{ inputs.submodules }} - name: Download image uses: ishworkh/docker-image-artifact-download@v1 + if: inputs.download with: image: ${{ inputs.image }} - name: Test - run: make ${{ inputs.test }} -o docker-revad + run: make ${{ inputs.test }} env: REVAD_IMAGE: ${{ inputs.image }} PARTS: ${{ inputs.parts }} diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 0c62d6de6a..ecd1ecfc96 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -13,6 +13,9 @@ on: type: boolean platforms: type: string + context: + type: string + default: . outputs: image: value: ${{ jobs.docker.outputs.image }} @@ -40,7 +43,7 @@ jobs: uses: docker/build-push-action@v3 id: build with: - context: . + context: ${{ inputs.context }} file: ${{ inputs.file }} tags: ${{ inputs.tags }} load: ${{ inputs.load }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 51e34bb6c3..272845a5d9 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -16,8 +16,9 @@ jobs: - name: Checkout uses: actions/checkout@v3.1.0 - name: Setup Go environment - uses: actions/setup-go@v3.3.0 + uses: actions/setup-go@v4 with: go-version-file: go.mod + cache: false - name: Run linters run: make lint diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 475b8adb8a..a9f447c23a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,9 +12,10 @@ jobs: uses: actions/checkout@v3 - name: Setup Go environment id: go - uses: actions/setup-go@v3.3.0 + uses: actions/setup-go@v4 with: go-version-file: go.mod + cache: false - name: Make distribution run: make dist env: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 50270ae388..33abc742db 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -13,9 +13,10 @@ jobs: - name: Checkout uses: 
actions/checkout@v3 - name: Setup Go environment - uses: actions/setup-go@v3.3.0 + uses: actions/setup-go@v4 with: go-version-file: go.mod + cache: false - name: Test integration run: make test-integration env: @@ -40,9 +41,10 @@ jobs: - name: Checkout uses: actions/checkout@v3 - name: Setup Go environment - uses: actions/setup-go@v3.3.0 + uses: actions/setup-go@v4 with: go-version-file: go.mod + cache: false - name: Test run: make test-go env: @@ -68,46 +70,39 @@ jobs: with: file: docker/Dockerfile.revad load: true + docker-revad-eos: + name: docker (docker/Dockerfile.revad-eos) + uses: ./.github/workflows/docker.yml + with: + file: docker/Dockerfile.revad-eos + load: false litmus: - needs: docker-revad + needs: + - docker-revad-eos strategy: fail-fast: false matrix: - test: [litmus-1, litmus-2, litmus-3] + test: [litmus-1, litmus-2] uses: ./.github/workflows/compose.yml with: test: ${{ matrix.test }} - image: ${{ needs.docker-revad.outputs.image }} acceptance-1: - needs: docker-revad + needs: + - docker-revad-eos uses: ./.github/workflows/compose.yml with: test: acceptance-1 - image: ${{ needs.docker-revad.outputs.image }} submodules: true acceptance-2: - needs: docker-revad + needs: + - docker-revad-eos strategy: fail-fast: false matrix: - part: [1, 3, 4, 5, 6] + part: [1, 2, 3, 4, 5, 6] uses: ./.github/workflows/compose.yml with: test: acceptance-2 - image: ${{ needs.docker-revad.outputs.image }} submodules: true parts: 6 part: ${{ matrix.part }} - acceptance-3: - needs: docker-revad - strategy: - fail-fast: false - matrix: - part: [9] - uses: ./.github/workflows/compose.yml - with: - test: acceptance-3 - image: ${{ needs.docker-revad.outputs.image }} - submodules: true - parts: 12 - part: ${{ matrix.part }} diff --git a/.golangci.yaml b/.golangci.yaml index 7cafc9f649..5ec02ab851 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -52,6 +52,11 @@ linters: - gocyclo # TODO: consider enabling the 'gocyclo' linter to compute and check the cyclomatic complexity of 
functions. - forbidigo # TODO: consider enabling the 'forbidigo' linter to forbid identifiers. - dupl # TODO: consider enabling the 'dupl' linter to detect code cloning. + - musttag + - ginkgolinter + - depguard + - revive + - nolintlint - golint # deprecated since v1.41.0 - replaced by 'revive'. - ifshort # deprecated since v1.48.0 - structcheck # deprecated since v1.49.0 - replaced by 'unused'. diff --git a/Makefile b/Makefile index 22eb844244..626c7fb6ec 100644 --- a/Makefile +++ b/Makefile @@ -18,13 +18,15 @@ toolchain: $(GOLANGCI_LINT) $(CALENS) $(GOLANGCI_LINT): @mkdir -p $(@D) - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | BINDIR=$(@D) sh -s v1.50.1 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | BINDIR=$(@D) sh -s v1.54.2 +CALENS_DIR := $(shell mktemp -d) $(CALENS): @mkdir -p $(@D) - git clone --depth 1 --branch v0.2.0 -c advice.detachedHead=false https://github.com/restic/calens.git /tmp/calens - cd /tmp/calens && GOBIN=$(@D) go install - rm -rf /tmp/calens + CALENS_DIR=`mktemp -d` + git clone --depth 1 --branch v0.2.0 -c advice.detachedHead=false https://github.com/restic/calens.git $(CALENS_DIR) + cd $(CALENS_DIR) && GOBIN=$(@D) go install + rm -rf $(CALENS_DIR) ################################################################################ @@ -65,17 +67,22 @@ docker-revad-ceph: docker-revad-eos: docker build -f docker/Dockerfile.revad-eos -t revad-eos --build-arg VERSION=$(VERSION) --build-arg GIT_COMMIT=$(GIT_COMMIT) . 
+.PHONY: docker-eos-full-tests +docker-eos-full-tests: + docker build -f tests/docker/eos-storage/Dockerfile -t eos-full tests/docker/eos-storage + ################################################################################ # Test ################################################################################ -TEST = litmus-1 litmus-2 litmus-3 acceptance-1 acceptance-2 acceptance-3 -export REVAD_IMAGE ?= revad -export PARTS ?= 1 -export PART ?= 1 +TEST = litmus-1 litmus-2 acceptance-1 acceptance-2 +export REVAD_IMAGE ?= revad-eos +export EOS_FULL_IMAGE ?= eos-full +export PARTS ?= 1 +export PART ?= 1 .PHONY: $(TEST) -$(TEST): docker-revad +$(TEST): docker-eos-full-tests docker-revad-eos docker compose -f ./tests/docker/docker-compose.yml up --force-recreate --always-recreate-deps --build --abort-on-container-exit -V --remove-orphans --exit-code-from $@ $@ .PHONY: test-go @@ -128,8 +135,8 @@ toolchain-clean: .PHONY: docker-clean docker-clean: docker compose -f ./tests/docker/docker-compose.yml down --rmi local -v --remove-orphans - docker rmi $(REVAD_IMAGE) .PHONY: clean clean: toolchain-clean docker-clean rm -rf dist + rm -rf tmp diff --git a/changelog/unreleased/cbox-cleanup.md b/changelog/unreleased/cbox-cleanup.md new file mode 100644 index 0000000000..9ec8817e7e --- /dev/null +++ b/changelog/unreleased/cbox-cleanup.md @@ -0,0 +1,5 @@ +Enhancement: CERNBox cleanup + +Remove from the codebase all the cernbox specific code + +https://github.com/cs3org/reva/pull/4212 diff --git a/cmd/reva/import.go b/cmd/reva/import.go deleted file mode 100644 index e93d633e61..0000000000 --- a/cmd/reva/import.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package main - -import ( - "io" - "log" - "path" - - "github.com/cs3org/reva/pkg/storage/migrate" - "github.com/pkg/errors" -) - -func importCommand() *command { - cmd := newCommand("import") - cmd.Description = func() string { return "import metadata" } - cmd.Usage = func() string { return "Usage: import [-flags] " } - namespaceFlag := cmd.String("n", "/", "CS3 namespace prefix") - - cmd.ResetFlags = func() { - *namespaceFlag = "/" - } - - cmd.Action = func(w ...io.Writer) error { - if cmd.NArg() < 1 { - return errors.New("Invalid arguments: " + cmd.Usage()) - } - exportPath := cmd.Args()[0] - - ctx := getAuthContext() - client, err := getClient() - if err != nil { - return err - } - - ns := path.Join("/", *namespaceFlag) - - if err := migrate.ImportMetadata(ctx, client, exportPath, ns); err != nil { - log.Fatal(err) - return err - } - if err := migrate.ImportShares(ctx, client, exportPath, ns); err != nil { - log.Fatal(err) - return err - } - - return nil - } - return cmd -} diff --git a/cmd/reva/main.go b/cmd/reva/main.go index 4a5e06d3ac..d9188f2265 100644 --- a/cmd/reva/main.go +++ b/cmd/reva/main.go @@ -47,7 +47,6 @@ var ( configureCommand(), loginCommand(), whoamiCommand(), - importCommand(), lsCommand(), statCommand(), uploadCommand(), diff --git a/cmd/reva/share-create.go b/cmd/reva/share-create.go index 9e4878acaa..a0efeff0f3 100644 
--- a/cmd/reva/share-create.go +++ b/cmd/reva/share-create.go @@ -163,7 +163,7 @@ func getSharePerm(p string) (*provider.ResourcePermissions, error) { case editorPermission: return conversions.NewEditorRole().CS3ResourcePermissions(), nil case collabPermission: - return conversions.NewCoownerRole().CS3ResourcePermissions(), nil + return conversions.NewCollaboratorRole().CS3ResourcePermissions(), nil case denyPermission: return &provider.ResourcePermissions{}, nil default: diff --git a/cmd/revad/main.go b/cmd/revad/main.go index 0f620f81d6..0a889abfec 100644 --- a/cmd/revad/main.go +++ b/cmd/revad/main.go @@ -247,7 +247,7 @@ func getConfigsFromDir(dir string) (confs []string, err error) { for _, value := range files { if !value.IsDir() { expr := regexp.MustCompile(`[\w].toml`) - if expr.Match([]byte(value.Name())) { + if expr.MatchString(value.Name()) { confs = append(confs, path.Join(dir, value.Name())) } } diff --git a/cmd/revad/pkg/config/config.go b/cmd/revad/pkg/config/config.go index 432b2044ab..fd54f1b742 100644 --- a/cmd/revad/pkg/config/config.go +++ b/cmd/revad/pkg/config/config.go @@ -33,37 +33,37 @@ import ( // Config holds the reva configuration. 
type Config struct { - GRPC *GRPC `key:"grpc" mapstructure:"-" default:"{}"` - HTTP *HTTP `key:"http" mapstructure:"-" default:"{}"` - Serverless *Serverless `key:"serverless" mapstructure:"-" default:"{}"` - Shared *Shared `key:"shared" mapstructure:"shared" default:"{}"` - Log *Log `key:"log" mapstructure:"log" default:"{}" template:"-"` - Core *Core `key:"core" mapstructure:"core" default:"{}" template:"-"` - Vars Vars `key:"vars" mapstructure:"vars" default:"{}" template:"-"` + GRPC *GRPC `default:"{}" key:"grpc" mapstructure:"-"` + HTTP *HTTP `default:"{}" key:"http" mapstructure:"-"` + Serverless *Serverless `default:"{}" key:"serverless" mapstructure:"-"` + Shared *Shared `default:"{}" key:"shared" mapstructure:"shared"` + Log *Log `default:"{}" key:"log" mapstructure:"log" template:"-"` + Core *Core `default:"{}" key:"core" mapstructure:"core" template:"-"` + Vars Vars `default:"{}" key:"vars" mapstructure:"vars" template:"-"` } // Log holds the configuration for the logger. type Log struct { - Output string `key:"output" mapstructure:"output" default:"stdout"` - Mode string `key:"mode" mapstructure:"mode" default:"console"` - Level string `key:"level" mapstructure:"level" default:"trace"` + Output string `default:"stdout" key:"output" mapstructure:"output"` + Mode string `default:"console" key:"mode" mapstructure:"mode"` + Level string `default:"trace" key:"level" mapstructure:"level"` } // Shared holds the shared configuration. 
type Shared struct { - JWTSecret string `key:"jwt_secret" mapstructure:"jwt_secret" default:"changemeplease"` - GatewaySVC string `key:"gatewaysvc" mapstructure:"gatewaysvc" default:"0.0.0.0:19000"` - DataGateway string `key:"datagateway" mapstructure:"datagateway" default:"http://0.0.0.0:19001/datagateway"` - SkipUserGroupsInToken bool `key:"skip_user_groups_in_token" mapstructure:"skip_user_groups_in_token"` - BlockedUsers []string `key:"blocked_users" mapstructure:"blocked_users" default:"[]"` + JWTSecret string `default:"changemeplease" key:"jwt_secret" mapstructure:"jwt_secret"` + GatewaySVC string `default:"0.0.0.0:19000" key:"gatewaysvc" mapstructure:"gatewaysvc"` + DataGateway string `default:"http://0.0.0.0:19001/datagateway" key:"datagateway" mapstructure:"datagateway"` + SkipUserGroupsInToken bool `key:"skip_user_groups_in_token" mapstructure:"skip_user_groups_in_token"` + BlockedUsers []string `default:"[]" key:"blocked_users" mapstructure:"blocked_users"` } // Core holds the core configuration. 
type Core struct { MaxCPUs string `key:"max_cpus" mapstructure:"max_cpus"` ConfigDumpFile string `key:"config_dump_file" mapstructure:"config_dump_file"` - TracingEnabled bool `key:"tracing_enabled" mapstructure:"tracing_enabled" default:"true"` - TracingEndpoint string `key:"tracing_endpoint" mapstructure:"tracing_endpoint" default:"localhost:6831"` + TracingEnabled bool `default:"true" key:"tracing_enabled" mapstructure:"tracing_enabled"` + TracingEndpoint string `default:"localhost:6831" key:"tracing_endpoint" mapstructure:"tracing_endpoint"` TracingCollector string `key:"tracing_collector" mapstructure:"tracing_collector"` TracingServiceName string `key:"tracing_service_name" mapstructure:"tracing_service_name"` TracingService string `key:"tracing_service" mapstructure:"tracing_service"` diff --git a/cmd/revad/pkg/config/grpc.go b/cmd/revad/pkg/config/grpc.go index 26c67bea3d..c87e984c9a 100644 --- a/cmd/revad/pkg/config/grpc.go +++ b/cmd/revad/pkg/config/grpc.go @@ -25,13 +25,13 @@ import ( // GRPC holds the configuration for the GRPC services. 
type GRPC struct { - Address Address `mapstructure:"address" key:"address"` - Network string `mapstructure:"network" key:"network" default:"tcp"` - ShutdownDeadline int `mapstructure:"shutdown_deadline" key:"shutdown_deadline"` - EnableReflection bool `mapstructure:"enable_reflection" key:"enable_reflection"` + Address Address `key:"address" mapstructure:"address"` + Network string `default:"tcp" key:"network" mapstructure:"network"` + ShutdownDeadline int `key:"shutdown_deadline" mapstructure:"shutdown_deadline"` + EnableReflection bool `key:"enable_reflection" mapstructure:"enable_reflection"` - Services map[string]ServicesConfig `mapstructure:"-" key:"services"` - Interceptors map[string]map[string]any `mapstructure:"-" key:"interceptors"` + Services map[string]ServicesConfig `key:"services" mapstructure:"-"` + Interceptors map[string]map[string]any `key:"interceptors" mapstructure:"-"` iterableImpl } diff --git a/cmd/revad/pkg/config/http.go b/cmd/revad/pkg/config/http.go index e29d02e089..e35a5dd8ad 100644 --- a/cmd/revad/pkg/config/http.go +++ b/cmd/revad/pkg/config/http.go @@ -25,13 +25,13 @@ import ( // HTTP holds the configuration for the HTTP services. 
type HTTP struct { - Network string `mapstructure:"network" key:"network" default:"tcp"` - Address Address `mapstructure:"address" key:"address"` - CertFile string `mapstructure:"certfile" key:"certfile"` - KeyFile string `mapstructure:"keyfile" key:"keyfile"` + Network string `default:"tcp" key:"network" mapstructure:"network"` + Address Address `key:"address" mapstructure:"address"` + CertFile string `key:"certfile" mapstructure:"certfile"` + KeyFile string `key:"keyfile" mapstructure:"keyfile"` - Services map[string]ServicesConfig `mapstructure:"-" key:"services"` - Middlewares map[string]map[string]any `mapstructure:"-" key:"middlewares"` + Services map[string]ServicesConfig `key:"services" mapstructure:"-"` + Middlewares map[string]map[string]any `key:"middlewares" mapstructure:"-"` iterableImpl } diff --git a/cmd/revad/runtime/loader.go b/cmd/revad/runtime/loader.go index ad174b9130..163b66d823 100644 --- a/cmd/revad/runtime/loader.go +++ b/cmd/revad/runtime/loader.go @@ -33,7 +33,6 @@ import ( _ "github.com/cs3org/reva/pkg/appauth/manager/loader" _ "github.com/cs3org/reva/pkg/auth/manager/loader" _ "github.com/cs3org/reva/pkg/auth/registry/loader" - _ "github.com/cs3org/reva/pkg/cbox/loader" _ "github.com/cs3org/reva/pkg/datatx/manager/loader" _ "github.com/cs3org/reva/pkg/group/manager/loader" _ "github.com/cs3org/reva/pkg/metrics/driver/loader" diff --git a/docker/Dockerfile.revad-eos b/docker/Dockerfile.revad-eos index bbde3b230c..a8466ef406 100644 --- a/docker/Dockerfile.revad-eos +++ b/docker/Dockerfile.revad-eos @@ -16,7 +16,7 @@ # granted to it by virtue of its status as an Intergovernmental Organization # or submit itself to any jurisdiction. -FROM golang:alpine3.17 as builder +FROM golang:latest as builder WORKDIR /home/reva COPY . . 
@@ -25,13 +25,11 @@ ARG GIT_COMMIT ARG VERSION ENV GIT_COMMIT=$GIT_COMMIT ENV VERSION=$VERSION -ENV CGO_ENABLED 1 +ENV CGO_ENABLED 0 -RUN apk add --no-cache gcc musl-dev +RUN make revad -RUN go build -ldflags "-X main.gitCommit=$GIT_COMMIT -X main.version=$VERSION -X main.goVersion=`go version | awk '{print $3}'` -X main.buildDate=`date +%FT%T%z`" -o ./cmd/revad/revad ./cmd/revad/main - -FROM gitlab-registry.cern.ch/dss/eos/eos-all:4.8.91 +FROM gitlab-registry.cern.ch/dss/eos/eos-all:5.0.31 COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ COPY --from=builder /home/reva/cmd/revad/revad /usr/bin/revad diff --git a/go.mod b/go.mod index 94d3c6f008..61f17de78f 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,6 @@ require ( github.com/mattn/go-sqlite3 v1.14.17 github.com/maxymania/go-system v0.0.0-20170110133659-647cc364bf0b github.com/mileusna/useragent v1.2.1 - github.com/minio/minio-go/v7 v7.0.63 github.com/mitchellh/mapstructure v1.5.0 github.com/nats-io/nats-server/v2 v2.9.19 github.com/nats-io/nats-streaming-server v0.25.5 @@ -48,7 +47,6 @@ require ( github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.27.10 github.com/pkg/errors v0.9.1 - github.com/pkg/xattr v0.4.5 github.com/prometheus/alertmanager v0.26.0 github.com/rs/cors v1.9.0 github.com/rs/zerolog v1.28.0 @@ -117,9 +115,7 @@ require ( github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.16.7 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/leodido/go-urn v1.2.4 // indirect github.com/lestrrat-go/strftime v1.0.4 // indirect github.com/mattn/go-colorable v0.1.12 // indirect @@ -129,13 +125,9 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/miekg/dns v1.1.43 // indirect github.com/minio/highwayhash v1.0.2 // indirect - github.com/minio/md5-simd v1.1.2 
// indirect - github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/hashstructure v1.1.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect github.com/nats-io/jwt/v2 v2.4.1 // indirect github.com/nats-io/nkeys v0.4.4 // indirect github.com/nats-io/nuid v1.0.1 // indirect @@ -152,7 +144,6 @@ require ( github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.11.0 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect - github.com/rs/xid v1.5.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect @@ -168,14 +159,13 @@ require ( golang.org/x/tools v0.9.3 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/src-d/go-errors.v1 v1.0.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) -go 1.19 +go 1.21 replace ( github.com/eventials/go-tus => github.com/andrewmostello/go-tus v0.0.0-20200314041820-904a9904af9a diff --git a/go.sum b/go.sum index 4abd0b7cd6..f1a846c20c 100644 --- a/go.sum +++ b/go.sum @@ -1020,6 +1020,7 @@ github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0+ github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= 
+github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/exoscale/egoscale v0.46.0/go.mod h1:mpEXBpROAa/2i5GC0r33rfxG+TxSEka11g1PIXt9+zc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -1099,6 +1100,7 @@ github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KA github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= @@ -1114,6 +1116,7 @@ github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobs/pretty v0.0.0-20180724170744-09732c25a95b/go.mod h1:Xo4aNUOrJnVruqWQJBtW6+bTBDTniY8yZum5rF3b5jw= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= @@ -1125,6 +1128,7 @@ 
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -1367,7 +1371,6 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -1390,10 +1393,7 @@ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 
v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kolo/xmlrpc v0.0.0-20200310150728-e0350524596b/go.mod h1:o03bZfuBwAXHetKXuInt4S7omeXUu62/A845kiycsSQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1423,6 +1423,7 @@ github.com/lestrrat-go/strftime v1.0.4/go.mod h1:E1nN3pCbtMSu1yjSVeyuRFVm/U0xoR7 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linode/linodego v0.25.3/go.mod h1:GSBKPpjoQfxEfryoCRcgkuUOCuVtGHWhzI8OMdycNTE= @@ -1487,12 +1488,6 @@ github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcs github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= -github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.63 h1:GbZ2oCvaUdgT5640WJOpyDhhDxvknAJU2/T3yurwcbQ= 
-github.com/minio/minio-go/v7 v7.0.63/go.mod h1:Q6X7Qjb7WMhvG65qKf4gUgA5XaiSox74kR1uAEjxRS4= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -1516,11 +1511,9 @@ github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7s github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= @@ -1574,6 +1567,7 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= 
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -1625,8 +1619,6 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pkg/term v1.1.0 h1:xIAAdCMh3QIAy+5FrE8Ad8XoDhEU4ufwbaSozViP9kk= github.com/pkg/term v1.1.0/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw= -github.com/pkg/xattr v0.4.5 h1:P5SvUc1T07cHLto76ESJ+/x5kexU7s9127iVoeEW/hs= -github.com/pkg/xattr v0.4.5/go.mod h1:sBD3RAqlr8Q+RC3FutZcikpT8nyDrIEEBw2J744gVWs= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -1699,11 +1691,10 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/cors v1.9.0 h1:l9HGsTsHJcvW14Nk7J9KFz8bzeAWXn3CG6bgt7LsrAE= github.com/rs/cors v1.9.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= 
-github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY= github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1735,6 +1726,7 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY= +github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= @@ -2183,7 +2175,6 @@ golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201110211018-35f3e6cf4a65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2733,8 +2724,6 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ns1/ns1-go.v2 v2.4.4/go.mod h1:GMnKY+ZuoJ+lVLL+78uSTjwTz2jMazq6AfGKQOYhsPk= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= diff --git a/internal/grpc/interceptors/log/log.go b/internal/grpc/interceptors/log/log.go index 3dea1958e7..8d7ee05b0c 100644 --- a/internal/grpc/interceptors/log/log.go +++ b/internal/grpc/interceptors/log/log.go @@ -52,8 +52,10 @@ func NewUnary() grpc.UnaryServerInterceptor { log := appctx.GetLogger(ctx) var event *zerolog.Event if code != codes.OK { + //nolint:zerologlint event = log.Error() } else { + //nolint:zerologlint event = log.Debug() } @@ -92,8 +94,10 @@ func NewStream() grpc.StreamServerInterceptor { log := appctx.GetLogger(ss.Context()) var event *zerolog.Event if code != codes.OK { + //nolint:zerologlint event = log.Error() } else { + //nolint:zerologlint event = log.Info() } diff --git a/internal/grpc/services/appprovider/appprovider.go b/internal/grpc/services/appprovider/appprovider.go index 42283be1ef..999a320c66 100644 --- a/internal/grpc/services/appprovider/appprovider.go +++ b/internal/grpc/services/appprovider/appprovider.go @@ -65,8 +65,8 @@ type config struct { Drivers map[string]map[string]interface{} 
`mapstructure:"drivers"` AppProviderURL string `mapstructure:"app_provider_url"` GatewaySvc string `mapstructure:"gatewaysvc"` - MimeTypes []string `mapstructure:"mime_types" docs:"nil;A list of mime types supported by this app."` - CustomMimeTypesJSON string `mapstructure:"custom_mime_types_json" docs:"nil;An optional mapping file with the list of supported custom file extensions and corresponding mime types."` + MimeTypes []string `docs:"nil;A list of mime types supported by this app." mapstructure:"mime_types"` + CustomMimeTypesJSON string `docs:"nil;An optional mapping file with the list of supported custom file extensions and corresponding mime types." mapstructure:"custom_mime_types_json"` Priority uint64 `mapstructure:"priority"` Language string `mapstructure:"language"` } diff --git a/internal/grpc/services/authprovider/authprovider.go b/internal/grpc/services/authprovider/authprovider.go index fdb6937014..4297df60ef 100644 --- a/internal/grpc/services/authprovider/authprovider.go +++ b/internal/grpc/services/authprovider/authprovider.go @@ -129,7 +129,7 @@ func (s *service) Authenticate(ctx context.Context, req *provider.AuthenticateRe }, nil case errtypes.NotFound: return &provider.AuthenticateResponse{ - Status: status.NewNotFound(ctx, "unknown client id"), + Status: status.NewNotFound(ctx, "unknown client id: "+err.Error()), }, nil default: err = errors.Wrap(err, "authsvc: error in Authenticate") diff --git a/internal/grpc/services/gateway/storageprovider.go b/internal/grpc/services/gateway/storageprovider.go index a4ec8c4bb2..7dd35b1fe5 100644 --- a/internal/grpc/services/gateway/storageprovider.go +++ b/internal/grpc/services/gateway/storageprovider.go @@ -1455,7 +1455,7 @@ func (s *svc) statAcrossProviders(ctx context.Context, req *provider.StatRequest continue } if resp.Status.Code != rpc.Code_CODE_OK { - log.Err(status.NewErrorFromCode(rpc.Code_CODE_OK, "gateway")) + log.Err(status.NewErrorFromCode(rpc.Code_CODE_OK, "gateway")).Send() continue } if 
resp.Info != nil { @@ -1854,7 +1854,7 @@ func (s *svc) listContainerAcrossProviders(ctx context.Context, req *provider.Li continue } if resp.Status.Code != rpc.Code_CODE_OK { - log.Err(status.NewErrorFromCode(rpc.Code_CODE_OK, "gateway")) + log.Err(status.NewErrorFromCode(rpc.Code_CODE_OK, "gateway")).Send() continue } diff --git a/internal/grpc/services/ocminvitemanager/ocminvitemanager.go b/internal/grpc/services/ocminvitemanager/ocminvitemanager.go index 0012d0e3dd..2b257d5049 100644 --- a/internal/grpc/services/ocminvitemanager/ocminvitemanager.go +++ b/internal/grpc/services/ocminvitemanager/ocminvitemanager.go @@ -58,8 +58,8 @@ type config struct { TokenExpiration string `mapstructure:"token_expiration"` OCMClientTimeout int `mapstructure:"ocm_timeout"` OCMClientInsecure bool `mapstructure:"ocm_insecure"` - GatewaySVC string `mapstructure:"gatewaysvc" validate:"required"` - ProviderDomain string `mapstructure:"provider_domain" validate:"required" docs:"The same domain registered in the provider authorizer"` + GatewaySVC string `mapstructure:"gatewaysvc" validate:"required"` + ProviderDomain string `docs:"The same domain registered in the provider authorizer" mapstructure:"provider_domain" validate:"required"` tokenExpiration time.Duration } diff --git a/internal/grpc/services/ocmshareprovider/ocmshareprovider.go b/internal/grpc/services/ocmshareprovider/ocmshareprovider.go index 7f45e1dfdf..91f9f6a766 100644 --- a/internal/grpc/services/ocmshareprovider/ocmshareprovider.go +++ b/internal/grpc/services/ocmshareprovider/ocmshareprovider.go @@ -66,9 +66,9 @@ type config struct { Drivers map[string]map[string]interface{} `mapstructure:"drivers"` ClientTimeout int `mapstructure:"client_timeout"` ClientInsecure bool `mapstructure:"client_insecure"` - GatewaySVC string `mapstructure:"gatewaysvc" validate:"required"` - ProviderDomain string `mapstructure:"provider_domain" validate:"required" docs:"The same domain registered in the provider authorizer"` - 
WebDAVEndpoint string `mapstructure:"webdav_endpoint" validate:"required"` + GatewaySVC string `mapstructure:"gatewaysvc" validate:"required"` + ProviderDomain string `docs:"The same domain registered in the provider authorizer" mapstructure:"provider_domain" validate:"required"` + WebDAVEndpoint string `mapstructure:"webdav_endpoint" validate:"required"` WebappTemplate string `mapstructure:"webapp_template"` } diff --git a/internal/grpc/services/permissions/permissions.go b/internal/grpc/services/permissions/permissions.go index 8969192cc4..52c7ab6254 100644 --- a/internal/grpc/services/permissions/permissions.go +++ b/internal/grpc/services/permissions/permissions.go @@ -43,8 +43,8 @@ func init() { } type config struct { - Driver string `mapstructure:"driver" docs:"localhome;The permission driver to be used."` - Drivers map[string]map[string]interface{} `mapstructure:"drivers" docs:"url:pkg/permission/permission.go"` + Driver string `docs:"localhome;The permission driver to be used." mapstructure:"driver"` + Drivers map[string]map[string]interface{} `docs:"url:pkg/permission/permission.go" mapstructure:"drivers"` } type service struct { diff --git a/internal/grpc/services/publicshareprovider/publicshareprovider.go b/internal/grpc/services/publicshareprovider/publicshareprovider.go index 09313d2f14..ef2d3977ae 100644 --- a/internal/grpc/services/publicshareprovider/publicshareprovider.go +++ b/internal/grpc/services/publicshareprovider/publicshareprovider.go @@ -244,6 +244,12 @@ func (s *service) ListPublicShares(ctx context.Context, req *link.ListPublicShar log.Info().Str("publicshareprovider", "list").Msg("list public share") user, _ := ctxpkg.ContextGetUser(ctx) + if req.Opaque != nil { + if v, ok := req.Opaque.Map[ctxpkg.ResoucePathCtx]; ok { + ctx = ctxpkg.ContextSetResourcePath(ctx, string(v.Value)) + } + } + shares, err := s.sm.ListPublicShares(ctx, user, req.Filters, &provider.ResourceInfo{}, req.GetSign()) if err != nil { log.Err(err).Msg("error listing 
shares") diff --git a/internal/grpc/services/publicstorageprovider/publicstorageprovider.go b/internal/grpc/services/publicstorageprovider/publicstorageprovider.go index 48b7eba838..5b1c059824 100644 --- a/internal/grpc/services/publicstorageprovider/publicstorageprovider.go +++ b/internal/grpc/services/publicstorageprovider/publicstorageprovider.go @@ -34,6 +34,7 @@ import ( "github.com/cs3org/reva/pkg/rgrpc" "github.com/cs3org/reva/pkg/rgrpc/status" "github.com/cs3org/reva/pkg/rgrpc/todo/pool" + "github.com/cs3org/reva/pkg/sharedconf" rtrace "github.com/cs3org/reva/pkg/trace" "github.com/cs3org/reva/pkg/utils" "github.com/cs3org/reva/pkg/utils/cfg" @@ -51,7 +52,15 @@ func init() { type config struct { MountPath string `mapstructure:"mount_path"` MountID string `mapstructure:"mount_id"` - GatewayAddr string `mapstructure:"gateway_addr"` + GatewayAddr string `mapstructure:"gatewaysvc"` +} + +func (c *config) ApplyDefaults() { + c.GatewayAddr = sharedconf.GetGatewaySVC(c.GatewayAddr) + + if c.MountPath == "" { + c.MountPath = "/public" + } } type service struct { diff --git a/internal/grpc/services/storageprovider/storageprovider.go b/internal/grpc/services/storageprovider/storageprovider.go index 7460331d7f..ac20f7fcf8 100644 --- a/internal/grpc/services/storageprovider/storageprovider.go +++ b/internal/grpc/services/storageprovider/storageprovider.go @@ -60,15 +60,15 @@ func init() { } type config struct { - MountPath string `mapstructure:"mount_path" docs:"/;The path where the file system would be mounted."` - MountID string `mapstructure:"mount_id" docs:"-;The ID of the mounted file system."` - Driver string `mapstructure:"driver" docs:"localhome;The storage driver to be used."` - Drivers map[string]map[string]interface{} `mapstructure:"drivers" docs:"url:pkg/storage/fs/localhome/localhome.go"` - TmpFolder string `mapstructure:"tmp_folder" docs:"/var/tmp;Path to temporary folder."` - DataServerURL string `mapstructure:"data_server_url" 
docs:"http://localhost/data;The URL for the data server."` - ExposeDataServer bool `mapstructure:"expose_data_server" docs:"false;Whether to expose data server."` // if true the client will be able to upload/download directly to it - AvailableXS map[string]uint32 `mapstructure:"available_checksums" docs:"nil;List of available checksums."` - CustomMimeTypesJSON string `mapstructure:"custom_mime_types_json" docs:"nil;An optional mapping file with the list of supported custom file extensions and corresponding mime types."` + MountPath string `docs:"/;The path where the file system would be mounted." mapstructure:"mount_path"` + MountID string `docs:"-;The ID of the mounted file system." mapstructure:"mount_id"` + Driver string `docs:"localhome;The storage driver to be used." mapstructure:"driver"` + Drivers map[string]map[string]interface{} `docs:"url:pkg/storage/fs/localhome/localhome.go" mapstructure:"drivers"` + TmpFolder string `docs:"/var/tmp;Path to temporary folder." mapstructure:"tmp_folder"` + DataServerURL string `docs:"http://localhost/data;The URL for the data server." mapstructure:"data_server_url"` + ExposeDataServer bool `docs:"false;Whether to expose data server." mapstructure:"expose_data_server"` // if true the client will be able to upload/download directly to it + AvailableXS map[string]uint32 `docs:"nil;List of available checksums." mapstructure:"available_checksums"` + CustomMimeTypesJSON string `docs:"nil;An optional mapping file with the list of supported custom file extensions and corresponding mime types." 
mapstructure:"custom_mime_types_json"` MinimunAllowedPathLevelForShare int `mapstructure:"minimum_allowed_path_level_for_share"` } @@ -813,7 +813,7 @@ func (s *service) Stat(ctx context.Context, req *provider.StatRequest) (*provide }, nil } - if err := s.wrap(ctx, md, utils.IsAbsoluteReference(req.Ref)); err != nil { + if err := s.wrap(ctx, md, true); err != nil { return &provider.StatResponse{ Status: status.NewInternal(ctx, err, "error wrapping path"), }, nil diff --git a/internal/grpc/services/usershareprovider/usershareprovider.go b/internal/grpc/services/usershareprovider/usershareprovider.go index 3de8bfa3c8..d60beece84 100644 --- a/internal/grpc/services/usershareprovider/usershareprovider.go +++ b/internal/grpc/services/usershareprovider/usershareprovider.go @@ -183,6 +183,12 @@ func (s *service) GetShare(ctx context.Context, req *collaboration.GetShareReque } func (s *service) ListShares(ctx context.Context, req *collaboration.ListSharesRequest) (*collaboration.ListSharesResponse, error) { + if req.Opaque != nil { + if v, ok := req.Opaque.Map[ctxpkg.ResoucePathCtx]; ok { + ctx = ctxpkg.ContextSetResourcePath(ctx, string(v.Value)) + } + } + shares, err := s.sm.ListShares(ctx, req.Filters) // TODO(labkode): add filter to share manager if err != nil { return &collaboration.ListSharesResponse{ diff --git a/internal/http/interceptors/auth/credential/strategy/publicshares/publicshares.go b/internal/http/interceptors/auth/credential/strategy/publicshares/publicshares.go index 3824410f27..731559cd9c 100644 --- a/internal/http/interceptors/auth/credential/strategy/publicshares/publicshares.go +++ b/internal/http/interceptors/auth/credential/strategy/publicshares/publicshares.go @@ -24,6 +24,8 @@ import ( "github.com/cs3org/reva/internal/http/interceptors/auth/credential/registry" "github.com/cs3org/reva/pkg/auth" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" ) func init() { @@ -35,11 +37,27 @@ const ( basicAuthPasswordPrefix = "password|" ) -type 
strategy struct{} +type config struct { + UseCookies bool `mapstructure:"use_cookies"` +} + +func parseConfig(m map[string]interface{}) (*config, error) { + var c config + err := mapstructure.Decode(m, &c) + return &c, err +} + +type strategy struct { + c *config +} // New returns a new auth strategy that handles public share verification. func New(m map[string]interface{}) (auth.CredentialStrategy, error) { - return &strategy{}, nil + c, err := parseConfig(m) + if err != nil { + return nil, errors.Wrap(err, "error parsing config") + } + return &strategy{c: c}, nil } func (s *strategy) GetCredentials(w http.ResponseWriter, r *http.Request) (*auth.Credentials, error) { @@ -51,8 +69,15 @@ func (s *strategy) GetCredentials(w http.ResponseWriter, r *http.Request) (*auth return nil, fmt.Errorf("no public token provided") } - // We can ignore the username since it is always set to "public" in public shares. sharePassword := basicAuthPasswordPrefix + if s.c.UseCookies { + if password, err := r.Cookie("password"); err == nil { + sharePassword += password.Value + return &auth.Credentials{Type: "publicshares", ClientID: token, ClientSecret: sharePassword}, nil + } + } + + // We can ignore the username since it is always set to "public" in public shares. _, password, ok := r.BasicAuth() if ok { sharePassword += password diff --git a/internal/http/interceptors/log/log.go b/internal/http/interceptors/log/log.go index e383a51ed2..432a642f9a 100644 --- a/internal/http/interceptors/log/log.go +++ b/internal/http/interceptors/log/log.go @@ -91,10 +91,13 @@ func writeLog(log *zerolog.Logger, req *http.Request, url url.URL, ts time.Time, var event *zerolog.Event switch { case status < 400: + //nolint:zerologlint event = log.Info() case status < 500: + //nolint:zerologlint event = log.Warn() default: + //nolint:zerologlint event = log.Error() } event.Str("host", host).Str("method", req.Method).Str("uri", uri).Int("status", status). 
diff --git a/internal/http/services/appprovider/appprovider.go b/internal/http/services/appprovider/appprovider.go index 14289f9006..1fa32a63a6 100644 --- a/internal/http/services/appprovider/appprovider.go +++ b/internal/http/services/appprovider/appprovider.go @@ -52,8 +52,8 @@ func init() { // Config holds the config options for the HTTP appprovider service. type Config struct { Prefix string `mapstructure:"prefix"` - GatewaySvc string `mapstructure:"gatewaysvc" validate:"required"` - Insecure bool `mapstructure:"insecure" docs:"false;Whether to skip certificate checks when sending requests."` + GatewaySvc string `mapstructure:"gatewaysvc" validate:"required"` + Insecure bool `docs:"false;Whether to skip certificate checks when sending requests." mapstructure:"insecure"` } func (c *Config) ApplyDefaults() { diff --git a/internal/http/services/archiver/handler.go b/internal/http/services/archiver/handler.go index 2f43e2b1e1..79d62bafe5 100644 --- a/internal/http/services/archiver/handler.go +++ b/internal/http/services/archiver/handler.go @@ -58,12 +58,12 @@ type svc struct { // Config holds the config options that need to be passed down to all ocdav handlers. type Config struct { Prefix string `mapstructure:"prefix"` - GatewaySvc string `mapstructure:"gatewaysvc" validate:"required"` + GatewaySvc string `mapstructure:"gatewaysvc" validate:"required"` Timeout int64 `mapstructure:"timeout"` - Insecure bool `mapstructure:"insecure" docs:"false;Whether to skip certificate checks when sending requests."` + Insecure bool `docs:"false;Whether to skip certificate checks when sending requests." 
mapstructure:"insecure"` Name string `mapstructure:"name"` - MaxNumFiles int64 `mapstructure:"max_num_files" validate:"required,gt=0"` - MaxSize int64 `mapstructure:"max_size" validate:"required,gt=0"` + MaxNumFiles int64 `mapstructure:"max_num_files" validate:"required,gt=0"` + MaxSize int64 `mapstructure:"max_size" validate:"required,gt=0"` AllowedFolders []string `mapstructure:"allowed_folders"` } diff --git a/internal/http/services/datagateway/datagateway.go b/internal/http/services/datagateway/datagateway.go index e1b80d8b21..a8e4a8065f 100644 --- a/internal/http/services/datagateway/datagateway.go +++ b/internal/http/services/datagateway/datagateway.go @@ -56,9 +56,9 @@ type transferClaims struct { } type config struct { Prefix string `mapstructure:"prefix"` - TransferSharedSecret string `mapstructure:"transfer_shared_secret" validate:"required"` + TransferSharedSecret string `mapstructure:"transfer_shared_secret" validate:"required"` Timeout int64 `mapstructure:"timeout"` - Insecure bool `mapstructure:"insecure" docs:"false;Whether to skip certificate checks when sending requests."` + Insecure bool `docs:"false;Whether to skip certificate checks when sending requests." 
mapstructure:"insecure"` } func (c *config) ApplyDefaults() { diff --git a/internal/http/services/dataprovider/dataprovider.go b/internal/http/services/dataprovider/dataprovider.go index 997abc0c9d..bb4718ffcf 100644 --- a/internal/http/services/dataprovider/dataprovider.go +++ b/internal/http/services/dataprovider/dataprovider.go @@ -37,12 +37,12 @@ func init() { } type config struct { - Prefix string `mapstructure:"prefix" docs:"data;The prefix to be used for this HTTP service"` - Driver string `mapstructure:"driver" docs:"localhome;The storage driver to be used."` - Drivers map[string]map[string]interface{} `mapstructure:"drivers" docs:"url:pkg/storage/fs/localhome/localhome.go;The configuration for the storage driver"` - DataTXs map[string]map[string]interface{} `mapstructure:"data_txs" docs:"url:pkg/rhttp/datatx/manager/simple/simple.go;The configuration for the data tx protocols"` + Prefix string `docs:"data;The prefix to be used for this HTTP service" mapstructure:"prefix"` + Driver string `docs:"localhome;The storage driver to be used." mapstructure:"driver"` + Drivers map[string]map[string]interface{} `docs:"url:pkg/storage/fs/localhome/localhome.go;The configuration for the storage driver" mapstructure:"drivers"` + DataTXs map[string]map[string]interface{} `docs:"url:pkg/rhttp/datatx/manager/simple/simple.go;The configuration for the data tx protocols" mapstructure:"data_txs"` Timeout int64 `mapstructure:"timeout"` - Insecure bool `mapstructure:"insecure" docs:"false;Whether to skip certificate checks when sending requests."` + Insecure bool `docs:"false;Whether to skip certificate checks when sending requests." 
mapstructure:"insecure"` } func (c *config) ApplyDefaults() { diff --git a/internal/http/services/ocmd/ocm.go b/internal/http/services/ocmd/ocm.go index 050f9d83b4..13c901539a 100644 --- a/internal/http/services/ocmd/ocm.go +++ b/internal/http/services/ocmd/ocm.go @@ -35,7 +35,7 @@ func init() { type config struct { Prefix string `mapstructure:"prefix"` - GatewaySvc string `mapstructure:"gatewaysvc" validate:"required"` + GatewaySvc string `mapstructure:"gatewaysvc" validate:"required"` ExposeRecipientDisplayName bool `mapstructure:"expose_recipient_display_name"` } diff --git a/internal/http/services/ocmd/protocols.go b/internal/http/services/ocmd/protocols.go index aff9f4c3cf..bd14508147 100644 --- a/internal/http/services/ocmd/protocols.go +++ b/internal/http/services/ocmd/protocols.go @@ -46,8 +46,8 @@ type Protocol interface { // WebDAV contains the parameters for the WebDAV protocol. type WebDAV struct { SharedSecret string `json:"sharedSecret" validate:"required"` - Permissions []string `json:"permissions" validate:"required,dive,required,oneof=read write share"` - URL string `json:"url" validate:"required"` + Permissions []string `json:"permissions" validate:"required,dive,required,oneof=read write share"` + URL string `json:"url" validate:"required"` } // ToOCMProtocol convert the protocol to a ocm Protocol struct. @@ -75,7 +75,7 @@ func (w *WebDAV) ToOCMProtocol() *ocm.Protocol { // Webapp contains the parameters for the Webapp protocol. type Webapp struct { URITemplate string `json:"uriTemplate" validate:"required"` - ViewMode string `json:"viewMode" validate:"required,dive,required,oneof=view read write"` + ViewMode string `json:"viewMode" validate:"required,dive,required,oneof=view read write"` } // ToOCMProtocol convert the protocol to a ocm Protocol struct. @@ -86,8 +86,8 @@ func (w *Webapp) ToOCMProtocol() *ocm.Protocol { // Datatx contains the parameters for the Datatx protocol. 
type Datatx struct { SharedSecret string `json:"sharedSecret" validate:"required"` - SourceURI string `json:"srcUri" validate:"required"` - Size uint64 `json:"size" validate:"required"` + SourceURI string `json:"srcUri" validate:"required"` + Size uint64 `json:"size" validate:"required"` } // ToOCMProtocol convert the protocol to a ocm Protocol struct. diff --git a/internal/http/services/ocmd/shares.go b/internal/http/services/ocmd/shares.go index 8905748ef5..ea001b3af4 100644 --- a/internal/http/services/ocmd/shares.go +++ b/internal/http/services/ocmd/shares.go @@ -67,18 +67,18 @@ func (h *sharesHandler) init(c *config) error { } type createShareRequest struct { - ShareWith string `json:"shareWith" validate:"required"` // identifier of the recipient of the share - Name string `json:"name" validate:"required"` // name of the resource - Description string `json:"description"` // (optional) description of the resource - ProviderID string `json:"providerId" validate:"required"` // unique identifier of the resource at provider side - Owner string `json:"owner" validate:"required"` // unique identifier of the owner at provider side - Sender string `json:"sender" validate:"required"` // unique indentifier of the user who wants to share the resource at provider side - OwnerDisplayName string `json:"ownerDisplayName"` // display name of the owner of the resource - SenderDisplayName string `json:"senderDisplayName"` // dispay name of the user who wants to share the resource - ShareType string `json:"shareType" validate:"required,oneof=user group"` // recipient share type (user or group) - ResourceType string `json:"resourceType" validate:"required,oneof=file folder"` + ShareWith string `json:"shareWith" validate:"required"` // identifier of the recipient of the share + Name string `json:"name" validate:"required"` // name of the resource + Description string `json:"description"` // (optional) description of the resource + ProviderID string `json:"providerId" 
validate:"required"` // unique identifier of the resource at provider side + Owner string `json:"owner" validate:"required"` // unique identifier of the owner at provider side + Sender string `json:"sender" validate:"required"` // unique indentifier of the user who wants to share the resource at provider side + OwnerDisplayName string `json:"ownerDisplayName"` // display name of the owner of the resource + SenderDisplayName string `json:"senderDisplayName"` // dispay name of the user who wants to share the resource + ShareType string `json:"shareType" validate:"required,oneof=user group"` // recipient share type (user or group) + ResourceType string `json:"resourceType" validate:"required,oneof=file folder"` Expiration uint64 `json:"expiration"` - Protocols Protocols `json:"protocol" validate:"required"` + Protocols Protocols `json:"protocol" validate:"required"` } // CreateShare sends all the informations to the consumer needed to start diff --git a/internal/http/services/ocmprovider/ocmprovider.go b/internal/http/services/ocmprovider/ocmprovider.go index d356b236bc..45192a61f8 100644 --- a/internal/http/services/ocmprovider/ocmprovider.go +++ b/internal/http/services/ocmprovider/ocmprovider.go @@ -37,22 +37,22 @@ func init() { } type config struct { - OCMPrefix string `mapstructure:"ocm_prefix" docs:"ocm;The prefix URL where the OCM API is served."` - Endpoint string `mapstructure:"endpoint" docs:"This host's full URL. 
If it's not configured, it is assumed OCM is not available."` - Provider string `mapstructure:"provider" docs:"reva;A friendly name that defines this service."` - WebdavRoot string `mapstructure:"webdav_root" docs:"/remote.php/dav/ocm;The root URL of the WebDAV endpoint to serve OCM shares."` - WebappRoot string `mapstructure:"webapp_root" docs:"/external/sciencemesh;The root URL to serve Web apps via OCM."` - EnableWebapp bool `mapstructure:"enable_webapp" docs:"false;Whether web apps are enabled in OCM shares."` - EnableDatatx bool `mapstructure:"enable_datatx" docs:"false;Whether data transfers are enabled in OCM shares."` + OCMPrefix string `docs:"ocm;The prefix URL where the OCM API is served." mapstructure:"ocm_prefix"` + Endpoint string `docs:"This host's full URL. If it's not configured, it is assumed OCM is not available." mapstructure:"endpoint"` + Provider string `docs:"reva;A friendly name that defines this service." mapstructure:"provider"` + WebdavRoot string `docs:"/remote.php/dav/ocm;The root URL of the WebDAV endpoint to serve OCM shares." mapstructure:"webdav_root"` + WebappRoot string `docs:"/external/sciencemesh;The root URL to serve Web apps via OCM." mapstructure:"webapp_root"` + EnableWebapp bool `docs:"false;Whether web apps are enabled in OCM shares." mapstructure:"enable_webapp"` + EnableDatatx bool `docs:"false;Whether data transfers are enabled in OCM shares." 
mapstructure:"enable_datatx"` } type DiscoveryData struct { - Enabled bool `json:"enabled" xml:"enabled"` - APIVersion string `json:"apiVersion" xml:"apiVersion"` - Endpoint string `json:"endPoint" xml:"endPoint"` - Provider string `json:"provider" xml:"provider"` + Enabled bool `json:"enabled" xml:"enabled"` + APIVersion string `json:"apiVersion" xml:"apiVersion"` + Endpoint string `json:"endPoint" xml:"endPoint"` + Provider string `json:"provider" xml:"provider"` ResourceTypes []resourceTypes `json:"resourceTypes" xml:"resourceTypes"` - Capabilities []string `json:"capabilities" xml:"capabilities"` + Capabilities []string `json:"capabilities" xml:"capabilities"` } type resourceTypes struct { diff --git a/internal/http/services/overleaf/overleaf.go b/internal/http/services/overleaf/overleaf.go index ab0bf42312..d3ea3f419f 100644 --- a/internal/http/services/overleaf/overleaf.go +++ b/internal/http/services/overleaf/overleaf.go @@ -55,11 +55,11 @@ type svc struct { type config struct { Prefix string `mapstructure:"prefix"` - GatewaySvc string `mapstructure:"gatewaysvc" validate:"required"` - AppName string `mapstructure:"app_name" docs:";The App user-friendly name." validate:"required"` - ArchiverURL string `mapstructure:"archiver_url" docs:";Internet-facing URL of the archiver service, used to serve the files to Overleaf." validate:"required"` - appURL string `mapstructure:"app_url" docs:";The App URL." validate:"required"` - Insecure bool `mapstructure:"insecure" docs:"false;Whether to skip certificate checks when sending requests."` + GatewaySvc string `mapstructure:"gatewaysvc" validate:"required"` + AppName string `docs:";The App user-friendly name." mapstructure:"app_name" validate:"required"` + ArchiverURL string `docs:";Internet-facing URL of the archiver service, used to serve the files to Overleaf." mapstructure:"archiver_url" validate:"required"` + appURL string `docs:";The App URL." 
mapstructure:"app_url" validate:"required"` + Insecure bool `docs:"false;Whether to skip certificate checks when sending requests." mapstructure:"insecure"` JWTSecret string `mapstructure:"jwt_secret"` } diff --git a/internal/http/services/owncloud/ocdav/download.go b/internal/http/services/owncloud/ocdav/download.go new file mode 100644 index 0000000000..2607c8148f --- /dev/null +++ b/internal/http/services/owncloud/ocdav/download.go @@ -0,0 +1,245 @@ +// Copyright 2018-2023 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "context" + "io" + "net/http" + "path" + "strings" + + gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/internal/http/services/archiver/manager" + "github.com/cs3org/reva/pkg/appctx" + ctxpkg "github.com/cs3org/reva/pkg/ctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/rhttp" + "github.com/cs3org/reva/pkg/storage/utils/downloader" + "github.com/cs3org/reva/pkg/storage/utils/walker" + "github.com/pkg/errors" + "github.com/rs/zerolog" + "google.golang.org/grpc/metadata" +) + +func (s *svc) handleLegacyPublicLinkDownload(w http.ResponseWriter, r *http.Request) { + token := strings.TrimPrefix(r.URL.Path, "/") + files := getFilesFromRequest(r) + s.downloadFiles(r.Context(), w, token, files) +} + +func getFilesFromRequest(r *http.Request) []string { + q := r.URL.Query() + dir := q.Get("path") + files := []string{} + + if q.Get("files") != "" { + files = append(files, path.Join(dir, q.Get("files"))) + } else { + for _, f := range q["files[]"] { + files = append(files, path.Join(dir, f)) + } + } + return files +} + +func (s *svc) authenticate(ctx context.Context, token string) (context.Context, error) { + // TODO (gdelmont): support password protected public links + c, err := s.getClient() + if err != nil { + return nil, err + } + res, err := c.Authenticate(ctx, &gateway.AuthenticateRequest{ + Type: "publicshares", + ClientId: token, + ClientSecret: "password|", + }) + if err != nil { + return nil, err + } + + if res.Status.Code != rpc.Code_CODE_OK { + if res.Status.Code == rpc.Code_CODE_NOT_FOUND { + return nil, errtypes.NotFound(token) + } + return nil, errors.New(res.Status.Message) + } + + ctx = metadata.AppendToOutgoingContext(ctx, ctxpkg.TokenHeader, res.Token) + ctx = 
ctxpkg.ContextSetToken(ctx, res.Token) + + return ctx, nil +} + +func (s *svc) handleHTTPError(w http.ResponseWriter, err error, log *zerolog.Logger) { + log.Error().Err(err).Msg("ocdav: got error") + switch err.(type) { + case errtypes.NotFound: + http.Error(w, "Resource not found", http.StatusNotFound) + case errtypes.PermissionDenied: + http.Error(w, "Permission denied", http.StatusForbidden) + case manager.ErrMaxSize, manager.ErrMaxFileCount: + http.Error(w, err.Error(), http.StatusRequestEntityTooLarge) + default: + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func (s *svc) downloadFiles(ctx context.Context, w http.ResponseWriter, token string, files []string) { + log := appctx.GetLogger(ctx) + ctx, err := s.authenticate(ctx, token) + if err != nil { + s.handleHTTPError(w, err, log) + return + } + isSingleFileShare, res, err := s.isSingleFileShare(ctx, token, files) + if err != nil { + s.handleHTTPError(w, err, log) + return + } + if isSingleFileShare { + s.downloadFile(ctx, w, res) + } else { + s.downloadArchive(ctx, w, token, files) + } +} + +func (s *svc) isSingleFileShare(ctx context.Context, token string, files []string) (bool, *provider.ResourceInfo, error) { + switch len(files) { + case 0: + return s.resourceIsFileInPublicLink(ctx, token, "") + case 1: + return s.resourceIsFileInPublicLink(ctx, token, files[0]) + default: + // FIXME (gdelmont): even if the list contains more than one file + // these (or part of them), could not exist + // in this case, filtering the existing ones, we could + // end up having 0 or 1 files + return false, nil, nil + } +} + +func (s *svc) resourceIsFileInPublicLink(ctx context.Context, token, file string) (bool, *provider.ResourceInfo, error) { + res, err := s.getResourceFromPublicLinkToken(ctx, token, file) + if err != nil { + return false, nil, err + } + return res.Type == provider.ResourceType_RESOURCE_TYPE_FILE, res, nil +} + +func (s *svc) getResourceFromPublicLinkToken(ctx context.Context, 
token, file string) (*provider.ResourceInfo, error) { + c, err := s.getClient() + if err != nil { + return nil, err + } + res, err := c.GetPublicShareByToken(ctx, &link.GetPublicShareByTokenRequest{ + Token: token, + }) + if err != nil { + return nil, err + } + + if res.Status.Code != rpc.Code_CODE_OK { + if res.Status.Code == rpc.Code_CODE_NOT_FOUND { + return nil, errtypes.NotFound(token) + } + return nil, errtypes.InternalError(res.Status.Message) + } + + statRes, err := c.Stat(ctx, &provider.StatRequest{Ref: &provider.Reference{ResourceId: res.Share.ResourceId, Path: file}}) + if err != nil { + return nil, err + } + + if statRes.Status.Code != rpc.Code_CODE_OK { + if statRes.Status.Code == rpc.Code_CODE_NOT_FOUND { + return nil, errtypes.NotFound(token) + } else if statRes.Status.Code == rpc.Code_CODE_PERMISSION_DENIED { + return nil, errtypes.PermissionDenied(file) + } + return nil, errtypes.InternalError(statRes.Status.Message) + } + return statRes.Info, nil +} + +func (s *svc) downloadFile(ctx context.Context, w http.ResponseWriter, res *provider.ResourceInfo) { + log := appctx.GetLogger(ctx) + c, err := s.getClient() + if err != nil { + s.handleHTTPError(w, err, log) + return + } + d := downloader.NewDownloader(c) + r, err := d.Download(ctx, res.Path, "") + if err != nil { + s.handleHTTPError(w, err, log) + return + } + defer r.Close() + + w.WriteHeader(http.StatusOK) + + _, err = io.Copy(w, r) + if err != nil { + s.handleHTTPError(w, err, log) + return + } +} + +func getPublicLinkResources(rootFolder, token string, files []string) []string { + r := make([]string, 0, len(files)) + for _, f := range files { + r = append(r, path.Join(rootFolder, token, f)) + } + if len(r) == 0 { + r = []string{path.Join(rootFolder, token)} + } + return r +} + +func (s *svc) downloadArchive(ctx context.Context, w http.ResponseWriter, token string, files []string) { + log := appctx.GetLogger(ctx) + resources := getPublicLinkResources(s.c.PublicLinkDownload.PublicFolder, token, 
files) + + gtw, err := s.getClient() + if err != nil { + s.handleHTTPError(w, err, log) + return + } + + downloader := downloader.NewDownloader(gtw, rhttp.Context(ctx)) + walker := walker.NewWalker(gtw) + + archiver, err := manager.NewArchiver(resources, walker, downloader, manager.Config{ + MaxNumFiles: s.c.PublicLinkDownload.MaxNumFiles, + MaxSize: s.c.PublicLinkDownload.MaxSize, + }) + if err != nil { + s.handleHTTPError(w, err, log) + return + } + + if err := archiver.CreateTar(ctx, w); err != nil { + s.handleHTTPError(w, err, log) + return + } +} diff --git a/internal/http/services/owncloud/ocdav/ocdav.go b/internal/http/services/owncloud/ocdav/ocdav.go index 0e93d56737..38dd3d8354 100644 --- a/internal/http/services/owncloud/ocdav/ocdav.go +++ b/internal/http/services/owncloud/ocdav/ocdav.go @@ -84,6 +84,12 @@ func init() { global.Register("ocdav", New) } +type ConfigPublicLinkDownload struct { + MaxNumFiles int64 `mapstructure:"max_num_files"` + MaxSize int64 `mapstructure:"max_size"` + PublicFolder string `mapstructure:"public_folder"` +} + // Config holds the config options that need to be passed down to all ocdav handlers. type Config struct { Prefix string `mapstructure:"prefix"` @@ -100,7 +106,7 @@ type Config struct { OCMNamespace string `mapstructure:"ocm_namespace"` GatewaySvc string `mapstructure:"gatewaysvc"` Timeout int64 `mapstructure:"timeout"` - Insecure bool `mapstructure:"insecure" docs:"false;Whether to skip certificate checks when sending requests."` + Insecure bool `docs:"false;Whether to skip certificate checks when sending requests." mapstructure:"insecure"` // If true, HTTP COPY will expect the HTTP-TPC (third-party copy) headers EnableHTTPTpc bool `mapstructure:"enable_http_tpc"` // The authentication scheme to use for the tpc push call when userinfo part is specified in the Destination header uri. Default value is 'bearer'. 
@@ -111,7 +117,8 @@ type Config struct { PublicURL string `mapstructure:"public_url"` FavoriteStorageDriver string `mapstructure:"favorite_storage_driver"` FavoriteStorageDrivers map[string]map[string]interface{} `mapstructure:"favorite_storage_drivers"` - Notifications map[string]interface{} `mapstructure:"notifications" docs:"Settingsg for the Notification Helper"` + PublicLinkDownload *ConfigPublicLinkDownload `mapstructure:"publiclink_download"` + Notifications map[string]interface{} `docs:"Settingsg for the Notification Helper" mapstructure:"notifications"` } func (c *Config) ApplyDefaults() { @@ -188,7 +195,7 @@ func (s *svc) Close() error { } func (s *svc) Unprotected() []string { - return []string{"/status.php", "/remote.php/dav/public-files/", "/apps/files/", "/index.php/f/", "/index.php/s/", "/remote.php/dav/ocm/"} + return []string{"/status.php", "/remote.php/dav/public-files/", "/apps/files/", "/index.php/f/", "/index.php/s/", "/s/", "/remote.php/dav/ocm/"} } func (s *svc) Handler() http.Handler { @@ -213,6 +220,14 @@ func (s *svc) Handler() http.Handler { head, r.URL.Path = router.ShiftPath(r.URL.Path) log.Debug().Str("head", head).Str("tail", r.URL.Path).Msg("http routing") switch head { + case "s": + if strings.HasSuffix(r.URL.Path, "/download") { + r.URL.Path = strings.TrimSuffix(r.URL.Path, "/download") + s.handleLegacyPublicLinkDownload(w, r) + return + } + http.Error(w, "Not Yet Implemented", http.StatusNotImplemented) + return case "status.php": s.doStatus(w, r) return @@ -233,7 +248,7 @@ func (s *svc) Handler() http.Handler { if head == "s" { token := r.URL.Path rURL := s.c.PublicURL + path.Join(head, token) - + r.URL.Path = "/" // reset old path for redirection http.Redirect(w, r, rURL, http.StatusMovedPermanently) return } @@ -334,7 +349,7 @@ func extractDestination(r *http.Request) (string, error) { func replaceAllStringSubmatchFunc(re *regexp.Regexp, str string, repl func([]string) string) string { result := "" lastIndex := 0 - for _, v := 
range re.FindAllSubmatchIndex([]byte(str), -1) { + for _, v := range re.FindAllStringSubmatchIndex(str, -1) { groups := []string{} for i := 0; i < len(v); i += 2 { groups = append(groups, str[v[i]:v[i+1]]) diff --git a/internal/http/services/owncloud/ocdav/propfind.go b/internal/http/services/owncloud/ocdav/propfind.go index fda9e06e17..1166cba30b 100644 --- a/internal/http/services/owncloud/ocdav/propfind.go +++ b/internal/http/services/owncloud/ocdav/propfind.go @@ -38,6 +38,7 @@ import ( collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1" link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" "github.com/cs3org/reva/internal/grpc/services/storageprovider" "github.com/cs3org/reva/internal/http/services/owncloud/ocs/conversions" "github.com/cs3org/reva/pkg/appctx" @@ -162,7 +163,14 @@ func (s *svc) propfindResponse(ctx context.Context, w http.ResponseWriter, r *ht } var linkshares map[string]struct{} - listResp, err := client.ListPublicShares(ctx, &link.ListPublicSharesRequest{Filters: linkFilters}) + listResp, err := client.ListPublicShares(ctx, &link.ListPublicSharesRequest{ + Opaque: &types.Opaque{ + Map: map[string]*types.OpaqueEntry{ + ctxpkg.ResoucePathCtx: {Decoder: "plain", Value: []byte(parentInfo.Path)}, + }, + }, + Filters: linkFilters, + }) if err == nil { linkshares = make(map[string]struct{}, len(listResp.Share)) for i := range listResp.Share { @@ -174,7 +182,14 @@ func (s *svc) propfindResponse(ctx context.Context, w http.ResponseWriter, r *ht } var usershares map[string]struct{} - listSharesResp, err := client.ListShares(ctx, &collaboration.ListSharesRequest{Filters: shareFilters}) + listSharesResp, err := client.ListShares(ctx, &collaboration.ListSharesRequest{ + Filters: shareFilters, + Opaque: &types.Opaque{ + Map: map[string]*types.OpaqueEntry{ + ctxpkg.ResoucePathCtx: {Decoder: "plain", 
Value: []byte(parentInfo.Path)}, + }, + }, + }) if err == nil { usershares = make(map[string]struct{}, len(listSharesResp.Shares)) for i := range listSharesResp.Shares { @@ -641,7 +656,7 @@ func (s *svc) mdToPropResponse(ctx context.Context, pf *propfindXML, md *provide } else { checksums.WriteString(" MD5:") } - checksums.WriteString(string(e.Value)) + checksums.Write(e.Value) } if e, ok := md.Opaque.Map["adler32"]; ok { if checksums.Len() == 0 { @@ -649,7 +664,7 @@ func (s *svc) mdToPropResponse(ctx context.Context, pf *propfindXML, md *provide } else { checksums.WriteString(" ADLER32:") } - checksums.WriteString(string(e.Value)) + checksums.Write(e.Value) } } if checksums.Len() > 0 { @@ -808,7 +823,7 @@ func (s *svc) mdToPropResponse(ctx context.Context, pf *propfindXML, md *provide } else { checksums.WriteString(" MD5:") } - checksums.WriteString(string(e.Value)) + checksums.Write(e.Value) } if e, ok := md.Opaque.Map["adler32"]; ok { if checksums.Len() == 0 { @@ -816,7 +831,7 @@ func (s *svc) mdToPropResponse(ctx context.Context, pf *propfindXML, md *provide } else { checksums.WriteString(" ADLER32:") } - checksums.WriteString(string(e.Value)) + checksums.Write(e.Value) } } if checksums.Len() > 13 { diff --git a/internal/http/services/owncloud/ocdav/put.go b/internal/http/services/owncloud/ocdav/put.go index c2ee4fffc0..bc7a314292 100644 --- a/internal/http/services/owncloud/ocdav/put.go +++ b/internal/http/services/owncloud/ocdav/put.go @@ -34,14 +34,17 @@ import ( typespb "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" "github.com/cs3org/reva/internal/http/services/datagateway" "github.com/cs3org/reva/pkg/appctx" + ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/notification/trigger" "github.com/cs3org/reva/pkg/rhttp" "github.com/cs3org/reva/pkg/storage/utils/chunking" rtrace "github.com/cs3org/reva/pkg/trace" + "github.com/cs3org/reva/pkg/user" "github.com/cs3org/reva/pkg/utils" 
"github.com/cs3org/reva/pkg/utils/resourceid" "github.com/rs/zerolog" + "go.step.sm/crypto/randutil" ) func sufferMacOSFinder(r *http.Request) bool { @@ -223,6 +226,18 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ Opaque: &typespb.Opaque{Map: opaqueMap}, } + if userInCtxHasUploaderRole(ctx) { + ref.Path, err = randomizePath(ref.Path) + if err != nil { + log.Debug().Err(err).Msg("error randomizing path") + w.WriteHeader(http.StatusInternalServerError) + return + } + uReq.Options = &provider.InitiateFileUploadRequest_IfNotExist{ + IfNotExist: true, + } + } + // where to upload the file? uRes, err := client.InitiateFileUpload(ctx, uReq) if err != nil { @@ -283,6 +298,14 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ HandleWebdavError(&log, w, b, err) return } + if httpRes.StatusCode == http.StatusConflict { + w.WriteHeader(http.StatusConflict) + b, err := Marshal(exception{ + message: "The file cannot be uploaded. Try again.", + }) + HandleWebdavError(&log, w, b, err) + return + } log.Error().Err(err).Msg("PUT request to data server failed") w.WriteHeader(httpRes.StatusCode) return @@ -382,6 +405,36 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ w.WriteHeader(http.StatusNoContent) } +func userInCtxHasUploaderRole(ctx context.Context) bool { + u, ok := ctxpkg.ContextGetUser(ctx) + if !ok { + return false + } + return user.HasUploaderRole(u) +} + +func randomizePath(p string) (string, error) { + rand, err := randutil.String(5, "abcdefghijklmnopqrstuvwxyz") + if err != nil { + return "", err + } + base, ext := split(p) + new := base + "_" + rand + if ext != "" { + new += ext + } + return new, nil +} + +func split(p string) (string, string) { + e := path.Ext(p) + if e == "" { + return p, "" + } + i := strings.Index(p, e) + return p[:i], e +} + func (s *svc) handleSpacesPut(w http.ResponseWriter, r *http.Request, spaceID string) { ctx, span := 
rtrace.Provider.Tracer("ocdav").Start(r.Context(), "spaces_put") defer span.End() diff --git a/internal/http/services/owncloud/ocs/conversions/main.go b/internal/http/services/owncloud/ocs/conversions/main.go index 1a12638367..4119db2c2a 100644 --- a/internal/http/services/owncloud/ocs/conversions/main.go +++ b/internal/http/services/owncloud/ocs/conversions/main.go @@ -116,9 +116,9 @@ type ShareData struct { // The type of the object being shared. This can be one of 'file' or 'folder'. ItemType string `json:"item_type" xml:"item_type"` // The RFC2045-compliant mimetype of the file. - MimeType string `json:"mimetype" xml:"mimetype"` + MimeType string `json:"mimetype" xml:"mimetype"` StorageID string `json:"storage_id" xml:"storage_id"` - Storage uint64 `json:"storage" xml:"storage"` + Storage uint64 `json:"storage" xml:"storage"` // The unique node id of the item being shared. ItemSource string `json:"item_source" xml:"item_source"` // The unique node id of the item being shared. For legacy reasons item_source and file_source attributes have the same value. @@ -157,16 +157,16 @@ type ShareData struct { // ShareeData holds share recipient search results. type ShareeData struct { - Exact *ExactMatchesData `json:"exact" xml:"exact"` - Users []*MatchData `json:"users" xml:"users>element"` - Groups []*MatchData `json:"groups" xml:"groups>element"` + Exact *ExactMatchesData `json:"exact" xml:"exact"` + Users []*MatchData `json:"users" xml:"users>element"` + Groups []*MatchData `json:"groups" xml:"groups>element"` Remotes []*MatchData `json:"remotes" xml:"remotes>element"` } // ExactMatchesData hold exact matches. 
type ExactMatchesData struct { - Users []*MatchData `json:"users" xml:"users>element"` - Groups []*MatchData `json:"groups" xml:"groups>element"` + Users []*MatchData `json:"users" xml:"users>element"` + Groups []*MatchData `json:"groups" xml:"groups>element"` Remotes []*MatchData `json:"remotes" xml:"remotes>element"` } @@ -178,9 +178,9 @@ type MatchData struct { // MatchValueData holds the type and actual value. type MatchValueData struct { - ShareType int `json:"shareType" xml:"shareType"` - ShareWith string `json:"shareWith" xml:"shareWith"` - ShareWithProvider string `json:"shareWithProvider" xml:"shareWithProvider"` + ShareType int `json:"shareType" xml:"shareType"` + ShareWith string `json:"shareWith" xml:"shareWith"` + ShareWithProvider string `json:"shareWithProvider" xml:"shareWithProvider"` ShareWithAdditionalInfo string `json:"shareWithAdditionalInfo" xml:"shareWithAdditionalInfo"` } diff --git a/internal/http/services/owncloud/ocs/conversions/permissions.go b/internal/http/services/owncloud/ocs/conversions/permissions.go index b3aff4d850..262af83c7f 100644 --- a/internal/http/services/owncloud/ocs/conversions/permissions.go +++ b/internal/http/services/owncloud/ocs/conversions/permissions.go @@ -38,8 +38,17 @@ const ( PermissionDelete // PermissionShare grants share permissions on a resource. PermissionShare + // PermissionDeny grants permissions to deny access on a resource + // The recipient of the resource will then have PermissionNone. + PermissionDeny + // PermissionNone grants no permissions on a resource. + PermissionNone + // PermissionMax is to be used within value range checks. + PermissionMax Permissions = (1 << (iota - 1)) - 1 // PermissionAll grants all permissions on a resource. - PermissionAll Permissions = (1 << (iota - 1)) - 1 + PermissionAll = PermissionMax - PermissionNone + // PermissionMin is to be used within value range checks. 
+ PermissionMin = PermissionRead ) var ( @@ -51,8 +60,8 @@ var ( // The value must be in the valid range. func NewPermissions(val int) (Permissions, error) { if val == int(PermissionInvalid) { - return PermissionInvalid, fmt.Errorf("permissions %d out of range %d - %d", val, PermissionRead, PermissionAll) - } else if val < int(PermissionInvalid) || int(PermissionAll) < val { + return PermissionInvalid, fmt.Errorf("permissions %d out of range %d - %d", val, PermissionMin, PermissionMax) + } else if val < int(PermissionInvalid) || int(PermissionMax) < val { return PermissionInvalid, ErrPermissionNotInRange } return Permissions(val), nil diff --git a/internal/http/services/owncloud/ocs/conversions/permissions_test.go b/internal/http/services/owncloud/ocs/conversions/permissions_test.go index 1586ba2448..cdc88fdcad 100644 --- a/internal/http/services/owncloud/ocs/conversions/permissions_test.go +++ b/internal/http/services/owncloud/ocs/conversions/permissions_test.go @@ -23,7 +23,7 @@ import ( ) func TestNewPermissions(t *testing.T) { - for val := int(PermissionRead); val <= int(PermissionAll); val++ { + for val := int(PermissionMin); val <= int(PermissionMax); val++ { _, err := NewPermissions(val) if err != nil { t.Errorf("value %d should be a valid permissions", val) @@ -35,7 +35,7 @@ func TestNewPermissionsWithInvalidValueShouldFail(t *testing.T) { vals := []int{ int(PermissionInvalid), -1, - int(PermissionAll) + 1, + int(PermissionMax) + 1, } for _, v := range vals { _, err := NewPermissions(v) @@ -52,10 +52,10 @@ func TestContainPermissionAll(t *testing.T) { 4: PermissionCreate, 8: PermissionDelete, 16: PermissionShare, - 31: PermissionAll, + 63: PermissionAll, } - p, _ := NewPermissions(31) // all permissions should contain all other permissions + p, _ := NewPermissions(63) // all permissions should contain all other permissions for _, value := range table { if !p.Contain(value) { t.Errorf("permissions %d should contain %d", p, value) @@ -68,7 +68,7 @@ func 
TestContainPermissionRead(t *testing.T) { 4: PermissionCreate, 8: PermissionDelete, 16: PermissionShare, - 31: PermissionAll, + 63: PermissionAll, } p, _ := NewPermissions(1) // read permission should not contain any other permissions @@ -145,10 +145,11 @@ func TestPermissions2Role(t *testing.T) { table := map[Permissions]string{ PermissionRead: RoleViewer, PermissionRead | PermissionWrite | PermissionCreate | PermissionDelete: RoleEditor, - PermissionAll: RoleCoowner, + PermissionAll: RoleCollaborator, PermissionWrite: RoleLegacy, PermissionShare: RoleLegacy, PermissionWrite | PermissionShare: RoleLegacy, + PermissionNone: RoleDenied, } for permissions, role := range table { diff --git a/internal/http/services/owncloud/ocs/conversions/role.go b/internal/http/services/owncloud/ocs/conversions/role.go index 62c912d602..2d0587b395 100644 --- a/internal/http/services/owncloud/ocs/conversions/role.go +++ b/internal/http/services/owncloud/ocs/conversions/role.go @@ -24,6 +24,7 @@ import ( "strings" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/storage/utils/grants" ) // Role is a set of ocs permissions and cs3 resource permissions under a common name. @@ -36,21 +37,24 @@ type Role struct { const ( // RoleViewer grants non-editor role on a resource. RoleViewer = "viewer" + // RoleReader grants non-editor role on a resource. + RoleReader = "reader" // RoleEditor grants editor permission on a resource, including folders. RoleEditor = "editor" // RoleFileEditor grants editor permission on a single file. RoleFileEditor = "file-editor" - // RoleCoowner grants co-owner permissions on a resource. - RoleCoowner = "coowner" + // RoleCollaborator grants editor+resharing permissions on a resource. + RoleCollaborator = "coowner" // RoleUploader grants uploader permission to upload onto a resource. RoleUploader = "uploader" // RoleManager grants manager permissions on a resource. Semantically equivalent to co-owner. 
RoleManager = "manager" - // RoleUnknown is used for unknown roles. RoleUnknown = "unknown" // RoleLegacy provides backwards compatibility. RoleLegacy = "legacy" + // RoleDenied grants no permission at all on a resource. + RoleDenied = "denied" ) // CS3ResourcePermissions for the role. @@ -91,7 +95,8 @@ func (r *Role) OCSPermissions() Permissions { // CK = create (folders only) // S = Shared // R = Shareable -// M = Mounted. +// M = Mounted +// Z = Deniable (NEW). func (r *Role) WebDAVPermissions(isDir, isShared, isMountpoint, isPublic bool) string { var b strings.Builder if !isPublic && isShared { @@ -115,20 +120,29 @@ func (r *Role) WebDAVPermissions(isDir, isShared, isMountpoint, isPublic bool) s if isDir && r.ocsPermissions.Contain(PermissionCreate) { fmt.Fprintf(&b, "CK") } + + if r.ocsPermissions.Contain(PermissionDeny) && !isPublic { + fmt.Fprintf(&b, "Z") + } + return b.String() } // RoleFromName creates a role from the name. func RoleFromName(name string) *Role { switch name { + case RoleDenied: + return NewDeniedRole() case RoleViewer: return NewViewerRole() + case RoleReader: + return NewReaderRole() case RoleEditor: return NewEditorRole() case RoleFileEditor: return NewFileEditorRole() - case RoleCoowner: - return NewCoownerRole() + case RoleCollaborator: + return NewCollaboratorRole() case RoleUploader: return NewUploaderRole() case RoleManager: @@ -147,6 +161,15 @@ func NewUnknownRole() *Role { } } +// NewDeniedRole creates a fully denied role. +func NewDeniedRole() *Role { + return &Role{ + Name: RoleDenied, + cS3ResourcePermissions: &provider.ResourcePermissions{}, + ocsPermissions: PermissionNone, + } +} + // NewViewerRole creates a viewer role. func NewViewerRole() *Role { return &Role{ @@ -165,6 +188,25 @@ func NewViewerRole() *Role { } } +// NewReaderRole creates a reader role. 
+func NewReaderRole() *Role { + return &Role{ + Name: RoleViewer, + cS3ResourcePermissions: &provider.ResourcePermissions{ + // read + GetPath: true, + GetQuota: true, + InitiateFileDownload: true, + ListGrants: true, + ListContainer: true, + ListFileVersions: true, + ListRecycle: true, + Stat: true, + }, + ocsPermissions: PermissionRead, + } +} + // NewEditorRole creates an editor role. func NewEditorRole() *Role { return &Role{ @@ -211,10 +253,10 @@ func NewFileEditorRole() *Role { } } -// NewCoownerRole creates a coowner role. -func NewCoownerRole() *Role { +// NewCollaboratorRole creates a collaborator role. +func NewCollaboratorRole() *Role { return &Role{ - Name: RoleCoowner, + Name: RoleCollaborator, cS3ResourcePermissions: &provider.ResourcePermissions{ GetPath: true, GetQuota: true, @@ -286,10 +328,13 @@ func NewManagerRole() *Role { // RoleFromOCSPermissions tries to map ocs permissions to a role. func RoleFromOCSPermissions(p Permissions) *Role { + if p.Contain(PermissionNone) { + return NewDeniedRole() + } if p.Contain(PermissionRead) { if p.Contain(PermissionWrite) && p.Contain(PermissionCreate) && p.Contain(PermissionDelete) { if p.Contain(PermissionShare) { - return NewCoownerRole() + return NewCollaboratorRole() } return NewEditorRole() } @@ -347,6 +392,9 @@ func NewLegacyRoleFromOCSPermissions(p Permissions) *Role { r.cS3ResourcePermissions.RemoveGrant = true // TODO when are you able to unshare / delete r.cS3ResourcePermissions.UpdateGrant = true } + if p.Contain(PermissionDeny) { + r.cS3ResourcePermissions.DenyGrant = true + } return r } @@ -360,6 +408,11 @@ func RoleFromResourcePermissions(rp *provider.ResourcePermissions) *Role { if rp == nil { return r } + if grants.PermissionsEqual(rp, &provider.ResourcePermissions{}) { + r.ocsPermissions = PermissionNone + r.Name = RoleDenied + return r + } if rp.ListContainer && rp.ListGrants && rp.ListFileVersions && @@ -390,13 +443,17 @@ func RoleFromResourcePermissions(rp *provider.ResourcePermissions) 
*Role { rp.UpdateGrant { r.ocsPermissions |= PermissionShare } + if rp.DenyGrant { + r.ocsPermissions |= PermissionDeny + } + if r.ocsPermissions.Contain(PermissionRead) { if r.ocsPermissions.Contain(PermissionWrite) && r.ocsPermissions.Contain(PermissionCreate) && r.ocsPermissions.Contain(PermissionDelete) { r.Name = RoleEditor if r.ocsPermissions.Contain(PermissionShare) { - r.Name = RoleCoowner + r.Name = RoleCollaborator } - return r // editor or coowner + return r // editor or collaborator } if r.ocsPermissions == PermissionRead { r.Name = RoleViewer diff --git a/internal/http/services/owncloud/ocs/data/capabilities.go b/internal/http/services/owncloud/ocs/data/capabilities.go index 3eccd7889b..ccda4b98c7 100644 --- a/internal/http/services/owncloud/ocs/data/capabilities.go +++ b/internal/http/services/owncloud/ocs/data/capabilities.go @@ -45,150 +45,152 @@ func (c ocsBool) MarshalXML(e *xml.Encoder, start xml.StartElement) error { // CapabilitiesData TODO document. type CapabilitiesData struct { Capabilities *Capabilities `json:"capabilities" xml:"capabilities"` - Version *Version `json:"version" xml:"version"` + Version *Version `json:"version" xml:"version"` } // Capabilities groups several capability aspects. 
type Capabilities struct { - Core *CapabilitiesCore `json:"core" xml:"core"` - Checksums *CapabilitiesChecksums `json:"checksums" xml:"checksums"` - Files *CapabilitiesFiles `json:"files" xml:"files" mapstructure:"files"` - Dav *CapabilitiesDav `json:"dav" xml:"dav"` - FilesSharing *CapabilitiesFilesSharing `json:"files_sharing" xml:"files_sharing" mapstructure:"files_sharing"` - Spaces *Spaces `json:"spaces,omitempty" xml:"spaces,omitempty" mapstructure:"spaces"` + Core *CapabilitiesCore `json:"core" xml:"core"` + Checksums *CapabilitiesChecksums `json:"checksums" xml:"checksums"` + Files *CapabilitiesFiles `json:"files" mapstructure:"files" xml:"files"` + Dav *CapabilitiesDav `json:"dav" xml:"dav"` + FilesSharing *CapabilitiesFilesSharing `json:"files_sharing" mapstructure:"files_sharing" xml:"files_sharing"` + Spaces *Spaces `json:"spaces,omitempty" mapstructure:"spaces" xml:"spaces,omitempty"` Notifications *CapabilitiesNotifications `json:"notifications,omitempty" xml:"notifications,omitempty"` - GroupBased *CapabilitiesGroupBased `json:"group_based" xml:"group_based" mapstructure:"group_based"` + GroupBased *CapabilitiesGroupBased `json:"group_based" mapstructure:"group_based" xml:"group_based"` } // Spaces lets a service configure its advertised options related to Storage Spaces. type Spaces struct { - Version string `json:"version" xml:"version" mapstructure:"version"` - Enabled bool `json:"enabled" xml:"enabled" mapstructure:"enabled"` + Version string `json:"version" mapstructure:"version" xml:"version"` + Enabled ocsBool `json:"enabled" mapstructure:"enabled" xml:"enabled"` + Projects ocsBool `json:"projects" mapstructure:"projects" xml:"projects"` } // CapabilitiesCore holds webdav config. 
type CapabilitiesCore struct { - PollInterval int `json:"pollinterval" xml:"pollinterval" mapstructure:"poll_interval"` - WebdavRoot string `json:"webdav-root,omitempty" xml:"webdav-root,omitempty" mapstructure:"webdav_root"` - Status *Status `json:"status" xml:"status"` - SupportURLSigning ocsBool `json:"support-url-signing" xml:"support-url-signing" mapstructure:"support_url_signing"` + PollInterval int `json:"pollinterval" mapstructure:"poll_interval" xml:"pollinterval"` + WebdavRoot string `json:"webdav-root,omitempty" mapstructure:"webdav_root" xml:"webdav-root,omitempty"` + Status *Status `json:"status" xml:"status"` + SupportURLSigning ocsBool `json:"support-url-signing" mapstructure:"support_url_signing" xml:"support-url-signing"` } // Status holds basic status information. type Status struct { - Installed ocsBool `json:"installed" xml:"installed"` - Maintenance ocsBool `json:"maintenance" xml:"maintenance"` - NeedsDBUpgrade ocsBool `json:"needsDbUpgrade" xml:"needsDbUpgrade"` - Version string `json:"version" xml:"version"` - VersionString string `json:"versionstring" xml:"versionstring"` - Edition string `json:"edition" xml:"edition"` - ProductName string `json:"productname" xml:"productname"` - Product string `json:"product" xml:"product"` + Installed ocsBool `json:"installed" xml:"installed"` + Maintenance ocsBool `json:"maintenance" xml:"maintenance"` + NeedsDBUpgrade ocsBool `json:"needsDbUpgrade" xml:"needsDbUpgrade"` + Version string `json:"version" xml:"version"` + VersionString string `json:"versionstring" xml:"versionstring"` + Edition string `json:"edition" xml:"edition"` + ProductName string `json:"productname" xml:"productname"` + Product string `json:"product" xml:"product"` Hostname string `json:"hostname,omitempty" xml:"hostname,omitempty"` } // CapabilitiesChecksums holds available hashes. 
type CapabilitiesChecksums struct { - SupportedTypes []string `json:"supportedTypes" xml:"supportedTypes>element" mapstructure:"supported_types"` - PreferredUploadType string `json:"preferredUploadType" xml:"preferredUploadType" mapstructure:"preferred_upload_type"` + SupportedTypes []string `json:"supportedTypes" mapstructure:"supported_types" xml:"supportedTypes>element"` + PreferredUploadType string `json:"preferredUploadType" mapstructure:"preferred_upload_type" xml:"preferredUploadType"` } // CapabilitiesFilesTusSupport TODO this must be a summary of storages. type CapabilitiesFilesTusSupport struct { - Version string `json:"version" xml:"version"` - Resumable string `json:"resumable" xml:"resumable"` - Extension string `json:"extension" xml:"extension"` - MaxChunkSize int `json:"max_chunk_size" xml:"max_chunk_size" mapstructure:"max_chunk_size"` - HTTPMethodOverride string `json:"http_method_override" xml:"http_method_override" mapstructure:"http_method_override"` + Version string `json:"version" xml:"version"` + Resumable string `json:"resumable" xml:"resumable"` + Extension string `json:"extension" xml:"extension"` + MaxChunkSize int `json:"max_chunk_size" mapstructure:"max_chunk_size" xml:"max_chunk_size"` + HTTPMethodOverride string `json:"http_method_override" mapstructure:"http_method_override" xml:"http_method_override"` } // CapabilitiesArchiver holds available archivers information. 
type CapabilitiesArchiver struct { - Enabled bool `json:"enabled" xml:"enabled" mapstructure:"enabled"` - Version string `json:"version" xml:"version" mapstructure:"version"` - Formats []string `json:"formats" xml:"formats" mapstructure:"formats"` - ArchiverURL string `json:"archiver_url" xml:"archiver_url" mapstructure:"archiver_url"` - MaxNumFiles string `json:"max_num_files" xml:"max_num_files" mapstructure:"max_num_files"` - MaxSize string `json:"max_size" xml:"max_size" mapstructure:"max_size"` + Enabled bool `json:"enabled" mapstructure:"enabled" xml:"enabled"` + Version string `json:"version" mapstructure:"version" xml:"version"` + Formats []string `json:"formats" mapstructure:"formats" xml:"formats"` + ArchiverURL string `json:"archiver_url" mapstructure:"archiver_url" xml:"archiver_url"` + MaxNumFiles string `json:"max_num_files" mapstructure:"max_num_files" xml:"max_num_files"` + MaxSize string `json:"max_size" mapstructure:"max_size" xml:"max_size"` } // CapabilitiesAppProvider holds available app provider information. type CapabilitiesAppProvider struct { - Enabled bool `json:"enabled" xml:"enabled" mapstructure:"enabled"` - Version string `json:"version" xml:"version" mapstructure:"version"` - AppsURL string `json:"apps_url" xml:"apps_url" mapstructure:"apps_url"` - OpenURL string `json:"open_url" xml:"open_url" mapstructure:"open_url"` - NewURL string `json:"new_url" xml:"new_url" mapstructure:"new_url"` + Enabled bool `json:"enabled" mapstructure:"enabled" xml:"enabled"` + Version string `json:"version" mapstructure:"version" xml:"version"` + AppsURL string `json:"apps_url" mapstructure:"apps_url" xml:"apps_url"` + OpenURL string `json:"open_url" mapstructure:"open_url" xml:"open_url"` + NewURL string `json:"new_url" mapstructure:"new_url" xml:"new_url"` } // CapabilitiesFiles TODO this is storage specific, not global. What effect do these options have on the clients? 
type CapabilitiesFiles struct { - PrivateLinks ocsBool `json:"privateLinks" xml:"privateLinks" mapstructure:"private_links"` - BigFileChunking ocsBool `json:"bigfilechunking" xml:"bigfilechunking"` - Undelete ocsBool `json:"undelete" xml:"undelete"` - Versioning ocsBool `json:"versioning" xml:"versioning"` - Favorites ocsBool `json:"favorites" xml:"favorites"` + PrivateLinks ocsBool `json:"privateLinks" mapstructure:"private_links" xml:"privateLinks"` + BigFileChunking ocsBool `json:"bigfilechunking" xml:"bigfilechunking"` + Undelete ocsBool `json:"undelete" xml:"undelete"` + Versioning ocsBool `json:"versioning" xml:"versioning"` + Favorites ocsBool `json:"favorites" xml:"favorites"` PermanentDeletion ocsBool `json:"permanent_deletion" xml:"permanent_deletion"` - BlacklistedFiles []string `json:"blacklisted_files" xml:"blacklisted_files>element" mapstructure:"blacklisted_files"` - TusSupport *CapabilitiesFilesTusSupport `json:"tus_support" xml:"tus_support" mapstructure:"tus_support"` - Archivers []*CapabilitiesArchiver `json:"archivers" xml:"archivers" mapstructure:"archivers"` - AppProviders []*CapabilitiesAppProvider `json:"app_providers" xml:"app_providers" mapstructure:"app_providers"` + BlacklistedFiles []string `json:"blacklisted_files" mapstructure:"blacklisted_files" xml:"blacklisted_files>element"` + TusSupport *CapabilitiesFilesTusSupport `json:"tus_support" mapstructure:"tus_support" xml:"tus_support"` + Archivers []*CapabilitiesArchiver `json:"archivers" mapstructure:"archivers" xml:"archivers"` + AppProviders []*CapabilitiesAppProvider `json:"app_providers" mapstructure:"app_providers" xml:"app_providers"` } // CapabilitiesDav holds dav endpoint config. 
type CapabilitiesDav struct { - Chunking string `json:"chunking" xml:"chunking"` - Trashbin string `json:"trashbin" xml:"trashbin"` - Reports []string `json:"reports" xml:"reports>element" mapstructure:"reports"` + Chunking string `json:"chunking" xml:"chunking"` + Trashbin string `json:"trashbin" xml:"trashbin"` + Reports []string `json:"reports" mapstructure:"reports" xml:"reports>element"` ChunkingParallelUploadDisabled bool `json:"chunkingParallelUploadDisabled" xml:"chunkingParallelUploadDisabled"` } // CapabilitiesFilesSharing TODO document. type CapabilitiesFilesSharing struct { - APIEnabled ocsBool `json:"api_enabled" xml:"api_enabled" mapstructure:"api_enabled"` - Resharing ocsBool `json:"resharing" xml:"resharing" mapstructure:"resharing"` - ResharingDefault ocsBool `json:"resharing_default" xml:"resharing_default" mapstructure:"resharing_default"` - DenyAccess ocsBool `json:"deny_access" xml:"deny_access" mapstructure:"deny_access"` - GroupSharing ocsBool `json:"group_sharing" xml:"group_sharing" mapstructure:"group_sharing"` - AutoAcceptShare ocsBool `json:"auto_accept_share" xml:"auto_accept_share" mapstructure:"auto_accept_share"` - ShareWithGroupMembersOnly ocsBool `json:"share_with_group_members_only" xml:"share_with_group_members_only" mapstructure:"share_with_group_members_only"` - ShareWithMembershipGroupsOnly ocsBool `json:"share_with_membership_groups_only" xml:"share_with_membership_groups_only" mapstructure:"share_with_membership_groups_only"` - CanRename ocsBool `json:"can_rename" xml:"can_rename" mapstructure:"can_rename"` - AllowCustom ocsBool `json:"allow_custom" xml:"allow_custom" mapstructure:"allow_custom"` - SearchMinLength int `json:"search_min_length" xml:"search_min_length" mapstructure:"search_min_length"` - DefaultPermissions int `json:"default_permissions" xml:"default_permissions" mapstructure:"default_permissions"` - UserEnumeration *CapabilitiesFilesSharingUserEnumeration `json:"user_enumeration" xml:"user_enumeration" 
mapstructure:"user_enumeration"` - Federation *CapabilitiesFilesSharingFederation `json:"federation" xml:"federation"` - Public *CapabilitiesFilesSharingPublic `json:"public" xml:"public"` - User *CapabilitiesFilesSharingUser `json:"user" xml:"user"` + APIEnabled ocsBool `json:"api_enabled" mapstructure:"api_enabled" xml:"api_enabled"` + Resharing ocsBool `json:"resharing" mapstructure:"resharing" xml:"resharing"` + ResharingDefault ocsBool `json:"resharing_default" mapstructure:"resharing_default" xml:"resharing_default"` + DenyAccess ocsBool `json:"deny_access" mapstructure:"deny_access" xml:"deny_access"` + GroupSharing ocsBool `json:"group_sharing" mapstructure:"group_sharing" xml:"group_sharing"` + AutoAcceptShare ocsBool `json:"auto_accept_share" mapstructure:"auto_accept_share" xml:"auto_accept_share"` + ShareWithGroupMembersOnly ocsBool `json:"share_with_group_members_only" mapstructure:"share_with_group_members_only" xml:"share_with_group_members_only"` + ShareWithMembershipGroupsOnly ocsBool `json:"share_with_membership_groups_only" mapstructure:"share_with_membership_groups_only" xml:"share_with_membership_groups_only"` + CanRename ocsBool `json:"can_rename" mapstructure:"can_rename" xml:"can_rename"` + AllowCustom ocsBool `json:"allow_custom" mapstructure:"allow_custom" xml:"allow_custom"` + SearchMinLength int `json:"search_min_length" mapstructure:"search_min_length" xml:"search_min_length"` + DefaultPermissions int `json:"default_permissions" mapstructure:"default_permissions" xml:"default_permissions"` + UserEnumeration *CapabilitiesFilesSharingUserEnumeration `json:"user_enumeration" mapstructure:"user_enumeration" xml:"user_enumeration"` + Federation *CapabilitiesFilesSharingFederation `json:"federation" xml:"federation"` + Public *CapabilitiesFilesSharingPublic `json:"public" xml:"public"` + User *CapabilitiesFilesSharingUser `json:"user" xml:"user"` } // CapabilitiesFilesSharingPublic TODO document. 
type CapabilitiesFilesSharingPublic struct { - Enabled ocsBool `json:"enabled" xml:"enabled"` - SendMail ocsBool `json:"send_mail" xml:"send_mail" mapstructure:"send_mail"` - SocialShare ocsBool `json:"social_share" xml:"social_share" mapstructure:"social_share"` - Upload ocsBool `json:"upload" xml:"upload"` - Multiple ocsBool `json:"multiple" xml:"multiple"` - SupportsUploadOnly ocsBool `json:"supports_upload_only" xml:"supports_upload_only" mapstructure:"supports_upload_only"` - CanEdit ocsBool `json:"can_edit" xml:"can_edit" mapstructure:"can_edit"` - Password *CapabilitiesFilesSharingPublicPassword `json:"password" xml:"password"` - ExpireDate *CapabilitiesFilesSharingPublicExpireDate `json:"expire_date" xml:"expire_date" mapstructure:"expire_date"` + Enabled ocsBool `json:"enabled" xml:"enabled"` + SendMail ocsBool `json:"send_mail" mapstructure:"send_mail" xml:"send_mail"` + SocialShare ocsBool `json:"social_share" mapstructure:"social_share" xml:"social_share"` + Upload ocsBool `json:"upload" xml:"upload"` + Multiple ocsBool `json:"multiple" xml:"multiple"` + SupportsUploadOnly ocsBool `json:"supports_upload_only" mapstructure:"supports_upload_only" xml:"supports_upload_only"` + CanEdit ocsBool `json:"can_edit" mapstructure:"can_edit" xml:"can_edit"` + CanContribute ocsBool `json:"can_contribute" xml:"can_contribute"` + Password *CapabilitiesFilesSharingPublicPassword `json:"password" xml:"password"` + ExpireDate *CapabilitiesFilesSharingPublicExpireDate `json:"expire_date" mapstructure:"expire_date" xml:"expire_date"` } // CapabilitiesFilesSharingPublicPassword TODO document. 
type CapabilitiesFilesSharingPublicPassword struct { - EnforcedFor *CapabilitiesFilesSharingPublicPasswordEnforcedFor `json:"enforced_for" xml:"enforced_for" mapstructure:"enforced_for"` - Enforced ocsBool `json:"enforced" xml:"enforced"` + EnforcedFor *CapabilitiesFilesSharingPublicPasswordEnforcedFor `json:"enforced_for" mapstructure:"enforced_for" xml:"enforced_for"` + Enforced ocsBool `json:"enforced" xml:"enforced"` } // CapabilitiesFilesSharingPublicPasswordEnforcedFor TODO document. type CapabilitiesFilesSharingPublicPasswordEnforcedFor struct { - ReadOnly ocsBool `json:"read_only" xml:"read_only,omitempty" mapstructure:"read_only"` - ReadWrite ocsBool `json:"read_write" xml:"read_write,omitempty" mapstructure:"read_write"` - UploadOnly ocsBool `json:"upload_only" xml:"upload_only,omitempty" mapstructure:"upload_only"` + ReadOnly ocsBool `json:"read_only" mapstructure:"read_only" xml:"read_only,omitempty"` + ReadWrite ocsBool `json:"read_write" mapstructure:"read_write" xml:"read_write,omitempty"` + UploadOnly ocsBool `json:"upload_only" mapstructure:"upload_only" xml:"upload_only,omitempty"` } // CapabilitiesFilesSharingPublicExpireDate TODO document. @@ -198,21 +200,21 @@ type CapabilitiesFilesSharingPublicExpireDate struct { // CapabilitiesFilesSharingUser TODO document. type CapabilitiesFilesSharingUser struct { - SendMail ocsBool `json:"send_mail" xml:"send_mail" mapstructure:"send_mail"` - ProfilePicture ocsBool `json:"profile_picture" xml:"profile_picture" mapstructure:"profile_picture"` - Settings []*CapabilitiesUserSettings `json:"settings" xml:"settings" mapstructure:"settings"` + SendMail ocsBool `json:"send_mail" mapstructure:"send_mail" xml:"send_mail"` + ProfilePicture ocsBool `json:"profile_picture" mapstructure:"profile_picture" xml:"profile_picture"` + Settings []*CapabilitiesUserSettings `json:"settings" mapstructure:"settings" xml:"settings"` } // CapabilitiesUserSettings holds available user settings service information. 
type CapabilitiesUserSettings struct { - Enabled bool `json:"enabled" xml:"enabled" mapstructure:"enabled"` - Version string `json:"version" xml:"version" mapstructure:"version"` + Enabled bool `json:"enabled" mapstructure:"enabled" xml:"enabled"` + Version string `json:"version" mapstructure:"version" xml:"version"` } // CapabilitiesFilesSharingUserEnumeration TODO document. type CapabilitiesFilesSharingUserEnumeration struct { - Enabled ocsBool `json:"enabled" xml:"enabled"` - GroupMembersOnly ocsBool `json:"group_members_only" xml:"group_members_only" mapstructure:"group_members_only"` + Enabled ocsBool `json:"enabled" xml:"enabled"` + GroupMembersOnly ocsBool `json:"group_members_only" mapstructure:"group_members_only" xml:"group_members_only"` } // CapabilitiesFilesSharingFederation holds outgoing and incoming flags. @@ -223,20 +225,20 @@ type CapabilitiesFilesSharingFederation struct { // CapabilitiesNotifications holds a list of notification endpoints. type CapabilitiesNotifications struct { - Endpoints []string `json:"ocs-endpoints,omitempty" xml:"ocs-endpoints>element,omitempty" mapstructure:"endpoints"` + Endpoints []string `json:"ocs-endpoints,omitempty" mapstructure:"endpoints" xml:"ocs-endpoints>element,omitempty"` } // CapabilitiesGroupBased holds capabilities based on the groups a user belongs to. type CapabilitiesGroupBased struct { - Capabilities []string `json:"capabilities" xml:"capabilities" mapstructure:"capabilities"` + Capabilities []string `json:"capabilities" mapstructure:"capabilities" xml:"capabilities"` } // Version holds version information. 
type Version struct { - Major int `json:"major" xml:"major"` - Minor int `json:"minor" xml:"minor"` - Micro int `json:"micro" xml:"micro"` // = patch level - String string `json:"string" xml:"string"` + Major int `json:"major" xml:"major"` + Minor int `json:"minor" xml:"minor"` + Micro int `json:"micro" xml:"micro"` // = patch level + String string `json:"string" xml:"string"` Edition string `json:"edition" xml:"edition"` Product string `json:"product" xml:"product"` } diff --git a/internal/http/services/owncloud/ocs/data/config.go b/internal/http/services/owncloud/ocs/data/config.go index a018d7927d..eb14d70f0c 100644 --- a/internal/http/services/owncloud/ocs/data/config.go +++ b/internal/http/services/owncloud/ocs/data/config.go @@ -22,7 +22,7 @@ package data type ConfigData struct { Version string `json:"version" xml:"version"` Website string `json:"website" xml:"website"` - Host string `json:"host" xml:"host"` + Host string `json:"host" xml:"host"` Contact string `json:"contact" xml:"contact"` - SSL string `json:"ssl" xml:"ssl"` + SSL string `json:"ssl" xml:"ssl"` } diff --git a/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/public.go b/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/public.go index 312b55f3fb..feb340594a 100644 --- a/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/public.go +++ b/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/public.go @@ -19,11 +19,14 @@ package shares import ( + "context" "encoding/json" "fmt" "net/http" "strconv" + "sync" + gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" @@ -180,58 +183,75 @@ func (h *Handler) listPublicShares(r *http.Request, filters []*link.ListPublicSh log := appctx.GetLogger(ctx) ocsDataPayload := make([]*conversions.ShareData, 0) - // TODO(refs) 
why is this guard needed? Are we moving towards a gateway only for service discovery? without a gateway this is dead code. - if h.gatewayAddr != "" { - client, err := pool.GetGatewayServiceClient(pool.Endpoint(h.gatewayAddr)) - if err != nil { - return ocsDataPayload, nil, err - } + client, err := pool.GetGatewayServiceClient(pool.Endpoint(h.gatewayAddr)) + if err != nil { + return ocsDataPayload, nil, err + } - req := link.ListPublicSharesRequest{ - Filters: filters, - } + req := link.ListPublicSharesRequest{ + Filters: filters, + } - res, err := client.ListPublicShares(ctx, &req) - if err != nil { - return ocsDataPayload, nil, err - } - if res.Status.Code != rpc.Code_CODE_OK { - return ocsDataPayload, res.Status, nil - } + res, err := client.ListPublicShares(ctx, &req) + if err != nil { + return ocsDataPayload, nil, err + } + if res.Status.Code != rpc.Code_CODE_OK { + return ocsDataPayload, res.Status, nil + } - for _, share := range res.GetShare() { - info, status, err := h.getResourceInfoByID(ctx, client, share.ResourceId) - if err != nil || status.Code != rpc.Code_CODE_OK { - log.Debug().Interface("share", share).Interface("status", status).Err(err).Msg("could not stat share, skipping") - continue - } + var wg sync.WaitGroup + workers := 50 + input := make(chan *link.PublicShare, len(res.Share)) + output := make(chan *conversions.ShareData, len(res.Share)) - sData := conversions.PublicShare2ShareData(share, r, h.publicURL) + for i := 0; i < workers; i++ { + wg.Add(1) + go func(ctx context.Context, client gateway.GatewayAPIClient, input chan *link.PublicShare, output chan *conversions.ShareData, wg *sync.WaitGroup) { + defer wg.Done() - sData.Name = share.DisplayName + for share := range input { + info, status, err := h.getResourceInfoByID(ctx, client, share.ResourceId) + if err != nil || status.Code != rpc.Code_CODE_OK { + log.Debug().Interface("share", share.Id).Interface("status", status).Err(err).Msg("could not stat share, skipping") + continue + } - if err 
:= h.addFileInfo(ctx, sData, info); err != nil { - log.Debug().Interface("share", share).Interface("info", info).Err(err).Msg("could not add file info, skipping") - continue - } - h.mapUserIds(ctx, client, sData) + sData := conversions.PublicShare2ShareData(share, r, h.publicURL) - log.Debug().Interface("share", share).Interface("info", info).Interface("shareData", share).Msg("mapped") + sData.Name = share.DisplayName - ocsDataPayload = append(ocsDataPayload, sData) - } + if err := h.addFileInfo(ctx, sData, info); err != nil { + log.Debug().Interface("share", share.Id).Err(err).Msg("could not add file info, skipping") + continue + } + h.mapUserIds(ctx, client, sData) + + log.Debug().Interface("share", share.Id).Msg("mapped") + output <- sData + } + }(ctx, client, input, output, &wg) + } + + for _, share := range res.Share { + input <- share + } + close(input) + wg.Wait() + close(output) - return ocsDataPayload, nil, nil + for s := range output { + ocsDataPayload = append(ocsDataPayload, s) } - return ocsDataPayload, nil, errors.New("bad request") + return ocsDataPayload, nil, nil } func (h *Handler) isPublicShare(r *http.Request, oid string) bool { logger := appctx.GetLogger(r.Context()) client, err := pool.GetGatewayServiceClient(pool.Endpoint(h.gatewayAddr)) if err != nil { - logger.Err(err) + logger.Err(err).Send() } psRes, err := client.GetPublicShare(r.Context(), &link.GetPublicShareRequest{ @@ -244,7 +264,7 @@ func (h *Handler) isPublicShare(r *http.Request, oid string) bool { }, }) if err != nil { - logger.Err(err) + logger.Err(err).Send() return false } diff --git a/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/shares.go b/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/shares.go index 853b7771d2..44a8309dd2 100644 --- a/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/shares.go +++ b/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/shares.go @@ -29,6 +29,7 @@ import ( "path/filepath" 
"strconv" "strings" + "sync" "text/template" "time" @@ -210,23 +211,23 @@ func (h *Handler) CreateShare(w http.ResponseWriter, r *http.Request) { switch shareType { case int(conversions.ShareTypeUser): - // user collaborations default to coowner - if role, val, err := h.extractPermissions(w, r, statRes.Info, conversions.NewCoownerRole()); err == nil { + // user collaborations default to collab + if role, val, err := h.extractPermissions(w, r, statRes.Info, conversions.NewCollaboratorRole()); err == nil { h.createUserShare(w, r, statRes.Info, role, val) } case int(conversions.ShareTypeGroup): - // group collaborations default to coowner - if role, val, err := h.extractPermissions(w, r, statRes.Info, conversions.NewCoownerRole()); err == nil { + // group collaborations default to collab + if role, val, err := h.extractPermissions(w, r, statRes.Info, conversions.NewCollaboratorRole()); err == nil { h.createGroupShare(w, r, statRes.Info, role, val) } case int(conversions.ShareTypePublicLink): // public links default to read only - if _, _, err := h.extractPermissions(w, r, statRes.Info, conversions.NewViewerRole()); err == nil { + if _, _, err := h.extractPermissions(w, r, statRes.Info, conversions.NewReaderRole()); err == nil { h.createPublicLinkShare(w, r, statRes.Info) } case int(conversions.ShareTypeFederatedCloudShare): // federated shares default to read only - if role, val, err := h.extractPermissions(w, r, statRes.Info, conversions.NewViewerRole()); err == nil { + if role, val, err := h.extractPermissions(w, r, statRes.Info, conversions.NewReaderRole()); err == nil { h.createFederatedCloudShare(w, r, statRes.Info, role, val) } case int(conversions.ShareTypeSpaceMembership): @@ -405,10 +406,17 @@ func (h *Handler) extractPermissions(w http.ResponseWriter, r *http.Request, ri } } - existingPermissions := conversions.RoleFromResourcePermissions(ri.PermissionSet).OCSPermissions() - if permissions == conversions.PermissionInvalid || 
!existingPermissions.Contain(permissions) { - response.WriteOCSError(w, r, http.StatusNotFound, "Cannot set the requested share permissions", nil) - return nil, nil, errors.New("cannot set the requested share permissions") + // add a deny permission only if the user has the grant to deny (ResourcePermissions.DenyGrant == true) + if permissions == conversions.PermissionNone { + if !ri.PermissionSet.DenyGrant { + response.WriteOCSError(w, r, http.StatusNotFound, "Cannot set the requested share permissions: no deny grant on resource", nil) + return nil, nil, errors.New("cannot set the requested share permissions: no deny grant on resource") + } + } else { + existingPermissions := conversions.RoleFromResourcePermissions(ri.PermissionSet).OCSPermissions() + if permissions == conversions.PermissionInvalid || !existingPermissions.Contain(permissions) { + response.WriteOCSError(w, r, http.StatusNotFound, "Cannot set the requested share permissions", nil) + return nil, nil, errors.New("cannot set the requested share permissions") + } } role = conversions.RoleFromOCSPermissions(permissions) @@ -881,84 +889,106 @@ func (h *Handler) listSharesWithMe(w http.ResponseWriter, r *http.Request) { shares := make([]*conversions.ShareData, 0, len(lrsRes.GetShares())) - // TODO(refs) filter out "invalid" shares - for _, rs := range lrsRes.GetShares() { - if stateFilter != ocsStateUnknown && rs.GetState() != stateFilter { - continue - } - var info *provider.ResourceInfo - if pinfo != nil { - // check if the shared resource matches the path resource - if !utils.ResourceIDEqual(rs.Share.ResourceId, pinfo.Id) { - // try next share - continue - } - // we can reuse the stat info - info = pinfo - } else { - var status *rpc.Status - info, status, err = h.getResourceInfoByID(ctx, client, rs.Share.ResourceId) - if err != nil || status.Code != rpc.Code_CODE_OK { - h.logProblems(status, err, "could not stat, skipping", log) - continue - } - } - - data, err := conversions.CS3Share2ShareData(r.Context(), rs.Share) - if err != nil { - log.Debug().Interface("share", rs.Share).Interface("shareData", 
data).Err(err).Msg("could not CS3Share2ShareData, skipping") - continue - } + var wg sync.WaitGroup + workers := 50 + input := make(chan *collaboration.ReceivedShare, len(lrsRes.GetShares())) + output := make(chan *conversions.ShareData, len(lrsRes.GetShares())) - data.State = mapState(rs.GetState()) + for i := 0; i < workers; i++ { + wg.Add(1) + go func(ctx context.Context, client gateway.GatewayAPIClient, input chan *collaboration.ReceivedShare, output chan *conversions.ShareData, wg *sync.WaitGroup) { + defer wg.Done() - if err := h.addFileInfo(ctx, data, info); err != nil { - log.Debug().Interface("received_share", rs).Interface("info", info).Interface("shareData", data).Err(err).Msg("could not add file info, skipping") - continue - } - h.mapUserIds(r.Context(), client, data) - - if data.State == ocsStateAccepted { - // only accepted shares can be accessed when jailing users into their home. - // in this case we cannot stat shared resources that are outside the users home (/home), - // the path (/users/u-u-i-d/foo) will not be accessible - - // in a global namespace we can access the share using the full path - // in a jailed namespace we have to point to the mount point in the users /Shares jail - // - needed for oc10 hot migration - // or use the /dav/spaces/ endpoint? - - // list /Shares and match fileids with list of received shares - // - only works for a /Shares folder jail - // - does not work for freely mountable shares as in oc10 because we would need to iterate over the whole tree, there is no listing of mountpoints, yet - - // can we return the mountpoint when the gateway resolves the listing of shares? 
- // - no, the gateway only sees the same list any has the same options as the ocs service - // - we would need to have a list of mountpoints for the shares -> owncloudstorageprovider for hot migration + for rs := range input { + if stateFilter != ocsStateUnknown && rs.GetState() != stateFilter { + continue + } + var info *provider.ResourceInfo + if pinfo != nil { + // check if the shared resource matches the path resource + if !utils.ResourceIDEqual(rs.Share.ResourceId, pinfo.Id) { + // try next share + continue + } + // we can reuse the stat info + info = pinfo + } else { + var status *rpc.Status + info, status, err = h.getResourceInfoByID(ctx, client, rs.Share.ResourceId) + if err != nil || status.Code != rpc.Code_CODE_OK { + h.logProblems(status, err, "could not stat, skipping", log) + continue + } + } - best we can do for now is stat the /Shares jail if it is set and return those paths + data, err := conversions.CS3Share2ShareData(r.Context(), rs.Share) + if err != nil { + log.Debug().Interface("share", rs.Share.Id).Err(err).Msg("CS3Share2ShareData call failed, skipping") + continue + } - if we are in a jail and the current share has been accepted use the stat from the share jail - Needed because received shares can be jailed in a folder in the users home + data.State = mapState(rs.GetState()) - if h.sharePrefix != "/" { - // if we have share jail infos use them to build the path - if sji := findMatch(shareJailInfos, rs.Share.ResourceId); sji != nil { - // override path with info from share jail - data.FileTarget = path.Join(h.sharePrefix, path.Base(sji.Path)) - data.Path = path.Join(h.sharePrefix, path.Base(sji.Path)) - } else { - data.FileTarget = path.Join(h.sharePrefix, path.Base(info.Path)) - data.Path = path.Join(h.sharePrefix, path.Base(info.Path)) + if err := h.addFileInfo(ctx, data, info); err != nil { + log.Debug().Interface("received_share", rs.Share.Id).Err(err).Msg("could not add file info, skipping") + continue } - } else { - data.FileTarget = 
info.Path - data.Path = info.Path + h.mapUserIds(r.Context(), client, data) + + if data.State == ocsStateAccepted { + // only accepted shares can be accessed when jailing users into their home. + // in this case we cannot stat shared resources that are outside the users home (/home), + // the path (/users/u-u-i-d/foo) will not be accessible + + // in a global namespace we can access the share using the full path + // in a jailed namespace we have to point to the mount point in the users /Shares jail + // - needed for oc10 hot migration + // or use the /dav/spaces/ endpoint? + + // list /Shares and match fileids with list of received shares + // - only works for a /Shares folder jail + // - does not work for freely mountable shares as in oc10 because we would need to iterate over the whole tree, there is no listing of mountpoints, yet + + // can we return the mountpoint when the gateway resolves the listing of shares? + // - no, the gateway only sees the same list any has the same options as the ocs service + // - we would need to have a list of mountpoints for the shares -> owncloudstorageprovider for hot migration + + // best we can do for now is stat the /Shares jail if it is set and return those paths + + // if we are in a jail and the current share has been accepted use the stat from the share jail + // Needed because received shares can be jailed in a folder in the users home + + if h.sharePrefix != "/" { + // if we have share jail infos use them to build the path + if sji := findMatch(shareJailInfos, rs.Share.ResourceId); sji != nil { + // override path with info from share jail + data.FileTarget = path.Join(h.sharePrefix, path.Base(sji.Path)) + data.Path = path.Join(h.sharePrefix, path.Base(sji.Path)) + } else { + data.FileTarget = path.Join(h.sharePrefix, path.Base(info.Path)) + data.Path = path.Join(h.sharePrefix, path.Base(info.Path)) + } + } else { + data.FileTarget = info.Path + data.Path = info.Path + } + } + + log.Debug().Msgf("share: %+v", data) + 
output <- data } - } + }(ctx, client, input, output, &wg) + } + + for _, share := range lrsRes.GetShares() { + input <- share + } + close(input) + wg.Wait() + close(output) - shares = append(shares, data) - log.Debug().Msgf("share: %+v", *data) + for s := range output { + shares = append(shares, s) } if h.listOCMShares { @@ -1041,7 +1071,7 @@ func (h *Handler) listSharesWithOthers(w http.ResponseWriter, r *http.Request) { shares = append(shares, publicShares...) } if listUserShares { - userShares, status, err := h.listUserShares(r, filters) + userShares, status, err := h.listUserShares(r, filters, p) h.logProblems(status, err, "could not listUserShares", log) shares = append(shares, userShares...) } diff --git a/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/user.go b/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/user.go index 57ab112490..7b988ce0e8 100644 --- a/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/user.go +++ b/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/user.go @@ -19,9 +19,12 @@ package shares import ( + "context" "net/http" "strconv" + "sync" + gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1" @@ -100,7 +103,7 @@ func (h *Handler) isUserShare(r *http.Request, oid string) bool { logger := appctx.GetLogger(r.Context()) client, err := pool.GetGatewayServiceClient(pool.Endpoint(h.gatewayAddr)) if err != nil { - logger.Err(err) + logger.Err(err).Send() } getShareRes, err := client.GetShare(r.Context(), &collaboration.GetShareRequest{ @@ -113,7 +116,7 @@ func (h *Handler) isUserShare(r *http.Request, oid string) bool { }, }) if err != nil { - logger.Err(err) + logger.Err(err).Send() return false } @@ -276,63 +279,88 @@ func (h *Handler) removeFederatedShare(w http.ResponseWriter, r 
*http.Request, s response.WriteOCSSuccess(w, r, data) } -func (h *Handler) listUserShares(r *http.Request, filters []*collaboration.Filter) ([]*conversions.ShareData, *rpc.Status, error) { +func (h *Handler) listUserShares(r *http.Request, filters []*collaboration.Filter, ctxPath string) ([]*conversions.ShareData, *rpc.Status, error) { ctx := r.Context() log := appctx.GetLogger(ctx) lsUserSharesRequest := collaboration.ListSharesRequest{ Filters: filters, + Opaque: &types.Opaque{ + Map: map[string]*types.OpaqueEntry{ + ctxpkg.ResoucePathCtx: {Decoder: "plain", Value: []byte(ctxPath)}, + }, + }, } ocsDataPayload := make([]*conversions.ShareData, 0) - if h.gatewayAddr != "" { - // get a connection to the users share provider - client, err := pool.GetGatewayServiceClient(pool.Endpoint(h.gatewayAddr)) - if err != nil { - return ocsDataPayload, nil, err - } - - // do list shares request. filtered - lsUserSharesResponse, err := client.ListShares(ctx, &lsUserSharesRequest) - if err != nil { - return ocsDataPayload, nil, err - } - if lsUserSharesResponse.Status.Code != rpc.Code_CODE_OK { - return ocsDataPayload, lsUserSharesResponse.Status, nil - } + client, err := pool.GetGatewayServiceClient(pool.Endpoint(h.gatewayAddr)) + if err != nil { + return ocsDataPayload, nil, err + } - // build OCS response payload - for _, s := range lsUserSharesResponse.Shares { - data, err := conversions.CS3Share2ShareData(ctx, s) - if err != nil { - log.Debug().Interface("share", s).Interface("shareData", data).Err(err).Msg("could not CS3Share2ShareData, skipping") - continue - } + // do list shares request. 
filtered + lsUserSharesResponse, err := client.ListShares(ctx, &lsUserSharesRequest) + if err != nil { + return ocsDataPayload, nil, err + } + if lsUserSharesResponse.Status.Code != rpc.Code_CODE_OK { + return ocsDataPayload, lsUserSharesResponse.Status, nil + } - info, status, err := h.getResourceInfoByID(ctx, client, s.ResourceId) - if err != nil || status.Code != rpc.Code_CODE_OK { - log.Debug().Interface("share", s).Interface("status", status).Interface("shareData", data).Err(err).Msg("could not stat share, skipping") - continue + var wg sync.WaitGroup + workers := 50 + input := make(chan *collaboration.Share, len(lsUserSharesResponse.Shares)) + output := make(chan *conversions.ShareData, len(lsUserSharesResponse.Shares)) + + for i := 0; i < workers; i++ { + wg.Add(1) + go func(ctx context.Context, client gateway.GatewayAPIClient, input chan *collaboration.Share, output chan *conversions.ShareData, wg *sync.WaitGroup) { + defer wg.Done() + + // build OCS response payload + for s := range input { + data, err := conversions.CS3Share2ShareData(ctx, s) + if err != nil { + log.Debug().Interface("share", s.Id).Err(err).Msg("CS3Share2ShareData returned error, skipping") + continue + } + + info, status, err := h.getResourceInfoByID(ctx, client, s.ResourceId) + if err != nil || status.Code != rpc.Code_CODE_OK { + log.Debug().Interface("share", s.Id).Interface("status", status).Err(err).Msg("could not stat share, skipping") + continue + } + + if err := h.addFileInfo(ctx, data, info); err != nil { + log.Debug().Interface("share", s.Id).Err(err).Msg("could not add file info, skipping") + continue + } + h.mapUserIds(ctx, client, data) + + log.Debug().Interface("share", s.Id).Msg("mapped") + output <- data } + }(ctx, client, input, output, &wg) + } - if err := h.addFileInfo(ctx, data, info); err != nil { - log.Debug().Interface("share", s).Interface("info", info).Interface("shareData", data).Err(err).Msg("could not add file info, skipping") - continue - } - h.mapUserIds(ctx, 
client, data) + for _, share := range lsUserSharesResponse.Shares { + input <- share + } + close(input) + wg.Wait() + close(output) - log.Debug().Interface("share", s).Interface("info", info).Interface("shareData", data).Msg("mapped") - ocsDataPayload = append(ocsDataPayload, data) - } + for s := range output { + ocsDataPayload = append(ocsDataPayload, s) + } - if h.listOCMShares { - // include the ocm shares - ocmShares, err := h.listOutcomingFederatedShares(ctx, client, convertToOCMFilters(filters)) - if err != nil { - return nil, nil, err - } - ocsDataPayload = append(ocsDataPayload, ocmShares...) + if h.listOCMShares { + // include the ocm shares + ocmShares, err := h.listOutcomingFederatedShares(ctx, client, convertToOCMFilters(filters)) + if err != nil { + return nil, nil, err } + ocsDataPayload = append(ocsDataPayload, ocmShares...) } return ocsDataPayload, nil, nil diff --git a/internal/http/services/owncloud/ocs/handlers/cloud/user/user.go b/internal/http/services/owncloud/ocs/handlers/cloud/user/user.go index ba8bffc5c9..05c0a7b71d 100644 --- a/internal/http/services/owncloud/ocs/handlers/cloud/user/user.go +++ b/internal/http/services/owncloud/ocs/handlers/cloud/user/user.go @@ -158,9 +158,9 @@ func parseUpdateSelfRequest(r *http.Request) (updateSelfRequest, error) { // User holds user data. 
type User struct { // TODO needs better naming, clarify if we need a userid, a username or both - ID string `json:"id" xml:"id"` - DisplayName string `json:"display-name" xml:"display-name"` - Email string `json:"email" xml:"email"` - UserType string `json:"user-type" xml:"user-type"` + ID string `json:"id" xml:"id"` + DisplayName string `json:"display-name" xml:"display-name"` + Email string `json:"email" xml:"email"` + UserType string `json:"user-type" xml:"user-type"` Language string `json:"language,omitempty" xml:"language,omitempty"` } diff --git a/internal/http/services/owncloud/ocs/handlers/cloud/users/users.go b/internal/http/services/owncloud/ocs/handlers/cloud/users/users.go index 67638144cf..015905571f 100644 --- a/internal/http/services/owncloud/ocs/handlers/cloud/users/users.go +++ b/internal/http/services/owncloud/ocs/handlers/cloud/users/users.go @@ -54,19 +54,19 @@ func (h *Handler) GetGroups(w http.ResponseWriter, r *http.Request) { // Quota holds quota information. type Quota struct { - Free int64 `json:"free" xml:"free"` - Used int64 `json:"used" xml:"used"` - Total int64 `json:"total" xml:"total"` - Relative float32 `json:"relative" xml:"relative"` + Free int64 `json:"free" xml:"free"` + Used int64 `json:"used" xml:"used"` + Total int64 `json:"total" xml:"total"` + Relative float32 `json:"relative" xml:"relative"` Definition string `json:"definition" xml:"definition"` } // Users holds users data. type Users struct { - Quota *Quota `json:"quota" xml:"quota"` - Email string `json:"email" xml:"email"` + Quota *Quota `json:"quota" xml:"quota"` + Email string `json:"email" xml:"email"` DisplayName string `json:"displayname" xml:"displayname"` - UserType string `json:"user-type" xml:"user-type"` + UserType string `json:"user-type" xml:"user-type"` // FIXME home should never be exposed ... 
even in oc 10 // home TwoFactorAuthEnabled bool `json:"two_factor_auth_enabled" xml:"two_factor_auth_enabled"` diff --git a/internal/http/services/owncloud/ocs/response/response.go b/internal/http/services/owncloud/ocs/response/response.go index 4c6b029299..82f9a70bd4 100644 --- a/internal/http/services/owncloud/ocs/response/response.go +++ b/internal/http/services/owncloud/ocs/response/response.go @@ -47,8 +47,8 @@ type Response struct { // Payload combines response metadata and data. type Payload struct { - XMLName struct{} `json:"-" xml:"ocs"` - Meta Meta `json:"meta" xml:"meta"` + XMLName struct{} `json:"-" xml:"ocs"` + Meta Meta `json:"meta" xml:"meta"` Data interface{} `json:"data,omitempty" xml:"data,omitempty"` } @@ -102,10 +102,10 @@ func (p Payload) MarshalXML(e *xml.Encoder, start xml.StartElement) (err error) // Meta holds response metadata. type Meta struct { - Status string `json:"status" xml:"status"` - StatusCode int `json:"statuscode" xml:"statuscode"` - Message string `json:"message" xml:"message"` - TotalItems string `json:"totalitems,omitempty" xml:"totalitems,omitempty"` + Status string `json:"status" xml:"status"` + StatusCode int `json:"statuscode" xml:"statuscode"` + Message string `json:"message" xml:"message"` + TotalItems string `json:"totalitems,omitempty" xml:"totalitems,omitempty"` ItemsPerPage string `json:"itemsperpage,omitempty" xml:"itemsperpage,omitempty"` } @@ -189,7 +189,7 @@ func encodeXML(res Response) ([]byte, error) { return nil, err } b := new(bytes.Buffer) - b.Write([]byte(xml.Header)) + b.WriteString(xml.Header) b.Write(marshalled) return b.Bytes(), nil } diff --git a/internal/http/services/reverseproxy/reverseproxy.go b/internal/http/services/reverseproxy/reverseproxy.go index 2c81b20815..ae1c9300b1 100644 --- a/internal/http/services/reverseproxy/reverseproxy.go +++ b/internal/http/services/reverseproxy/reverseproxy.go @@ -37,8 +37,8 @@ func init() { } type proxyRule struct { - Endpoint string `mapstructure:"endpoint" 
json:"endpoint"` - Backend string `mapstructure:"backend" json:"backend"` + Endpoint string `json:"endpoint" mapstructure:"endpoint"` + Backend string `json:"backend" mapstructure:"backend"` } type config struct { diff --git a/internal/http/services/sciencemesh/share.go b/internal/http/services/sciencemesh/share.go index ac84dc41ad..d8b292f233 100644 --- a/internal/http/services/sciencemesh/share.go +++ b/internal/http/services/sciencemesh/share.go @@ -52,12 +52,12 @@ func (h *sharesHandler) init(c *config) error { } type createShareRequest struct { - SourcePath string `json:"sourcePath" validate:"required"` - TargetPath string `json:"targetPath" validate:"required"` + SourcePath string `json:"sourcePath" validate:"required"` + TargetPath string `json:"targetPath" validate:"required"` Type string `json:"type"` - Role string `json:"role" validate:"oneof=viewer editor"` + Role string `json:"role" validate:"oneof=viewer editor"` RecipientUsername string `json:"recipientUsername" validate:"required"` - RecipientHost string `json:"recipientHost" validate:"required"` + RecipientHost string `json:"recipientHost" validate:"required"` } // CreateShare creates an OCM share. 
diff --git a/internal/serverless/services/helloworld/helloworld.go b/internal/serverless/services/helloworld/helloworld.go index 46bf5cc190..9b7023c8fc 100644 --- a/internal/serverless/services/helloworld/helloworld.go +++ b/internal/serverless/services/helloworld/helloworld.go @@ -62,7 +62,7 @@ func New(ctx context.Context, m map[string]interface{}) (rserverless.Service, er file, err := os.OpenFile(conf.Outfile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { - log.Err(err) + log.Err(err).Send() return nil, err } @@ -94,7 +94,7 @@ func (s *svc) sayHello(filename string) { _, err := s.file.Write([]byte(h)) if err != nil { - s.log.Err(err) + s.log.Err(err).Send() } time.Sleep(5 * time.Second) } diff --git a/internal/serverless/services/notifications/notifications.go b/internal/serverless/services/notifications/notifications.go index 22256b90c0..e3dcf6c91d 100644 --- a/internal/serverless/services/notifications/notifications.go +++ b/internal/serverless/services/notifications/notifications.go @@ -44,13 +44,13 @@ import ( ) type config struct { - NatsAddress string `mapstructure:"nats_address" docs:";The NATS server address."` - NatsToken string `mapstructure:"nats_token" docs:"The token to authenticate against the NATS server"` - NatsPrefix string `mapstructure:"nats_prefix" docs:"reva-notifications;The notifications NATS stream."` - HandlerConf map[string]map[string]interface{} `mapstructure:"handlers" docs:";Settings for the different notification handlers."` - GroupingInterval int `mapstructure:"grouping_interval" docs:"60;Time in seconds to group incoming notification triggers"` - GroupingMaxSize int `mapstructure:"grouping_max_size" docs:"100;Maximum number of notifications to group"` - StorageDriver string `mapstructure:"storage_driver" docs:"mysql;The driver used to store notifications"` + NatsAddress string `docs:";The NATS server address." 
mapstructure:"nats_address"` + NatsToken string `docs:"The token to authenticate against the NATS server" mapstructure:"nats_token"` + NatsPrefix string `docs:"reva-notifications;The notifications NATS stream." mapstructure:"nats_prefix"` + HandlerConf map[string]map[string]interface{} `docs:";Settings for the different notification handlers." mapstructure:"handlers"` + GroupingInterval int `docs:"60;Time in seconds to group incoming notification triggers" mapstructure:"grouping_interval"` + GroupingMaxSize int `docs:"100;Maximum number of notifications to group" mapstructure:"grouping_max_size"` + StorageDriver string `docs:"mysql;The driver used to store notifications" mapstructure:"storage_driver"` StorageDrivers map[string]map[string]interface{} `mapstructure:"storage_drivers"` } diff --git a/logs_tests b/logs_tests new file mode 100644 index 0000000000..43b5742d44 --- /dev/null +++ b/logs_tests @@ -0,0 +1,324 @@ +docker compose -f ./tests/docker/docker-compose.yml up --force-recreate --always-recreate-deps --build --abort-on-container-exit -V --remove-orphans --exit-code-from acceptance-2 acceptance-2 +#0 building with "default" instance using docker driver + +#1 [frontend internal] load build definition from Dockerfile.revad-eos +#1 transferring dockerfile: 1.33kB done +#1 DONE 0.0s + +#2 [frontend internal] load .dockerignore +#2 transferring context: 102B done +#2 DONE 0.0s + +#3 [storage-home-ocis internal] load .dockerignore +#3 transferring context: 102B done +#3 DONE 0.0s + +#4 [storage-home-ocis internal] load build definition from Dockerfile.revad-eos +#4 transferring dockerfile: 1.33kB done +#4 DONE 0.0s + +#5 [shares internal] load build definition from Dockerfile.revad-eos +#5 transferring dockerfile: 1.33kB done +#5 DONE 0.0s + +#6 [shares internal] load .dockerignore +#6 transferring context: 102B done +#6 DONE 0.0s + +#7 [ldap-users internal] load build definition from Dockerfile.revad-eos +#7 transferring dockerfile: 1.33kB done +#7 DONE 0.0s + 
+#8 [eos-storage internal] load build definition from Dockerfile +#8 transferring dockerfile: 347B done +#8 DONE 0.0s + +#9 [eos-storage internal] load .dockerignore +#9 transferring context: 2B done +#9 DONE 0.0s + +#10 [ldap-users internal] load .dockerignore +#10 transferring context: 102B done +#10 DONE 0.0s + +#11 [gateway internal] load build definition from Dockerfile.revad-eos +#11 transferring dockerfile: 1.33kB done +#11 DONE 0.0s + +#12 [eos-storage internal] load metadata for gitlab-registry.cern.ch/dss/eos/eos-ci:5.1.25 +#12 DONE 0.0s + +#13 [eos-storage 1/5] FROM gitlab-registry.cern.ch/dss/eos/eos-ci:5.1.25 +#13 DONE 0.0s + +#14 [gateway internal] load .dockerignore +#14 transferring context: 102B done +#14 DONE 0.0s + +#15 [gateway internal] load metadata for gitlab-registry.cern.ch/dss/eos/eos-all:5.0.31 +#15 DONE 0.0s + +#16 [eos-storage internal] load build context +#16 transferring context: 125B done +#16 DONE 0.0s + +#17 [eos-storage 4/5] RUN ulimit -n 1024000 && yum install -y sssd sssd-client +#17 CACHED + +#18 [eos-storage 3/5] COPY sssd/sssd.conf /etc/sssd/sssd.conf +#18 CACHED + +#19 [eos-storage 2/5] COPY scripts/eos-run.sh /mnt/scripts/eos-run.sh +#19 CACHED + +#20 [eos-storage 5/5] RUN chmod 0600 /etc/sssd/sssd.conf && chown root:root /etc/sssd/sssd.conf +#20 CACHED + +#21 [eos-storage] exporting to image +#21 exporting layers done +#21 writing image sha256:50779ff49d35cb0e27158991b4926d8e98e71a1dc9d983a217899ff29ed675dc +#21 writing image sha256:50779ff49d35cb0e27158991b4926d8e98e71a1dc9d983a217899ff29ed675dc done +#21 naming to docker.io/library/docker-eos-storage done +#21 DONE 0.0s + +#22 [storage-users-ocis internal] load .dockerignore +#22 transferring context: 102B done +#22 DONE 0.0s + +#23 [storage-users-ocis internal] load build definition from Dockerfile.revad-eos +#23 transferring dockerfile: 1.33kB done +#23 DONE 0.0s + +#24 [storage-publiclink internal] load .dockerignore +#24 transferring context: 102B done +#24 DONE 0.0s 
+ +#25 [storage-publiclink internal] load build definition from Dockerfile.revad-eos +#25 transferring dockerfile: 1.33kB done +#25 DONE 0.0s + +#15 [storage-publiclink internal] load metadata for gitlab-registry.cern.ch/dss/eos/eos-all:5.0.31 +#15 DONE 0.0s + +#26 [storage-publiclink internal] load metadata for docker.io/library/golang:latest +#26 DONE 1.4s + +#27 [storage-home-ocis builder 1/4] FROM docker.io/library/golang:latest@sha256:19600fdcae402165dcdab18cb9649540bde6be7274dedb5d205b2f84029fe909 +#27 DONE 0.0s + +#28 [storage-publiclink stage-1 1/4] FROM gitlab-registry.cern.ch/dss/eos/eos-all:5.0.31 +#28 DONE 0.0s + +#29 [gateway internal] load build context +#29 transferring context: 92.02kB 0.1s done +#29 DONE 0.1s + +#30 [shares internal] load build context +#30 transferring context: 92.02kB 0.1s done +#30 DONE 0.2s + +#31 [frontend internal] load build context +#31 transferring context: 92.02kB 0.1s done +#31 DONE 0.1s + +#32 [storage-publiclink builder 2/4] WORKDIR /home/reva +#32 CACHED + +#33 [storage-publiclink internal] load build context +#33 transferring context: 92.02kB 0.1s done +#33 DONE 0.1s + +#34 [storage-users-ocis internal] load build context +#34 transferring context: 92.02kB 0.1s done +#34 DONE 0.1s + +#35 [storage-home-ocis internal] load build context +#35 transferring context: 92.02kB 0.1s done +#35 DONE 0.2s + +#36 [ldap-users internal] load build context +#36 transferring context: 92.02kB 0.1s done +#36 DONE 0.2s + +#37 [gateway builder 3/4] COPY . . 
+#37 DONE 0.4s + +#38 [gateway builder 4/4] RUN make revad +#38 0.251 go build -ldflags "`[[ -z "" ]] && echo "" || echo "-extldflags=-static"` -X github.com/cs3org/reva/cmd/revad.gitCommit= -X github.com/cs3org/reva/cmd/revad.version= -X github.com/cs3org/reva/cmd/revad.goVersion=`go version | awk '{print $3}'` -X github.com/cs3org/reva/cmd/revad.buildDate=`date +%FT%T%z`" -o ./cmd/revad/revad ./cmd/revad/main +#38 0.276 go: downloading github.com/google/uuid v1.3.1 +#38 0.276 go: downloading github.com/pkg/errors v0.9.1 +#38 0.276 go: downloading github.com/rs/zerolog v1.28.0 +#38 0.296 go: downloading go.opencensus.io v0.24.0 +#38 0.328 go: downloading github.com/BurntSushi/toml v1.3.2 +#38 0.359 go: downloading go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 +#38 1.446 go: downloading golang.org/x/sync v0.3.0 +#38 1.535 go: downloading google.golang.org/grpc v1.58.0 +#38 1.535 go: downloading github.com/creasty/defaults v1.7.0 +#38 1.535 go: downloading github.com/mitchellh/mapstructure v1.5.0 +#38 1.623 go: downloading github.com/cs3org/go-cs3apis v0.0.0-20230727093620-0f4399be4543 +#38 1.647 go: downloading github.com/go-ldap/ldap/v3 v3.4.6 +#38 1.774 go: downloading github.com/golang/protobuf v1.5.3 +#38 1.774 go: downloading go.step.sm/crypto v0.35.0 +#38 1.774 go: downloading google.golang.org/protobuf v1.31.0 +#38 1.789 go: downloading go.opentelemetry.io/otel/trace v1.14.0 +#38 1.790 go: downloading github.com/bluele/gcache v0.0.2 +#38 1.888 go: downloading go.opentelemetry.io/otel v1.14.0 +#38 3.259 go: downloading github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 +#38 3.404 go: downloading go.opentelemetry.io/otel/exporters/jaeger v1.11.2 +#38 3.442 go: downloading go.opentelemetry.io/otel/sdk v1.11.2 +#38 3.605 go: downloading go.opentelemetry.io/otel/metric v0.34.0 +#38 3.727 go: downloading github.com/mattn/go-colorable v0.1.12 +#38 3.760 go: downloading google.golang.org/genproto 
v0.0.0-20230803162519-f966b187b2e5 +#38 3.826 go: downloading github.com/mileusna/useragent v1.2.1 +#38 6.475 go: downloading github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 +#38 6.572 go: downloading github.com/go-asn1-ber/asn1-ber v1.5.5 +#38 6.776 go: downloading github.com/asim/go-micro/plugins/events/nats/v4 v4.7.0 +#38 6.897 go: downloading go-micro.dev/v4 v4.3.1-0.20211108085239-0c2041e43908 +#38 7.314 go: downloading github.com/juliangruber/go-intersect v1.1.0 +#38 7.426 go: downloading github.com/ReneKroon/ttlcache/v2 v2.11.0 +#38 7.535 go: downloading github.com/golang-jwt/jwt v3.2.2+incompatible +#38 7.694 go: downloading github.com/studio-b12/gowebdav v0.9.0 +#38 15.58 go: downloading github.com/rs/cors v1.9.0 +#38 15.58 go: downloading github.com/go-chi/chi/v5 v5.0.8 +#38 15.58 go: downloading github.com/gdexlab/go-render v1.0.1 +#38 15.58 go: downloading github.com/go-playground/validator/v10 v10.15.4 +#38 15.59 go: downloading github.com/tus/tusd v1.13.0 +#38 15.59 go: downloading contrib.go.opencensus.io/exporter/prometheus v0.4.2 +#38 15.59 go: downloading github.com/nats-io/nats.go v1.27.0 +#38 15.59 go: downloading github.com/beevik/etree v1.2.0 +#38 15.82 go: downloading github.com/wk8/go-ordered-map v1.0.0 +#38 15.82 go: downloading github.com/glpatcern/go-mime v0.0.0-20221026162842-2a8d71ad17a9 +#38 15.82 go: downloading golang.org/x/crypto v0.13.0 +#38 15.82 go: downloading github.com/coreos/go-oidc/v3 v3.5.0 +#38 15.82 go: downloading github.com/sethvargo/go-password v0.2.0 +#38 15.82 go: downloading golang.org/x/oauth2 v0.11.0 +#38 15.83 go: downloading github.com/Masterminds/sprig v2.22.0+incompatible +#38 16.12 go: downloading github.com/go-sql-driver/mysql v1.7.1 +#38 16.12 go: downloading github.com/gomodule/redigo v1.8.9 +#38 16.12 go: downloading github.com/aws/aws-sdk-go v1.45.1 +#38 16.24 go: downloading golang.org/x/sys v0.12.0 +#38 16.24 go: downloading github.com/go-logr/logr v1.2.4 +#38 16.57 go: downloading 
github.com/mattn/go-isatty v0.0.17 +#38 16.68 go: downloading github.com/nats-io/stan.go v0.10.4 +#38 16.70 go: downloading github.com/cenkalti/backoff v2.2.1+incompatible +#38 16.79 go: downloading github.com/nats-io/nats-server/v2 v2.9.19 +#38 16.80 go: downloading github.com/nats-io/nats-streaming-server v0.25.5 +#38 17.50 go: downloading github.com/go-playground/locales v0.14.1 +#38 18.40 go: downloading github.com/go-playground/universal-translator v0.18.1 +#38 19.65 go: downloading github.com/google/go-cmp v0.5.9 +#38 19.77 go: downloading golang.org/x/net v0.14.0 +#38 20.66 go: downloading google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 +#38 20.81 go: downloading github.com/prometheus/alertmanager v0.26.0 +#38 20.81 go: downloading github.com/prometheus/client_golang v1.16.0 +#38 21.98 go: downloading github.com/prometheus/statsd_exporter v0.22.7 +#38 22.13 go: downloading github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 +#38 22.21 go: downloading github.com/gabriel-vasile/mimetype v1.4.2 +#38 22.27 go: downloading github.com/leodido/go-urn v1.2.4 +#38 22.88 go: downloading golang.org/x/text v0.13.0 +#38 27.74 go: downloading github.com/klauspost/compress v1.16.7 +#38 44.85 go: downloading github.com/nats-io/nkeys v0.4.4 +#38 44.85 go: downloading github.com/Masterminds/goutils v1.1.1 +#38 44.85 go: downloading github.com/nats-io/nuid v1.0.1 +#38 44.85 go: downloading github.com/Masterminds/semver v1.5.0 +#38 44.85 go: downloading github.com/huandu/xstrings v1.3.3 +#38 44.85 go: downloading github.com/imdario/mergo v0.3.12 +#38 45.26 go: downloading github.com/mitchellh/copystructure v1.2.0 +#38 45.27 go: downloading github.com/go-jose/go-jose/v3 v3.0.0 +#38 45.27 go: downloading github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da +#38 45.27 go: downloading github.com/mattn/go-sqlite3 v1.14.17 +#38 45.56 go: downloading github.com/go-logr/stdr v1.2.2 +#38 45.69 go: downloading github.com/hashicorp/go-hclog 
v1.5.0 +#38 46.97 go: downloading github.com/hashicorp/go-msgpack/v2 v2.1.0 +#38 46.97 go: downloading github.com/hashicorp/raft v1.5.0 +#38 47.02 go: downloading github.com/prometheus/procfs v0.11.0 +#38 47.02 go: downloading go.etcd.io/bbolt v1.3.7 +#38 47.67 go: downloading github.com/minio/highwayhash v1.0.2 +#38 47.70 go: downloading github.com/hashicorp/go-msgpack v1.1.5 +#38 47.95 go: downloading github.com/nats-io/jwt/v2 v2.4.1 +#38 48.06 go: downloading golang.org/x/time v0.3.0 +#38 48.44 go: downloading github.com/beorn7/perks v1.0.1 +#38 48.44 go: downloading github.com/cespare/xxhash/v2 v2.2.0 +#38 48.82 go: downloading github.com/prometheus/client_model v0.4.0 +#38 48.86 go: downloading github.com/prometheus/common v0.44.0 +#38 48.87 go: downloading github.com/cespare/xxhash v1.1.0 +#38 48.87 go: downloading github.com/go-kit/log v0.2.1 +#38 48.93 go: downloading gopkg.in/yaml.v2 v2.4.0 +#38 49.05 go: downloading github.com/patrickmn/go-cache v2.1.0+incompatible +#38 49.10 go: downloading github.com/gogo/protobuf v1.3.2 +#38 49.10 go: downloading github.com/mitchellh/reflectwalk v1.0.2 +#38 49.14 go: downloading github.com/fatih/color v1.13.0 +#38 49.22 go: downloading github.com/armon/go-metrics v0.4.1 +#38 50.29 go: downloading github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 +#38 50.29 go: downloading github.com/jmespath/go-jmespath v0.4.0 +#38 50.30 go: downloading github.com/matttproud/golang_protobuf_extensions v1.0.4 +#38 50.41 go: downloading github.com/go-logfmt/logfmt v0.5.1 +#38 50.77 go: downloading github.com/hashicorp/go-immutable-radix v1.3.1 +#38 50.77 go: downloading github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c +#38 51.02 go: downloading github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 +#38 51.03 go: downloading github.com/miekg/dns v1.1.43 +#38 51.56 go: downloading github.com/hashicorp/golang-lru v0.6.0 +#38 DONE 97.6s + +#39 [gateway stage-1 2/4] COPY --from=builder 
/etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ +#39 CACHED + +#40 [storage-home-ocis builder 3/4] COPY . . +#40 CACHED + +#41 [storage-home-ocis builder 4/4] RUN make revad +#41 CACHED + +#42 [storage-home-ocis stage-1 2/4] COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ +#42 CACHED + +#43 [storage-home-ocis stage-1 3/4] COPY --from=builder /home/reva/cmd/revad/revad /usr/bin/revad +#43 DONE 0.0s + +#44 [storage-home-ocis stage-1 4/4] RUN mkdir -p /etc/revad/ && touch /etc/revad/revad.toml +#44 DONE 0.2s + +#45 [gateway] exporting to image +#45 exporting layers +#45 ... + +#46 [frontend] exporting to image +#46 exporting layers 4.6s done +#46 writing image sha256:25f1937bd1a2c2149eec7ad4214295558da354ac806cd0de609189da1dfcab7d done +#46 naming to docker.io/library/docker-frontend done +#46 DONE 4.6s + +#47 [shares] exporting to image +#47 exporting layers 4.6s done +#47 writing image sha256:3b4c8039a91d26b69728832dcb221b5459c4da99f9f947ee157abb6d55ccfae8 done +#47 naming to docker.io/library/docker-shares done +#47 DONE 4.6s + +#48 [storage-home-ocis] exporting to image +#48 exporting layers 4.6s done +#48 writing image sha256:80808e18a7ddd773995d32be339db783d4de20c82ea9a37f07edbbe6f6037836 done +#48 naming to docker.io/library/docker-storage-home-ocis done +#48 DONE 4.6s + +#49 [ldap-users] exporting to image +#49 exporting layers 4.7s done +#49 writing image sha256:f6e440868593eade47a3edbbe655941650f98895e1ae0cb53c1720be575cfc71 done +#49 naming to docker.io/library/docker-ldap-users done +#49 DONE 4.7s + +#50 [storage-users-ocis] exporting to image +#50 exporting layers 4.6s done +#50 writing image sha256:8e8e504b0939797f02ea7b70956e1191fc01d31f0074168087e39d6f9fbbab7b done +#50 naming to docker.io/library/docker-storage-users-ocis done +#50 DONE 4.6s + +#51 [storage-publiclink] exporting to image +#51 exporting layers 4.6s done +#51 writing image sha256:804891df2a6ec0eec32487075b03bbd23f5052c177159962b85edcf3161b739f done +#51 naming 
to docker.io/library/docker-storage-publiclink done +#51 DONE 4.6s + +#45 [gateway] exporting to image +#45 exporting layers 4.7s done +#45 writing image sha256:b4faa2f6541dfb48e4277414e9cef1771f970aff2931ddd624b20a1be2e5ab07 done +#45 naming to docker.io/library/docker-gateway done +#45 DONE 4.7s diff --git a/pkg/app/provider/wopi/wopi.go b/pkg/app/provider/wopi/wopi.go index 690711e14d..fbd092fcbf 100644 --- a/pkg/app/provider/wopi/wopi.go +++ b/pkg/app/provider/wopi/wopi.go @@ -76,17 +76,17 @@ func init() { } type config struct { - MimeTypes []string `mapstructure:"mime_types" docs:"nil;Inherited from the appprovider."` - IOPSecret string `mapstructure:"iop_secret" docs:";The IOP secret used to connect to the wopiserver."` - WopiURL string `mapstructure:"wopi_url" docs:";The wopiserver's URL."` - AppName string `mapstructure:"app_name" docs:";The App user-friendly name."` - AppIconURI string `mapstructure:"app_icon_uri" docs:";A URI to a static asset which represents the app icon."` - FolderBaseURL string `mapstructure:"folder_base_url" docs:";The base URL to generate links to navigate back to the containing folder."` - AppURL string `mapstructure:"app_url" docs:";The App URL."` - AppIntURL string `mapstructure:"app_int_url" docs:";The internal app URL in case of dockerized deployments. Defaults to AppURL"` - AppAPIKey string `mapstructure:"app_api_key" docs:";The API key used by the app, if applicable."` - JWTSecret string `mapstructure:"jwt_secret" docs:";The JWT secret to be used to retrieve the token TTL."` - AppDesktopOnly bool `mapstructure:"app_desktop_only" docs:"false;Specifies if the app can be opened only on desktop."` + MimeTypes []string `docs:"nil;Inherited from the appprovider." mapstructure:"mime_types"` + IOPSecret string `docs:";The IOP secret used to connect to the wopiserver." mapstructure:"iop_secret"` + WopiURL string `docs:";The wopiserver's URL." mapstructure:"wopi_url"` + AppName string `docs:";The App user-friendly name." 
mapstructure:"app_name"` + AppIconURI string `docs:";A URI to a static asset which represents the app icon." mapstructure:"app_icon_uri"` + FolderBaseURL string `docs:";The base URL to generate links to navigate back to the containing folder." mapstructure:"folder_base_url"` + AppURL string `docs:";The App URL." mapstructure:"app_url"` + AppIntURL string `docs:";The internal app URL in case of dockerized deployments. Defaults to AppURL" mapstructure:"app_int_url"` + AppAPIKey string `docs:";The API key used by the app, if applicable." mapstructure:"app_api_key"` + JWTSecret string `docs:";The JWT secret to be used to retrieve the token TTL." mapstructure:"jwt_secret"` + AppDesktopOnly bool `docs:"false;Specifies if the app can be opened only on desktop." mapstructure:"app_desktop_only"` InsecureConnections bool `mapstructure:"insecure_connections"` } diff --git a/pkg/auth/manager/impersonator/impersonator.go b/pkg/auth/manager/impersonator/impersonator.go deleted file mode 100644 index dbb1a58cca..0000000000 --- a/pkg/auth/manager/impersonator/impersonator.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package impersonator - -import ( - "context" - "strings" - - authpb "github.com/cs3org/go-cs3apis/cs3/auth/provider/v1beta1" - user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - "github.com/cs3org/reva/pkg/auth" - "github.com/cs3org/reva/pkg/auth/manager/registry" - "github.com/cs3org/reva/pkg/auth/scope" -) - -func init() { - registry.Register("impersonator", New) -} - -type mgr struct{} - -// New returns an auth manager implementation that allows to authenticate with any credentials. -func New(ctx context.Context, c map[string]interface{}) (auth.Manager, error) { - return &mgr{}, nil -} - -func (m *mgr) Configure(ml map[string]interface{}) error { - return nil -} - -func (m *mgr) Authenticate(ctx context.Context, clientID, clientSecret string) (*user.User, map[string]*authpb.Scope, error) { - // allow passing in uid as @ - at := strings.LastIndex(clientID, "@") - uid := &user.UserId{Type: user.UserType_USER_TYPE_PRIMARY} - if at < 0 { - uid.OpaqueId = clientID - } else { - uid.OpaqueId = clientID[:at] - uid.Idp = clientID[at+1:] - } - - scope, err := scope.AddOwnerScope(nil) - if err != nil { - return nil, nil, err - } - - return &user.User{ - Id: uid, - // not much else to provide - }, scope, nil -} diff --git a/pkg/auth/manager/impersonator/impersonator_test.go b/pkg/auth/manager/impersonator/impersonator_test.go deleted file mode 100644 index db65f0c954..0000000000 --- a/pkg/auth/manager/impersonator/impersonator_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package impersonator - -import ( - "context" - "testing" -) - -func TestImpersonator(t *testing.T) { - ctx := context.Background() - i, _ := New(ctx, nil) - u, _, err := i.Authenticate(ctx, "admin", "pwd") - if err != nil { - t.Fatal(err) - } - - if u.Id.OpaqueId != "admin" { - t.Errorf("%#v, wanted %#v", u.Id.OpaqueId, "admin") - } - if u.Id.Idp != "" { - t.Errorf("%#v, wanted %#v", u.Id.Idp, "") - } - - ctx = context.Background() - u, _, err = i.Authenticate(ctx, "opaqueid@idp", "pwd") - if err != nil { - t.Fatal(err) - } - if u.Id.OpaqueId != "opaqueid" { - t.Errorf("%#v, wanted %#v", u.Id.OpaqueId, "opaqueid") - } - if u.Id.Idp != "idp" { - t.Errorf("%#v, wanted %#v", u.Id.Idp, "idp") - } -} diff --git a/pkg/auth/manager/json/json.go b/pkg/auth/manager/json/json.go index 36c6d41641..26b4bec965 100644 --- a/pkg/auth/manager/json/json.go +++ b/pkg/auth/manager/json/json.go @@ -40,16 +40,16 @@ func init() { // Credentials holds a pair of secret and userid. 
type Credentials struct { - ID *user.UserId `mapstructure:"id" json:"id"` - Username string `mapstructure:"username" json:"username"` - Mail string `mapstructure:"mail" json:"mail"` - MailVerified bool `mapstructure:"mail_verified" json:"mail_verified"` - DisplayName string `mapstructure:"display_name" json:"display_name"` - Secret string `mapstructure:"secret" json:"secret"` - Groups []string `mapstructure:"groups" json:"groups"` - UIDNumber int64 `mapstructure:"uid_number" json:"uid_number"` - GIDNumber int64 `mapstructure:"gid_number" json:"gid_number"` - Opaque *typespb.Opaque `mapstructure:"opaque" json:"opaque"` + ID *user.UserId `json:"id" mapstructure:"id"` + Username string `json:"username" mapstructure:"username"` + Mail string `json:"mail" mapstructure:"mail"` + MailVerified bool `json:"mail_verified" mapstructure:"mail_verified"` + DisplayName string `json:"display_name" mapstructure:"display_name"` + Secret string `json:"secret" mapstructure:"secret"` + Groups []string `json:"groups" mapstructure:"groups"` + UIDNumber int64 `json:"uid_number" mapstructure:"uid_number"` + GIDNumber int64 `json:"gid_number" mapstructure:"gid_number"` + Opaque *typespb.Opaque `json:"opaque" mapstructure:"opaque"` } type manager struct { diff --git a/pkg/auth/manager/ldap/ldap.go b/pkg/auth/manager/ldap/ldap.go index 28abc4ba62..30f5331962 100644 --- a/pkg/auth/manager/ldap/ldap.go +++ b/pkg/auth/manager/ldap/ldap.go @@ -142,6 +142,7 @@ func (am *mgr) Authenticate(ctx context.Context, clientID, clientSecret string) return nil, nil, err } + log.Trace().Interface("entries", sr.Entries).Send() if len(sr.Entries) != 1 { return nil, nil, errtypes.NotFound(clientID) } diff --git a/pkg/auth/manager/loader/loader.go b/pkg/auth/manager/loader/loader.go index 06f7287e0d..8ff80bd188 100644 --- a/pkg/auth/manager/loader/loader.go +++ b/pkg/auth/manager/loader/loader.go @@ -22,14 +22,12 @@ import ( // Load core authentication managers. 
_ "github.com/cs3org/reva/pkg/auth/manager/appauth" _ "github.com/cs3org/reva/pkg/auth/manager/demo" - _ "github.com/cs3org/reva/pkg/auth/manager/impersonator" _ "github.com/cs3org/reva/pkg/auth/manager/json" _ "github.com/cs3org/reva/pkg/auth/manager/ldap" _ "github.com/cs3org/reva/pkg/auth/manager/machine" _ "github.com/cs3org/reva/pkg/auth/manager/nextcloud" _ "github.com/cs3org/reva/pkg/auth/manager/ocmshares" _ "github.com/cs3org/reva/pkg/auth/manager/oidc" - _ "github.com/cs3org/reva/pkg/auth/manager/owncloudsql" _ "github.com/cs3org/reva/pkg/auth/manager/publicshares" // Add your own here. ) diff --git a/pkg/auth/manager/nextcloud/nextcloud.go b/pkg/auth/manager/nextcloud/nextcloud.go index da5f875433..1bbc00bc85 100644 --- a/pkg/auth/manager/nextcloud/nextcloud.go +++ b/pkg/auth/manager/nextcloud/nextcloud.go @@ -52,7 +52,7 @@ type Manager struct { // AuthManagerConfig contains config for a Nextcloud-based AuthManager. type AuthManagerConfig struct { - EndPoint string `mapstructure:"endpoint" docs:";The Nextcloud backend endpoint for user check"` + EndPoint string `docs:";The Nextcloud backend endpoint for user check" mapstructure:"endpoint"` SharedSecret string `mapstructure:"shared_secret"` MockHTTP bool `mapstructure:"mock_http"` } diff --git a/pkg/auth/manager/oidc/oidc.go b/pkg/auth/manager/oidc/oidc.go index 4df459d9af..5b0afeff0b 100644 --- a/pkg/auth/manager/oidc/oidc.go +++ b/pkg/auth/manager/oidc/oidc.go @@ -60,20 +60,20 @@ type mgr struct { } type config struct { - Insecure bool `mapstructure:"insecure" docs:"false;Whether to skip certificate checks when sending requests."` - Issuer string `mapstructure:"issuer" docs:";The issuer of the OIDC token."` - IDClaim string `mapstructure:"id_claim" docs:"sub;The claim containing the ID of the user."` - UIDClaim string `mapstructure:"uid_claim" docs:";The claim containing the UID of the user."` - GIDClaim string `mapstructure:"gid_claim" docs:";The claim containing the GID of the user."` - GatewaySvc 
string `mapstructure:"gatewaysvc" docs:";The endpoint at which the GRPC gateway is exposed."` - UsersMapping string `mapstructure:"users_mapping" docs:"; The optional OIDC users mapping file path"` - GroupClaim string `mapstructure:"group_claim" docs:"; The group claim to be looked up to map the user (default to 'groups')."` + Insecure bool `docs:"false;Whether to skip certificate checks when sending requests." mapstructure:"insecure"` + Issuer string `docs:";The issuer of the OIDC token." mapstructure:"issuer"` + IDClaim string `docs:"sub;The claim containing the ID of the user." mapstructure:"id_claim"` + UIDClaim string `docs:";The claim containing the UID of the user." mapstructure:"uid_claim"` + GIDClaim string `docs:";The claim containing the GID of the user." mapstructure:"gid_claim"` + GatewaySvc string `docs:";The endpoint at which the GRPC gateway is exposed." mapstructure:"gatewaysvc"` + UsersMapping string `docs:"; The optional OIDC users mapping file path" mapstructure:"users_mapping"` + GroupClaim string `docs:"; The group claim to be looked up to map the user (default to 'groups')." 
mapstructure:"group_claim"` } type oidcUserMapping struct { - OIDCIssuer string `mapstructure:"oidc_issuer" json:"oidc_issuer"` - OIDCGroup string `mapstructure:"oidc_group" json:"oidc_group"` - Username string `mapstructure:"username" json:"username"` + OIDCIssuer string `json:"oidc_issuer" mapstructure:"oidc_issuer"` + OIDCGroup string `json:"oidc_group" mapstructure:"oidc_group"` + Username string `json:"username" mapstructure:"username"` } func (c *config) ApplyDefaults() { diff --git a/pkg/auth/manager/owncloudsql/accounts/accounts.go b/pkg/auth/manager/owncloudsql/accounts/accounts.go deleted file mode 100644 index dd5e8bee99..0000000000 --- a/pkg/auth/manager/owncloudsql/accounts/accounts.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package accounts - -import ( - "context" - "database/sql" - "strings" - "time" - - "github.com/cs3org/reva/pkg/appctx" - "github.com/pkg/errors" -) - -// Accounts represents oc10-style Accounts. -type Accounts struct { - driver string - db *sql.DB - joinUsername, joinUUID, enableMedialSearch bool - selectSQL string -} - -// NewMysql returns a new accounts instance connecting to a MySQL database. 
-func NewMysql(dsn string, joinUsername, joinUUID, enableMedialSearch bool) (*Accounts, error) { - sqldb, err := sql.Open("mysql", dsn) - if err != nil { - return nil, errors.Wrap(err, "error connecting to the database") - } - sqldb.SetConnMaxLifetime(time.Minute * 3) - sqldb.SetMaxOpenConns(10) - sqldb.SetMaxIdleConns(10) - - err = sqldb.Ping() - if err != nil { - return nil, errors.Wrap(err, "error connecting to the database") - } - - return New("mysql", sqldb, joinUsername, joinUUID, enableMedialSearch) -} - -// New returns a new accounts instance connecting to the given sql.DB. -func New(driver string, sqldb *sql.DB, joinUsername, joinUUID, enableMedialSearch bool) (*Accounts, error) { - sel := "SELECT id, email, user_id, display_name, quota, last_login, backend, home, state, password" - from := ` - FROM oc_accounts a - LEFT JOIN oc_users u - ON a.user_id=u.uid - ` - if joinUsername { - sel += ", p.configvalue AS username" - from += `LEFT JOIN oc_preferences p - ON a.user_id=p.userid - AND p.appid='core' - AND p.configkey='username'` - } else { - // fallback to user_id as username - sel += ", user_id AS username" - } - if joinUUID { - sel += ", p2.configvalue AS ownclouduuid" - from += `LEFT JOIN oc_preferences p2 - ON a.user_id=p2.userid - AND p2.appid='core' - AND p2.configkey='ownclouduuid'` - } else { - // fallback to user_id as ownclouduuid - sel += ", user_id AS ownclouduuid" - } - - return &Accounts{ - driver: driver, - db: sqldb, - joinUsername: joinUsername, - joinUUID: joinUUID, - enableMedialSearch: enableMedialSearch, - selectSQL: sel + from, - }, nil -} - -// Account stores information about accounts. 
-type Account struct { - ID uint64 - Email sql.NullString - UserID string - DisplayName sql.NullString - Quota sql.NullString - LastLogin int - Backend string - Home string - State int8 - PasswordHash string // from oc_users - Username sql.NullString // optional comes from the oc_preferences - OwnCloudUUID sql.NullString // optional comes from the oc_preferences -} - -func (as *Accounts) rowToAccount(ctx context.Context, row Scannable) (*Account, error) { - a := Account{} - if err := row.Scan(&a.ID, &a.Email, &a.UserID, &a.DisplayName, &a.Quota, &a.LastLogin, &a.Backend, &a.Home, &a.State, &a.PasswordHash, &a.Username, &a.OwnCloudUUID); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Msg("could not scan row, skipping") - return nil, err - } - - return &a, nil -} - -// Scannable describes the interface providing a Scan method. -type Scannable interface { - Scan(...interface{}) error -} - -// GetAccountByLogin fetches an account by mail or username. -func (as *Accounts) GetAccountByLogin(ctx context.Context, login string) (*Account, error) { - var row *sql.Row - username := strings.ToLower(login) // usernames are lowercased in owncloud classic - if as.joinUsername { - row = as.db.QueryRowContext(ctx, as.selectSQL+" WHERE a.email=? OR a.lower_user_id=? OR p.configvalue=?", login, username, login) - } else { - row = as.db.QueryRowContext(ctx, as.selectSQL+" WHERE a.email=? OR a.lower_user_id=?", login, username) - } - - return as.rowToAccount(ctx, row) -} - -// GetAccountGroups reads the groups for an account. 
-func (as *Accounts) GetAccountGroups(ctx context.Context, uid string) ([]string, error) { - rows, err := as.db.QueryContext(ctx, "SELECT gid FROM oc_group_user WHERE uid=?", uid) - if err != nil { - return nil, err - } - defer rows.Close() - - var group string - groups := []string{} - for rows.Next() { - if err := rows.Scan(&group); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Msg("could not scan row, skipping") - continue - } - groups = append(groups, group) - } - if err = rows.Err(); err != nil { - return nil, err - } - return groups, nil -} diff --git a/pkg/auth/manager/owncloudsql/accounts/accounts_suite_test.go b/pkg/auth/manager/owncloudsql/accounts/accounts_suite_test.go deleted file mode 100644 index 4f15e148d5..0000000000 --- a/pkg/auth/manager/owncloudsql/accounts/accounts_suite_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package accounts_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -func TestAccounts(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Accounts Suite") -} diff --git a/pkg/auth/manager/owncloudsql/accounts/accounts_test.go b/pkg/auth/manager/owncloudsql/accounts/accounts_test.go deleted file mode 100644 index 322e59a00b..0000000000 --- a/pkg/auth/manager/owncloudsql/accounts/accounts_test.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package accounts_test - -import ( - "context" - "database/sql" - "os" - - "github.com/cs3org/reva/pkg/auth/manager/owncloudsql/accounts" - _ "github.com/mattn/go-sqlite3" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Accounts", func() { - var ( - conn *accounts.Accounts - testDBFile *os.File - sqldb *sql.DB - ) - - BeforeEach(func() { - var err error - testDBFile, err = os.CreateTemp("", "example") - Expect(err).ToNot(HaveOccurred()) - - dbData, err := os.ReadFile("test.sqlite") - Expect(err).ToNot(HaveOccurred()) - - _, err = testDBFile.Write(dbData) - Expect(err).ToNot(HaveOccurred()) - err = testDBFile.Close() - Expect(err).ToNot(HaveOccurred()) - - sqldb, err = sql.Open("sqlite3", testDBFile.Name()) - Expect(err).ToNot(HaveOccurred()) - - }) - - AfterEach(func() { - os.Remove(testDBFile.Name()) - }) - - Describe("GetAccountByLogin", func() { - - Context("without any joins", func() { - - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, false, false, false) - Expect(err).ToNot(HaveOccurred()) - }) - - It("gets existing account by mail", func() { - value := "admin@example.org" - account, err := conn.GetAccountByLogin(context.Background(), value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("admin")) - Expect(account.OwnCloudUUID.String).To(Equal("admin")) - }) - - It("gets existing account by username", func() { - value := "admin" - account, err := conn.GetAccountByLogin(context.Background(), value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - 
Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("admin")) - Expect(account.OwnCloudUUID.String).To(Equal("admin")) - }) - - }) - - Context("with username joins", func() { - - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, true, false, false) - Expect(err).ToNot(HaveOccurred()) - }) - - It("gets existing account by mail", func() { - value := "admin@example.org" - account, err := conn.GetAccountByLogin(context.Background(), value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("Administrator")) - Expect(account.OwnCloudUUID.String).To(Equal("admin")) - }) - - It("gets existing account by username", func() { - value := "admin" - account, err := conn.GetAccountByLogin(context.Background(), value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - 
Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("Administrator")) - Expect(account.OwnCloudUUID.String).To(Equal("admin")) - }) - }) - - Context("with uuid joins", func() { - - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, false, true, false) - Expect(err).ToNot(HaveOccurred()) - }) - - It("gets existing account by mail", func() { - value := "admin@example.org" - account, err := conn.GetAccountByLogin(context.Background(), value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("admin")) - Expect(account.OwnCloudUUID.String).To(Equal("7015b5ec-7723-4560-bb96-85e18a947314")) - }) - - It("gets existing account by username", func() { - value := "admin" - account, err := conn.GetAccountByLogin(context.Background(), value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - 
Expect(account.Username.String).To(Equal("admin")) - Expect(account.OwnCloudUUID.String).To(Equal("7015b5ec-7723-4560-bb96-85e18a947314")) - }) - - }) - - Context("with username and uuid joins", func() { - - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, true, true, false) - Expect(err).ToNot(HaveOccurred()) - }) - - It("gets existing account by mail", func() { - value := "admin@example.org" - account, err := conn.GetAccountByLogin(context.Background(), value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("Administrator")) - Expect(account.OwnCloudUUID.String).To(Equal("7015b5ec-7723-4560-bb96-85e18a947314")) - }) - - It("gets existing account by username", func() { - value := "Administrator" - account, err := conn.GetAccountByLogin(context.Background(), value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("Administrator")) - 
Expect(account.OwnCloudUUID.String).To(Equal("7015b5ec-7723-4560-bb96-85e18a947314")) - }) - - }) - - }) - - Describe("GetAccountGroups", func() { - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, true, true, false) - Expect(err).ToNot(HaveOccurred()) - }) - It("get admin group for admin account", func() { - accounts, err := conn.GetAccountGroups(context.Background(), "admin") - Expect(err).ToNot(HaveOccurred()) - Expect(len(accounts)).To(Equal(1)) - Expect(accounts[0]).To(Equal("admin")) - }) - It("handles not existing account", func() { - accounts, err := conn.GetAccountGroups(context.Background(), "__notexisting__") - Expect(err).ToNot(HaveOccurred()) - Expect(len(accounts)).To(Equal(0)) - }) - }) -}) diff --git a/pkg/auth/manager/owncloudsql/accounts/test.sqlite b/pkg/auth/manager/owncloudsql/accounts/test.sqlite deleted file mode 100644 index c68bb753774fc28eda95f71768f5d41d29830363..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 90112 zcmeI*T~8a?8Nl%wFc_S`u)86nYK1ZisfZN_HUtP+Z8t!i4O+rW78;0FGJ`!hPHc}e zV=pi54QZwQ2z$|s_F~n3fJ&8C>X&G*Dpl%brD|2FdzIUsIm6gvjDc!ZwOIcVG4pcH z%yWKo=FFTiGx>aNDPOUSUE3*|6=PBvP*hcUXc&s3OpAZl#lK+qjTngp6XLfT8gDf^ zt=vsK=-jK5vNqxD8 zBGxQIh`ldJ;_A^za|mX(m@lRMO#ZmYB4k5zJ~$(eQ?s9lMW(3-)B9dj|7`3;FvnFR zU9p^^D?e)>4U67CHI{}Xq1?4V8WMP+c-s*$9}YzI=OUI*!Y~bqgG!qY?V^SR?_D6G zAVaW@)^XKSis)ARqx#U0`qr(OI|WPZ6tBLx!Efz&!|QYNODSVJn7wV>jv0pB@y3^? 
zlX+=6x2NyLjpdbfV|im~$#|4{Jh!p5Zj6k?Mn%Ho+Ugw-+YK3lbY3QzvTVBUp6z7o z$tI?zTCRBY+2Yf=XIsW+sjZ+M#^U2zx4tl>Dw5lMQxMk^X|r0f<+RsC$P%WL!Pvmt zTUVocJg)w-GcYJS)~@APrHtiCgr@qMJVFZt6#*!Yh=y{uf-%eGaMH<%mPOsp*rnZk z?zMGru{Hb2zcdR~L0+&+xr()4sqL4*`vjmL`RKME)@W#(@mTz$zHWUbeuis^Lh6R5 z`wwGL{l*RTXdxh$$G)fQk6dYj_2(a(=E6#6)U?L(c)3L2$t&G@>c$y?`>Izh~GqWAQ;<+A@U_6r|Q&4nEG+u&Gywzpe_yuH&{WZAOT$}Fd%Eeb6c{trt1QS_XeDfenxh8HTfobLI>&pM;}@UZ&T$3eM| z+efc*2BVRt5(i6;i|f_Te5qpPEXSxfgW)|jx3RvmxGdK5G_|}g+dBVZqSj8xhOVhW zl&9}-?Qdug<$4PCo;bx{AYsEr!#Uf1Q?)B*y{Yk9rl!~mrdvrD>|DN7FMLx=myj{; zn3>mBsYRKWB6L2H3B(zyBHFS*`ScBKEHAEYq`Z@XciAJ4e{lk;&26L- zZNpSox4t-h#sL%T=k!FVI&@D(E}G3d%>?V{5*N@t-bM6kN4I|Gj=Cjl-0>;Owcb?a z8OJ|k1(Tf(HSa{)+uj*J zFd%>c0tg_000IagfB*srAb>!d2=uF+!@c!CKXCp2|2K;EO`8;uVjzG30tg_000Iag zfB*srAaF?qUZ_`;ab4Gc6d9Z8l@HN8wD!$nxnSM3ot&Ie|41eh#;5b!M<1`OEPSyc z9?JOQktrT~FkNfBSgMR?#l-k-zF@iIGEYpHsMi(Ss`v2{c|9_kAT#AJN0tg_000IagfB*srAb`NJfdBr#?-lKPNDx2(0R#|0 z009ILKmY**5I~@91p3rYU9Z0`(0~4){(sx-UkZW%0tg_000IagfB*srAb@};&>zu{ zU;QU<0{Gwm^ZtJ-2LcEnfB*srAb#A$@}K)y_w16J+Z-|@J1WPhdR3S_@E|NzMFT%Caa~r z{GsV&-Iw-WVpwequ#zuYhjz(IRUNx*jn7+7Az$*o|Bq;2E85rEKeRt-zi-$869^!H z00IagfB*srAbGt{eM+^C!YUrorWa{Abhash($message); - switch len(hash) { - case 60: // legacy PHPass hash - return nil == bcrypt.CompareHashAndPassword([]byte(hash), []byte(password+m.c.LegacySalt)) - case 40: // legacy sha1 hash - h := sha1.Sum([]byte(password)) - return hmac.Equal([]byte(hash), []byte(hex.EncodeToString(h[:]))) - } - return false -} -func (m *manager) verifyHashV1(password, hash string) bool { - // TODO implement password_needs_rehash - return nil == bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) -} diff --git a/pkg/auth/manager/owncloudsql/owncloudsql_test.go b/pkg/auth/manager/owncloudsql/owncloudsql_test.go deleted file mode 100644 index 33acd8e3c8..0000000000 --- a/pkg/auth/manager/owncloudsql/owncloudsql_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, 
Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package owncloudsql - -import ( - "testing" - - "github.com/cs3org/reva/pkg/auth/manager/owncloudsql/accounts" - "github.com/pkg/errors" -) - -// new returns a dummy auth manager for testing. -func new(m map[string]interface{}) (*manager, error) { - mgr := &manager{} - err := mgr.Configure(m) - if err != nil { - err = errors.Wrap(err, "error creating a new auth manager") - return nil, err - } - - mgr.db, err = accounts.New("unused", nil, false, false, false) - if err != nil { - return nil, err - } - - return mgr, nil -} - -func TestVerify(t *testing.T) { - tests := map[string]struct { - password string - hash string - expected bool - }{ - // Bogus values - "bogus-1": {"", "asf32äà$$a.|3", false}, - "bogus-2": {"", "", false}, - - // Valid SHA1 strings - "valid-sha1-1": {"password", "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", true}, - "valid-sha1-2": {"owncloud.com", "27a4643e43046c3569e33b68c1a4b15d31306d29", true}, - - // Invalid SHA1 strings - "invalid-sha1-1": {"InvalidString", "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", false}, - "invalid-sha1-2": {"AnotherInvalidOne", "27a4643e43046c3569e33b68c1a4b15d31306d29", false}, - - // Valid legacy password string with password salt "6Wow67q1wZQZpUUeI6G2LsWUu4XKx" - "valid-legacy-1": {"password", 
"$2a$08$emCpDEl.V.QwPWt5gPrqrOhdpH6ailBmkj2Hd2vD5U8qIy20HBe7.", true}, - "valid-legacy-2": {"password", "$2a$08$yjaLO4ev70SaOsWZ9gRS3eRSEpHVsmSWTdTms1949mylxJ279hzo2", true}, - "valid-legacy-3": {"password", "$2a$08$.jNRG/oB4r7gHJhAyb.mDupNUAqTnBIW/tWBqFobaYflKXiFeG0A6", true}, - "valid-legacy-4": {"owncloud.com", "$2a$08$YbEsyASX/hXVNMv8hXQo7ezreN17T8Jl6PjecGZvpX.Ayz2aUyaZ2", true}, - "valid-legacy-5": {"owncloud.com", "$2a$11$cHdDA2IkUP28oNGBwlL7jO/U3dpr8/0LIjTZmE8dMPA7OCUQsSTqS", true}, - "valid-legacy-6": {"owncloud.com", "$2a$08$GH.UoIfJ1e.qeZ85KPqzQe6NR8XWRgJXWIUeE1o/j1xndvyTA1x96", true}, - - // Invalid legacy passwords - "invalid-legacy": {"password", "$2a$08$oKAQY5IhnZocP.61MwP7xu7TNeOb7Ostvk3j6UpacvaNMs.xRj7O2", false}, - - // Valid passwords "6Wow67q1wZQZpUUeI6G2LsWUu4XKx" - "valid-1": {"password", "1|$2a$05$ezAE0dkwk57jlfo6z5Pql.gcIK3ReXT15W7ITNxVS0ksfhO/4E4Kq", true}, - "valid-2": {"password", "1|$2a$05$4OQmloFW4yTVez2MEWGIleDO9Z5G9tWBXxn1vddogmKBQq/Mq93pe", true}, - "valid-3": {"password", "1|$2a$11$yj0hlp6qR32G9exGEXktB.yW2rgt2maRBbPgi3EyxcDwKrD14x/WO", true}, - "valid-4": {"owncloud.com", "1|$2a$10$Yiss2WVOqGakxuuqySv5UeOKpF8d8KmNjuAPcBMiRJGizJXjA2bKm", true}, - "valid-5": {"owncloud.com", "1|$2a$10$v9mh8/.mF/Ut9jZ7pRnpkuac3bdFCnc4W/gSumheQUi02Sr.xMjPi", true}, - "valid-6": {"owncloud.com", "1|$2a$05$ST5E.rplNRfDCzRpzq69leRzsTGtY7k88h9Vy2eWj0Ug/iA9w5kGK", true}, - - // Invalid passwords - "invalid-1": {"password", "0|$2a$08$oKAQY5IhnZocP.61MwP7xu7TNeOb7Ostvk3j6UpacvaNMs.xRj7O2", false}, - "invalid-2": {"password", "1|$2a$08$oKAQY5IhnZocP.61MwP7xu7TNeOb7Ostvk3j6UpacvaNMs.xRj7O2", false}, - "invalid-3": {"password", "2|$2a$08$oKAQY5IhnZocP.61MwP7xu7TNeOb7Ostvk3j6UpacvaNMs.xRj7O2", false}, - } - - u, err := new(map[string]interface{}{ - "legacy_salt": "6Wow67q1wZQZpUUeI6G2LsWUu4XKx", - }) - if err != nil { - t.Fatalf("could not initialize owncloudsql auth manager: %v", err) - } - - for name := range tests { - var tc = tests[name] - t.Run(name, func(t 
*testing.T) { - actual := u.verify(tc.password, tc.hash) - if actual != tc.expected { - t.Fatalf("%v returned wrong verification:\n\tAct: %v\n\tExp: %v", t.Name(), actual, tc.expected) - } - }) - } -} diff --git a/pkg/auth/scope/resourceinfo.go b/pkg/auth/scope/resourceinfo.go index 8e908f9b4e..62afbac86e 100644 --- a/pkg/auth/scope/resourceinfo.go +++ b/pkg/auth/scope/resourceinfo.go @@ -112,6 +112,7 @@ func checkResourcePath(path string) bool { "/ocs/v2.php/cloud/capabilities", "/ocs/v1.php/cloud/capabilities", "/ocs/v1.php/cloud/user", + "/thumbnails", } for _, p := range paths { if strings.HasPrefix(path, p) { diff --git a/pkg/cbox/group/rest/cache.go b/pkg/cbox/group/rest/cache.go deleted file mode 100644 index a9fb561678..0000000000 --- a/pkg/cbox/group/rest/cache.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package rest - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" - "strings" - "time" - - grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - "github.com/gomodule/redigo/redis" -) - -const ( - groupPrefix = "group:" - idPrefix = "id:" - namePrefix = "name:" - gidPrefix = "gid:" - groupMembersPrefix = "members:" - groupInternalIDPrefix = "internal:" -) - -func initRedisPool(address, username, password string) *redis.Pool { - return &redis.Pool{ - - MaxIdle: 50, - MaxActive: 1000, - IdleTimeout: 240 * time.Second, - - Dial: func() (redis.Conn, error) { - var c redis.Conn - var err error - switch { - case username != "": - c, err = redis.Dial("tcp", address, - redis.DialUsername(username), - redis.DialPassword(password), - ) - case password != "": - c, err = redis.Dial("tcp", address, - redis.DialPassword(password), - ) - default: - c, err = redis.Dial("tcp", address) - } - - if err != nil { - return nil, err - } - return c, err - }, - - TestOnBorrow: func(c redis.Conn, t time.Time) error { - _, err := c.Do("PING") - return err - }, - } -} - -func (m *manager) setVal(key, val string, expiration int) error { - conn := m.redisPool.Get() - defer conn.Close() - if conn != nil { - args := []interface{}{key, val} - if expiration != -1 { - args = append(args, "EX", expiration) - } - if _, err := conn.Do("SET", args...); err != nil { - return err - } - return nil - } - return errors.New("rest: unable to get connection from redis pool") -} - -func (m *manager) getVal(key string) (string, error) { - conn := m.redisPool.Get() - defer conn.Close() - if conn != nil { - val, err := redis.String(conn.Do("GET", key)) - if err != nil { - return "", err - } - return val, nil - } - return "", errors.New("rest: unable to get connection from redis pool") -} - -func (m *manager) findCachedGroups(query string) ([]*grouppb.Group, error) { - conn := m.redisPool.Get() - defer conn.Close() - if conn != 
nil { - query = fmt.Sprintf("%s*%s*", groupPrefix, strings.ReplaceAll(strings.ToLower(query), " ", "_")) - keys, err := redis.Strings(conn.Do("KEYS", query)) - if err != nil { - return nil, err - } - var args []interface{} - for _, k := range keys { - args = append(args, k) - } - - // Fetch the groups for all these keys - groupStrings, err := redis.Strings(conn.Do("MGET", args...)) - if err != nil { - return nil, err - } - groupMap := make(map[string]*grouppb.Group) - for _, group := range groupStrings { - g := grouppb.Group{} - if err = json.Unmarshal([]byte(group), &g); err == nil { - groupMap[g.Id.OpaqueId] = &g - } - } - - var groups []*grouppb.Group - for _, g := range groupMap { - groups = append(groups, g) - } - - return groups, nil - } - - return nil, errors.New("rest: unable to get connection from redis pool") -} - -func (m *manager) fetchCachedGroupDetails(gid *grouppb.GroupId) (*grouppb.Group, error) { - group, err := m.getVal(groupPrefix + idPrefix + gid.OpaqueId) - if err != nil { - return nil, err - } - - g := grouppb.Group{} - if err = json.Unmarshal([]byte(group), &g); err != nil { - return nil, err - } - return &g, nil -} - -func (m *manager) cacheGroupDetails(g *grouppb.Group) error { - expiration := (m.conf.GroupFetchInterval + 1) * 3600 - encodedGroup, err := json.Marshal(&g) - if err != nil { - return err - } - if err = m.setVal(groupPrefix+idPrefix+strings.ToLower(g.Id.OpaqueId), string(encodedGroup), expiration); err != nil { - return err - } - - if g.GidNumber != 0 { - if err = m.setVal(groupPrefix+gidPrefix+strconv.FormatInt(g.GidNumber, 10), g.Id.OpaqueId, expiration); err != nil { - return err - } - } - if g.DisplayName != "" { - if err = m.setVal(groupPrefix+namePrefix+g.Id.OpaqueId+"_"+strings.ToLower(g.DisplayName), g.Id.OpaqueId, expiration); err != nil { - return err - } - } - return nil -} - -func (m *manager) fetchCachedGroupByParam(field, claim string) (*grouppb.Group, error) { - group, err := m.getVal(groupPrefix + field + ":" + 
strings.ToLower(claim)) - if err != nil { - return nil, err - } - - g := grouppb.Group{} - if err = json.Unmarshal([]byte(group), &g); err != nil { - return nil, err - } - return &g, nil -} - -func (m *manager) fetchCachedGroupMembers(gid *grouppb.GroupId) ([]*userpb.UserId, error) { - members, err := m.getVal(groupPrefix + groupMembersPrefix + strings.ToLower(gid.OpaqueId)) - if err != nil { - return nil, err - } - u := []*userpb.UserId{} - if err = json.Unmarshal([]byte(members), &u); err != nil { - return nil, err - } - return u, nil -} - -func (m *manager) cacheGroupMembers(gid *grouppb.GroupId, members []*userpb.UserId) error { - u, err := json.Marshal(&members) - if err != nil { - return err - } - return m.setVal(groupPrefix+groupMembersPrefix+strings.ToLower(gid.OpaqueId), string(u), m.conf.GroupMembersCacheExpiration*60) -} diff --git a/pkg/cbox/group/rest/rest.go b/pkg/cbox/group/rest/rest.go deleted file mode 100644 index 43acf7a2a3..0000000000 --- a/pkg/cbox/group/rest/rest.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package rest - -import ( - "context" - "fmt" - "os" - "os/signal" - "strings" - "syscall" - "time" - - grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - user "github.com/cs3org/reva/pkg/cbox/user/rest" - utils "github.com/cs3org/reva/pkg/cbox/utils" - "github.com/cs3org/reva/pkg/group" - "github.com/cs3org/reva/pkg/group/manager/registry" - "github.com/cs3org/reva/pkg/utils/cfg" - "github.com/cs3org/reva/pkg/utils/list" - "github.com/gomodule/redigo/redis" - "github.com/rs/zerolog/log" -) - -func init() { - registry.Register("rest", New) -} - -type manager struct { - conf *config - redisPool *redis.Pool - apiTokenManager *utils.APITokenManager -} - -type config struct { - // The address at which the redis server is running - RedisAddress string `mapstructure:"redis_address" docs:"localhost:6379"` - // The username for connecting to the redis server - RedisUsername string `mapstructure:"redis_username" docs:""` - // The password for connecting to the redis server - RedisPassword string `mapstructure:"redis_password" docs:""` - // The time in minutes for which the members of a group would be cached - GroupMembersCacheExpiration int `mapstructure:"group_members_cache_expiration" docs:"5"` - // The OIDC Provider - IDProvider string `mapstructure:"id_provider" docs:"http://cernbox.cern.ch"` - // Base API Endpoint - APIBaseURL string `mapstructure:"api_base_url" docs:"https://authorization-service-api-dev.web.cern.ch"` - // Client ID needed to authenticate - ClientID string `mapstructure:"client_id" docs:"-"` - // Client Secret - ClientSecret string `mapstructure:"client_secret" docs:"-"` - - // Endpoint to generate token to access the API - OIDCTokenEndpoint string `mapstructure:"oidc_token_endpoint" docs:"https://keycloak-dev.cern.ch/auth/realms/cern/api-access/token"` - // The target application for which token needs to be generated - TargetAPI 
string `mapstructure:"target_api" docs:"authorization-service-api"` - // The time in seconds between bulk fetch of groups - GroupFetchInterval int `mapstructure:"group_fetch_interval" docs:"3600"` -} - -func (c *config) ApplyDefaults() { - if c.GroupMembersCacheExpiration == 0 { - c.GroupMembersCacheExpiration = 5 - } - if c.RedisAddress == "" { - c.RedisAddress = ":6379" - } - if c.APIBaseURL == "" { - c.APIBaseURL = "https://authorization-service-api-dev.web.cern.ch" - } - if c.TargetAPI == "" { - c.TargetAPI = "authorization-service-api" - } - if c.OIDCTokenEndpoint == "" { - c.OIDCTokenEndpoint = "https://keycloak-dev.cern.ch/auth/realms/cern/api-access/token" - } - if c.IDProvider == "" { - c.IDProvider = "http://cernbox.cern.ch" - } - if c.GroupFetchInterval == 0 { - c.GroupFetchInterval = 3600 - } -} - -// New returns a user manager implementation that makes calls to the GRAPPA API. -func New(ctx context.Context, m map[string]interface{}) (group.Manager, error) { - var c config - if err := cfg.Decode(m, &c); err != nil { - return nil, err - } - - redisPool := initRedisPool(c.RedisAddress, c.RedisUsername, c.RedisPassword) - apiTokenManager, err := utils.InitAPITokenManager(m) - if err != nil { - return nil, err - } - - mgr := &manager{ - conf: &c, - redisPool: redisPool, - apiTokenManager: apiTokenManager, - } - go mgr.fetchAllGroups(context.Background()) - return mgr, nil -} - -func (m *manager) fetchAllGroups(ctx context.Context) { - _ = m.fetchAllGroupAccounts(ctx) - ticker := time.NewTicker(time.Duration(m.conf.GroupFetchInterval) * time.Second) - work := make(chan os.Signal, 1) - signal.Notify(work, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT) - - for { - select { - case <-work: - return - case <-ticker.C: - _ = m.fetchAllGroupAccounts(ctx) - } - } -} - -// Group contains the information about a group. 
-type Group struct { - GroupIdentifier string `json:"groupIdentifier"` - DisplayName string `json:"displayName"` - GID int `json:"gid,omitempty"` - IsComputingGroup bool `json:"isComputingGroup"` -} - -// GroupsResponse contains the expected response from grappa -// when getting the list of groups. -type GroupsResponse struct { - Pagination struct { - Links struct { - Next *string `json:"next"` - } `json:"links"` - } `json:"pagination"` - Data []*Group `json:"data"` -} - -func (m *manager) fetchAllGroupAccounts(ctx context.Context) error { - url := fmt.Sprintf("%s/api/v1.0/Group?field=groupIdentifier&field=displayName&field=gid&field=isComputingGroup", m.conf.APIBaseURL) - - for { - var r GroupsResponse - if err := m.apiTokenManager.SendAPIGetRequest(ctx, url, false, &r); err != nil { - return err - } - - for _, g := range r.Data { - if g.IsComputingGroup { - continue - } - if _, err := m.parseAndCacheGroup(ctx, g); err != nil { - continue - } - } - - if r.Pagination.Links.Next == nil { - break - } - url = fmt.Sprintf("%s%s", m.conf.APIBaseURL, *r.Pagination.Links.Next) - } - - return nil -} - -func (m *manager) parseAndCacheGroup(ctx context.Context, g *Group) (*grouppb.Group, error) { - groupID := &grouppb.GroupId{ - Idp: m.conf.IDProvider, - OpaqueId: g.GroupIdentifier, - } - - group := &grouppb.Group{ - Id: groupID, - GroupName: g.GroupIdentifier, - Mail: g.GroupIdentifier + "@cern.ch", - DisplayName: g.DisplayName, - GidNumber: int64(g.GID), - } - - if err := m.cacheGroupDetails(group); err != nil { - log.Error().Err(err).Msg("rest: error caching group details") - } - - return group, nil -} - -func (m *manager) GetGroup(ctx context.Context, gid *grouppb.GroupId, skipFetchingMembers bool) (*grouppb.Group, error) { - g, err := m.fetchCachedGroupDetails(gid) - if err != nil { - return nil, err - } - - if !skipFetchingMembers { - groupMembers, err := m.GetMembers(ctx, gid) - if err != nil { - return nil, err - } - g.Members = groupMembers - } - - return g, nil -} 
- -func (m *manager) GetGroupByClaim(ctx context.Context, claim, value string, skipFetchingMembers bool) (*grouppb.Group, error) { - if claim == "group_name" { - return m.GetGroup(ctx, &grouppb.GroupId{OpaqueId: value}, skipFetchingMembers) - } - - g, err := m.fetchCachedGroupByParam(claim, value) - if err != nil { - return nil, err - } - - if !skipFetchingMembers { - groupMembers, err := m.GetMembers(ctx, g.Id) - if err != nil { - return nil, err - } - g.Members = groupMembers - } - - return g, nil -} - -func (m *manager) FindGroups(ctx context.Context, query string, skipFetchingMembers bool) ([]*grouppb.Group, error) { - // Look at namespaces filters. If the query starts with: - // "a" or none => get egroups - // other filters => get empty list - - parts := strings.SplitN(query, ":", 2) - - if len(parts) == 2 { - if parts[0] == "a" { - query = parts[1] - } else { - return []*grouppb.Group{}, nil - } - } - - return m.findCachedGroups(query) -} - -func (m *manager) GetMembers(ctx context.Context, gid *grouppb.GroupId) ([]*userpb.UserId, error) { - users, err := m.fetchCachedGroupMembers(gid) - if err == nil { - return users, nil - } - - url := fmt.Sprintf("%s/api/v1.0/Group/%s/memberidentities/precomputed?limit=10&field=upn&field=primaryAccountEmail&field=displayName&field=uid&field=gid&field=type&field=source", m.conf.APIBaseURL, gid.OpaqueId) - - var r user.IdentitiesResponse - members := []*userpb.UserId{} - for { - if err := m.apiTokenManager.SendAPIGetRequest(ctx, url, false, &r); err != nil { - return nil, err - } - - users := list.Map(r.Data, func(i *user.Identity) *userpb.UserId { - return &userpb.UserId{OpaqueId: i.Upn, Idp: m.conf.IDProvider, Type: i.UserType()} - }) - members = append(members, users...) 
- - if r.Pagination.Links.Next == nil { - break - } - url = fmt.Sprintf("%s%s", m.conf.APIBaseURL, *r.Pagination.Links.Next) - } - - if err = m.cacheGroupMembers(gid, members); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Msg("rest: error caching group members") - } - - return users, nil -} - -func (m *manager) HasMember(ctx context.Context, gid *grouppb.GroupId, uid *userpb.UserId) (bool, error) { - // TODO (gdelmont): this can be improved storing the users a group is composed of as a list in redis - // and, instead of returning all the members, use the redis apis to check if the user is in the list. - groupMemers, err := m.GetMembers(ctx, gid) - if err != nil { - return false, err - } - - for _, u := range groupMemers { - if uid.OpaqueId == u.OpaqueId { - return true, nil - } - } - return false, nil -} diff --git a/pkg/cbox/loader/loader.go b/pkg/cbox/loader/loader.go deleted file mode 100644 index efeeb83db2..0000000000 --- a/pkg/cbox/loader/loader.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package loader - -import ( - // Load cbox specific drivers. 
- _ "github.com/cs3org/reva/pkg/cbox/favorite/sql" - _ "github.com/cs3org/reva/pkg/cbox/group/rest" - _ "github.com/cs3org/reva/pkg/cbox/preferences/sql" - _ "github.com/cs3org/reva/pkg/cbox/publicshare/sql" - _ "github.com/cs3org/reva/pkg/cbox/share/sql" - _ "github.com/cs3org/reva/pkg/cbox/storage/eoshomewrapper" - _ "github.com/cs3org/reva/pkg/cbox/storage/eoswrapper" - _ "github.com/cs3org/reva/pkg/cbox/user/rest" -) diff --git a/pkg/cbox/share/sql/sql.go b/pkg/cbox/share/sql/sql.go deleted file mode 100644 index 0133ed8ddb..0000000000 --- a/pkg/cbox/share/sql/sql.go +++ /dev/null @@ -1,648 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package sql - -import ( - "context" - "database/sql" - "fmt" - "path" - "strconv" - "strings" - "time" - - gatewayv1beta1 "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" - rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" - collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - typespb "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" - conversions "github.com/cs3org/reva/pkg/cbox/utils" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/rgrpc/todo/pool" - "github.com/cs3org/reva/pkg/share" - "github.com/cs3org/reva/pkg/share/manager/registry" - "github.com/cs3org/reva/pkg/sharedconf" - "github.com/cs3org/reva/pkg/utils" - "github.com/cs3org/reva/pkg/utils/cfg" - - // Provides mysql drivers. - _ "github.com/go-sql-driver/mysql" - "github.com/pkg/errors" - "google.golang.org/genproto/protobuf/field_mask" -) - -const ( - shareTypeUser = 0 - shareTypeGroup = 1 - - projectInstancesPrefix = "newproject" - projectSpaceGroupsPrefix = "cernbox-project-" - projectSpaceAdminGroupsSuffix = "-admins" -) - -func init() { - registry.Register("sql", New) -} - -type config struct { - DBUsername string `mapstructure:"db_username"` - DBPassword string `mapstructure:"db_password"` - DBHost string `mapstructure:"db_host"` - DBPort int `mapstructure:"db_port"` - DBName string `mapstructure:"db_name"` - GatewaySvc string `mapstructure:"gatewaysvc"` -} - -type mgr struct { - c *config - db *sql.DB - client gatewayv1beta1.GatewayAPIClient -} - -func (c *config) ApplyDefaults() { - c.GatewaySvc = sharedconf.GetGatewaySVC(c.GatewaySvc) -} - -// New returns a new share manager. 
-func New(ctx context.Context, m map[string]interface{}) (share.Manager, error) { - var c config - if err := cfg.Decode(m, &c); err != nil { - return nil, err - } - - db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", c.DBUsername, c.DBPassword, c.DBHost, c.DBPort, c.DBName)) - if err != nil { - return nil, err - } - - gw, err := pool.GetGatewayServiceClient(pool.Endpoint(c.GatewaySvc)) - if err != nil { - return nil, err - } - - return &mgr{ - c: &c, - db: db, - client: gw, - }, nil -} - -func (m *mgr) Share(ctx context.Context, md *provider.ResourceInfo, g *collaboration.ShareGrant) (*collaboration.Share, error) { - user := ctxpkg.ContextMustGetUser(ctx) - - // do not allow share to myself or the owner if share is for a user - // TODO(labkode): should not this be caught already at the gw level? - if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_USER && - (utils.UserEqual(g.Grantee.GetUserId(), user.Id) || utils.UserEqual(g.Grantee.GetUserId(), md.Owner)) { - return nil, errors.New("sql: owner/creator and grantee are the same") - } - - // check if share already exists. 
- key := &collaboration.ShareKey{ - Owner: md.Owner, - ResourceId: md.Id, - Grantee: g.Grantee, - } - _, err := m.getByKey(ctx, key) - - // share already exists - if err == nil { - return nil, errtypes.AlreadyExists(key.String()) - } - - now := time.Now().Unix() - ts := &typespb.Timestamp{ - Seconds: uint64(now), - } - - shareType, shareWith := conversions.FormatGrantee(g.Grantee) - itemType := conversions.ResourceTypeToItem(md.Type) - targetPath := path.Join("/", path.Base(md.Path)) - permissions := conversions.SharePermToInt(g.Permissions.Permissions) - prefix := md.Id.StorageId - itemSource := md.Id.OpaqueId - fileSource, err := strconv.ParseUint(itemSource, 10, 64) - if err != nil { - // it can be the case that the item source may be a character string - // we leave fileSource blank in that case - fileSource = 0 - } - - stmtString := "insert into oc_share set share_type=?,uid_owner=?,uid_initiator=?,item_type=?,fileid_prefix=?,item_source=?,file_source=?,permissions=?,stime=?,share_with=?,file_target=?" - stmtValues := []interface{}{shareType, conversions.FormatUserID(md.Owner), conversions.FormatUserID(user.Id), itemType, prefix, itemSource, fileSource, permissions, now, shareWith, targetPath} - - stmt, err := m.db.Prepare(stmtString) - if err != nil { - return nil, err - } - result, err := stmt.Exec(stmtValues...) 
- if err != nil { - return nil, err - } - lastID, err := result.LastInsertId() - if err != nil { - return nil, err - } - - return &collaboration.Share{ - Id: &collaboration.ShareId{ - OpaqueId: strconv.FormatInt(lastID, 10), - }, - ResourceId: md.Id, - Permissions: g.Permissions, - Grantee: g.Grantee, - Owner: md.Owner, - Creator: user.Id, - Ctime: ts, - Mtime: ts, - }, nil -} - -func (m *mgr) getByID(ctx context.Context, id *collaboration.ShareId) (*collaboration.Share, error) { - uid := conversions.FormatUserID(ctxpkg.ContextMustGetUser(ctx).Id) - s := conversions.DBShare{ID: id.OpaqueId} - query := "select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, coalesce(fileid_prefix, '') as fileid_prefix, coalesce(item_source, '') as item_source, coalesce(item_type, '') as item_type, stime, permissions, share_type FROM oc_share WHERE (orphan = 0 or orphan IS NULL) AND id=? AND (uid_owner=? or uid_initiator=?)" - if err := m.db.QueryRow(query, id.OpaqueId, uid, uid).Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.STime, &s.Permissions, &s.ShareType); err != nil { - if err == sql.ErrNoRows { - return nil, errtypes.NotFound(id.OpaqueId) - } - return nil, err - } - share, err := conversions.ConvertToCS3Share(ctx, m.client, s) - if err != nil { - return nil, err - } - return share, nil -} - -func (m *mgr) getByKey(ctx context.Context, key *collaboration.ShareKey) (*collaboration.Share, error) { - owner := conversions.FormatUserID(key.Owner) - uid := conversions.FormatUserID(ctxpkg.ContextMustGetUser(ctx).Id) - - s := conversions.DBShare{} - shareType, shareWith := conversions.FormatGrantee(key.Grantee) - query := "select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, coalesce(fileid_prefix, '') as fileid_prefix, coalesce(item_source, '') as item_source, coalesce(item_type, '') as 
item_type, id, stime, permissions, share_type FROM oc_share WHERE (orphan = 0 or orphan IS NULL) AND uid_owner=? AND fileid_prefix=? AND item_source=? AND share_type=? AND share_with=? AND (uid_owner=? or uid_initiator=?)" - if err := m.db.QueryRow(query, owner, key.ResourceId.StorageId, key.ResourceId.OpaqueId, shareType, shareWith, uid, uid).Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.ID, &s.STime, &s.Permissions, &s.ShareType); err != nil { - if err == sql.ErrNoRows { - return nil, errtypes.NotFound(key.String()) - } - return nil, err - } - share, err := conversions.ConvertToCS3Share(ctx, m.client, s) - if err != nil { - return nil, err - } - return share, nil -} - -func (m *mgr) GetShare(ctx context.Context, ref *collaboration.ShareReference) (*collaboration.Share, error) { - var s *collaboration.Share - var err error - switch { - case ref.GetId() != nil: - s, err = m.getByID(ctx, ref.GetId()) - case ref.GetKey() != nil: - s, err = m.getByKey(ctx, ref.GetKey()) - default: - err = errtypes.NotFound(ref.String()) - } - - if err != nil { - return nil, err - } - - return s, nil -} - -func (m *mgr) Unshare(ctx context.Context, ref *collaboration.ShareReference) error { - uid := conversions.FormatUserID(ctxpkg.ContextMustGetUser(ctx).Id) - var query string - params := []interface{}{} - switch { - case ref.GetId() != nil: - query = "delete from oc_share where id=? AND (uid_owner=? or uid_initiator=?)" - params = append(params, ref.GetId().OpaqueId, uid, uid) - case ref.GetKey() != nil: - key := ref.GetKey() - shareType, shareWith := conversions.FormatGrantee(key.Grantee) - owner := conversions.FormatUserID(key.Owner) - query = "delete from oc_share where uid_owner=? AND fileid_prefix=? AND item_source=? AND share_type=? AND share_with=? AND (uid_owner=? 
or uid_initiator=?)" - params = append(params, owner, key.ResourceId.StorageId, key.ResourceId.OpaqueId, shareType, shareWith, uid, uid) - default: - return errtypes.NotFound(ref.String()) - } - - stmt, err := m.db.Prepare(query) - if err != nil { - return err - } - res, err := stmt.Exec(params...) - if err != nil { - return err - } - - rowCnt, err := res.RowsAffected() - if err != nil { - return err - } - if rowCnt == 0 { - return errtypes.NotFound(ref.String()) - } - return nil -} - -func (m *mgr) UpdateShare(ctx context.Context, ref *collaboration.ShareReference, p *collaboration.SharePermissions) (*collaboration.Share, error) { - permissions := conversions.SharePermToInt(p.Permissions) - uid := conversions.FormatUserID(ctxpkg.ContextMustGetUser(ctx).Id) - - var query string - params := []interface{}{} - switch { - case ref.GetId() != nil: - query = "update oc_share set permissions=?,stime=? where id=? AND (uid_owner=? or uid_initiator=?)" - params = append(params, permissions, time.Now().Unix(), ref.GetId().OpaqueId, uid, uid) - case ref.GetKey() != nil: - key := ref.GetKey() - shareType, shareWith := conversions.FormatGrantee(key.Grantee) - owner := conversions.FormatUserID(key.Owner) - query = "update oc_share set permissions=?,stime=? where (uid_owner=? or uid_initiator=?) AND fileid_prefix=? AND item_source=? AND share_type=? AND share_with=? AND (uid_owner=? 
or uid_initiator=?)" - params = append(params, permissions, time.Now().Unix(), owner, owner, key.ResourceId.StorageId, key.ResourceId.OpaqueId, shareType, shareWith, uid, uid) - default: - return nil, errtypes.NotFound(ref.String()) - } - - stmt, err := m.db.Prepare(query) - if err != nil { - return nil, err - } - if _, err = stmt.Exec(params...); err != nil { - return nil, err - } - - return m.GetShare(ctx, ref) -} - -func (m *mgr) ListShares(ctx context.Context, filters []*collaboration.Filter) ([]*collaboration.Share, error) { - query := `select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, - coalesce(fileid_prefix, '') as fileid_prefix, coalesce(item_source, '') as item_source, coalesce(item_type, '') as item_type, - id, stime, permissions, share_type - FROM oc_share WHERE (orphan = 0 or orphan IS NULL) AND (share_type=? OR share_type=?)` - params := []interface{}{shareTypeUser, shareTypeGroup} - - groupedFilters := share.GroupFiltersByType(filters) - if len(groupedFilters) > 0 { - filterQuery, filterParams, err := translateFilters(groupedFilters) - if err != nil { - return nil, err - } - params = append(params, filterParams...) - if filterQuery != "" { - query = fmt.Sprintf("%s AND (%s)", query, filterQuery) - } - } - - uidOwnersQuery, uidOwnersParams, err := m.uidOwnerFilters(ctx, groupedFilters) - if err != nil { - return nil, err - } - params = append(params, uidOwnersParams...) - if uidOwnersQuery != "" { - query = fmt.Sprintf("%s AND (%s)", query, uidOwnersQuery) - } - - rows, err := m.db.Query(query, params...) 
- if err != nil { - return nil, err - } - defer rows.Close() - - var s conversions.DBShare - shares := []*collaboration.Share{} - for rows.Next() { - if err := rows.Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.ID, &s.STime, &s.Permissions, &s.ShareType); err != nil { - continue - } - share, err := conversions.ConvertToCS3Share(ctx, m.client, s) - if err != nil { - continue - } - shares = append(shares, share) - } - if err = rows.Err(); err != nil { - return nil, err - } - - return shares, nil -} - -// we list the shares that are targeted to the user in context or to the user groups. -func (m *mgr) ListReceivedShares(ctx context.Context, filters []*collaboration.Filter) ([]*collaboration.ReceivedShare, error) { - user := ctxpkg.ContextMustGetUser(ctx) - uid := conversions.FormatUserID(user.Id) - - params := []interface{}{uid, uid, uid, uid} - for _, v := range user.Groups { - params = append(params, v) - } - - query := `SELECT coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, - coalesce(fileid_prefix, '') as fileid_prefix, coalesce(item_source, '') as item_source, coalesce(item_type, '') as item_type, - ts.id, stime, permissions, share_type, coalesce(tr.state, 0) as state - FROM oc_share ts LEFT JOIN oc_share_status tr ON (ts.id = tr.id AND tr.recipient = ?) - WHERE (orphan = 0 or orphan IS NULL) AND (uid_owner != ? AND uid_initiator != ?)` - if len(user.Groups) > 0 { - query += " AND ((share_with=? AND share_type = 0) OR (share_type = 1 AND share_with in (?" + strings.Repeat(",?", len(user.Groups)-1) + ")))" - } else { - query += " AND (share_with=? AND share_type = 0)" - } - - groupedFilters := share.GroupFiltersByType(filters) - filterQuery, filterParams, err := translateFilters(groupedFilters) - if err != nil { - return nil, err - } - params = append(params, filterParams...) 
- - if filterQuery != "" { - query = fmt.Sprintf("%s AND (%s)", query, filterQuery) - } - - rows, err := m.db.Query(query, params...) - if err != nil { - return nil, err - } - defer rows.Close() - - var s conversions.DBShare - shares := []*collaboration.ReceivedShare{} - for rows.Next() { - if err := rows.Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.ID, &s.STime, &s.Permissions, &s.ShareType, &s.State); err != nil { - continue - } - share, err := conversions.ConvertToCS3ReceivedShare(ctx, m.client, s) - if err != nil { - continue - } - shares = append(shares, share) - } - if err = rows.Err(); err != nil { - return nil, err - } - - return shares, nil -} - -func (m *mgr) getReceivedByID(ctx context.Context, id *collaboration.ShareId) (*collaboration.ReceivedShare, error) { - user := ctxpkg.ContextMustGetUser(ctx) - uid := conversions.FormatUserID(user.Id) - - params := []interface{}{uid, id.OpaqueId, uid} - for _, v := range user.Groups { - params = append(params, v) - } - - s := conversions.DBShare{ID: id.OpaqueId} - query := `select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, - coalesce(fileid_prefix, '') as fileid_prefix, coalesce(item_source, '') as item_source, coalesce(item_type, '') as item_type, - stime, permissions, share_type, coalesce(tr.state, 0) as state - FROM oc_share ts LEFT JOIN oc_share_status tr ON (ts.id = tr.id AND tr.recipient = ?) - WHERE (orphan = 0 or orphan IS NULL) AND ts.id=?` - if len(user.Groups) > 0 { - query += " AND ((share_with=? AND share_type = 0) OR (share_type = 1 AND share_with in (?" + strings.Repeat(",?", len(user.Groups)-1) + ")))" - } else { - query += " AND (share_with=? 
AND share_type = 0)" - } - if err := m.db.QueryRow(query, params...).Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.STime, &s.Permissions, &s.ShareType, &s.State); err != nil { - if err == sql.ErrNoRows { - return nil, errtypes.NotFound(id.OpaqueId) - } - return nil, err - } - share, err := conversions.ConvertToCS3ReceivedShare(ctx, m.client, s) - if err != nil { - return nil, err - } - return share, nil -} - -func (m *mgr) getReceivedByKey(ctx context.Context, key *collaboration.ShareKey) (*collaboration.ReceivedShare, error) { - user := ctxpkg.ContextMustGetUser(ctx) - uid := conversions.FormatUserID(user.Id) - - shareType, shareWith := conversions.FormatGrantee(key.Grantee) - params := []interface{}{uid, conversions.FormatUserID(key.Owner), key.ResourceId.StorageId, key.ResourceId.OpaqueId, shareType, shareWith, shareWith} - for _, v := range user.Groups { - params = append(params, v) - } - - s := conversions.DBShare{} - query := `select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, - coalesce(fileid_prefix, '') as fileid_prefix, coalesce(item_source, '') as item_source, coalesce(item_type, '') as item_type, - ts.id, stime, permissions, share_type, coalesce(tr.state, 0) as state - FROM oc_share ts LEFT JOIN oc_share_status tr ON (ts.id = tr.id AND tr.recipient = ?) - WHERE (orphan = 0 or orphan IS NULL) AND uid_owner=? AND fileid_prefix=? AND item_source=? AND share_type=? AND share_with=?` - if len(user.Groups) > 0 { - query += " AND ((share_with=? AND share_type = 0) OR (share_type = 1 AND share_with in (?" + strings.Repeat(",?", len(user.Groups)-1) + ")))" - } else { - query += " AND (share_with=? 
AND share_type = 0)" - } - - if err := m.db.QueryRow(query, params...).Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.ID, &s.STime, &s.Permissions, &s.ShareType, &s.State); err != nil { - if err == sql.ErrNoRows { - return nil, errtypes.NotFound(key.String()) - } - return nil, err - } - - share, err := conversions.ConvertToCS3ReceivedShare(ctx, m.client, s) - if err != nil { - return nil, err - } - return share, nil -} - -func (m *mgr) GetReceivedShare(ctx context.Context, ref *collaboration.ShareReference) (*collaboration.ReceivedShare, error) { - var s *collaboration.ReceivedShare - var err error - switch { - case ref.GetId() != nil: - s, err = m.getReceivedByID(ctx, ref.GetId()) - case ref.GetKey() != nil: - s, err = m.getReceivedByKey(ctx, ref.GetKey()) - default: - err = errtypes.NotFound(ref.String()) - } - - if err != nil { - return nil, err - } - - return s, nil -} - -func (m *mgr) UpdateReceivedShare(ctx context.Context, share *collaboration.ReceivedShare, fieldMask *field_mask.FieldMask) (*collaboration.ReceivedShare, error) { - user := ctxpkg.ContextMustGetUser(ctx) - - rs, err := m.GetReceivedShare(ctx, &collaboration.ShareReference{Spec: &collaboration.ShareReference_Id{Id: share.Share.Id}}) - if err != nil { - return nil, err - } - - for i := range fieldMask.Paths { - switch fieldMask.Paths[i] { - case "state": - rs.State = share.State - default: - return nil, errtypes.NotSupported("updating " + fieldMask.Paths[i] + " is not supported") - } - } - - state := 0 - switch rs.GetState() { - case collaboration.ShareState_SHARE_STATE_REJECTED: - state = -1 - case collaboration.ShareState_SHARE_STATE_ACCEPTED: - state = 1 - } - - params := []interface{}{rs.Share.Id.OpaqueId, conversions.FormatUserID(user.Id), state, state} - query := "insert into oc_share_status(id, recipient, state) values(?, ?, ?) ON DUPLICATE KEY UPDATE state = ?" 
- - stmt, err := m.db.Prepare(query) - if err != nil { - return nil, err - } - _, err = stmt.Exec(params...) - if err != nil { - return nil, err - } - - return rs, nil -} - -func (m *mgr) uidOwnerFilters(ctx context.Context, filters map[collaboration.Filter_Type][]*collaboration.Filter) (string, []interface{}, error) { - user := ctxpkg.ContextMustGetUser(ctx) - uid := conversions.FormatUserID(user.Id) - - query := "uid_owner=? or uid_initiator=?" - params := []interface{}{uid, uid} - - client, err := pool.GetGatewayServiceClient(pool.Endpoint(m.c.GatewaySvc)) - if err != nil { - return "", nil, err - } - - if resourceFilters, ok := filters[collaboration.Filter_TYPE_RESOURCE_ID]; ok { - for _, f := range resourceFilters { - // For shares inside project spaces, if the user is an admin, we try to list all shares created by other admins - if strings.HasPrefix(f.GetResourceId().GetStorageId(), projectInstancesPrefix) { - res, err := client.Stat(ctx, &provider.StatRequest{Ref: &provider.Reference{ResourceId: f.GetResourceId()}}) - if err != nil || res.Status.Code != rpc.Code_CODE_OK { - continue - } - - // The path will look like /eos/project/c/cernbox, we need to extract the project name - parts := strings.SplitN(res.Info.Path, "/", 6) - if len(parts) < 5 { - continue - } - - adminGroup := projectSpaceGroupsPrefix + parts[4] + projectSpaceAdminGroupsSuffix - for _, g := range user.Groups { - if g == adminGroup { - // User belongs to the admin group, list all shares for the resource - - // TODO: this only works if shares for a single project are requested. - // If shares for multiple projects are requested, then we're not checking if the - // user is an admin for all of those. We can append the query ` or uid_owner=?` - // for all the project owners, which works fine for new reva - // but won't work for revaold since there, we store the uid of the share creator as uid_owner. 
- // For this to work across the two versions, this change would have to be made in revaold - // but it won't be straightforward as there, the storage provider doesn't return the - // resource owners. - return "", []interface{}{}, nil - } - } - } - } - } - - return query, params, nil -} - -func granteeTypeToShareType(granteeType provider.GranteeType) int { - switch granteeType { - case provider.GranteeType_GRANTEE_TYPE_USER: - return shareTypeUser - case provider.GranteeType_GRANTEE_TYPE_GROUP: - return shareTypeGroup - } - return -1 -} - -// translateFilters translates the filters to sql queries. -func translateFilters(filters map[collaboration.Filter_Type][]*collaboration.Filter) (string, []interface{}, error) { - var ( - filterQuery string - params []interface{} - ) - - // If multiple filters of the same type are passed to this function, they need to be combined with the `OR` operator. - // That is why the filters got grouped by type. - // For every given filter type, iterate over the filters and if there are more than one combine them. - // Combine the different filter types using `AND` - var filterCounter = 0 - for filterType, currFilters := range filters { - switch filterType { - case collaboration.Filter_TYPE_RESOURCE_ID: - filterQuery += "(" - for i, f := range currFilters { - filterQuery += "(fileid_prefix =? AND item_source=?)" - params = append(params, f.GetResourceId().StorageId, f.GetResourceId().OpaqueId) - - if i != len(currFilters)-1 { - filterQuery += " OR " - } - } - filterQuery += ")" - case collaboration.Filter_TYPE_GRANTEE_TYPE: - filterQuery += "(" - for i, f := range currFilters { - filterQuery += "share_type=?" - params = append(params, granteeTypeToShareType(f.GetGranteeType())) - - if i != len(currFilters)-1 { - filterQuery += " OR " - } - } - filterQuery += ")" - case collaboration.Filter_TYPE_EXCLUDE_DENIALS: - // TODO this may change once the mapping of permission to share types is completed (cf. 
pkg/cbox/utils/conversions.go) - filterQuery += "(permissions > 0)" - default: - return "", nil, fmt.Errorf("filter type is not supported") - } - if filterCounter != len(filters)-1 { - filterQuery += " AND " - } - filterCounter++ - } - return filterQuery, params, nil -} diff --git a/pkg/cbox/storage/eoshomewrapper/eoshomewrapper.go b/pkg/cbox/storage/eoshomewrapper/eoshomewrapper.go deleted file mode 100644 index ec385dcef4..0000000000 --- a/pkg/cbox/storage/eoshomewrapper/eoshomewrapper.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package eoshomewrapper - -import ( - "bytes" - "context" - "text/template" - - "github.com/Masterminds/sprig" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage" - "github.com/cs3org/reva/pkg/storage/fs/registry" - "github.com/cs3org/reva/pkg/storage/utils/eosfs" - "github.com/cs3org/reva/pkg/utils/cfg" -) - -func init() { - registry.Register("eoshomewrapper", New) -} - -type wrapper struct { - storage.FS - mountIDTemplate *template.Template -} - -// New returns an implementation of the storage.FS interface that forms a wrapper -// around separate connections to EOS. -func New(ctx context.Context, m map[string]interface{}) (storage.FS, error) { - var c eosfs.Config - if err := cfg.Decode(m, &c); err != nil { - return nil, err - } - // default to version invariance if not configured - if _, ok := m["version_invariant"]; !ok { - c.VersionInvariant = true - } - - t, ok := m["mount_id_template"].(string) - if !ok || t == "" { - t = "eoshome-{{substr 0 1 .Username}}" - } - - eos, err := eosfs.NewEOSFS(ctx, &c) - if err != nil { - return nil, err - } - - mountIDTemplate, err := template.New("mountID").Funcs(sprig.TxtFuncMap()).Parse(t) - if err != nil { - return nil, err - } - - return &wrapper{FS: eos, mountIDTemplate: mountIDTemplate}, nil -} - -// We need to override the two methods, GetMD and ListFolder to fill the -// StorageId in the ResourceInfo objects. - -func (w *wrapper) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (*provider.ResourceInfo, error) { - res, err := w.FS.GetMD(ctx, ref, mdKeys) - if err != nil { - return nil, err - } - - // We need to extract the mount ID based on the mapping template. - // - // Take the first letter of the username of the logged-in user, as the home - // storage provider restricts requests only to the home namespace. 
- res.Id.StorageId = w.getMountID(ctx, res) - return res, nil -} - -func (w *wrapper) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string) ([]*provider.ResourceInfo, error) { - res, err := w.FS.ListFolder(ctx, ref, mdKeys) - if err != nil { - return nil, err - } - for _, r := range res { - r.Id.StorageId = w.getMountID(ctx, r) - } - return res, nil -} - -func (w *wrapper) DenyGrant(ctx context.Context, ref *provider.Reference, g *provider.Grantee) error { - return errtypes.NotSupported("eos: deny grant is only enabled for project spaces") -} - -func (w *wrapper) getMountID(ctx context.Context, r *provider.ResourceInfo) string { - u := ctxpkg.ContextMustGetUser(ctx) - b := bytes.Buffer{} - if err := w.mountIDTemplate.Execute(&b, u); err != nil { - return "" - } - return b.String() -} diff --git a/pkg/cbox/storage/eoswrapper/eoswrapper.go b/pkg/cbox/storage/eoswrapper/eoswrapper.go deleted file mode 100644 index d4b518db40..0000000000 --- a/pkg/cbox/storage/eoswrapper/eoswrapper.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package eoswrapper - -import ( - "bytes" - "context" - "io" - "strings" - "text/template" - - "github.com/Masterminds/sprig" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage" - "github.com/cs3org/reva/pkg/storage/fs/registry" - "github.com/cs3org/reva/pkg/storage/utils/eosfs" - "github.com/cs3org/reva/pkg/utils/cfg" -) - -func init() { - registry.Register("eoswrapper", New) -} - -const ( - eosProjectsNamespace = "/eos/project" - - // We can use a regex for these, but that might have inferior performance. - projectSpaceGroupsPrefix = "cernbox-project-" - projectSpaceAdminGroupsSuffix = "-admins" -) - -type wrapper struct { - storage.FS - conf *eosfs.Config - mountIDTemplate *template.Template -} - -// New returns an implementation of the storage.FS interface that forms a wrapper -// around separate connections to EOS. -func New(ctx context.Context, m map[string]interface{}) (storage.FS, error) { - var c eosfs.Config - if err := cfg.Decode(m, &c); err != nil { - return nil, err - } - - // default to version invariance if not configured - if _, ok := m["version_invariant"]; !ok { - c.VersionInvariant = true - } - - // allow recycle operations for project spaces - if !c.EnableHome && strings.HasPrefix(c.Namespace, eosProjectsNamespace) { - c.AllowPathRecycleOperations = true - c.ImpersonateOwnerforRevisions = true - } - - t, ok := m["mount_id_template"].(string) - if !ok || t == "" { - t = "eoshome-{{ trimAll \"/\" .Path | substr 0 1 }}" - } - - eos, err := eosfs.NewEOSFS(ctx, &c) - if err != nil { - return nil, err - } - - mountIDTemplate, err := template.New("mountID").Funcs(sprig.TxtFuncMap()).Parse(t) - if err != nil { - return nil, err - } - - return &wrapper{FS: eos, conf: &c, mountIDTemplate: mountIDTemplate}, nil -} - -// We need to override the two methods, GetMD and ListFolder to fill the -// StorageId in the 
ResourceInfo objects. - -func (w *wrapper) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (*provider.ResourceInfo, error) { - res, err := w.FS.GetMD(ctx, ref, mdKeys) - if err != nil { - return nil, err - } - - // We need to extract the mount ID based on the mapping template. - // - // Take the first letter of the resource path after the namespace has been removed. - // If it's empty, leave it empty to be filled by storageprovider. - res.Id.StorageId = w.getMountID(ctx, res) - - if err = w.setProjectSharingPermissions(ctx, res); err != nil { - return nil, err - } - - return res, nil -} - -func (w *wrapper) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string) ([]*provider.ResourceInfo, error) { - res, err := w.FS.ListFolder(ctx, ref, mdKeys) - if err != nil { - return nil, err - } - for _, r := range res { - r.Id.StorageId = w.getMountID(ctx, r) - if err = w.setProjectSharingPermissions(ctx, r); err != nil { - continue - } - } - return res, nil -} - -func (w *wrapper) ListRevisions(ctx context.Context, ref *provider.Reference) ([]*provider.FileVersion, error) { - if err := w.userIsProjectAdmin(ctx, ref); err != nil { - return nil, err - } - - return w.FS.ListRevisions(ctx, ref) -} - -func (w *wrapper) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (io.ReadCloser, error) { - if err := w.userIsProjectAdmin(ctx, ref); err != nil { - return nil, err - } - - return w.FS.DownloadRevision(ctx, ref, revisionKey) -} - -func (w *wrapper) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) error { - if err := w.userIsProjectAdmin(ctx, ref); err != nil { - return err - } - - return w.FS.RestoreRevision(ctx, ref, revisionKey) -} - -func (w *wrapper) DenyGrant(ctx context.Context, ref *provider.Reference, g *provider.Grantee) error { - // This is only allowed for project space admins - if strings.HasPrefix(w.conf.Namespace, eosProjectsNamespace) { - if err := 
w.userIsProjectAdmin(ctx, ref); err != nil { - return err - } - return w.FS.DenyGrant(ctx, ref, g) - } - - return errtypes.NotSupported("eos: deny grant is only enabled for project spaces") -} - -func (w *wrapper) getMountID(ctx context.Context, r *provider.ResourceInfo) string { - if r == nil { - return "" - } - b := bytes.Buffer{} - if err := w.mountIDTemplate.Execute(&b, r); err != nil { - return "" - } - return b.String() -} - -func (w *wrapper) setProjectSharingPermissions(ctx context.Context, r *provider.ResourceInfo) error { - // Check if this storage provider corresponds to a project spaces instance - if strings.HasPrefix(w.conf.Namespace, eosProjectsNamespace) { - // Extract project name from the path resembling /c/cernbox or /c/cernbox/minutes/.. - parts := strings.SplitN(r.Path, "/", 4) - if len(parts) != 4 && len(parts) != 3 { - // The request might be for / or /$letter - // Nothing to do in that case - return nil - } - adminGroup := projectSpaceGroupsPrefix + parts[2] + projectSpaceAdminGroupsSuffix - user := ctxpkg.ContextMustGetUser(ctx) - - for _, g := range user.Groups { - if g == adminGroup { - r.PermissionSet.AddGrant = true - r.PermissionSet.RemoveGrant = true - r.PermissionSet.UpdateGrant = true - r.PermissionSet.ListGrants = true - r.PermissionSet.GetQuota = true - r.PermissionSet.DenyGrant = true - return nil - } - } - } - return nil -} - -func (w *wrapper) userIsProjectAdmin(ctx context.Context, ref *provider.Reference) error { - // Check if this storage provider corresponds to a project spaces instance - if !strings.HasPrefix(w.conf.Namespace, eosProjectsNamespace) { - return nil - } - - res, err := w.FS.GetMD(ctx, ref, nil) - if err != nil { - return err - } - - // Extract project name from the path resembling /c/cernbox or /c/cernbox/minutes/.. 
- parts := strings.SplitN(res.Path, "/", 4) - if len(parts) != 4 && len(parts) != 3 { - // The request might be for / or /$letter - // Nothing to do in that case - return nil - } - adminGroup := projectSpaceGroupsPrefix + parts[2] + projectSpaceAdminGroupsSuffix - user := ctxpkg.ContextMustGetUser(ctx) - - for _, g := range user.Groups { - if g == adminGroup { - return nil - } - } - - return errtypes.PermissionDenied("eosfs: project spaces revisions can only be accessed by admins") -} diff --git a/pkg/cbox/user/rest/cache.go b/pkg/cbox/user/rest/cache.go deleted file mode 100644 index 6b6f12ac64..0000000000 --- a/pkg/cbox/user/rest/cache.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package rest - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" - "strings" - "time" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - "github.com/gomodule/redigo/redis" -) - -const ( - userPrefix = "user:" - usernamePrefix = "username:" - namePrefix = "name:" - mailPrefix = "mail:" - uidPrefix = "uid:" - userGroupsPrefix = "groups:" -) - -func initRedisPool(address, username, password string) *redis.Pool { - return &redis.Pool{ - - MaxIdle: 50, - MaxActive: 1000, - IdleTimeout: 240 * time.Second, - - Dial: func() (redis.Conn, error) { - var opts []redis.DialOption - if username != "" { - opts = append(opts, redis.DialUsername(username)) - } - if password != "" { - opts = append(opts, redis.DialPassword(password)) - } - - c, err := redis.Dial("tcp", address, opts...) - if err != nil { - return nil, err - } - return c, err - }, - - TestOnBorrow: func(c redis.Conn, t time.Time) error { - _, err := c.Do("PING") - return err - }, - } -} - -func (m *manager) setVal(key, val string, expiration int) error { - conn := m.redisPool.Get() - defer conn.Close() - if conn != nil { - args := []interface{}{key, val} - if expiration != -1 { - args = append(args, "EX", expiration) - } - if _, err := conn.Do("SET", args...); err != nil { - return err - } - return nil - } - return errors.New("rest: unable to get connection from redis pool") -} - -func (m *manager) getVal(key string) (string, error) { - conn := m.redisPool.Get() - defer conn.Close() - if conn != nil { - val, err := redis.String(conn.Do("GET", key)) - if err != nil { - return "", err - } - return val, nil - } - return "", errors.New("rest: unable to get connection from redis pool") -} - -func (m *manager) findCachedUsers(query string) ([]*userpb.User, error) { - conn := m.redisPool.Get() - defer conn.Close() - if conn != nil { - query = fmt.Sprintf("%s*%s*", userPrefix, strings.ReplaceAll(strings.ToLower(query), " ", "_")) - keys, err := redis.Strings(conn.Do("KEYS", query)) - if err != 
nil { - return nil, err - } - var args []interface{} - for _, k := range keys { - args = append(args, k) - } - - // Fetch the users for all these keys - userStrings, err := redis.Strings(conn.Do("MGET", args...)) - if err != nil { - return nil, err - } - userMap := make(map[string]*userpb.User) - for _, user := range userStrings { - u := userpb.User{} - if err = json.Unmarshal([]byte(user), &u); err == nil { - userMap[u.Id.OpaqueId] = &u - } - } - - var users []*userpb.User - for _, u := range userMap { - users = append(users, u) - } - - return users, nil - } - - return nil, errors.New("rest: unable to get connection from redis pool") -} - -func (m *manager) fetchCachedUserDetails(uid *userpb.UserId) (*userpb.User, error) { - user, err := m.getVal(userPrefix + usernamePrefix + strings.ToLower(uid.OpaqueId)) - if err != nil { - return nil, err - } - - u := userpb.User{} - if err = json.Unmarshal([]byte(user), &u); err != nil { - return nil, err - } - return &u, nil -} - -func (m *manager) cacheUserDetails(u *userpb.User) error { - expiration := (m.conf.UserFetchInterval + 1) * 3600 - encodedUser, err := json.Marshal(&u) - if err != nil { - return err - } - if err = m.setVal(userPrefix+usernamePrefix+strings.ToLower(u.Id.OpaqueId), string(encodedUser), expiration); err != nil { - return err - } - - if u.Mail != "" { - if err = m.setVal(userPrefix+mailPrefix+strings.ToLower(u.Mail), string(encodedUser), expiration); err != nil { - return err - } - } - if u.DisplayName != "" { - if err = m.setVal(userPrefix+namePrefix+u.Id.OpaqueId+"_"+strings.ReplaceAll(strings.ToLower(u.DisplayName), " ", "_"), string(encodedUser), expiration); err != nil { - return err - } - } - if u.UidNumber != 0 { - if err = m.setVal(userPrefix+uidPrefix+strconv.FormatInt(u.UidNumber, 10), string(encodedUser), expiration); err != nil { - return err - } - } - return nil -} - -func (m *manager) fetchCachedUserByParam(field, claim string) (*userpb.User, error) { - user, err := m.getVal(userPrefix + 
field + ":" + strings.ToLower(claim)) - if err != nil { - return nil, err - } - - u := userpb.User{} - if err = json.Unmarshal([]byte(user), &u); err != nil { - return nil, err - } - return &u, nil -} - -func (m *manager) fetchCachedUserGroups(uid *userpb.UserId) ([]string, error) { - groups, err := m.getVal(userPrefix + userGroupsPrefix + strings.ToLower(uid.OpaqueId)) - if err != nil { - return nil, err - } - g := []string{} - if err = json.Unmarshal([]byte(groups), &g); err != nil { - return nil, err - } - return g, nil -} - -func (m *manager) cacheUserGroups(uid *userpb.UserId, groups []string) error { - g, err := json.Marshal(&groups) - if err != nil { - return err - } - return m.setVal(userPrefix+userGroupsPrefix+strings.ToLower(uid.OpaqueId), string(g), m.conf.UserGroupsCacheExpiration*60) -} diff --git a/pkg/cbox/user/rest/rest.go b/pkg/cbox/user/rest/rest.go deleted file mode 100644 index d15ed7eeab..0000000000 --- a/pkg/cbox/user/rest/rest.go +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package rest - -import ( - "context" - "fmt" - "os" - "os/signal" - "strings" - "syscall" - "time" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - utils "github.com/cs3org/reva/pkg/cbox/utils" - "github.com/cs3org/reva/pkg/user" - "github.com/cs3org/reva/pkg/user/manager/registry" - "github.com/cs3org/reva/pkg/utils/cfg" - "github.com/cs3org/reva/pkg/utils/list" - "github.com/gomodule/redigo/redis" - "github.com/rs/zerolog/log" -) - -func init() { - registry.Register("rest", New) -} - -type manager struct { - conf *config - redisPool *redis.Pool - apiTokenManager *utils.APITokenManager -} - -type config struct { - // The address at which the redis server is running - RedisAddress string `mapstructure:"redis_address" docs:"localhost:6379"` - // The username for connecting to the redis server - RedisUsername string `mapstructure:"redis_username" docs:""` - // The password for connecting to the redis server - RedisPassword string `mapstructure:"redis_password" docs:""` - // The time in minutes for which the groups to which a user belongs would be cached - UserGroupsCacheExpiration int `mapstructure:"user_groups_cache_expiration" docs:"5"` - // The OIDC Provider - IDProvider string `mapstructure:"id_provider" docs:"http://cernbox.cern.ch"` - // Base API Endpoint - APIBaseURL string `mapstructure:"api_base_url" docs:"https://authorization-service-api-dev.web.cern.ch"` - // Client ID needed to authenticate - ClientID string `mapstructure:"client_id" docs:"-"` - // Client Secret - ClientSecret string `mapstructure:"client_secret" docs:"-"` - - // Endpoint to generate token to access the API - OIDCTokenEndpoint string `mapstructure:"oidc_token_endpoint" docs:"https://keycloak-dev.cern.ch/auth/realms/cern/api-access/token"` - // The target application for which token needs to be generated - TargetAPI string `mapstructure:"target_api" docs:"authorization-service-api"` - // The time in seconds between bulk fetch 
of user accounts - UserFetchInterval int `mapstructure:"user_fetch_interval" docs:"3600"` -} - -func (c *config) ApplyDefaults() { - if c.UserGroupsCacheExpiration == 0 { - c.UserGroupsCacheExpiration = 5 - } - if c.RedisAddress == "" { - c.RedisAddress = ":6379" - } - if c.APIBaseURL == "" { - c.APIBaseURL = "https://authorization-service-api-dev.web.cern.ch" - } - if c.TargetAPI == "" { - c.TargetAPI = "authorization-service-api" - } - if c.OIDCTokenEndpoint == "" { - c.OIDCTokenEndpoint = "https://keycloak-dev.cern.ch/auth/realms/cern/api-access/token" - } - if c.IDProvider == "" { - c.IDProvider = "http://cernbox.cern.ch" - } - if c.UserFetchInterval == 0 { - c.UserFetchInterval = 3600 - } -} - -// New returns a user manager implementation that makes calls to the GRAPPA API. -func New(ctx context.Context, m map[string]interface{}) (user.Manager, error) { - mgr := &manager{} - err := mgr.Configure(m) - if err != nil { - return nil, err - } - return mgr, err -} - -func (m *manager) Configure(ml map[string]interface{}) error { - var c config - if err := cfg.Decode(ml, &c); err != nil { - return err - } - redisPool := initRedisPool(c.RedisAddress, c.RedisUsername, c.RedisPassword) - apiTokenManager, err := utils.InitAPITokenManager(ml) - if err != nil { - return err - } - m.conf = &c - m.redisPool = redisPool - m.apiTokenManager = apiTokenManager - - // Since we're starting a subroutine which would take some time to execute, - // we can't wait to see if it works before returning the user.Manager object - // TODO: return err if the fetch fails - go m.fetchAllUsers(context.Background()) - return nil -} - -func (m *manager) fetchAllUsers(ctx context.Context) { - _ = m.fetchAllUserAccounts(ctx) - ticker := time.NewTicker(time.Duration(m.conf.UserFetchInterval) * time.Second) - work := make(chan os.Signal, 1) - signal.Notify(work, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT) - - for { - select { - case <-work: - return - case <-ticker.C: - _ = 
m.fetchAllUserAccounts(ctx) - } - } -} - -// Identity contains the information of a single user. -type Identity struct { - PrimaryAccountEmail string `json:"primaryAccountEmail,omitempty"` - Type string `json:"type,omitempty"` - Upn string `json:"upn"` - DisplayName string `json:"displayName"` - Source string `json:"source,omitempty"` - UID int `json:"uid,omitempty"` - GID int `json:"gid,omitempty"` -} - -// IdentitiesResponse contains the expected response from grappa -// when getting the list of users. -type IdentitiesResponse struct { - Pagination struct { - Links struct { - Next *string `json:"next"` - } `json:"links"` - } `json:"pagination"` - Data []*Identity `json:"data"` -} - -// UserType convert the user type in grappa to CS3APIs. -func (i *Identity) UserType() userpb.UserType { - switch i.Type { - case "Application": - return userpb.UserType_USER_TYPE_APPLICATION - case "Service": - return userpb.UserType_USER_TYPE_SERVICE - case "Secondary": - return userpb.UserType_USER_TYPE_SECONDARY - case "Person": - if i.Source == "cern" { - return userpb.UserType_USER_TYPE_PRIMARY - } - return userpb.UserType_USER_TYPE_LIGHTWEIGHT - default: - return userpb.UserType_USER_TYPE_INVALID - } -} - -func (m *manager) fetchAllUserAccounts(ctx context.Context) error { - url := fmt.Sprintf("%s/api/v1.0/Identity?field=upn&field=primaryAccountEmail&field=displayName&field=uid&field=gid&field=type&field=source", m.conf.APIBaseURL) - - for { - var r IdentitiesResponse - if err := m.apiTokenManager.SendAPIGetRequest(ctx, url, false, &r); err != nil { - return err - } - - for _, usr := range r.Data { - if _, err := m.parseAndCacheUser(ctx, usr); err != nil { - continue - } - } - - if r.Pagination.Links.Next == nil { - break - } - url = fmt.Sprintf("%s%s", m.conf.APIBaseURL, *r.Pagination.Links.Next) - } - - return nil -} - -func (m *manager) parseAndCacheUser(ctx context.Context, i *Identity) (*userpb.User, error) { - u := &userpb.User{ - Id: &userpb.UserId{ - OpaqueId: i.Upn, - 
Idp: m.conf.IDProvider, - Type: i.UserType(), - }, - Username: i.Upn, - Mail: i.PrimaryAccountEmail, - DisplayName: i.DisplayName, - UidNumber: int64(i.UID), - GidNumber: int64(i.GID), - } - - if err := m.cacheUserDetails(u); err != nil { - log.Error().Err(err).Msg("rest: error caching user details") - } - - return u, nil -} - -func (m *manager) GetUser(ctx context.Context, uid *userpb.UserId, skipFetchingGroups bool) (*userpb.User, error) { - u, err := m.fetchCachedUserDetails(uid) - if err != nil { - return nil, err - } - - if !skipFetchingGroups { - userGroups, err := m.GetUserGroups(ctx, uid) - if err != nil { - return nil, err - } - u.Groups = userGroups - } - - return u, nil -} - -func (m *manager) GetUserByClaim(ctx context.Context, claim, value string, skipFetchingGroups bool) (*userpb.User, error) { - u, err := m.fetchCachedUserByParam(claim, value) - if err != nil { - return nil, err - } - - if !skipFetchingGroups { - userGroups, err := m.GetUserGroups(ctx, u.Id) - if err != nil { - return nil, err - } - u.Groups = userGroups - } - - return u, nil -} - -func (m *manager) FindUsers(ctx context.Context, query string, skipFetchingGroups bool) ([]*userpb.User, error) { - // Look at namespaces filters. 
If the query starts with: - // "a" => look into primary/secondary/service accounts - // "l" => look into lightweight/federated accounts - // none => look into primary - - parts := strings.SplitN(query, ":", 2) - - var namespace string - if len(parts) == 2 { - // the query contains a namespace filter - namespace, query = parts[0], parts[1] - } - - users, err := m.findCachedUsers(query) - if err != nil { - return nil, err - } - - userSlice := []*userpb.User{} - - var accountsFilters []userpb.UserType - switch namespace { - case "": - accountsFilters = []userpb.UserType{userpb.UserType_USER_TYPE_PRIMARY} - case "a": - accountsFilters = []userpb.UserType{userpb.UserType_USER_TYPE_PRIMARY, userpb.UserType_USER_TYPE_SECONDARY, userpb.UserType_USER_TYPE_SERVICE} - case "l": - accountsFilters = []userpb.UserType{userpb.UserType_USER_TYPE_LIGHTWEIGHT, userpb.UserType_USER_TYPE_FEDERATED} - } - - for _, u := range users { - if isUserAnyType(u, accountsFilters) { - userSlice = append(userSlice, u) - } - } - - return userSlice, nil -} - -// isUserAnyType returns true if the user's type is one of types list. -func isUserAnyType(user *userpb.User, types []userpb.UserType) bool { - for _, t := range types { - if user.GetId().Type == t { - return true - } - } - return false -} - -// Group contains the information about a group. -type Group struct { - DisplayName string `json:"displayName"` -} - -// GroupsResponse contains the expected response from grappa -// when getting the list of groups. -type GroupsResponse struct { - Pagination struct { - Links struct { - Next *string `json:"next"` - } `json:"links"` - } `json:"pagination"` - Data []Group `json:"data"` -} - -func (m *manager) GetUserGroups(ctx context.Context, uid *userpb.UserId) ([]string, error) { - groups, err := m.fetchCachedUserGroups(uid) - if err == nil { - return groups, nil - } - - // TODO (gdelmont): support pagination! 
we may have problems with users having more than 1000 groups - url := fmt.Sprintf("%s/api/v1.0/Identity/%s/groups?field=displayName&recursive=true", m.conf.APIBaseURL, uid.OpaqueId) - - var r GroupsResponse - if err := m.apiTokenManager.SendAPIGetRequest(ctx, url, false, &r); err != nil { - return nil, err - } - - groups = list.Map(r.Data, func(g Group) string { return g.DisplayName }) - - if err = m.cacheUserGroups(uid, groups); err != nil { - log := appctx.GetLogger(ctx) - log.Error().Err(err).Msg("rest: error caching user groups") - } - - return groups, nil -} - -func (m *manager) IsInGroup(ctx context.Context, uid *userpb.UserId, group string) (bool, error) { - // TODO (gdelmont): this can be improved storing the groups a user belong to as a list in redis - // and, instead of returning all the groups, use the redis apis to check if the group is in the list. - userGroups, err := m.GetUserGroups(ctx, uid) - if err != nil { - return false, err - } - - for _, g := range userGroups { - if group == g { - return true, nil - } - } - return false, nil -} diff --git a/pkg/cbox/utils/conversions.go b/pkg/cbox/utils/conversions.go index 9a63109b3b..84752ba9e5 100644 --- a/pkg/cbox/utils/conversions.go +++ b/pkg/cbox/utils/conversions.go @@ -19,14 +19,12 @@ package utils import ( - "context" - "errors" + "database/sql" + "strings" "time" - gatewayv1beta1 "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1" link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" @@ -54,7 +52,7 @@ type DBShare struct { Quicklink bool Description string NotifyUploads bool - NotifyUploadsExtraRecipients string + NotifyUploadsExtraRecipients sql.NullString } // 
FormatGrantee formats a CS3API grantee to a string. @@ -75,27 +73,19 @@ func FormatGrantee(g *provider.Grantee) (int, string) { } // ExtractGrantee retrieves the CS3API grantee from a formatted string. -func ExtractGrantee(ctx context.Context, gateway gatewayv1beta1.GatewayAPIClient, t int, g string) (*provider.Grantee, error) { +func ExtractGrantee(t int, g string) *provider.Grantee { var grantee provider.Grantee switch t { case 0: grantee.Type = provider.GranteeType_GRANTEE_TYPE_USER - user, err := ExtractUserID(ctx, gateway, g) - if err != nil { - return nil, err - } - grantee.Id = &provider.Grantee_UserId{UserId: user} + grantee.Id = &provider.Grantee_UserId{UserId: ExtractUserID(g)} case 1: grantee.Type = provider.GranteeType_GRANTEE_TYPE_GROUP - group, err := ExtractGroupID(ctx, gateway, g) - if err != nil { - return nil, err - } - grantee.Id = &provider.Grantee_GroupId{GroupId: group} + grantee.Id = &provider.Grantee_GroupId{GroupId: ExtractGroupID(g)} default: grantee.Type = provider.GranteeType_GRANTEE_TYPE_INVALID } - return &grantee, nil + return &grantee } // ResourceTypeToItem maps a resource type to a string. @@ -179,18 +169,14 @@ func FormatUserID(u *userpb.UserId) string { } // ExtractUserID retrieves a CS3API user ID from a string. 
-func ExtractUserID(ctx context.Context, gateway gatewayv1beta1.GatewayAPIClient, u string) (*userpb.UserId, error) { - userRes, err := gateway.GetUser(ctx, &userpb.GetUserRequest{ - UserId: &userpb.UserId{OpaqueId: u}, - }) - if err != nil { - return nil, err +func ExtractUserID(u string) *userpb.UserId { + t := userpb.UserType_USER_TYPE_PRIMARY + if strings.HasPrefix(u, "guest:") { + t = userpb.UserType_USER_TYPE_LIGHTWEIGHT + } else if strings.Contains(u, "@") { + t = userpb.UserType_USER_TYPE_FEDERATED } - if userRes.Status.Code != rpcv1beta1.Code_CODE_OK { - return nil, errors.New(userRes.Status.Message) - } - - return userRes.User.Id, nil + return &userpb.UserId{OpaqueId: u, Type: t} } // FormatGroupID formats a CS3API group ID to a string. @@ -199,37 +185,15 @@ func FormatGroupID(u *grouppb.GroupId) string { } // ExtractGroupID retrieves a CS3API group ID from a string. -func ExtractGroupID(ctx context.Context, gateway gatewayv1beta1.GatewayAPIClient, u string) (*grouppb.GroupId, error) { - groupRes, err := gateway.GetGroup(ctx, &grouppb.GetGroupRequest{ - GroupId: &grouppb.GroupId{OpaqueId: u}, - }) - if err != nil { - return nil, err - } - if groupRes.Status.Code != rpcv1beta1.Code_CODE_OK { - return nil, errors.New(groupRes.Status.Message) - } - return groupRes.Group.Id, nil +func ExtractGroupID(u string) *grouppb.GroupId { + return &grouppb.GroupId{OpaqueId: u} } // ConvertToCS3Share converts a DBShare to a CS3API collaboration share. 
-func ConvertToCS3Share(ctx context.Context, gateway gatewayv1beta1.GatewayAPIClient, s DBShare) (*collaboration.Share, error) { +func ConvertToCS3Share(s DBShare) *collaboration.Share { ts := &typespb.Timestamp{ Seconds: uint64(s.STime), } - owner, err := ExtractUserID(ctx, gateway, s.UIDOwner) - if err != nil { - return nil, err - } - creator, err := ExtractUserID(ctx, gateway, s.UIDInitiator) - if err != nil { - return nil, err - } - grantee, err := ExtractGrantee(ctx, gateway, s.ShareType, s.ShareWith) - if err != nil { - return nil, err - } - return &collaboration.Share{ Id: &collaboration.ShareId{ OpaqueId: s.ID, @@ -240,28 +204,24 @@ func ConvertToCS3Share(ctx context.Context, gateway gatewayv1beta1.GatewayAPICli OpaqueId: s.ItemSource, }, Permissions: &collaboration.SharePermissions{Permissions: IntTosharePerm(s.Permissions, s.ItemType)}, - Grantee: grantee, - Owner: owner, - Creator: creator, + Grantee: ExtractGrantee(s.ShareType, s.ShareWith), + Owner: ExtractUserID(s.UIDOwner), + Creator: ExtractUserID(s.UIDInitiator), Ctime: ts, Mtime: ts, - }, nil + } } // ConvertToCS3ReceivedShare converts a DBShare to a CS3API collaboration received share. -func ConvertToCS3ReceivedShare(ctx context.Context, gateway gatewayv1beta1.GatewayAPIClient, s DBShare) (*collaboration.ReceivedShare, error) { - share, err := ConvertToCS3Share(ctx, gateway, s) - if err != nil { - return nil, err - } +func ConvertToCS3ReceivedShare(s DBShare) *collaboration.ReceivedShare { return &collaboration.ReceivedShare{ - Share: share, + Share: ConvertToCS3Share(s), State: IntToShareState(s.State), - }, nil + } } // ConvertToCS3PublicShare converts a DBShare to a CS3API public share. 
-func ConvertToCS3PublicShare(ctx context.Context, gateway gatewayv1beta1.GatewayAPIClient, s DBShare) (*link.PublicShare, error) { +func ConvertToCS3PublicShare(s DBShare) *link.PublicShare { ts := &typespb.Timestamp{ Seconds: uint64(s.STime), } @@ -278,14 +238,6 @@ func ConvertToCS3PublicShare(ctx context.Context, gateway gatewayv1beta1.Gateway } } } - owner, err := ExtractUserID(ctx, gateway, s.UIDOwner) - if err != nil { - return nil, err - } - creator, err := ExtractUserID(ctx, gateway, s.UIDInitiator) - if err != nil { - return nil, err - } return &link.PublicShare{ Id: &link.PublicShareId{ OpaqueId: s.ID, @@ -295,8 +247,8 @@ func ConvertToCS3PublicShare(ctx context.Context, gateway gatewayv1beta1.Gateway OpaqueId: s.ItemSource, }, Permissions: &link.PublicSharePermissions{Permissions: IntTosharePerm(s.Permissions, s.ItemType)}, - Owner: owner, - Creator: creator, + Owner: ExtractUserID(s.UIDOwner), + Creator: ExtractUserID(s.UIDInitiator), Token: s.Token, DisplayName: s.ShareName, PasswordProtected: pwd, @@ -306,6 +258,6 @@ func ConvertToCS3PublicShare(ctx context.Context, gateway gatewayv1beta1.Gateway Quicklink: s.Quicklink, Description: s.Description, NotifyUploads: s.NotifyUploads, - NotifyUploadsExtraRecipients: s.NotifyUploadsExtraRecipients, - }, nil + NotifyUploadsExtraRecipients: s.NotifyUploadsExtraRecipients.String, + } } diff --git a/pkg/storage/utils/decomposedfs/node/permissions_darwin.go b/pkg/ctx/pathctx.go similarity index 59% rename from pkg/storage/utils/decomposedfs/node/permissions_darwin.go rename to pkg/ctx/pathctx.go index f0b03391fb..944be416c6 100644 --- a/pkg/storage/utils/decomposedfs/node/permissions_darwin.go +++ b/pkg/ctx/pathctx.go @@ -16,22 +16,22 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. 
-//go:build darwin -// +build darwin - -package node +package ctx import ( - "syscall" - - "github.com/pkg/xattr" + "context" ) -func isAttrUnset(err error) bool { - if xerr, ok := err.(*xattr.Error); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENOATTR - } - } - return false +// ResoucePathCtx is the key used in the opaque id for passing the resource path. +const ResoucePathCtx = "resource_path" + +// ContextGetResourcePath returns the resource path if set in the given context. +func ContextGetResourcePath(ctx context.Context) (string, bool) { + p, ok := ctx.Value(pathKey).(string) + return p, ok +} + +// ContextGetResourcePath stores the resource path in the context. +func ContextSetResourcePath(ctx context.Context, path string) context.Context { + return context.WithValue(ctx, pathKey, path) } diff --git a/pkg/ctx/userctx.go b/pkg/ctx/userctx.go index 2cca7f5660..5551987ae5 100644 --- a/pkg/ctx/userctx.go +++ b/pkg/ctx/userctx.go @@ -32,6 +32,7 @@ const ( tokenKey scopeKey idKey + pathKey ) // ContextGetUser returns the user if set in the given context. diff --git a/pkg/eosclient/eosbinary/eosbinary.go b/pkg/eosclient/eosbinary/eosbinary.go index 1a47aa8240..48deb910a1 100644 --- a/pkg/eosclient/eosbinary/eosbinary.go +++ b/pkg/eosclient/eosbinary/eosbinary.go @@ -239,7 +239,7 @@ func (c *Client) executeEOS(ctx context.Context, cmdArgs []string, auth eosclien } // add application label - cmd.Args = append(cmd.Args, "-a", "reva_eosclient::meta") + // cmd.Args = append(cmd.Args, "-a", "reva_eosclient::meta") cmd.Args = append(cmd.Args, cmdArgs...) 
@@ -1192,27 +1192,17 @@ func (c *Client) mapToFileInfo(ctx context.Context, kv, attrs map[string]string, var ctimesec, ctimenanos uint64 if val, ok := kv["ctime"]; ok && val != "" { - split := strings.Split(val, ".") - ctimesec, err = strconv.ParseUint(split[0], 10, 64) - if err != nil { - return nil, err - } - ctimenanos, _ = strconv.ParseUint(split[1], 10, 32) - if err != nil { - return nil, err + if split := strings.Split(val, "."); len(split) >= 2 { + ctimesec, _ = strconv.ParseUint(split[0], 10, 64) + ctimenanos, _ = strconv.ParseUint(split[1], 10, 32) } } var atimesec, atimenanos uint64 if val, ok := kv["atime"]; ok && val != "" { - split := strings.Split(val, ".") - atimesec, err = strconv.ParseUint(split[0], 10, 64) - if err != nil { - return nil, err - } - atimenanos, err = strconv.ParseUint(split[1], 10, 32) - if err != nil { - return nil, err + if split := strings.Split(val, "."); len(split) >= 2 { + atimesec, _ = strconv.ParseUint(split[0], 10, 64) + atimenanos, _ = strconv.ParseUint(split[1], 10, 32) } } diff --git a/pkg/eosclient/eosgrpc/eosgrpc.go b/pkg/eosclient/eosgrpc/eosgrpc.go index a8b0240931..882bca0613 100644 --- a/pkg/eosclient/eosgrpc/eosgrpc.go +++ b/pkg/eosclient/eosgrpc/eosgrpc.go @@ -438,13 +438,13 @@ func (c *Client) GetFileInfoByInode(ctx context.Context, auth eosclient.Authoriz // Now send the req and see what happens resp, err := c.cl.MD(context.Background(), mdrq) if err != nil { - log.Error().Err(err).Uint64("inode", inode).Str("err", err.Error()) + log.Error().Err(err).Uint64("inode", inode).Str("err", err.Error()).Send() return nil, err } rsp, err := resp.Recv() if err != nil { - log.Error().Err(err).Uint64("inode", inode).Str("err", err.Error()) + log.Error().Err(err).Uint64("inode", inode).Str("err", err.Error()).Send() return nil, err } @@ -1079,7 +1079,7 @@ func (c *Client) Remove(ctx context.Context, auth eosclient.Authorization, path nfo, err := c.GetFileInfoByPath(ctx, auth, path) if err != nil { - 
log.Warn().Err(err).Str("func", "Remove").Str("path", path).Str("err", err.Error()) + log.Warn().Err(err).Str("func", "Remove").Str("path", path).Str("err", err.Error()).Send() return err } diff --git a/pkg/eosclient/eosgrpc/eoshttp.go b/pkg/eosclient/eosgrpc/eoshttp.go index 8e5e2f2b62..d6f2cbcb53 100644 --- a/pkg/eosclient/eosgrpc/eoshttp.go +++ b/pkg/eosclient/eosgrpc/eoshttp.go @@ -34,6 +34,7 @@ import ( "github.com/cs3org/reva/pkg/eosclient" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/logger" + "github.com/pkg/errors" ) // HTTPOptions to configure the Client. @@ -372,6 +373,12 @@ func (c *EOSHTTPClient) PUTFile(ctx context.Context, remoteuser string, auth eos // Execute the request. I don't like that there is no explicit timeout or buffer control on the input stream log.Debug().Str("func", "PUTFile").Msg("sending req") resp, err := c.cl.Do(req) + if err != nil { + return errors.Wrap(err, "error doing request") + } + if resp != nil { + defer resp.Body.Close() + } // Let's support redirections... 
and if we retry we retry at the same FST if resp != nil && resp.StatusCode == 307 { @@ -471,6 +478,9 @@ func (c *EOSHTTPClient) Head(ctx context.Context, remoteuser string, auth eoscli log.Error().Str("func", "Head").Str("url", finalurl).Str("err", e.Error()).Msg("") return e } + if resp != nil { + defer resp.Body.Close() + } log.Debug().Str("func", "Head").Str("url", finalurl).Str("resp:", fmt.Sprintf("%#v", resp)).Msg("") if resp == nil { diff --git a/pkg/notification/handler/emailhandler/emailhandler.go b/pkg/notification/handler/emailhandler/emailhandler.go index 3d8f3fc97b..7e7dd7e148 100644 --- a/pkg/notification/handler/emailhandler/emailhandler.go +++ b/pkg/notification/handler/emailhandler/emailhandler.go @@ -43,11 +43,11 @@ type EmailHandler struct { } type config struct { - SMTPAddress string `mapstructure:"smtp_server" docs:";The hostname and port of the SMTP server."` - SenderLogin string `mapstructure:"sender_login" docs:";The email to be used to send mails."` - SenderPassword string `mapstructure:"sender_password" docs:";The sender's password."` - DisableAuth bool `mapstructure:"disable_auth" docs:"false;Whether to disable SMTP auth."` - DefaultSender string `mapstructure:"default_sender" docs:"no-reply@cernbox.cern.ch;Default sender when not specified in the trigger."` + SMTPAddress string `docs:";The hostname and port of the SMTP server." mapstructure:"smtp_server"` + SenderLogin string `docs:";The email to be used to send mails." mapstructure:"sender_login"` + SenderPassword string `docs:";The sender's password." mapstructure:"sender_password"` + DisableAuth bool `docs:"false;Whether to disable SMTP auth." mapstructure:"disable_auth"` + DefaultSender string `docs:"no-reply@cernbox.cern.ch;Default sender when not specified in the trigger." 
mapstructure:"default_sender"` } func (c *config) ApplyDefaults() { diff --git a/pkg/notification/notificationhelper/notificationhelper.go b/pkg/notification/notificationhelper/notificationhelper.go index c39e97b8a6..80ecf1a1d0 100644 --- a/pkg/notification/notificationhelper/notificationhelper.go +++ b/pkg/notification/notificationhelper/notificationhelper.go @@ -44,10 +44,10 @@ type NotificationHelper struct { // Config contains the configuration for the Notification Helper. type Config struct { - NatsAddress string `mapstructure:"nats_address" docs:";The NATS server address."` - NatsToken string `mapstructure:"nats_token" docs:";The token to authenticate against the NATS server"` - NatsStream string `mapstructure:"nats_stream" docs:"reva-notifications;The notifications NATS stream."` - Templates map[string]interface{} `mapstructure:"templates" docs:";Notification templates for the service."` + NatsAddress string `docs:";The NATS server address." mapstructure:"nats_address"` + NatsToken string `docs:";The token to authenticate against the NATS server" mapstructure:"nats_token"` + NatsStream string `docs:"reva-notifications;The notifications NATS stream." mapstructure:"nats_stream"` + Templates map[string]interface{} `docs:";Notification templates for the service." mapstructure:"templates"` } func defaultConfig() *Config { @@ -130,7 +130,7 @@ func (nh *NotificationHelper) Stop() { return } if err := nh.nc.Drain(); err != nil { - nh.Log.Error().Err(err) + nh.Log.Error().Err(err).Send() } } diff --git a/pkg/notification/template/template.go b/pkg/notification/template/template.go index d1ad5f85ff..4da191c092 100644 --- a/pkg/notification/template/template.go +++ b/pkg/notification/template/template.go @@ -37,11 +37,11 @@ const validTemplateNameRegex = "[a-zA-Z0-9-]" // RegistrationRequest represents a Template registration request. 
type RegistrationRequest struct { - Name string `mapstructure:"name" json:"name"` - Handler string `mapstructure:"handler" json:"handler"` - BodyTmplPath string `mapstructure:"body_template_path" json:"body_template_path"` - SubjectTmplPath string `mapstructure:"subject_template_path" json:"subject_template_path"` - Persistent bool `mapstructure:"persistent" json:"persistent"` + Name string `json:"name" mapstructure:"name"` + Handler string `json:"handler" mapstructure:"handler"` + BodyTmplPath string `json:"body_template_path" mapstructure:"body_template_path"` + SubjectTmplPath string `json:"subject_template_path" mapstructure:"subject_template_path"` + Persistent bool `json:"persistent" mapstructure:"persistent"` } // Template represents a notification template. diff --git a/pkg/ocm/invite/repository/nextcloud/nextcloud.go b/pkg/ocm/invite/repository/nextcloud/nextcloud.go index 9429af2696..13bfa16c8a 100644 --- a/pkg/ocm/invite/repository/nextcloud/nextcloud.go +++ b/pkg/ocm/invite/repository/nextcloud/nextcloud.go @@ -54,7 +54,7 @@ type Client struct { } type config struct { - BaseURL string `mapstructure:"base_url" default:"http://localhost"` + BaseURL string `default:"http://localhost" mapstructure:"base_url"` APIKey string `mapstructure:"api_key"` GatewaySvc string `mapstructure:"gatewaysvc"` } @@ -107,10 +107,7 @@ func timestampToTime(ctx context.Context, t *types.Timestamp) time.Time { } func (c *Client) convertToInviteToken(ctx context.Context, tkn *apiToken) (*invitepb.InviteToken, error) { - usr, err := conversions.ExtractUserID(ctx, c.GatewayClient, tkn.Initiator) - if err != nil { - return nil, err - } + usr := conversions.ExtractUserID(tkn.Initiator) return &invitepb.InviteToken{ Token: tkn.Token, UserId: usr, diff --git a/pkg/ocm/invite/repository/sql/sql.go b/pkg/ocm/invite/repository/sql/sql.go index dc5b2e6f49..7c81a936e4 100644 --- a/pkg/ocm/invite/repository/sql/sql.go +++ b/pkg/ocm/invite/repository/sql/sql.go @@ -24,14 +24,12 @@ import ( 
"fmt" "time" - gatewayv1beta1 "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" invitepb "github.com/cs3org/go-cs3apis/cs3/ocm/invite/v1beta1" types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" conversions "github.com/cs3org/reva/pkg/cbox/utils" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/ocm/invite" - "github.com/cs3org/reva/pkg/rgrpc/todo/pool" "github.com/cs3org/reva/pkg/utils/cfg" "github.com/go-sql-driver/mysql" @@ -53,9 +51,8 @@ func init() { } type mgr struct { - c *config - db *sql.DB - client gatewayv1beta1.GatewayAPIClient + c *config + db *sql.DB } type config struct { @@ -82,15 +79,9 @@ func New(ctx context.Context, m map[string]interface{}) (invite.Repository, erro return nil, errors.Wrap(err, "sql: error opening connection to mysql database") } - gw, err := pool.GetGatewayServiceClient(pool.Endpoint(c.GatewaySvc)) - if err != nil { - return nil, err - } - mgr := mgr{ - c: &c, - db: db, - client: gw, + c: &c, + db: db, } return &mgr, nil } @@ -124,22 +115,18 @@ func (m *mgr) GetToken(ctx context.Context, token string) (*invitepb.InviteToken } return nil, err } - return m.convertToInviteToken(ctx, tkn) + return convertToInviteToken(tkn), nil } -func (m *mgr) convertToInviteToken(ctx context.Context, tkn dbToken) (*invitepb.InviteToken, error) { - user, err := conversions.ExtractUserID(ctx, m.client, tkn.Initiator) - if err != nil { - return nil, err - } +func convertToInviteToken(tkn dbToken) *invitepb.InviteToken { return &invitepb.InviteToken{ Token: tkn.Token, - UserId: user, + UserId: conversions.ExtractUserID(tkn.Initiator), Expiration: &types.Timestamp{ Seconds: uint64(tkn.Expiration.Unix()), }, Description: tkn.Description, - }, nil + } } func (m *mgr) ListTokens(ctx context.Context, initiator *userpb.UserId) ([]*invitepb.InviteToken, error) { @@ -156,11 +143,7 @@ func (m *mgr) ListTokens(ctx context.Context, initiator *userpb.UserId) ([]*invi if err := 
rows.Scan(&tkn.Token, &tkn.Initiator, &tkn.Expiration, &tkn.Description); err != nil { continue } - token, err := m.convertToInviteToken(ctx, tkn) - if err != nil { - return nil, err - } - tokens = append(tokens, token) + tokens = append(tokens, convertToInviteToken(tkn)) } return tokens, nil diff --git a/pkg/ocm/provider/authorizer/mentix/mentix.go b/pkg/ocm/provider/authorizer/mentix/mentix.go index 88347270f5..b89ee87482 100644 --- a/pkg/ocm/provider/authorizer/mentix/mentix.go +++ b/pkg/ocm/provider/authorizer/mentix/mentix.go @@ -76,7 +76,7 @@ type config struct { Timeout int64 `mapstructure:"timeout"` RefreshInterval int64 `mapstructure:"refresh"` VerifyRequestHostname bool `mapstructure:"verify_request_hostname"` - Insecure bool `mapstructure:"insecure" docs:"false;Whether to skip certificate checks when sending requests."` + Insecure bool `docs:"false;Whether to skip certificate checks when sending requests." mapstructure:"insecure"` } func (c *config) ApplyDefaults() { diff --git a/pkg/ocm/share/repository/nextcloud/nextcloud.go b/pkg/ocm/share/repository/nextcloud/nextcloud.go index 3977af7638..27e70e71b2 100644 --- a/pkg/ocm/share/repository/nextcloud/nextcloud.go +++ b/pkg/ocm/share/repository/nextcloud/nextcloud.go @@ -60,11 +60,11 @@ type Manager struct { // ShareManagerConfig contains config for a Nextcloud-based ShareManager. type ShareManagerConfig struct { - EndPoint string `mapstructure:"endpoint" docs:";The Nextcloud backend endpoint for user check"` + EndPoint string `docs:";The Nextcloud backend endpoint for user check" mapstructure:"endpoint"` SharedSecret string `mapstructure:"shared_secret"` WebDAVHost string `mapstructure:"webdav_host"` MockHTTP bool `mapstructure:"mock_http"` - MountID string `mapstructure:"mount_id" docs:";The Reva mount id to identify the storage provider proxying the EFSS. 
Note that only one EFSS can be proxied by a given Reva process."` + MountID string `docs:";The Reva mount id to identify the storage provider proxying the EFSS. Note that only one EFSS can be proxied by a given Reva process." mapstructure:"mount_id"` } // Action describes a REST request to forward to the Nextcloud backend. diff --git a/pkg/cbox/preferences/sql/sql.go b/pkg/preferences/sql/sql.go similarity index 100% rename from pkg/cbox/preferences/sql/sql.go rename to pkg/preferences/sql/sql.go diff --git a/pkg/publicshare/manager/loader/loader.go b/pkg/publicshare/manager/loader/loader.go index 4ce46e626a..bf3902a58f 100644 --- a/pkg/publicshare/manager/loader/loader.go +++ b/pkg/publicshare/manager/loader/loader.go @@ -22,5 +22,6 @@ import ( // Load core share manager drivers. _ "github.com/cs3org/reva/pkg/publicshare/manager/json" _ "github.com/cs3org/reva/pkg/publicshare/manager/memory" + _ "github.com/cs3org/reva/pkg/publicshare/manager/sql" // Add your own here. ) diff --git a/pkg/cbox/publicshare/sql/sql.go b/pkg/publicshare/manager/sql/sql.go similarity index 97% rename from pkg/cbox/publicshare/sql/sql.go rename to pkg/publicshare/manager/sql/sql.go index 3602224ab0..9c4e3d3b8d 100644 --- a/pkg/cbox/publicshare/sql/sql.go +++ b/pkg/publicshare/manager/sql/sql.go @@ -288,11 +288,7 @@ func (m *manager) getByToken(ctx context.Context, token string, u *user.User) (* } return nil, "", err } - share, err := conversions.ConvertToCS3PublicShare(ctx, m.client, s) - if err != nil { - return nil, "", err - } - return share, s.ShareWith, nil + return conversions.ConvertToCS3PublicShare(s), s.ShareWith, nil } func (m *manager) getByID(ctx context.Context, id *link.PublicShareId, u *user.User) (*link.PublicShare, string, error) { @@ -305,11 +301,7 @@ func (m *manager) getByID(ctx context.Context, id *link.PublicShareId, u *user.U } return nil, "", err } - share, err := conversions.ConvertToCS3PublicShare(ctx, m.client, s) - if err != nil { - return nil, "", err - } - 
return share, s.ShareWith, nil + return conversions.ConvertToCS3PublicShare(s), s.ShareWith, nil } func (m *manager) GetPublicShare(ctx context.Context, u *user.User, ref *link.PublicShareReference, sign bool) (*link.PublicShare, error) { @@ -406,10 +398,7 @@ func (m *manager) ListPublicShares(ctx context.Context, u *user.User, filters [] if err := rows.Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.Token, &s.Expiration, &s.ShareName, &s.ID, &s.STime, &s.Permissions, &s.Quicklink, &s.Description, &s.NotifyUploads, &s.NotifyUploadsExtraRecipients); err != nil { continue } - cs3Share, err := conversions.ConvertToCS3PublicShare(ctx, m.client, s) - if err != nil { - return nil, err - } + cs3Share := conversions.ConvertToCS3PublicShare(s) if expired(cs3Share) { _ = m.cleanupExpiredShares() } else { @@ -472,10 +461,7 @@ func (m *manager) GetPublicShareByToken(ctx context.Context, token string, auth } return nil, err } - cs3Share, err := conversions.ConvertToCS3PublicShare(ctx, m.client, s) - if err != nil { - return nil, err - } + cs3Share := conversions.ConvertToCS3PublicShare(s) if expired(cs3Share) { if err := m.cleanupExpiredShares(); err != nil { return nil, err diff --git a/pkg/share/manager/sql/conversions.go b/pkg/share/manager/sql/conversions.go deleted file mode 100644 index a09cae34e5..0000000000 --- a/pkg/share/manager/sql/conversions.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package sql - -import ( - "context" - - grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" - collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - typespb "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" - conversions "github.com/cs3org/reva/internal/http/services/owncloud/ocs/conversions" - "github.com/cs3org/reva/pkg/rgrpc/status" - "github.com/cs3org/reva/pkg/rgrpc/todo/pool" -) - -//go:generate mockery -name UserConverter - -// DBShare stores information about user and public shares. -type DBShare struct { - ID string - UIDOwner string - UIDInitiator string - ItemStorage string - ItemSource string - ShareWith string - Token string - Expiration string - Permissions int - ShareType int - ShareName string - STime int - FileTarget string - RejectedBy string - State int -} - -// UserConverter describes an interface for converting user ids to names and back. -type UserConverter interface { - UserNameToUserID(ctx context.Context, username string) (*userpb.UserId, error) - UserIDToUserName(ctx context.Context, userid *userpb.UserId) (string, error) -} - -// GatewayUserConverter converts usernames and ids using the gateway. -type GatewayUserConverter struct { - gwAddr string -} - -// NewGatewayUserConverter returns a instance of GatewayUserConverter. -func NewGatewayUserConverter(gwAddr string) *GatewayUserConverter { - return &GatewayUserConverter{ - gwAddr: gwAddr, - } -} - -// UserIDToUserName converts a user ID to an username. 
-func (c *GatewayUserConverter) UserIDToUserName(ctx context.Context, userid *userpb.UserId) (string, error) { - gwConn, err := pool.GetGatewayServiceClient(pool.Endpoint(c.gwAddr)) - if err != nil { - return "", err - } - getUserResponse, err := gwConn.GetUser(ctx, &userpb.GetUserRequest{ - UserId: userid, - SkipFetchingUserGroups: true, - }) - if err != nil { - return "", err - } - if getUserResponse.Status.Code != rpc.Code_CODE_OK { - return "", status.NewErrorFromCode(getUserResponse.Status.Code, "gateway") - } - return getUserResponse.User.Username, nil -} - -// UserNameToUserID converts a username to an user ID. -func (c *GatewayUserConverter) UserNameToUserID(ctx context.Context, username string) (*userpb.UserId, error) { - gwConn, err := pool.GetGatewayServiceClient(pool.Endpoint(c.gwAddr)) - if err != nil { - return nil, err - } - getUserResponse, err := gwConn.GetUserByClaim(ctx, &userpb.GetUserByClaimRequest{ - Claim: "username", - Value: username, - SkipFetchingUserGroups: true, - }) - if err != nil { - return nil, err - } - if getUserResponse.Status.Code != rpc.Code_CODE_OK { - return nil, status.NewErrorFromCode(getUserResponse.Status.Code, "gateway") - } - return getUserResponse.User.Id, nil -} - -func (m *mgr) formatGrantee(ctx context.Context, g *provider.Grantee) (int, string, error) { - var granteeType int - var formattedID string - switch g.Type { - case provider.GranteeType_GRANTEE_TYPE_USER: - granteeType = 0 - var err error - formattedID, err = m.userConverter.UserIDToUserName(ctx, g.GetUserId()) - if err != nil { - return 0, "", err - } - case provider.GranteeType_GRANTEE_TYPE_GROUP: - granteeType = 1 - formattedID = formatGroupID(g.GetGroupId()) - default: - granteeType = -1 - } - return granteeType, formattedID, nil -} - -func (m *mgr) extractGrantee(ctx context.Context, t int, g string) (*provider.Grantee, error) { - var grantee provider.Grantee - switch t { - case 0: - userid, err := m.userConverter.UserNameToUserID(ctx, g) - if err != 
nil { - return nil, err - } - grantee.Type = provider.GranteeType_GRANTEE_TYPE_USER - grantee.Id = &provider.Grantee_UserId{UserId: userid} - case 1: - grantee.Type = provider.GranteeType_GRANTEE_TYPE_GROUP - grantee.Id = &provider.Grantee_GroupId{GroupId: extractGroupID(g)} - default: - grantee.Type = provider.GranteeType_GRANTEE_TYPE_INVALID - } - return &grantee, nil -} - -func resourceTypeToItem(r provider.ResourceType) string { - switch r { - case provider.ResourceType_RESOURCE_TYPE_FILE: - return "file" - case provider.ResourceType_RESOURCE_TYPE_CONTAINER: - return "folder" - case provider.ResourceType_RESOURCE_TYPE_REFERENCE: - return "reference" - case provider.ResourceType_RESOURCE_TYPE_SYMLINK: - return "symlink" - default: - return "" - } -} - -func sharePermToInt(p *provider.ResourcePermissions) int { - return int(conversions.RoleFromResourcePermissions(p).OCSPermissions()) -} - -func intTosharePerm(p int) (*provider.ResourcePermissions, error) { - perms, err := conversions.NewPermissions(p) - if err != nil { - return nil, err - } - - return conversions.RoleFromOCSPermissions(perms).CS3ResourcePermissions(), nil -} - -func intToShareState(g int) collaboration.ShareState { - switch g { - case 0: - return collaboration.ShareState_SHARE_STATE_ACCEPTED - case 1: - return collaboration.ShareState_SHARE_STATE_PENDING - case 2: - return collaboration.ShareState_SHARE_STATE_REJECTED - default: - return collaboration.ShareState_SHARE_STATE_INVALID - } -} - -func formatUserID(u *userpb.UserId) string { - return u.OpaqueId -} - -func formatGroupID(u *grouppb.GroupId) string { - return u.OpaqueId -} - -func extractGroupID(u string) *grouppb.GroupId { - return &grouppb.GroupId{OpaqueId: u} -} - -func (m *mgr) convertToCS3Share(ctx context.Context, s DBShare, storageMountID string) (*collaboration.Share, error) { - ts := &typespb.Timestamp{ - Seconds: uint64(s.STime), - } - permissions, err := intTosharePerm(s.Permissions) - if err != nil { - return nil, err - } - 
grantee, err := m.extractGrantee(ctx, s.ShareType, s.ShareWith) - if err != nil { - return nil, err - } - owner, err := m.userConverter.UserNameToUserID(ctx, s.UIDOwner) - if err != nil { - return nil, err - } - var creator *userpb.UserId - if s.UIDOwner == s.UIDInitiator { - creator = owner - } else { - creator, err = m.userConverter.UserNameToUserID(ctx, s.UIDOwner) - if err != nil { - return nil, err - } - } - return &collaboration.Share{ - Id: &collaboration.ShareId{ - OpaqueId: s.ID, - }, - ResourceId: &provider.ResourceId{ - StorageId: storageMountID + "!" + s.ItemStorage, - OpaqueId: s.ItemSource, - }, - Permissions: &collaboration.SharePermissions{Permissions: permissions}, - Grantee: grantee, - Owner: owner, - Creator: creator, - Ctime: ts, - Mtime: ts, - }, nil -} - -func (m *mgr) convertToCS3ReceivedShare(ctx context.Context, s DBShare, storageMountID string) (*collaboration.ReceivedShare, error) { - share, err := m.convertToCS3Share(ctx, s, storageMountID) - if err != nil { - return nil, err - } - var state collaboration.ShareState - if s.RejectedBy != "" { - state = collaboration.ShareState_SHARE_STATE_REJECTED - } else { - state = intToShareState(s.State) - } - return &collaboration.ReceivedShare{ - Share: share, - State: state, - }, nil -} diff --git a/pkg/share/manager/sql/mocks/UserConverter.go b/pkg/share/manager/sql/mocks/UserConverter.go deleted file mode 100644 index 48da015a80..0000000000 --- a/pkg/share/manager/sql/mocks/UserConverter.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package mocks - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" - - userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" -) - -// UserConverter is an autogenerated mock type for the UserConverter type -type UserConverter struct { - mock.Mock -} - -// UserIDToUserName provides a mock function with given fields: ctx, userid -func (_m *UserConverter) UserIDToUserName(ctx context.Context, userid *userv1beta1.UserId) (string, error) { - ret := _m.Called(ctx, userid) - - var r0 string - if rf, ok := ret.Get(0).(func(context.Context, *userv1beta1.UserId) string); ok { - r0 = rf(ctx, userid) - } else { - r0 = ret.Get(0).(string) - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *userv1beta1.UserId) error); ok { - r1 = rf(ctx, userid) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// UserNameToUserID provides a mock function with given fields: ctx, username -func (_m *UserConverter) UserNameToUserID(ctx context.Context, username string) (*userv1beta1.UserId, error) { - ret := _m.Called(ctx, username) - - var r0 *userv1beta1.UserId - if rf, ok := ret.Get(0).(func(context.Context, string) *userv1beta1.UserId); ok { - r0 = rf(ctx, username) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*userv1beta1.UserId) - } - } - - var 
r1 error - if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(ctx, username) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/pkg/share/manager/sql/sql.go b/pkg/share/manager/sql/sql.go index 984e0f6425..ca06a838d9 100644 --- a/pkg/share/manager/sql/sql.go +++ b/pkg/share/manager/sql/sql.go @@ -27,11 +27,15 @@ import ( "strings" "time" + gatewayv1beta1 "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" typespb "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + conversions "github.com/cs3org/reva/pkg/cbox/utils" ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/rgrpc/todo/pool" "github.com/cs3org/reva/pkg/share" "github.com/cs3org/reva/pkg/share/manager/registry" "github.com/cs3org/reva/pkg/sharedconf" @@ -47,35 +51,37 @@ import ( const ( shareTypeUser = 0 shareTypeGroup = 1 + + projectInstancesPrefix = "newproject" + projectSpaceGroupsPrefix = "cernbox-project-" + projectSpaceAdminGroupsSuffix = "-admins" ) func init() { - registry.Register("oc10-sql", NewMysql) + registry.Register("sql", New) } type config struct { - GatewayAddr string `mapstructure:"gatewaysvc"` - StorageMountID string `mapstructure:"storage_mount_id"` - DBUsername string `mapstructure:"db_username"` - DBPassword string `mapstructure:"db_password"` - DBHost string `mapstructure:"db_host"` - DBPort int `mapstructure:"db_port"` - DBName string `mapstructure:"db_name"` + DBUsername string `mapstructure:"db_username"` + DBPassword string `mapstructure:"db_password"` + DBHost string `mapstructure:"db_host"` + DBPort int `mapstructure:"db_port"` + DBName string `mapstructure:"db_name"` + GatewaySvc string `mapstructure:"gatewaysvc"` } -func (c *config) ApplyDefaults() { - c.GatewayAddr = 
sharedconf.GetGatewaySVC(c.GatewayAddr) +type mgr struct { + c *config + db *sql.DB + client gatewayv1beta1.GatewayAPIClient } -type mgr struct { - driver string - db *sql.DB - storageMountID string - userConverter UserConverter +func (c *config) ApplyDefaults() { + c.GatewaySvc = sharedconf.GetGatewaySVC(c.GatewaySvc) } -// NewMysql returns a new share manager connection to a mysql database. -func NewMysql(ctx context.Context, m map[string]interface{}) (share.Manager, error) { +// New returns a new share manager. +func New(ctx context.Context, m map[string]interface{}) (share.Manager, error) { var c config if err := cfg.Decode(m, &c); err != nil { return nil, err @@ -86,18 +92,15 @@ func NewMysql(ctx context.Context, m map[string]interface{}) (share.Manager, err return nil, err } - userConverter := NewGatewayUserConverter(c.GatewayAddr) - - return New("mysql", db, c.StorageMountID, userConverter) -} + gw, err := pool.GetGatewayServiceClient(pool.Endpoint(c.GatewaySvc)) + if err != nil { + return nil, err + } -// New returns a new Cache instance connecting to the given sql.DB. 
-func New(driver string, db *sql.DB, storageMountID string, userConverter UserConverter) (share.Manager, error) { return &mgr{ - driver: driver, - db: db, - storageMountID: storageMountID, - userConverter: userConverter, + c: &c, + db: db, + client: gw, }, nil } @@ -129,17 +132,11 @@ func (m *mgr) Share(ctx context.Context, md *provider.ResourceInfo, g *collabora Seconds: uint64(now), } - owner, err := m.userConverter.UserIDToUserName(ctx, md.Owner) - if err != nil { - return nil, err - } - shareType, shareWith, err := m.formatGrantee(ctx, g.Grantee) - if err != nil { - return nil, err - } - itemType := resourceTypeToItem(md.Type) + shareType, shareWith := conversions.FormatGrantee(g.Grantee) + itemType := conversions.ResourceTypeToItem(md.Type) targetPath := path.Join("/", path.Base(md.Path)) - permissions := sharePermToInt(g.Permissions.Permissions) + permissions := conversions.SharePermToInt(g.Permissions.Permissions) + prefix := md.Id.StorageId itemSource := md.Id.OpaqueId fileSource, err := strconv.ParseUint(itemSource, 10, 64) if err != nil { @@ -148,8 +145,8 @@ func (m *mgr) Share(ctx context.Context, md *provider.ResourceInfo, g *collabora fileSource = 0 } - stmtString := "INSERT INTO oc_share (share_type,uid_owner,uid_initiator,item_type,item_source,file_source,permissions,stime,share_with,file_target) VALUES (?,?,?,?,?,?,?,?,?,?)" - stmtValues := []interface{}{shareType, owner, user.Username, itemType, itemSource, fileSource, permissions, now, shareWith, targetPath} + stmtString := "insert into oc_share set share_type=?,uid_owner=?,uid_initiator=?,item_type=?,fileid_prefix=?,item_source=?,file_source=?,permissions=?,stime=?,share_with=?,file_target=?" 
+ stmtValues := []interface{}{shareType, conversions.FormatUserID(md.Owner), conversions.FormatUserID(user.Id), itemType, prefix, itemSource, fileSource, permissions, now, shareWith, targetPath} stmt, err := m.db.Prepare(stmtString) if err != nil { @@ -178,6 +175,35 @@ func (m *mgr) Share(ctx context.Context, md *provider.ResourceInfo, g *collabora }, nil } +func (m *mgr) getByID(ctx context.Context, id *collaboration.ShareId) (*collaboration.Share, error) { + uid := conversions.FormatUserID(ctxpkg.ContextMustGetUser(ctx).Id) + s := conversions.DBShare{ID: id.OpaqueId} + query := "select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, coalesce(fileid_prefix, '') as fileid_prefix, coalesce(item_source, '') as item_source, coalesce(item_type, '') as item_type, stime, permissions, share_type FROM oc_share WHERE (orphan = 0 or orphan IS NULL) AND id=? AND (uid_owner=? or uid_initiator=?)" + if err := m.db.QueryRow(query, id.OpaqueId, uid, uid).Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.STime, &s.Permissions, &s.ShareType); err != nil { + if err == sql.ErrNoRows { + return nil, errtypes.NotFound(id.OpaqueId) + } + return nil, err + } + return conversions.ConvertToCS3Share(s), nil +} + +func (m *mgr) getByKey(ctx context.Context, key *collaboration.ShareKey) (*collaboration.Share, error) { + owner := conversions.FormatUserID(key.Owner) + uid := conversions.FormatUserID(ctxpkg.ContextMustGetUser(ctx).Id) + + s := conversions.DBShare{} + shareType, shareWith := conversions.FormatGrantee(key.Grantee) + query := "select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, coalesce(fileid_prefix, '') as fileid_prefix, coalesce(item_source, '') as item_source, coalesce(item_type, '') as item_type, id, stime, permissions, share_type FROM oc_share WHERE (orphan = 0 or orphan IS NULL) AND 
uid_owner=? AND fileid_prefix=? AND item_source=? AND share_type=? AND share_with=? AND (uid_owner=? or uid_initiator=?)" + if err := m.db.QueryRow(query, owner, key.ResourceId.StorageId, key.ResourceId.OpaqueId, shareType, shareWith, uid, uid).Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.ID, &s.STime, &s.Permissions, &s.ShareType); err != nil { + if err == sql.ErrNoRows { + return nil, errtypes.NotFound(key.String()) + } + return nil, err + } + return conversions.ConvertToCS3Share(s), nil +} + func (m *mgr) GetShare(ctx context.Context, ref *collaboration.ShareReference) (*collaboration.Share, error) { var s *collaboration.Share var err error @@ -198,22 +224,19 @@ func (m *mgr) GetShare(ctx context.Context, ref *collaboration.ShareReference) ( } func (m *mgr) Unshare(ctx context.Context, ref *collaboration.ShareReference) error { - uid := ctxpkg.ContextMustGetUser(ctx).Username + uid := conversions.FormatUserID(ctxpkg.ContextMustGetUser(ctx).Id) var query string params := []interface{}{} switch { case ref.GetId() != nil: - query = "DELETE FROM oc_share where id=? AND (uid_owner=? or uid_initiator=?)" + query = "delete from oc_share where id=? AND (uid_owner=? or uid_initiator=?)" params = append(params, ref.GetId().OpaqueId, uid, uid) case ref.GetKey() != nil: key := ref.GetKey() - shareType, shareWith, err := m.formatGrantee(ctx, key.Grantee) - if err != nil { - return err - } - owner := formatUserID(key.Owner) - query = "DELETE FROM oc_share WHERE uid_owner=? AND item_source=? AND share_type=? AND share_with=? AND (uid_owner=? or uid_initiator=?)" - params = append(params, owner, key.ResourceId.StorageId, shareType, shareWith, uid, uid) + shareType, shareWith := conversions.FormatGrantee(key.Grantee) + owner := conversions.FormatUserID(key.Owner) + query = "delete from oc_share where uid_owner=? AND fileid_prefix=? AND item_source=? AND share_type=? AND share_with=? AND (uid_owner=? 
or uid_initiator=?)" + params = append(params, owner, key.ResourceId.StorageId, key.ResourceId.OpaqueId, shareType, shareWith, uid, uid) default: return errtypes.NotFound(ref.String()) } @@ -238,8 +261,8 @@ func (m *mgr) Unshare(ctx context.Context, ref *collaboration.ShareReference) er } func (m *mgr) UpdateShare(ctx context.Context, ref *collaboration.ShareReference, p *collaboration.SharePermissions) (*collaboration.Share, error) { - permissions := sharePermToInt(p.Permissions) - uid := ctxpkg.ContextMustGetUser(ctx).Username + permissions := conversions.SharePermToInt(p.Permissions) + uid := conversions.FormatUserID(ctxpkg.ContextMustGetUser(ctx).Id) var query string params := []interface{}{} @@ -249,13 +272,10 @@ func (m *mgr) UpdateShare(ctx context.Context, ref *collaboration.ShareReference params = append(params, permissions, time.Now().Unix(), ref.GetId().OpaqueId, uid, uid) case ref.GetKey() != nil: key := ref.GetKey() - shareType, shareWith, err := m.formatGrantee(ctx, key.Grantee) - if err != nil { - return nil, err - } - owner := formatUserID(key.Owner) - query = "update oc_share set permissions=?,stime=? where (uid_owner=? or uid_initiator=?) AND item_source=? AND share_type=? AND share_with=? AND (uid_owner=? or uid_initiator=?)" - params = append(params, permissions, time.Now().Unix(), owner, owner, key.ResourceId.StorageId, shareType, shareWith, uid, uid) + shareType, shareWith := conversions.FormatGrantee(key.Grantee) + owner := conversions.FormatUserID(key.Owner) + query = "update oc_share set permissions=?,stime=? where (uid_owner=? or uid_initiator=?) AND fileid_prefix=? AND item_source=? AND share_type=? AND share_with=? AND (uid_owner=? 
or uid_initiator=?)" + params = append(params, permissions, time.Now().Unix(), owner, owner, key.ResourceId.StorageId, key.ResourceId.OpaqueId, shareType, shareWith, uid, uid) default: return nil, errtypes.NotFound(ref.String()) } @@ -272,29 +292,31 @@ func (m *mgr) UpdateShare(ctx context.Context, ref *collaboration.ShareReference } func (m *mgr) ListShares(ctx context.Context, filters []*collaboration.Filter) ([]*collaboration.Share, error) { - uid := ctxpkg.ContextMustGetUser(ctx).Username - query := "select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, coalesce(item_source, '') as item_source, id, stime, permissions, share_type FROM oc_share WHERE (uid_owner=? or uid_initiator=?)" - params := []interface{}{uid, uid} + query := `select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, + coalesce(fileid_prefix, '') as fileid_prefix, coalesce(item_source, '') as item_source, coalesce(item_type, '') as item_type, + id, stime, permissions, share_type + FROM oc_share WHERE (orphan = 0 or orphan IS NULL) AND (share_type=? OR share_type=?)` + params := []interface{}{shareTypeUser, shareTypeGroup} - var ( - filterQuery string - filterParams []interface{} - err error - ) - if len(filters) == 0 { - filterQuery += "(share_type=? OR share_type=?)" - params = append(params, shareTypeUser) - params = append(params, shareTypeGroup) - } else { - filterQuery, filterParams, err = translateFilters(filters) + groupedFilters := share.GroupFiltersByType(filters) + if len(groupedFilters) > 0 { + filterQuery, filterParams, err := translateFilters(groupedFilters) if err != nil { return nil, err } params = append(params, filterParams...) 
+ if filterQuery != "" { + query = fmt.Sprintf("%s AND (%s)", query, filterQuery) + } } - if filterQuery != "" { - query = fmt.Sprintf("%s AND (%s)", query, filterQuery) + uidOwnersQuery, uidOwnersParams, err := m.uidOwnerFilters(ctx, groupedFilters) + if err != nil { + return nil, err + } + params = append(params, uidOwnersParams...) + if uidOwnersQuery != "" { + query = fmt.Sprintf("%s AND (%s)", query, uidOwnersQuery) } rows, err := m.db.Query(query, params...) @@ -303,17 +325,13 @@ func (m *mgr) ListShares(ctx context.Context, filters []*collaboration.Filter) ( } defer rows.Close() - var s DBShare + var s conversions.DBShare shares := []*collaboration.Share{} for rows.Next() { - if err := rows.Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.ItemSource, &s.ID, &s.STime, &s.Permissions, &s.ShareType); err != nil { + if err := rows.Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.ID, &s.STime, &s.Permissions, &s.ShareType); err != nil { continue } - share, err := m.convertToCS3Share(ctx, s, m.storageMountID) - if err != nil { - return nil, err - } - shares = append(shares, share) + shares = append(shares, conversions.ConvertToCS3Share(s)) } if err = rows.Err(); err != nil { return nil, err @@ -325,27 +343,26 @@ func (m *mgr) ListShares(ctx context.Context, filters []*collaboration.Filter) ( // we list the shares that are targeted to the user in context or to the user groups. 
func (m *mgr) ListReceivedShares(ctx context.Context, filters []*collaboration.Filter) ([]*collaboration.ReceivedShare, error) { user := ctxpkg.ContextMustGetUser(ctx) - uid := user.Username + uid := conversions.FormatUserID(user.Id) - params := []interface{}{uid, uid, uid} + params := []interface{}{uid, uid, uid, uid} for _, v := range user.Groups { params = append(params, v) } - homeConcat := "" - if m.driver == "mysql" { // mysql upsert - homeConcat = "storages.id = CONCAT('home::', ts.uid_owner)" - } else { // sqlite3 upsert - homeConcat = "storages.id = 'home::' || ts.uid_owner" - } - query := "select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, coalesce(item_source, '') as item_source, ts.id, stime, permissions, share_type, accepted, storages.numeric_id FROM oc_share ts LEFT JOIN oc_storages storages ON " + homeConcat + " WHERE (uid_owner != ? AND uid_initiator != ?) " + query := `SELECT coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, + coalesce(fileid_prefix, '') as fileid_prefix, coalesce(item_source, '') as item_source, coalesce(item_type, '') as item_type, + ts.id, stime, permissions, share_type, coalesce(tr.state, 0) as state + FROM oc_share ts LEFT JOIN oc_share_status tr ON (ts.id = tr.id AND tr.recipient = ?) + WHERE (orphan = 0 or orphan IS NULL) AND (uid_owner != ? AND uid_initiator != ?)` if len(user.Groups) > 0 { - query += "AND (share_with=? OR share_with in (?" + strings.Repeat(",?", len(user.Groups)-1) + "))" + query += " AND ((share_with=? AND share_type = 0) OR (share_type = 1 AND share_with in (?" + strings.Repeat(",?", len(user.Groups)-1) + ")))" } else { - query += "AND (share_with=?)" + query += " AND (share_with=? 
AND share_type = 0)" } - filterQuery, filterParams, err := translateFilters(filters) + groupedFilters := share.GroupFiltersByType(filters) + filterQuery, filterParams, err := translateFilters(groupedFilters) if err != nil { return nil, err } @@ -361,17 +378,13 @@ func (m *mgr) ListReceivedShares(ctx context.Context, filters []*collaboration.F } defer rows.Close() - var s DBShare + var s conversions.DBShare shares := []*collaboration.ReceivedShare{} for rows.Next() { - if err := rows.Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.ItemSource, &s.ID, &s.STime, &s.Permissions, &s.ShareType, &s.State, &s.ItemStorage); err != nil { + if err := rows.Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.ID, &s.STime, &s.Permissions, &s.ShareType, &s.State); err != nil { continue } - share, err := m.convertToCS3ReceivedShare(ctx, s, m.storageMountID) - if err != nil { - return nil, err - } - shares = append(shares, share) + shares = append(shares, conversions.ConvertToCS3ReceivedShare(s)) } if err = rows.Err(); err != nil { return nil, err @@ -380,6 +393,67 @@ func (m *mgr) ListReceivedShares(ctx context.Context, filters []*collaboration.F return shares, nil } +func (m *mgr) getReceivedByID(ctx context.Context, id *collaboration.ShareId) (*collaboration.ReceivedShare, error) { + user := ctxpkg.ContextMustGetUser(ctx) + uid := conversions.FormatUserID(user.Id) + + params := []interface{}{uid, id.OpaqueId, uid} + for _, v := range user.Groups { + params = append(params, v) + } + + s := conversions.DBShare{ID: id.OpaqueId} + query := `select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, + coalesce(fileid_prefix, '') as fileid_prefix, coalesce(item_source, '') as item_source, coalesce(item_type, '') as item_type, + stime, permissions, share_type, coalesce(tr.state, 0) as state + FROM oc_share ts LEFT JOIN oc_share_status tr ON (ts.id = tr.id AND tr.recipient = 
?) + WHERE (orphan = 0 or orphan IS NULL) AND ts.id=?` + if len(user.Groups) > 0 { + query += " AND ((share_with=? AND share_type = 0) OR (share_type = 1 AND share_with in (?" + strings.Repeat(",?", len(user.Groups)-1) + ")))" + } else { + query += " AND (share_with=? AND share_type = 0)" + } + if err := m.db.QueryRow(query, params...).Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.STime, &s.Permissions, &s.ShareType, &s.State); err != nil { + if err == sql.ErrNoRows { + return nil, errtypes.NotFound(id.OpaqueId) + } + return nil, err + } + return conversions.ConvertToCS3ReceivedShare(s), nil +} + +func (m *mgr) getReceivedByKey(ctx context.Context, key *collaboration.ShareKey) (*collaboration.ReceivedShare, error) { + user := ctxpkg.ContextMustGetUser(ctx) + uid := conversions.FormatUserID(user.Id) + + shareType, shareWith := conversions.FormatGrantee(key.Grantee) + params := []interface{}{uid, conversions.FormatUserID(key.Owner), key.ResourceId.StorageId, key.ResourceId.OpaqueId, shareType, shareWith, shareWith} + for _, v := range user.Groups { + params = append(params, v) + } + + s := conversions.DBShare{} + query := `select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, + coalesce(fileid_prefix, '') as fileid_prefix, coalesce(item_source, '') as item_source, coalesce(item_type, '') as item_type, + ts.id, stime, permissions, share_type, coalesce(tr.state, 0) as state + FROM oc_share ts LEFT JOIN oc_share_status tr ON (ts.id = tr.id AND tr.recipient = ?) + WHERE (orphan = 0 or orphan IS NULL) AND uid_owner=? AND fileid_prefix=? AND item_source=? AND share_type=? AND share_with=?` + if len(user.Groups) > 0 { + query += " AND ((share_with=? AND share_type = 0) OR (share_type = 1 AND share_with in (?" + strings.Repeat(",?", len(user.Groups)-1) + ")))" + } else { + query += " AND (share_with=? 
AND share_type = 0)" + } + + if err := m.db.QueryRow(query, params...).Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.Prefix, &s.ItemSource, &s.ItemType, &s.ID, &s.STime, &s.Permissions, &s.ShareType, &s.State); err != nil { + if err == sql.ErrNoRows { + return nil, errtypes.NotFound(key.String()) + } + return nil, err + } + + return conversions.ConvertToCS3ReceivedShare(s), nil +} + func (m *mgr) GetReceivedShare(ctx context.Context, ref *collaboration.ShareReference) (*collaboration.ReceivedShare, error) { var s *collaboration.ReceivedShare var err error @@ -400,6 +474,8 @@ func (m *mgr) GetReceivedShare(ctx context.Context, ref *collaboration.ShareRefe } func (m *mgr) UpdateReceivedShare(ctx context.Context, share *collaboration.ReceivedShare, fieldMask *field_mask.FieldMask) (*collaboration.ReceivedShare, error) { + user := ctxpkg.ContextMustGetUser(ctx) + rs, err := m.GetReceivedShare(ctx, &collaboration.ShareReference{Spec: &collaboration.ShareReference_Id{Id: share.Share.Id}}) if err != nil { return nil, err @@ -409,122 +485,82 @@ func (m *mgr) UpdateReceivedShare(ctx context.Context, share *collaboration.Rece switch fieldMask.Paths[i] { case "state": rs.State = share.State - // TODO case "mount_point": default: return nil, errtypes.NotSupported("updating " + fieldMask.Paths[i] + " is not supported") } } - var queryAccept string + state := 0 switch rs.GetState() { case collaboration.ShareState_SHARE_STATE_REJECTED: - queryAccept = "update oc_share set accepted=2 where id=?" + state = -1 case collaboration.ShareState_SHARE_STATE_ACCEPTED: - queryAccept = "update oc_share set accepted=0 where id=?" 
- } - - if queryAccept != "" { - stmt, err := m.db.Prepare(queryAccept) - if err != nil { - return nil, err - } - _, err = stmt.Exec(rs.Share.Id.OpaqueId) - if err != nil { - return nil, err - } + state = 1 } - return rs, nil -} - -func (m *mgr) getByID(ctx context.Context, id *collaboration.ShareId) (*collaboration.Share, error) { - uid := ctxpkg.ContextMustGetUser(ctx).Username - s := DBShare{ID: id.OpaqueId} - query := "select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, coalesce(item_source, '') as item_source, stime, permissions, share_type FROM oc_share WHERE id=? AND (uid_owner=? or uid_initiator=?)" - if err := m.db.QueryRow(query, id.OpaqueId, uid, uid).Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.ItemSource, &s.STime, &s.Permissions, &s.ShareType); err != nil { - if err == sql.ErrNoRows { - return nil, errtypes.NotFound(id.OpaqueId) - } - return nil, err - } - return m.convertToCS3Share(ctx, s, m.storageMountID) -} + params := []interface{}{rs.Share.Id.OpaqueId, conversions.FormatUserID(user.Id), state, state} + query := "insert into oc_share_status(id, recipient, state) values(?, ?, ?) ON DUPLICATE KEY UPDATE state = ?" -func (m *mgr) getByKey(ctx context.Context, key *collaboration.ShareKey) (*collaboration.Share, error) { - owner, err := m.userConverter.UserIDToUserName(ctx, key.Owner) + stmt, err := m.db.Prepare(query) if err != nil { return nil, err } - uid := ctxpkg.ContextMustGetUser(ctx).Username - - s := DBShare{} - shareType, shareWith, err := m.formatGrantee(ctx, key.Grantee) + _, err = stmt.Exec(params...) if err != nil { return nil, err } - query := "select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, coalesce(item_source, '') as item_source, id, stime, permissions, share_type FROM oc_share WHERE uid_owner=? AND item_source=? AND share_type=? AND share_with=? AND (uid_owner=? 
or uid_initiator=?)" - if err = m.db.QueryRow(query, owner, key.ResourceId.StorageId, shareType, shareWith, uid, uid).Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.ItemSource, &s.ID, &s.STime, &s.Permissions, &s.ShareType); err != nil { - if err == sql.ErrNoRows { - return nil, errtypes.NotFound(key.String()) - } - return nil, err - } - return m.convertToCS3Share(ctx, s, m.storageMountID) -} - -func (m *mgr) getReceivedByID(ctx context.Context, id *collaboration.ShareId) (*collaboration.ReceivedShare, error) { - user := ctxpkg.ContextMustGetUser(ctx) - uid := user.Username - - params := []interface{}{id.OpaqueId, uid} - for _, v := range user.Groups { - params = append(params, v) - } - s := DBShare{ID: id.OpaqueId} - query := "select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, coalesce(item_source, '') as item_source, stime, permissions, share_type, accepted FROM oc_share ts WHERE ts.id=? " - if len(user.Groups) > 0 { - query += "AND (share_with=? OR share_with in (?" + strings.Repeat(",?", len(user.Groups)-1) + "))" - } else { - query += "AND (share_with=?)" - } - if err := m.db.QueryRow(query, params...).Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.ItemSource, &s.STime, &s.Permissions, &s.ShareType, &s.State); err != nil { - if err == sql.ErrNoRows { - return nil, errtypes.NotFound(id.OpaqueId) - } - return nil, err - } - return m.convertToCS3ReceivedShare(ctx, s, m.storageMountID) + return rs, nil } -func (m *mgr) getReceivedByKey(ctx context.Context, key *collaboration.ShareKey) (*collaboration.ReceivedShare, error) { +func (m *mgr) uidOwnerFilters(ctx context.Context, filters map[collaboration.Filter_Type][]*collaboration.Filter) (string, []interface{}, error) { user := ctxpkg.ContextMustGetUser(ctx) - uid := user.Username + uid := conversions.FormatUserID(user.Id) - shareType, shareWith, err := m.formatGrantee(ctx, key.Grantee) + query := "uid_owner=? 
or uid_initiator=?" + params := []interface{}{uid, uid} + + client, err := pool.GetGatewayServiceClient(pool.Endpoint(m.c.GatewaySvc)) if err != nil { - return nil, err - } - params := []interface{}{uid, formatUserID(key.Owner), key.ResourceId.StorageId, key.ResourceId.OpaqueId, shareType, shareWith, shareWith} - for _, v := range user.Groups { - params = append(params, v) + return "", nil, err } - s := DBShare{} - query := "select coalesce(uid_owner, '') as uid_owner, coalesce(uid_initiator, '') as uid_initiator, coalesce(share_with, '') as share_with, coalesce(item_source, '') as item_source, ts.id, stime, permissions, share_type, accepted FROM oc_share ts WHERE uid_owner=? AND item_source=? AND share_type=? AND share_with=? " - if len(user.Groups) > 0 { - query += "AND (share_with=? OR share_with in (?" + strings.Repeat(",?", len(user.Groups)-1) + "))" - } else { - query += "AND (share_with=?)" - } + if resourceFilters, ok := filters[collaboration.Filter_TYPE_RESOURCE_ID]; ok { + for _, f := range resourceFilters { + // For shares inside project spaces, if the user is an admin, we try to list all shares created by other admins + if strings.HasPrefix(f.GetResourceId().GetStorageId(), projectInstancesPrefix) { + res, err := client.Stat(ctx, &provider.StatRequest{Ref: &provider.Reference{ResourceId: f.GetResourceId()}}) + if err != nil || res.Status.Code != rpc.Code_CODE_OK { + continue + } - if err := m.db.QueryRow(query, params...).Scan(&s.UIDOwner, &s.UIDInitiator, &s.ShareWith, &s.ItemSource, &s.ID, &s.STime, &s.Permissions, &s.ShareType, &s.State); err != nil { - if err == sql.ErrNoRows { - return nil, errtypes.NotFound(key.String()) + // The path will look like /eos/project/c/cernbox, we need to extract the project name + parts := strings.SplitN(res.Info.Path, "/", 6) + if len(parts) < 5 { + continue + } + + adminGroup := projectSpaceGroupsPrefix + parts[4] + projectSpaceAdminGroupsSuffix + for _, g := range user.Groups { + if g == adminGroup { + // User 
belongs to the admin group, list all shares for the resource + + // TODO: this only works if shares for a single project are requested. + // If shares for multiple projects are requested, then we're not checking if the + // user is an admin for all of those. We can append the query ` or uid_owner=?` + // for all the project owners, which works fine for new reva + // but won't work for revaold since there, we store the uid of the share creator as uid_owner. + // For this to work across the two versions, this change would have to be made in revaold + // but it won't be straightforward as there, the storage provider doesn't return the + // resource owners. + return "", []interface{}{}, nil + } + } + } } - return nil, err } - return m.convertToCS3ReceivedShare(ctx, s, m.storageMountID) + + return query, params, nil } func granteeTypeToShareType(granteeType provider.GranteeType) int { @@ -538,49 +574,48 @@ func granteeTypeToShareType(granteeType provider.GranteeType) int { } // translateFilters translates the filters to sql queries. -func translateFilters(filters []*collaboration.Filter) (string, []interface{}, error) { +func translateFilters(filters map[collaboration.Filter_Type][]*collaboration.Filter) (string, []interface{}, error) { var ( filterQuery string params []interface{} ) - groupedFilters := share.GroupFiltersByType(filters) // If multiple filters of the same type are passed to this function, they need to be combined with the `OR` operator. // That is why the filters got grouped by type. // For every given filter type, iterate over the filters and if there are more than one combine them. // Combine the different filter types using `AND` var filterCounter = 0 - for filterType, filters := range groupedFilters { + for filterType, currFilters := range filters { switch filterType { case collaboration.Filter_TYPE_RESOURCE_ID: filterQuery += "(" - for i, f := range filters { - filterQuery += "item_source=?" 
- params = append(params, f.GetResourceId().OpaqueId) + for i, f := range currFilters { + filterQuery += "(fileid_prefix =? AND item_source=?)" + params = append(params, f.GetResourceId().StorageId, f.GetResourceId().OpaqueId) - if i != len(filters)-1 { + if i != len(currFilters)-1 { filterQuery += " OR " } } filterQuery += ")" case collaboration.Filter_TYPE_GRANTEE_TYPE: filterQuery += "(" - for i, f := range filters { + for i, f := range currFilters { filterQuery += "share_type=?" params = append(params, granteeTypeToShareType(f.GetGranteeType())) - if i != len(filters)-1 { + if i != len(currFilters)-1 { filterQuery += " OR " } } filterQuery += ")" case collaboration.Filter_TYPE_EXCLUDE_DENIALS: // TODO this may change once the mapping of permission to share types is completed (cf. pkg/cbox/utils/conversions.go) - filterQuery += "permissions > 0" + filterQuery += "(permissions > 0)" default: return "", nil, fmt.Errorf("filter type is not supported") } - if filterCounter != len(groupedFilters)-1 { + if filterCounter != len(filters)-1 { filterQuery += " AND " } filterCounter++ diff --git a/pkg/share/manager/sql/sql_suite_test.go b/pkg/share/manager/sql/sql_suite_test.go deleted file mode 100644 index e3c6fbd8e9..0000000000 --- a/pkg/share/manager/sql/sql_suite_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package sql_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestSql(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Sql Suite") -} diff --git a/pkg/share/manager/sql/sql_test.go b/pkg/share/manager/sql/sql_test.go deleted file mode 100644 index bd70c9a358..0000000000 --- a/pkg/share/manager/sql/sql_test.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package sql_test - -import ( - "context" - "database/sql" - "os" - - user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - ruser "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/share" - sqlmanager "github.com/cs3org/reva/pkg/share/manager/sql" - mocks "github.com/cs3org/reva/pkg/share/manager/sql/mocks" - _ "github.com/mattn/go-sqlite3" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/stretchr/testify/mock" - "google.golang.org/protobuf/types/known/fieldmaskpb" -) - -var _ = Describe("SQL manager", func() { - var ( - mgr share.Manager - ctx context.Context - testDBFile *os.File - - loginAs = func(user *user.User) { - ctx = ruser.ContextSetUser(context.Background(), user) - } - admin = &user.User{ - Id: &user.UserId{ - Idp: "idp", - OpaqueId: "userid", - Type: user.UserType_USER_TYPE_PRIMARY, - }, - Username: "admin", - } - otherUser = &user.User{ - Id: &user.UserId{ - Idp: "idp", - OpaqueId: "userid", - Type: user.UserType_USER_TYPE_PRIMARY, - }, - Username: "einstein", - } - - shareRef = &collaboration.ShareReference{Spec: &collaboration.ShareReference_Id{ - Id: &collaboration.ShareId{ - OpaqueId: "1", - }, - }} - ) - - AfterEach(func() { - os.Remove(testDBFile.Name()) - }) - - BeforeEach(func() { - var err error - testDBFile, err = os.CreateTemp("", "example") - Expect(err).ToNot(HaveOccurred()) - - dbData, err := os.ReadFile("test.db") - Expect(err).ToNot(HaveOccurred()) - - _, err = testDBFile.Write(dbData) - Expect(err).ToNot(HaveOccurred()) - err = testDBFile.Close() - Expect(err).ToNot(HaveOccurred()) - - sqldb, err := sql.Open("sqlite3", testDBFile.Name()) - Expect(err).ToNot(HaveOccurred()) - - userConverter := &mocks.UserConverter{} - userConverter.On("UserIDToUserName", mock.Anything, mock.Anything).Return("username", nil) - userConverter.On("UserNameToUserID", mock.Anything, mock.Anything).Return( - func(_ context.Context, username string) *user.UserId { - return &user.UserId{ - OpaqueId: username, - } - }, - func(_ context.Context, username string) error { return nil }) - mgr, err = sqlmanager.New("sqlite3", sqldb, "abcde", userConverter) - Expect(err).ToNot(HaveOccurred()) - - loginAs(admin) - }) - - It("creates manager instances", func() { - Expect(mgr).ToNot(BeNil()) - }) - - Describe("GetShare", func() { - It("returns the share", func() { - share, err := mgr.GetShare(ctx, shareRef) - 
Expect(err).ToNot(HaveOccurred()) - Expect(share).ToNot(BeNil()) - }) - - It("returns an error if the share does not exis", func() { - share, err := mgr.GetShare(ctx, &collaboration.ShareReference{Spec: &collaboration.ShareReference_Id{ - Id: &collaboration.ShareId{ - OpaqueId: "2", - }, - }}) - Expect(err).To(HaveOccurred()) - Expect(share).To(BeNil()) - }) - }) - - Describe("Share", func() { - It("creates a share", func() { - grant := &collaboration.ShareGrant{ - Grantee: &provider.Grantee{ - Type: provider.GranteeType_GRANTEE_TYPE_USER, - Id: &provider.Grantee_UserId{UserId: &user.UserId{ - OpaqueId: "someone", - }}, - }, - Permissions: &collaboration.SharePermissions{ - Permissions: &provider.ResourcePermissions{ - GetPath: true, - InitiateFileDownload: true, - ListFileVersions: true, - ListContainer: true, - Stat: true, - }, - }, - } - info := &provider.ResourceInfo{ - Id: &provider.ResourceId{ - StorageId: "/", - OpaqueId: "something", - }, - } - share, err := mgr.Share(ctx, info, grant) - - Expect(err).ToNot(HaveOccurred()) - Expect(share).ToNot(BeNil()) - Expect(share.Id.OpaqueId).To(Equal("2")) - }) - }) - - Describe("ListShares", func() { - It("lists shares", func() { - shares, err := mgr.ListShares(ctx, []*collaboration.Filter{}) - Expect(err).ToNot(HaveOccurred()) - Expect(len(shares)).To(Equal(1)) - - shares, err = mgr.ListShares(ctx, []*collaboration.Filter{ - share.ResourceIDFilter(&provider.ResourceId{ - StorageId: "/", - OpaqueId: "somethingElse", - }), - }) - Expect(err).ToNot(HaveOccurred()) - Expect(len(shares)).To(Equal(0)) - }) - }) - - Describe("ListReceivedShares", func() { - It("lists received shares", func() { - loginAs(otherUser) - shares, err := mgr.ListReceivedShares(ctx, []*collaboration.Filter{}) - Expect(err).ToNot(HaveOccurred()) - Expect(len(shares)).To(Equal(1)) - }) - }) - - Describe("GetReceivedShare", func() { - It("returns the received share", func() { - loginAs(otherUser) - share, err := mgr.GetReceivedShare(ctx, shareRef) - 
Expect(err).ToNot(HaveOccurred()) - Expect(share).ToNot(BeNil()) - }) - }) - - Describe("UpdateReceivedShare", func() { - It("returns an error when no valid field is set in the mask", func() { - loginAs(otherUser) - - share, err := mgr.GetReceivedShare(ctx, shareRef) - Expect(err).ToNot(HaveOccurred()) - Expect(share).ToNot(BeNil()) - Expect(share.State).To(Equal(collaboration.ShareState_SHARE_STATE_ACCEPTED)) - - share.State = collaboration.ShareState_SHARE_STATE_REJECTED - _, err = mgr.UpdateReceivedShare(ctx, share, &fieldmaskpb.FieldMask{Paths: []string{"foo"}}) - Expect(err).To(HaveOccurred()) - }) - - It("updates the state when the state is set in the mask", func() { - loginAs(otherUser) - - share, err := mgr.GetReceivedShare(ctx, shareRef) - Expect(err).ToNot(HaveOccurred()) - Expect(share).ToNot(BeNil()) - Expect(share.State).To(Equal(collaboration.ShareState_SHARE_STATE_ACCEPTED)) - - share.State = collaboration.ShareState_SHARE_STATE_REJECTED - share, err = mgr.UpdateReceivedShare(ctx, share, &fieldmaskpb.FieldMask{Paths: []string{"state"}}) - Expect(err).ToNot(HaveOccurred()) - Expect(share.State).To(Equal(collaboration.ShareState_SHARE_STATE_REJECTED)) - - share, err = mgr.GetReceivedShare(ctx, shareRef) - Expect(err).ToNot(HaveOccurred()) - Expect(share).ToNot(BeNil()) - Expect(share.State).To(Equal(collaboration.ShareState_SHARE_STATE_REJECTED)) - }) - }) - - Describe("Unshare", func() { - It("deletes shares", func() { - loginAs(otherUser) - shares, err := mgr.ListReceivedShares(ctx, []*collaboration.Filter{}) - Expect(err).ToNot(HaveOccurred()) - Expect(len(shares)).To(Equal(1)) - - loginAs(admin) - err = mgr.Unshare(ctx, &collaboration.ShareReference{Spec: &collaboration.ShareReference_Id{ - Id: &collaboration.ShareId{ - OpaqueId: shares[0].Share.Id.OpaqueId, - }, - }}) - Expect(err).ToNot(HaveOccurred()) - - loginAs(otherUser) - shares, err = mgr.ListReceivedShares(ctx, []*collaboration.Filter{}) - Expect(err).ToNot(HaveOccurred()) - 
Expect(len(shares)).To(Equal(0)) - }) - }) - - Describe("UpdateShare", func() { - It("updates permissions", func() { - share, err := mgr.GetShare(ctx, shareRef) - Expect(err).ToNot(HaveOccurred()) - Expect(share.Permissions.Permissions.Delete).To(BeTrue()) - - share, err = mgr.UpdateShare(ctx, shareRef, &collaboration.SharePermissions{ - Permissions: &provider.ResourcePermissions{ - InitiateFileUpload: true, - RestoreFileVersion: true, - RestoreRecycleItem: true, - }}) - Expect(err).ToNot(HaveOccurred()) - Expect(share.Permissions.Permissions.Delete).To(BeFalse()) - - share, err = mgr.GetShare(ctx, shareRef) - Expect(err).ToNot(HaveOccurred()) - Expect(share.Permissions.Permissions.Delete).To(BeFalse()) - }) - }) -}) diff --git a/pkg/share/manager/sql/test.db b/pkg/share/manager/sql/test.db deleted file mode 100644 index fba76fdcc4fc7816bf2b281f9c2074bf1955842c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 655360 zcmeFa3w#^*b?1v0i8n^FM8T9q36e}xuqYCPH>AiGMN+mbiL_%kfpSg^n+z^=KRl`|M`Fa=l`EMe*iPjJ$p`+g;+&u);Kwqa%^)rosN^S zn8V?4(*Hj||Ld<7ZFuw@`m2<0^l~~T&VJ_rY5ADnQgpwc`5N=L%$J$JU_Q(I3G)Y< za3doq2LTWO0T2KI5C8!X009sH0T2KI5Lk=AYpa7)Bq~|^g;(h9sVC_zeS+Q&j?>%D zVS3xagz24jC;hd2$r~}uruO5s+86xS*(aWCyP?OeM#(cDFmO8sKGdS(I0bwQGz^Yd_Z!1sSI<;ML~2l>Fbte3OIu8G6GD1V8`;KmY_l z00ck)1V8`;KmY_l;5G>qT|q}?WMstQJ9@$|i1n6CZzqKtT&+u9U7jq{&ZM%+(3;fsr=3BEBN~l5JV$eWu3QuAmN)PFhBmUJY6qxQ zF6Pf2%vb0QFAx9$5C8!X009sH0T2KI5C8!X0D*g)K#0D6pf4S`eD0{WWPtqt9`9DT z4+0js*?sDdPRr`oF7*#$ z)NgdEzeW}7i&}Z{x+pIdODiwE=^&dvcYzfth zoo{-R?$EsfzvA8~eXn&iMee<%vK!r}xKret0w)U^LDR{ObPdOD8&(I?exKpT`SA9P z=Ettv@gt>no^-uFU6tD9*mJ#6sK$oHrbu3hvC(v*YNPA@b)P!%5KZU zr0U@_OOh_UZ|;~cjz`oOUb-UZ+;U>LOo$<;}!Rs19tdTD8_eCI5FaA9m0`yg&d1KmY_l00ck)1V8`;KmY_l00jP}30!je9UjlH zvZSmmCUf2oaoW1DzV52#^S8Z}tmOZ0CI9~n^UGKR_?PZv)DHq600JNY0w4eaAOHd& z00JQJz!Mm84?1#Azf->};4wY#pIGKAZr5IyTI{zic0KGIbg+KES*m_}N;y)y|Nj*S z^L@;f2c9tifB*=900@8p2!H?xfB*=900@A<9TPa}@;VCiH~@d6Db{(h!Btz6$`7hq 
zlWHI|au@zGuN1*)5$D`f<@24HiA22_C$4!I*5C8!X009sH0T2KI5C8!X z009sHfxAoKkf$(WT^q10CFn~Cu7jQv!`9^noAm?b^Z&Dh6AtFjm|tK%$b1{qV&)l^ z8H#)%@@tWQA9*dZ5SfYW3;(b1$HE^Bza!iTp9}8|{Z;69LLUizN2n2cDRe9p3%P>7 z9sFqUyMv3tv%&G8Bk-BPuLS;W;4Ohy0&HNH|EvCA_y2qUd;Glrr2nArtG++-{fh5H zzHjvjzB%6sU(EXz?{9m5%KL8bi{8h*4-fvs;2#eDUxV)-Tp3i{!V3gI00cl_OAy#| z*26}_?FK!KR;bgBmntI9$s(;-uQY_#_|qPCGTL9Lt~FPyLbqs99!XKQ)nfc-nJd&kj?|w4FTVIU0SU z(Ug`&rO9Nm$TdW@hDK9Zrh6L|7n@SMQEUn=xhe7r#mA;RM<=7b$Jo@InMmwA={dS* zN3X6~|Hu=bvFILaA9Xcv>~YW7WVEZ;ww~mk6P~d>JGvCjN|-lzhNIyc*SspoYSV^} zdxnRjnm~I`Ok}CAk9meCJ7Sw^vJ;8@1<&x&j)wL_Mp?K)w|Z0hJUNk?VE5-e!$*cS zo04X(fibHO&eRq^N=enn#`R)v#SDS8Q;;)vN1D!9kSl_2dc>Z?TQ zw8Xw6N|5&In$^S}qaf|TQ+~`!qeQL4(x5oS>9A&@qtRCt^*;^uJ%^P*?b0+WdU(up zL~oA~H(GNZ^&HVFFiPyIpja3g^&HWwFm%mIc0b~o)GMh`=yFHP;ilX^WCWwWLO2h2!NhDo!|ReQn3%t+8x`f5u|)J+VT?3n4Oc&Bg4wnY^wCtP60tv^TW#2Y-*VeYLZrkP*!WU4Zew8 zo{UjZm!eITG$$V%@??xEyY$Vq3bX=$~=}P_bT-CYRYstxPMLXe>3xw|Ry}Pgsspo8L8T zI>#v5k1$ln5ktzu0o3rel~ z1D??+`BATki$sO47J48*>i3LJMy(2lQ+j2}wMk;P&ojDbhgHwSzSrv+i5}vmntFdi zwSvwi!#JToIOrKsYS2n;tkbNxYrr$2Sl4t+%ey@^gKh6>n?jwM$maIAsn!GZ{Qz;9 zE+TrGb3VwY2?RM#;pRX+be>-{SS^B2s2Wj@CI0IdL2naj*cW`r4x zd@k~G{D0;DAO2tP zf5iWN{ZJu3oj4=0T2KI5CDO@Ltrnh;W+yj zUet?q-yzUrYGPFM`<1$~AmErxZaj^PP4l6rBf zrjM$T)LYbNd|ZvBUbT+-BWfgRh1P|V$J9vblJ^;otC7^D)3JJZm{vucT8Yums?TGz z(lOav(r5Y6J+vOutJSeRx`!4+I%SqM4AuH0W3(93U)E>;&BO1LG zU47+d*yr)6KVDbgmXmKzX9OWxsbn$&UrLs9g(RO$l?tUozEa^xz`Z$F$(4k1rkqLT zlj$;>Or%*ZQA%W!rF=GD7IJS+r7NjQk`)RiE}70Ia%>@+re~Vwl5A4Qb9t_u&14ev z?Sm94RD^7)T;UU3flDXYR7R-eg$&16(%HPgB@0}xP);TC<#MW!&2t=EN#-l1G%v8Z zLONSwOG%-^R?-!=oKFk%%lRyyP4cPp(yL;%%1zSOIbw%jWZC1fb7H-HBX%R7EoRg4 z*i@rY6l50th8k0(=b9}nYlVdDb;WIflT_(5qLb=2h3Yh{YRI+R?ndVYi zHlHZ4*;KN?XA^89EeNcT%(1ydC0*tcxpX?qrE*l7OIFx)CPyUsLZOgKaG6Rn$7O^< zCXvsjIW}L)=jj;&AE*C!DWCs4nO7b34=)e^0T2KI5C8!X009sH0T2KI5CDO@PGHsJ zxf%BVORobcpZ`0y-Sx)7AP9f}2!H?xfB*=900@8p2!H?xfWQ_afc$?8bp~7k0T2KI z5C8!X009sH0T2KI5CDO15CP@;|M>j>8w3|-K>!3m00ck)1V8`;KmY_l00cl_ix3D$ 
z{?NfV{SGD{`An!8`Cz2s|9v0p4Gg?@;04cBPtx^@>k%sc1^VxGy{5MhMqgl^4zXSq zZnUmdMOi4)<2)pFzbNrVo~sJ=GS`$!S6){IbWr3E2T#A=V~yi(KFoNdEbF}WlB_&{ z%HkQn#MKvtR=;H1^o7}}i?gwdQ>V_&#uoY&7Gj43vDkuGUWn17dW1!x8H>%$UyRLN zI(IJi%!M=Or!HKMot?cLo4Ryy{>&ViJ3l*jF)&WYw435WY?*6P<;}y%OeP+inLRyq z>6~`3s?u7i^RjeRsM}fBwOLhFhivskcSKc?8lC5@n6`}acrG#+Jv+KiA1cphQ9UWL z1i{#2Ze0j_qobqFZ&}ot8f8|!NFPT-?M?_PjnxS%x9-pzWRM&iQ`=6D?-1+7I#&}+ zeU$=n=^HqtSSVXsvHB2f!s~XkJ8RfbuwXD-=x^KxuVr}P?8+)34(%*)J z)&#mUYEi$l=O)$pCAw9~hPs#L&OCc*Hgz&neJ{K!2GwY| zHDpACUZzBYz8&h12EC{g4a(zfEPb0wu8mJ~g2!c1S`MuF2N1SgTHc(lL6kF3m)>TkwzAOTn_@$GD4yAIcRC9iq1@H3 zan&E{vYU_iywP0FdCQ!vOpN9^z(T$w^smRiqjLwnunG9LaVD_1|N2e_w+?%~(RkeX zEkPqRJ5MDl)~+d$Y3}*@!I*^|A~jB|7VSLQ;QYwdt`%jmMh{QqY7KK1)8|=ZLe}O- zMO%4@B+Zg7y4u*3zBcn3leBUQ)~%~^u40p`R0N(LQ>Z+A*$hKB;$obA%Z&z9R$-8DJ>9eigXRo7|Hs3=u zCZ#4l+G@kDvFVon8K$p0`cyXCa}5fvwPRot{nb&?`^q?}bj1?Iaf@5*z5eKPx}(-- zt4~q6pStO{78iX=Rk2Rzf4y;QDA8p(1!QqE)H|xSVWL-_2*hu_$*o4^x1KPglBZ|6 z$;uTr$7`D&o2b-E?qDVrl*z_yvHkf(8OrK}q=V@nf>nv0I$GR>V&|l2cWtQy(XS~M zuC*nZGo4c|YWj4PTfZ08%he5siE_($b*9ss(KjhA(GBX%`q)I$a*3=?NHn#|q@ian zdcYcPsZ*IW(EeMXAU{n&Ik1fC?+l8FqrXd_AoUA=|s^0Wp_R$ z!+XXxs9a86RwJ&Hg|>uiZK2K!J-hC%$m=OQtX$uW(~QkykP zC~Nz!E~`qa*lDL$dbkfoywtmQ-&4+p;A5pcf6<-6qDHt@fDf6Mht z1E-yDiF`P8GVnzz`oZA83w)xJBM08^ywKVFXZ%A>{Qf^+Sj;P0YrNPG?FFSadfF$=O3GQQRFz7`*f*xFOSgySKO$qc2ZZl48C%~5Ys%1tp!#w`ln|FGnCr&uuXHS3y-NA$5$fRyW+|s;b0QIhtlS)RJY6 zUs9jcx{m4^0jiIRt`k~UCmFfp{Lo;OKjB>atfo`w3MV!1w=H@2wG$7iYbEb6PA`TY zojZ)}p|#`o4q~;djT;1_ZHU=-?k0x7Z6va{u|%Nb`bI>olHt56)oIWeSN44YOHTo6 zkP2Pln$ug-ZfMa%#p*4(x+hgLT=g*Q9#_T{R##^k0h!ye!6Kc`L|c3^_IBOQdOalu z-G^%#KF;38^O~{BuN-BG;m$GJ*No(j!X3o3-nzSRF4Ij}&z{E2r(QL$ zF`4uQ#T}gOl+Oo@nCi~V@-|bu`aCR&=i3xl$~i-ySdD-(PUed5n^J5m?bkC4`_aym zu@;^4dG#^wX01Ai?(D(B)revBQta8gdO>z`eptOsc+CvV&J((dO}YnoQGI-3Qw>i$ z`QH8@wNvYas-6)UpsXpF0l)JhgjRErLR3(ul!chSWhffG)XB5EJAOUeodc|WwvM_n zv*`YhFIa0d_Q)LNbFWn%*4CJI7(=~-L>c3q%{^;!yMs}!&68bghsH=WplR12vec+a zlsezWA^eh9r4{-*wSkr<%p))6^f%ch3+9#Q1p;K? 
z$1&^IX|X~{xuQu{M4)#=kNIaU&z@=}>wY0)=Q6+zJu96C*}=_#ve<@f(C=Eoe&|3hzh zfdB}A00@8p2!H?xfB*=900@8p2y6iYhdhN5r~NX5zI@<1>Nzp#x9D`O5%ety;Qs$x zpj+Vr2!H?xfB*=900@8p2!H?xfB*=5g9+gN|KDK1Fb@JC00JNY0w4eaAOHd&00JNY z0$YH9`uqRLjDyM18(ts)0w4eaAOHd&00JNY0w4eaAg~UB)rQOO@ZS`2p7X9#vrnFx zJNEqa)PX-{*tMAvMSLnM3?9$kH_+fd?GM=e(KCQflm}F zd?Ay{3t7iUS5MPJ@%%TV?A>azdMf~>z1l|Sucf7`ysw%4ZBg|)4GD9lL?y|mletnl z$(G9LOgWX!m-G2Th0Pbz>73&|O8)O>4mg-EF@MDT8uRa%?_pM%E6fau@B#r4009sH z0T2KI5C8!X009sHfd_}cfw0%%cSbWlm%65)FHKmN6dnnwD#v{;by31l>0Xl<3MyL6 zsLwU0YbfdcLjgs+eZ=QFZOC`{71<7w(Q1gU9P^OSPmZXS*ewTacPkpqe&3v0s#uS> z6w&sm@3bakoXY-=sE-V*O8EW%gA>YVDF}c72!H?xfB*=900@8p2!OzYO91)*gWLCL zIS7CN2!H?xfB*=900@8p2!OzYLjd>xdvN+1Ed>D(009sH0T2KI5C8!X009tqa0%e^ z{|C44(Q*&~0T2KI5C8!X009sH0T2Lz2ZsPY|9^1$8Z89@5C8!X009sH0T2KI5C8!X zcyI~e^Zy68@6mD)009sH0T2KI5C8!X009sHfd_{GKL3Ak`Wh_-0T2KI5C8!X009sH z0T2KI5O{D2D4+lDaDB|-ey{6e%+E(Y9(h~%c<`5k?+#P~!~US}6W;5Ci-WO&^1!g? zp!>6qA8|NBZ~Ygf<~J6!TWGh0=0a?lYw}B6^KdpDkIl_rjLls- zcP=na#iDAtBCe}sabO`l2icNtl7i(Pes?cl|*|kbW3gd&( z^JD86tcX>iNDnN%s#!626RYwOZ**+Td23APY!*^|=FVgvWwWdUSu8Ka#JVgj3eA|o zH)aJgb?M^#nK`m_es=DnN?viwJWB5rgK9!mx+<)!(|NL7UB|*vRxxpD?##27W~oCI zQ`7}#9vh6ll3R}$1zVGog);R)i(*FTkn0mqtQO+lXfEfxx!oYxr=*bV6CUj++NaYY zt9nqh^3_SHe40W{q8L)j^$<^HGTkw55HM^JhS|e|(aWiIFi?aG%{o^tN)1_*>KcSz z;rPw)m^YeAId4tr2zpfrK(FXXU-Nr4I*r$=wr%|e!Bvymb=mg7O@!4lS8WScpxHHm z8nuk;(RK6OquwaXI$xXa%hcKG769eWLBy#@VJiWzBKD z{=C-{py@S#Vm|RseBDK3btqbcZL=q78pCy7Y|w<*6wS#)J;n5rSkORrord~! 
zIGz_;Epw>%DbVRwYBzbo)>(D#ZnR4jmK*zH^h{S9#uqF#jB7rRFPJu|yKYYH_eLit zov*Q0=8dM*5Sp?`{%PDz!694rX1NW2VNCr4UgvMN}a29 zox>~_1x66}9MTi+eHRi7x;yqm#xTnU;#2zuqn9SvnPrsdydsr~Et!+s+Qmh;=)me* zN7Qb(b;Q~Y<}saaFab$-N;s+sM52aSxFE4zSCu=iDJDXnSGc} zBof^xU?X1D+9(c>?;VWJ$Jb$KNlUa8!OWBPby75C8!X009sH z0T2KI5C8!X0D-$t0H6QgeWtJo0w4eaAOHd&00JNY0w4eaAOHeejQ~FX-)cPqe?R~P zKmY_l00ck)1V8`;KmY_l;O-N^=l^$~DJ+5j2!H?xfB*=900@8p2!H?xfWTHGfY1N8 zT93dV5C8!X009sH0T2KI5C8!X009uV`vma$|J`Q_iy#03AOHd&00JNY0w4eaAOHd& zu+<14|KDmo0)IdN1V8`;KmY_l00ck)1V8`;K;Z5Z!1w>}K2ul(0T2KI5C8!X009sH z0T2KI5CDO#MgX7xZ?ztQKOg`CAOHd&00JNY0w4eaAOHd&aQ6w|^Z&cg6c#}M1V8`; zKmY_l00ck)1V8`;Kwzs8z~}#4tw-Pw2!H?xfB*=900@8p2!H?xfB*>GeFFIW|L!w| zMGyc15C8!X009sH0T2KI5C8!X*lGln&;KL7{SM|M%r`~;D)NEIGvTj>zbo{C&}{H> zV9_u6-b{sffdB}A00@8p2s}sxUW#mY_(#t>hn;?($K&&jIl~pPDzqkBOI%YZ3x!I7 zPgg4CawVNBr-VwrlF8(<4>|l# zoOh<2PPbzHymR$A&AfEIK3$dC<=A59PScp;U` zm)X*SK4ONm@I zDWnQ%E}6?_$jN*`;8{MIOJ%iomy+2sH8L$^)5%mSAtdveJeMx#Gs!$(ViWmtxx{k? zA*HsPP3PE5w!{}Q zk}NK@Ii9O)jZT*moKRv*iEJjBOJ-69A)iaK`LZBnDml*Iqdz#{e#=Mo7IR#dV}*iH zN|q{#L?WB0a1=dkAz$V=Ays0v7UwykkfBaVvq>(IC<)0#wm@A@0Vd>y44>mud?J(0 z=Cq#9vJ`O?38f@;2FI5ZJe%i*icl$4vZ-vsXmNp}jND~c#ef#vqiePJ8L26^7b&=@ zj|&AF7;GY0q25X534bM#OOn&+Oo307*p%O@Ol|$CA-%1sawgBGDB@BXmgCCVf*_=M z8YyKipCEgQPFu-ZndeG;DJ>+j)I-@MVNE3XL^;Qn*kqYnk}OwPwXF%Zl*;oo$SMNI zr&v;?0iEXxX@Skqz$qDR%@#6+R3=}3qu-b%)NyRg5?V}`$zUlGRG*eJp1rzFiaQa%*z-23KoTg$b!F z^Z5dw$tGyXQg0@?bSjg}r?_012qoz~;kD>`+>ZC$qG}bOLp-@tN>|b}%=kRTe1hl8 z$y7O==Q%D@;wla$8IPcsPF=DM{Nrz6V$S~UrQ70RXj{uf+S&7SlAJ(g;?WlrXrEl%JIbmFP-8S2DL zsZ>s6Qk7(q-f7seLZYLl7!E7{cMO-y*{mS!KkFpJ8VzUXob1{d$JcwkOeWJD7gaw0 zkA(l+5&3K8B(pp6awHqM8QB*3^~i^qO8C!dRp8ys?>G`vgWk{`1a_7HlJ`)<#RMVO49Rb;(diYyE}1psKae`d2{J+GrukuWD_y`r}iz zHd;jSs#+T@hYV`AHdx#kP_;J7(LIlPoaddcjTRr=n%YKd2(D4G=HGB(0Kfmg_eVBb z00JNY0w4eaAOHd&00JNY0w4ea8xaTx79HE2?{S0&0~5ZFcz?#XJowArZyp>T{2tFw zx_`s-X4mhyzt{O5*AEB&V_0xhNcoK$S?nqIDf_M;c8>MPvD;7J=C0%3=>GlATW^xN 
zQdN-nVohq(cbkljlhYSwr!LOME>4{~Hyc|pcZvN+1bmnsY@5<&&-jL^Rsgo1LJf^OO~44qENIuR8?q7lDu|lyCpQ$ z(q*p6(^o6*{86^9Gxi>t&s$bB%`C1U8p5o1k{>ve0aiS&K?f=UYSX z@PcA(d_g~m+E~aBM$eC}+eY1?P9yc*6E}-lZ*viS=*IZP;kZ7uR2YXNFFOX@u*lZI;edOxVkx(J4<@5zQ{;FUZ z-Q7A(frjC-P%d&b#_Y`qHw3<|1VORf)}q1GG^PsO>QOcje>JU6MXS>~;KioYZYYSg zjY1z}P2R!0s7xq2Fzcky)Bf%dQI1lEduD^-u2rQ5g|>NYHIvlBKHlYTQJGG3pVgh5 zz29otZTh~@e%*~6SC3W3Kx%_Apteu-&TOu_n_tZ*ywP~vdGknjleuzDtQ&1s_cCiV zS}pt+X!UV+FghP!XXI>?Y8Wt`r2(N?6g}s zkJz5zHkcHweNVq=Fz3de2v^T&n;HuXGR@-jz5kkZ)=jQ$xGM4LylxuLumt93S1i!I%*Pu_h~ zvUW0%N>>D4E_U~1pS_NjH9-4xT5Z+TI$m`&iVd#G(S;M8>lb2GslF)Fx1?=CM`*R^ zIM>=6 zZf8PKgPQG&B}1(fN6Nm!y{{jw=eaen!flSz+pG0|gOiswB%&x4muN0DHLVe8^qqF~ zxj_9(zn|E!>N*91X;7W;bl4=pzP+OO+s?NF|CsosriZHcc^ts7ZW2GE9DdZts` zl(takn=1|3dd;=Qm~7V#-QuR)6=ZJFB$=i&hYrU8*$|pFv87yG)1cE^uWM-t)l0%w zyJlMIpRcXSMW^|61#ZR%U4P*7|JQXA!a*Pa0w4eaAOHd&00JNY0w4eaAaG9+!2SR4 zsqTWaAOHd&00JNY0w4eaAOHd&00JQJx(Fzr|2vs4JLn%?AOHd&00JNY0w4eaAOHd& z00JNY0{07nu+trlsz0Pt-;~e)k^kQ>or`vW00@8p2!H?xfB*=900@8p2!KF`K*SS` z`px`b`~3ea4(2N$1Ogxc0w4eaAOHd&00JNY0w4eaAaI`%@H$=EmjJZS|B?USXZ?%j zfB*=900@8p2!H?xfB*=900@A9Lcr(lddtgk;Ogzh(^9G4u2wnk`#!GxKfBH6kW?e{(^D^B zYLq!yn6o^0@#U_aQyhO)d4y-ZY?_HEW^#_oulD6VnXj8LU# zG|o30OI%$j+aCUy-{&ZphYMvvdl2x;&#lz?9uJ@J`5e5VJ4Me}To&aOTSlEiQD|8~ zo%C8!uVtXdiB)UhbS#9GP$>-h94{Cy%uGGsQ@a|c&om`^R-SBDt{!@1z~?yLb?7wL zEGs6uo2n|G_V^sv3}rPa>}qN^o>9WBCDY(hl+H=~RlUWkoj16Bj#oBg$M)zaTvp(A zUUc{H8Cj@xRiAP8#Z*r-l(19{TPZ8~|CC>FFn`MYD)ZyacQY%@8<{7W1T#!)0iTb2 zD)MuYAC7!`Ukv?t z==(yqLbcEfp{dZ3&`|K}!OsQ%-{8*$-y4*IPX)(<-oR%AzZCdj;9CQWfhPlFfkFSD z`hU^?0sq^{1-w821V8`;KmY_l;Qk@7Z^}Iuw?1ONl}xbtL^hFTQ^`y+x$mTHIm;Fj z`9vX=+V_M_d5+3hHbcj!_dUK=c_N=lr4##4xa0APnls6rbcY;J45aCAGLzc>hV>Lk zW8ZPAL9@`((w$@?nMmZ5DK<%MNbWmkTb`i`ST>tU?<@3|Q|r>nESpTFs0DeO@*LUD zu!&SQliQcGEvNRgnRFqM*q5~{PvldCD49xR_GRqLbpZR)>y%Ub_ooa%LMP4B2(sx! 
zhD|5aqe*urUeR(Yp{$b9IVtUA6Kpn-OJ+vdwbd;BF`C#|jl9}9>E019bIWR9q?0*v zB$H-$PFSQYn`ZNDI+NacRF^7IMNEilGQD%$kmgw>nh19~z4M4EO=OeFL?Kg1vO6C$ zq`5>PnNvb1o!J?;NONSPkRnfZ9=1p`)XNF7o7_2Om69I?R2 zY9M)$VUs%_>5;OjLL#&CkVQ)IMxmWa<#!(Jk!BR>&;j>QoR^w{3XL3N*+ibr4aE#G z`9*BmY?AyN+HZ>UEcrkKDxJv_AW7XP9i}; zHnhzuR$?@h&SnZjj8&Xc##pM5$qYrh#5C%2seCds6t;?!G}RE5RCXxTD`wN_RBkBP zC8mL)#BY8m&@E0WlTJD_5l0U=SfXurTcIEu#J=YneoD*wtN>rRLGdRgZGo6{)8N z<@^6r{!u0W{}tv(neSxU%w=YZi8I?He;4_mkza}YhsgIuRwH8M*+?OBAmR^yIs7}} z|3caScZOHOT=-0w4G)K%q0fbWBlNM*4~5FHfdB}A00@8p2!O!WArSM7 z#hvTq?^A0i7zzAITLnu7|HRq~S_=R80Z%;cTrY<|vAzbSG;dHVStpA>zJ`L4#vij& zQ1V+nk1yCNSQ7dC8VW`xpR-pmQ~BVJS|SyzELq%EZEv-#ARCquM1Oy`s9=~?o5 zb~Ae9Sz?!GN8H($(NA`Xjg)@E6kBupqZY9xsUNqB&8+^2MQlmyAL|kud40S~Y$W!F zyTy8DKh`BSQu{}{#71sE+9y_%`$xLOMs|OwPpqc*2cw>$xU)aMKVXqt6a1J}ZprZX zcgu|we_yxU$ni(|-qlnZn=^0Z|jyD89&n_*HivTx7^72!~JqK=@0eD z^{hYGEAOQJfo{2x_xpR~dgAZvlWUp3caLWz?zB(+2W`sq+<&0A%#!?j`pb;$-`!hg zN&j8D$rr!H7p+Xq|CP`Gy^QQ&ewcX=^S_v%U~Vxo^ZQ%p)C*5Q00ck)1V8`;KmY_l z00ck)1VG?kB@iBTI}V67Zc&)L(hwGX18zrD-Hp23j?9uQH_DUkx_IMgnSKev%Tja2 z=Oh)i;4zQe@eB2!H?xfB*=900@8p2!H?xfB*>GDS?Rl zfYZ$X2mS7-wma~!JL9y=|6R)c`qY9W?w6dF^nJbb-?PJgq)V65e=p-cWRKs8-?RR00ck)1V8`;KmY_l00ck)1V8`;wg7=0&OwLY8QtUI$~Cb*sl98@|N9#U z^EX?-L%09}AOHd&00JNY0w4eaAOHd&00JOz_Xq?$QKw(c|2@j*|L#c5!L%ZON`LSI z0T2KI5C8!X009sH0T2KI5CDNYC$Q{}IOs-(!_lML!`t7oJzP_t|EMOS^u)i2wwsv9 zPOxqD0h2{;nH~_xm8!x^ue|h%^8NQ?lap16=c-FmOFovT2bfHX^|EkdqOsJNq_4%d zCN&$Ahm^g}&P%VX23?F&@lmzn)$M&XckZjI+zhlV(hu&F6REr9L$7J_Cg4=PIWnwZ zSY_7mhQ6)8EDG0!W@3U(P9*OR5zQOJ^h>WO`M+b^op&uPfB*=900@8p2!H?xfB*=9 z00@8p2y9&f%J=_~|8L#?foC890w4eaAOHd&00JNY0w4eaAaFMbD4+kkn4fhpzejI) zfdB}A00@8p2!H?xfB*=900@8p2y8h5Bd&pHe27thKT#2zExFmQUl(8Hn&sA#bUks% zIS?Hh8B>dSsVT@}O?XwR3$yJeJt}bWl+dh-b>;Ja7xPI6^ErCM3j{y_1V8`;KmY_l z00ck)1V8`;KwzsA7*SROoNKHGxOORv0oqOftcwBo{r^_)R`>@3AOHd&00JNY0w4ea zAOHd&00MVTARJzC1e`zR2>evwn*V8k*!#zWKR0mR`L*yzN%)iDw>p;Tzq|eluQ=a+ zM@Gqj^NtMM>jzIgIT*b(>2#>KzieHrin37T+OnkX7bU*fTH=~QQMsdKv(8nEY@g`- 
zx6IFaqa!2EcVCm0TWsnI`rotD7iOm}&c-fIojNxgThPS|vBQB_Y(Xq9#KgKREDFuo z-2BDZ+@*8pV$WPSbAIZ=<=ENT%dx3T7w6B+k&W}Sa~A{Sbcp7WywVVC4xO1jJ$32a z#aP0kdR>&47Gld>lc%;EPG&Okn69Q&r)svva#6Zo7n-JGHXS!z(hnFLGt|YpD2p83 z<+J+`UDGh#Ab@q*^tDfcnAeJ0dvqt)t6NoXN$n<20Q3XwTcug%nu`L}zrLz+N6(^e zRqv5LS5+;!$6$s{+7N9B&6?P1iBi2~w%zDr-Eya&l7Y;|kJZ`NkK(M-nq+FOhf#V-lvHhs@(Q7T;#c)6tvqh8t5{Ufbc#b(r* zF~0t=Yc$0=FE+SpyD6GYw2wQwvo0~CFOYFEr?s+Fl}g>MREF{gxpStEuvun<)`<4? z$tr`&IuDwjDU3QbtaOK#%H6Q92U%x!(hpgt&a#AK*D>lerYmkRnX-iFu~Xh?Jnnpr zF%gN&G`5s3Rv8(4Puf5-3Okgp3M;lP-eBr)w>CRL(`}p0Xy|IV5jq`MnnFdOvjQ)))HAAO|MbmAPHLpxCTT-kC2j1@+K@I1JERq43To?| zv{u{@G^?sIh}Kf&rFunN>|(dxIZIRT0JNX=DAujRo3&Uhd``@~@M za%w%sLbD~dWIFX0=~mxYJD_`nGjD(Vac?x0a=z=3LAysqLEa;p?&sd4VKW(+D6P%x zzr0aTvDU$%cY!)$)oYX@oe8BjHO#7JM4)k|>A9X^6A7Esk#gph+Lp@`!%ww7z9}e! zyvVJo(Q_JQ6G_W;lWIfNC@yg=%T<^)wp67SPcv@+nO!X_(`lcM)vrpdKrYRldG^w5 z?C^r6lDKlv^!(g&7cWemnY$QUIDNKwIx(G^$z)CyQq!k$h3xb~?DYJF*)vbhsTY}t z7j)|J*oE2CvlnLPre~j1FIJTTo}=?9b*ZXF%;T~7Il_HzmM&|jr=FXhnwedv%IQLa zoz9*<-CJc}SN7H2>1Mi6IILVK+f^H<1mq2`Hwk&|sng!*-fHes)KR@dO}kNT!ilVr=eO1-Akk5z*}(@;ZRpUc*m*N?K=MY`r&PkdTuDb;R+ zYqhRR&7LVzKcgChX`R!rYNltf^;NU>sY^Y)ri|wXrCpW_^>jiRv0V^MwT;HprZ1M2 z3#=B;)y?tMHQ^?rcbO#!89S!lB9$fWuM zi}HTX`@U!J`Ty4KSa=2kAOHd&00JNY0w4eaAOHd&00MW5fb#r5$F{p=4R$~P1V8`; zKmY_l00ck)1V8`;KmY``A_3*|e;4!L9n2T$4KEM?0T2KI5C8!X009sH0T2KI5CDOD zg1}zaz;TD)K~GNMs>hB^*6Q-4@}!?h<#8CT$!*SoLsYsX)r4cm)E@;XpZ~j<-*hlv zrZ>Dm00ck)1V8`;KmY_l00ck)1V8`;?i~Vq+{c}3tN^$o?n8b{{*T}P-#gt3PeA|# zKmY_l00ck)1V8`;KmY_l;Pn!~@Bd#fIUEN9AOHd&00JNY0w4eaAOHd&00Q?80p;`m zT~5~F`We@2kq<;7@y*SudJ{PN)cIq(kyFL{2=Gw=RcmxGkA zI#$Pv-l)Uj{^r|j=_hVaeJg6Oo_oa`m2%G2w~6(#a6>HLC`x>hE0>!>t5uSutNapI zUldwakytLaR_eSgT@~uK%H`<`vr`vmV`t`OW?zgQyltZgWAk&dgSMs*#ttu7yc5d{ z;|qo}3-MLw%iiddap&rOYjf0JixlN1*IFvJTSBv_7Lt8S0dMQGMZt2U;h&{gaZItj z5YL<$jGj5-E3RBjyGr#)Qr@YZahn%;jWv)~eEJc>h zxsI0?r%s)ljV)M87Gj43vDg9ui7j(Y>b2(KY&ssBnLRyq>D8jv$L0DQNKRb6(J(OGK#H!+(&5?RN ztg2b%T5^$J68Nivr5asF_vO;unP)G}5;yWC9*D1Qd&wJpmijKWR^Q2DO=!tntzp-J 
zr42i<=kT?9(NJ56zv-L2(P{E1x|T;Y6x1T>2umz*8@i$&x0VkIk=4*;Z}ifrbM=U| zcCOLjrFunNEOu>?!K=awxnW2bY@INQO+{r(JKXTb&{Bt+QcUjbd(j)^$(^Z<-0Ap7 z{upo!gT+mI)oaDxIZcbIA_qrb@J3&mbgrJYI#}kGi;bq#5Sp?`L$0&A(%|GJ!@F*s zH`=<`tz%=y@UL4_A!x01A%6Dx!Dz9t&it^*eLTd8xkp5E4d*UttE4X)L?E;TI>*KBNGR4DFH|4%oj^~FW%brj5o@%&bOb{x%6ldp&rQ#eXM#EY{JTj_6?${ zSv7k zxgtoaek#{!G@a2EqiIlWO>Y<&7X?Pks9!J54@Sl5b^5inLfu-ExkY`xtSKk8`l5D; zwMOCdH;>NI`F+}Xi+)nvx#U_ymAZVbDlYZ){aTvlTuL+gW+w}6{@?UuNf*N%5}TNV z4c3-Q)JawIPr0%-jVZ(H{076z&@V1FrFNr-zAC%8ditC?yl&?Dp{p8QtX0W#>mXJY zI%JLNH_`TmMQaaq4VwOwj-uN~BX@Q%dUkYEL8)Ho>nxwW`Qp>+S?V=o;x!z%w*8s4 zTWvF)m?wqx=ac@4*qDg)p{Sj97na4ASQ06HSy^XvYuEciSyVf7Lq(e6#l`w&atI?f zRfor|-i$98^^GrBEK-Ex`~O>UkpVt|00@8p2!H?xfB*=900@8p2!O!dC4le$-(8-t z1_B@e0w4eaAOHd&00JNY0w4eaTaf@h|KEze0-rzt1V8`;KmY_l00ck)1V8`;K;Z5Y zz~}#WmnW=&00@8p2!H?xfB*=900@8p2!Oy=B!JKVw_>lrClCMu5C8!X009sH0T2KI z5C8!XxVr@K`TyPJ32Pt#0w4eaAOHd&00JNY0w4eaAg~n)D4+kk7}3G}CwjvR1V8`; zKmY_l00ck)1V8`;KmY_l;LZq~bq$O-R{erlrzb>;b@t7VBv&3`6OUY4dh+SH`7>8u zJvAY=s`b*7%hmJE`S~j=t(jNz$t!bWCM)J9=Y<;+$!C_^^J1#}!t}MLoC8rZ#+7Rp zOO>Z@q}W`0_VSA>OHZ#%pTE9zdMZ7C{mB=vzgkK>RT3xCH`=9(h3vDJug$->(#V~C z@#(YADWCtlm_K(gU!ga=KmY_l00ck)1V8`;KmY_l00ck)1nzMHA@_(=Uwv@-+)-`y z0Kfmg$Ga8og8&GC00@8p2!H?xfB*=900@A*;Q59YnhyO_5!O{T(p6Enx0VzSI* z%s%EJW-#*ikuOI6B=SEZACLTKGik3WZ$3uLi#u z{QrW#8~pFVPXvE9_#cBG3jUkmcavLqfdB}A00@8p2!H?xfWUo9!0(Pa<&}oeI%{k_ zWo*qFTc?bzCycE(7+YCmD`jjYjIE=_)?>!jn6dSUv30=M+GlL-HnyV1)=pz9U~GAf zE%$c!n6uF?RYksdT_}~g_>haam}#M5+F?t@KG->5Q#(%2xWg zt@N0!G-E4G+Da#FrQ^2JxUKY2Tj?QNY0OqSVk;fCmF}{Y?y!~mZKZ>@QrECM=d6g$ zmR#)gvEHMho$jMlLUB`+g&PzjqAWEFJKRS)!bZ8WEDG0!W?qRLq0W^kJkD6QPg}O9 zE!!t8+b1mBIm>q1vdvnyCoJ1XEZc`I+oP84gO=_6mhHWk?T0PfLzeA_EZaWI_CUbB z$KPr1E;s8`1_=$CVvFbMMHSeds5|8>as1WArqr&NsjyY7No66Z<^PX6n7?K|%lt0$ z%goQv3cwFC-@&|%sWSreGV>hsG&9BInMr1h+0R5H{}lPt$gf3yF7jiMe;fIp$hSpS zBiAEkS_ODKayar(_#eW57XDQDKZk!T{IA3BqIH0U@Y!%aJQ{u|^!3nRgnlRViO@$v zKM?vJbtOO!iJ>=!&V`PJ4u^IG|0(!6%Ktw}+5d-w?+xAxR)d#jqAOHd& 
z00JNY0w8dY5>PI-wS2WM$znz1Ihiugv-aYr?Zsyva>t#rP-|2v?bULcr}{-t_KQwO z+y|Vk6?&XlP39I`v)#Kh-MiCD?$l^X%c7DSi}l4K*AP$H38(CYCzb5n+Q!xseUiuf zBqz-D{tf2ladY#Sxmhqb^X6twX||dSHrw^<;;USG3SWoneCi_JbcG>AsJK?yU@Q6ZBOKw|BT$6g}u|7$>Pjc8M`yI29J!&Hx zRnl**#u~*Pr{&I%*h_vnJEJQLW!0tzGP+j5uC)q=yzU)NTCm#L`Maa@_aUEq$jM7h zVY|MuP2XVjjflPx);B`>Mo?M*;-z{;Tx<#rsVN6~1^!-vPg(flrCLp>%Pp^UXVAJc zphQMjey^^Wal9-pi}H$F(baccLHCGX-@*6)@6l5a+y(&<009sH0T2KI5C8!X009sH zf!iQ}{Qov!K@|i*00ck)1V8`;KmY_l00ck)1nyA+$p7!rj)U7E00JNY0w4eaAOHd& z00JNY0w8c31eE;W&3wVZe2wk^@P*qr5UL;m0w4eaAOHd&00JNY0w4eaAOHeelfaK~WO-Vb|&gKr!7 zO(be z^wNlP^$D?F7H){;8%2pP@?5jrDsts=Q)snHl5|xp7u!v-sOsqD?dc1%Qx|7rXXa*R zUyL2x%%KNk^K-F-x`BhS!wX$?EQ~KuEerA0aMc@qF78}C(&bcBC<}F2NTqubx9U0BG1X9RBsi}%)D4+v#0WjTsqIP z)2FhhQd7xPawd~ao+3tF%1v9}x>VM+XSmySh~lwSx+3s$QC?{X)TNz0u}n=kD0-ts zYQpU1nlP1_$ty3L zoz(5D?PMn!_bzJw6*lMJbRj#NOH7~c^-sD@|8zTR`^PmDeEy0zdY1Zl%<3M;^HRG` zF(EW-E%n_PEnP)*Tc^5IEJquzbSWqysMJHg>=C@t7s!{c*{w^tsP9?EuP#!Zt!=_1 z{Q%v@+J2du%wK$L}A z2e@)3?sj5%9VcuL>~};}>zw1Y8r7ZnYG%P36;jSst;WuEAzdo9#Fk8>p;(pptJBMwuCX+ceT}ZIg+0)7?s#kB(*3n)q^+KiBn&EPq65q`4h|WqUQH`Yv}YI z+f`E`HGL{q$WGh%UAcX~b!+STt#-e1Ek#3da#O=mQ(B#Vqc?i3;9M20ty`uGM`2NF zt`ucSpNCADHZhskwPuN-E!Z~GMzLthy387lHOmd-o#}{LJHF6~tphE}3@F~aV#^xu z+JNg1_&3=)y55n7XR4Coi#9OS8ptC{%#UkDnvZX7t~n=I40I32S2yXM+IQAOHd&00JNY0w4eaAOHd&00JrjuiNA2ydUC_|05tk00ck)1V8`;KmY_l00ck) z1VG^aC7|X1?|T;a|GR(t7%c+<5C8!X009sH0T2KI5C8!X00Er<@_(HK{(=AqfB*=9 z00@8p2!H?xfB*=9!2L@=$^YHVA3K=8Wj;@T@B#r4009sH0T2KI5C8!X009sH0T8$c z2?X53PW8zyvfJEr`Q5{QO{(SppLQ@`VZOk8`W|c`Tm}IU009sH0T2KI5C8!X009sH z0T56K_}x(_SFVY5x3T5&xubqnrse;iq5JGp9HpfhNGRY_9rH?6Rdl;=V;Vf5vxM0C<`}ap)AT$GdYo(U|qwWBg1~Js8Oyg zi^6rGnV4Xc6Gq387UW00JNY0w4eaAOHd&00JNY0w4ea8Uf`0hy)M-0T2KI z5C8!X009sH0T2KI5V-#dDEYsO`E>{LIeNnj1V8`;KmY_l00ck)1V8`;KmY_l;2t2b z+vCqUXE>QFaV_DPAl6$ly-h6h71u70pY>b?IIxsv}onZI$+KfFKy1V8`;KmY_l z00ck)1V8`;KmY{pCjtS_u+LwSs%4?6<^NxEFkic$8i=-l00@8p2!H?xfB*=900@8p z2!H?xbO?CdPQQBpe<$+~4*G`|2!H?xfB*=900@8p2!H?xfB*=9zEt0|5{K0T2KI5C8!X009sH 
z0T2Lz2arH`V7qUaGJo^_e~xVrAOl7Jr_P)UD_hg&<}b~Jj4h9r|Nl37{vUk<;4kSr0H0<4C#?efCiCCE5l+J<2!H?x zfB*=900@8p2!H?xfB*=9z+E6P;C4D)Vf8(vzSEKbNdoG--{bZ>J<6L~d2_kle)ai( zLFV5$n6ENlVm`zC0e$}eN#^79D}bM1K1gD`KmY_l00ck)1V8`;KmY_l00ck)1is+} zd~T1E^SfIl8@qh^hF9Mh)Heq74Nqrd$gOXk5+_+He2O{6FfTU;qCW zeeVCGl>6Ue>da+khM8od>opK%AOHd&00JNY0w4eaAOHd&00JOz&l89aj8L+!-?Gn0 z+Jid>qDB&LWb!t`QNN9Fm(Nbs`y7GTr*p>eqLTmnS2(9_QA++ZIhR~Y->%y-5-w;~!e?u4!eBQCc z`9%lQiu`(HJp9(cqW@R@kNQ@>FU(F|oQ<8Co0)wvc5t&M4#wu^Vh1gT4#o~I7}YGqudNPvqtkm_ ztBlpND^jUdq_-kp za6=ZFb*@VDfVtgn2~BE4w}#_Aww`n=^_el8>ef@-Q)(gaoU3kc^vsxRb;Ro3VpD22 zii%2cv0ZFiy2vbXyxZ0zQ=#in!xOVy^+GI@6Z=+O-sq*2YxN1M69ycqA&XMIRn-0# zYf`%|6QJI5$G6!!)T`EI#&D}wOL0!Eh~kqxjIKJp(etCO)hDeUDl;6<@k>InLQ}ac zHMvEh*b-kA$UCFV@h)4pjPl-N4VMfJ#g~rth4BT&KK)TjRo8Z}ItU)QHoKu~HL)hp za8TeG#vSk6ynFib8@Z_KDbfG7+i^}_X>_xD$L%?%9KRXo6g_fo=&hP_$JZp+nLO%T zO(-sLt)(t<-@37Ly~o=-r)g2AD)o?G`L}qZJnLFrwE9I4^bzW1uE{TP^+mdL(D!L@ zwpQxAEL|1qs5!pH*2x|%<0z{!!`B{VB^-4TMIc3_UOl>DOnKvYkx7+$@9AIo{M;gBAa>XgFsHk@CxW1>3k%MpYMxP_EF0b#^ z|KHyI#<*=3a2&VeB#v{JJnhDo{Sj){QI%7>!RUslrcFDp1&waKmUb(MLpe9`-FcV9 z!EtVP)5@@z_orU*@|@ zQL8oWH#)`7<@w{EKTe!Hmy07G6!LgnHQViEvPVZuHt3z2&RkfKn{3s?1Mj|hxe`g| z$wy?zli$ZA>27#PuN)gWI^X1s1K%6NEl$ncpX=$FjskC-v; zJW9r7ICWL8d{6@3xOuqYT%Q^-SI{ptQX+$*WS+d3U!0NcvJ>$4KB!l${i?t2FRw&_^~tGI(kBJ>*1k#$BJJ0m`r;| z@0wBfUI*i0T~hMRoy9D)u8+B9SD*?Cu2PH$ww>8%s4n2oL#&nSi?o<4_& ziIFzV7`Bs7CPhyksHaqaPcmfm+B^IULp52JFYu*c;+BU~lO-A?8wc})I8&lcuv=x- z{8aS$|I>>2xA==Z``<6+834X5KLzk*@svCV!0F5G0m%jVBO-tR0tg_000IagfB*srAb`NdBM{90%VL)zUKjtA-~ab3@gwma@v``WJPY88 zsEfnmeX^1Z0tg_000IagfB*srAbW5b4N68x2%}(7bOlH+< z)qMED-+KAlth!&#I}f&I8cw&q^Io&nl_R$63#F*p(Iv-oI)=!qJJkF^r`8QWt1bTl zK&`0R=w1A%9Ddw2cV*U?ivFn}{uHh@nKiCg%@xx#-A*Q-RZHp{*_>P$Z$$I|7ZmXy z`3-=th(F5D0Q^?`Li|L0U;Z878{%u?tKx-AeiyTW2q1s}0tg_000IagfB*sryoCaK zR#nwZM(ZtuiMwiKt@_(pyNtsu4f{0R#|0009ILKmY**5V(8<bjmY@#6_Grh$UiX^r zvgOUqELpYXXoPj`M35*L{R4Kx>QwKy+>Q*?nOoOr?$B!6cCCJ8`Q4h8=ySp~J4>hR 
zR%)NHvUID~OmsNDWV&{1QI6_aZp&=U9hcvA*Z(D`v^sJNCUqbqt%RsY009ILKmY**5I_I{1Q0-A`~{f* zkN-Yo3lKm60R#|0009ILKmY**5ZFq9`Ttfz)FXfZ0tg_000IagfB*srATa&{!TdiX z{wlx!PhN3B009ILKmY**5I_I{1Q0*~fiV}D)ZVUIc1xb_&~9bceg9uZ{6P_C5I_I{ z1Q0*~0R#|0U~~cI|D#*c5&;AdKmY**5I_I{1Q0*~fpHZG=KrdARi6KUT(^MS2q1s} z0tg_000IagfB*srAh3bJy7T|5;;cOX{{}6&836uOffAAb2zD(2Jyf6|HKam+$wq4sA|_`>M8$9y<+dq__N8>X4hNt zoMo$39h4eo$E%t(&t9>;wfGJZ*DEii#*OPYA1O{8w-1EZt#W?q5&sIka(G9^-;vB3 zZcL}zTeyzn#o?mcl+ULo+o)e0C2A1lhz1R~3jTJza!f|pyK#h$=eXvgHJsx45mKz{ zni3?gmsuY1w{5A>?O5(`j^{?mG3b{PBB~5D^1hG}F2{_yit5BX)iG9!aj(u0JDiZ_RNW{VJmZs`Gn`uV8`{gPgp zmMgz9edS%d9`A!Vzw-1J>yCS+uUb~i9dMmLsaKBQlJRd(t~x$m)tuIXy;$un!uxab zSQFi@Je8Vcvq9oWLydx+60RjYwygORa;Mxlv{TGlEjZ?Ns#epq2Tv^ho0ZR{rs-EF zd&MdGmBH%6QKd}TFUkk1T>1W_OuuEdZg<)ZbFJDkn}Y{xzgBs2^9*n9oEjzBOeVVC zH}uMVGSS`1L^Y?`v;vN}AP=$5;6dBJS@~>grhav5_c&F*RR9zXAKr8K^?nKOO1Jt2 zy>e2~umZ diff --git a/pkg/share/share.go b/pkg/share/share.go index 8a8295e5f6..d120f5f20e 100644 --- a/pkg/share/share.go +++ b/pkg/share/share.go @@ -24,7 +24,6 @@ import ( userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/internal/http/services/owncloud/ocs/conversions" "github.com/cs3org/reva/pkg/utils" "google.golang.org/genproto/protobuf/field_mask" ) @@ -118,9 +117,7 @@ func MatchesFilter(share *collaboration.Share, filter *collaboration.Filter) boo case collaboration.Filter_TYPE_GRANTEE_TYPE: return share.Grantee.Type == filter.GetGranteeType() case collaboration.Filter_TYPE_EXCLUDE_DENIALS: - // This filter type is used to filter out "denial shares". These are currently implemented by having the permission "0". - // I.e. if the permission is 0 we don't want to show it. 
- return int(conversions.RoleFromResourcePermissions(share.Permissions.Permissions).OCSPermissions()) != 0 + return share.Permissions.Permissions.DenyGrant default: return false } diff --git a/pkg/smtpclient/smtpclient.go b/pkg/smtpclient/smtpclient.go index 3d143b1874..85fc89a5ca 100644 --- a/pkg/smtpclient/smtpclient.go +++ b/pkg/smtpclient/smtpclient.go @@ -34,13 +34,13 @@ import ( // SMTPCredentials stores the credentials required to connect to an SMTP server. type SMTPCredentials struct { - SenderLogin string `mapstructure:"sender_login" docs:";The login to be used by sender."` - SenderMail string `mapstructure:"sender_mail" docs:";The email to be used to send mails."` - SenderPassword string `mapstructure:"sender_password" docs:";The sender's password."` - SMTPServer string `mapstructure:"smtp_server" docs:";The hostname of the SMTP server."` - SMTPPort int `mapstructure:"smtp_port" docs:"587;The port on which the SMTP daemon is running."` - DisableAuth bool `mapstructure:"disable_auth" docs:"false;Whether to disable SMTP auth."` - LocalName string `mapstructure:"local_name" docs:";The host name to be used for unauthenticated SMTP."` + SenderLogin string `docs:";The login to be used by sender." mapstructure:"sender_login"` + SenderMail string `docs:";The email to be used to send mails." mapstructure:"sender_mail"` + SenderPassword string `docs:";The sender's password." mapstructure:"sender_password"` + SMTPServer string `docs:";The hostname of the SMTP server." mapstructure:"smtp_server"` + SMTPPort int `docs:"587;The port on which the SMTP daemon is running." mapstructure:"smtp_port"` + DisableAuth bool `docs:"false;Whether to disable SMTP auth." mapstructure:"disable_auth"` + LocalName string `docs:";The host name to be used for unauthenticated SMTP." mapstructure:"local_name"` } // NewSMTPCredentials creates a new SMTPCredentials object with the details of the passed object with sane defaults. 
diff --git a/pkg/storage/favorite/loader/loader.go b/pkg/storage/favorite/loader/loader.go index 2d9abc1d44..b468144989 100644 --- a/pkg/storage/favorite/loader/loader.go +++ b/pkg/storage/favorite/loader/loader.go @@ -21,5 +21,6 @@ package loader import ( // Load storage favorite drivers. _ "github.com/cs3org/reva/pkg/storage/favorite/memory" + _ "github.com/cs3org/reva/pkg/storage/favorite/sql" // Add your own here. ) diff --git a/pkg/cbox/favorite/sql/sql.go b/pkg/storage/favorite/sql/sql.go similarity index 100% rename from pkg/cbox/favorite/sql/sql.go rename to pkg/storage/favorite/sql/sql.go diff --git a/pkg/storage/fs/loader/loader.go b/pkg/storage/fs/loader/loader.go index 0f4752ac85..370ba393d4 100644 --- a/pkg/storage/fs/loader/loader.go +++ b/pkg/storage/fs/loader/loader.go @@ -31,10 +31,6 @@ import ( _ "github.com/cs3org/reva/pkg/storage/fs/local" _ "github.com/cs3org/reva/pkg/storage/fs/localhome" _ "github.com/cs3org/reva/pkg/storage/fs/nextcloud" - _ "github.com/cs3org/reva/pkg/storage/fs/ocis" - _ "github.com/cs3org/reva/pkg/storage/fs/owncloud" - _ "github.com/cs3org/reva/pkg/storage/fs/owncloudsql" _ "github.com/cs3org/reva/pkg/storage/fs/s3" - _ "github.com/cs3org/reva/pkg/storage/fs/s3ng" // Add your own here. ) diff --git a/pkg/storage/fs/local/local.go b/pkg/storage/fs/local/local.go index 7ecbd04d1d..504cb835a9 100644 --- a/pkg/storage/fs/local/local.go +++ b/pkg/storage/fs/local/local.go @@ -32,8 +32,8 @@ func init() { } type config struct { - Root string `mapstructure:"root" docs:"/var/tmp/reva/;Path of root directory for user storage."` - ShareFolder string `mapstructure:"share_folder" docs:"/MyShares;Path for storing share references."` + Root string `docs:"/var/tmp/reva/;Path of root directory for user storage." mapstructure:"root"` + ShareFolder string `docs:"/MyShares;Path for storing share references." 
mapstructure:"share_folder"` } func (c *config) ApplyDefaults() { diff --git a/pkg/storage/fs/localhome/localhome.go b/pkg/storage/fs/localhome/localhome.go index f57534c2df..fae49c711e 100644 --- a/pkg/storage/fs/localhome/localhome.go +++ b/pkg/storage/fs/localhome/localhome.go @@ -33,9 +33,9 @@ func init() { } type config struct { - Root string `mapstructure:"root" docs:"/var/tmp/reva/;Path of root directory for user storage."` - ShareFolder string `mapstructure:"share_folder" docs:"/MyShares;Path for storing share references."` - UserLayout string `mapstructure:"user_layout" docs:"{{.Username}};Template for user home directories"` + Root string `docs:"/var/tmp/reva/;Path of root directory for user storage." mapstructure:"root"` + ShareFolder string `docs:"/MyShares;Path for storing share references." mapstructure:"share_folder"` + UserLayout string `docs:"{{.Username}};Template for user home directories" mapstructure:"user_layout"` } func (c *config) ApplyDefaults() { diff --git a/pkg/storage/fs/ocis/blobstore/blobstore.go b/pkg/storage/fs/ocis/blobstore/blobstore.go deleted file mode 100644 index aa2bb77d24..0000000000 --- a/pkg/storage/fs/ocis/blobstore/blobstore.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package blobstore - -import ( - "bufio" - "io" - "os" - "path/filepath" - - "github.com/pkg/errors" -) - -// Blobstore provides an interface to an filesystem based blobstore. -type Blobstore struct { - root string -} - -// New returns a new Blobstore. -func New(root string) (*Blobstore, error) { - err := os.MkdirAll(root, 0700) - if err != nil { - return nil, err - } - - return &Blobstore{ - root: root, - }, nil -} - -// Upload stores some data in the blobstore under the given key. -func (bs *Blobstore) Upload(key string, data io.Reader) error { - f, err := os.OpenFile(bs.path(key), os.O_CREATE|os.O_WRONLY, 0700) - if err != nil { - return errors.Wrapf(err, "could not open blob '%s' for writing", key) - } - - w := bufio.NewWriter(f) - _, err = w.ReadFrom(data) - if err != nil { - return errors.Wrapf(err, "could not write blob '%s'", key) - } - - return w.Flush() -} - -// Download retrieves a blob from the blobstore for reading. -func (bs *Blobstore) Download(key string) (io.ReadCloser, error) { - file, err := os.Open(bs.path(key)) - if err != nil { - return nil, errors.Wrapf(err, "could not read blob '%s'", key) - } - return file, nil -} - -// Delete deletes a blob from the blobstore. 
-func (bs *Blobstore) Delete(key string) error { - err := os.Remove(bs.path(key)) - if err != nil { - return errors.Wrapf(err, "could not delete blob '%s'", key) - } - return nil -} - -func (bs *Blobstore) path(key string) string { - return filepath.Join(bs.root, filepath.Clean(filepath.Join("/", key))) -} diff --git a/pkg/storage/fs/ocis/blobstore/blobstore_suite_test.go b/pkg/storage/fs/ocis/blobstore/blobstore_suite_test.go deleted file mode 100644 index ae3e86e9f0..0000000000 --- a/pkg/storage/fs/ocis/blobstore/blobstore_suite_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package blobstore_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -func TestBlobstore(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Blobstore Suite") -} diff --git a/pkg/storage/fs/ocis/blobstore/blobstore_test.go b/pkg/storage/fs/ocis/blobstore/blobstore_test.go deleted file mode 100644 index 17e7e77419..0000000000 --- a/pkg/storage/fs/ocis/blobstore/blobstore_test.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package blobstore_test - -import ( - "bytes" - "io" - "os" - "path" - - "github.com/cs3org/reva/pkg/storage/fs/ocis/blobstore" - "github.com/cs3org/reva/tests/helpers" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Blobstore", func() { - var ( - tmpRoot string - key string - blobPath string - data []byte - - bs *blobstore.Blobstore - ) - - BeforeEach(func() { - var err error - tmpRoot, err = helpers.TempDir("reva-unit-tests-*-root") - Expect(err).ToNot(HaveOccurred()) - - data = []byte("1234567890") - key = "foo" - blobPath = path.Join(tmpRoot, "blobs", key) - - bs, err = blobstore.New(path.Join(tmpRoot, "blobs")) - Expect(err).ToNot(HaveOccurred()) - }) - - AfterEach(func() { - if tmpRoot != "" { - os.RemoveAll(tmpRoot) - } - }) - - It("creates the root directory if it doesn't exist", func() { - _, err := os.Stat(path.Join(tmpRoot, "blobs")) - Expect(err).ToNot(HaveOccurred()) - }) - - Describe("Upload", func() { - It("writes the blob", func() { - err := bs.Upload(key, bytes.NewReader(data)) - Expect(err).ToNot(HaveOccurred()) - - writtenBytes, err := os.ReadFile(blobPath) - Expect(err).ToNot(HaveOccurred()) - Expect(writtenBytes).To(Equal(data)) - }) - }) - - Context("with an existing blob", func() { - BeforeEach(func() { - Expect(os.WriteFile(blobPath, data, 0700)).To(Succeed()) - }) - - Describe("Download", func() { - It("cleans the key", func() { - reader, err := bs.Download("../" + key) - Expect(err).ToNot(HaveOccurred()) - - readData, err := io.ReadAll(reader) - Expect(err).ToNot(HaveOccurred()) - Expect(readData).To(Equal(data)) - }) - - It("returns a reader to the blob", func() { - reader, err := bs.Download(key) - Expect(err).ToNot(HaveOccurred()) - - readData, err := io.ReadAll(reader) - Expect(err).ToNot(HaveOccurred()) - Expect(readData).To(Equal(data)) - }) - }) - - Describe("Delete", func() { - It("deletes the blob", func() { - _, err := os.Stat(blobPath) - Expect(err).ToNot(HaveOccurred()) - - err = bs.Delete(key) - Expect(err).ToNot(HaveOccurred()) - - _, err = os.Stat(blobPath) - Expect(err).To(HaveOccurred()) - }) - }) - }) - -}) diff --git a/pkg/storage/fs/ocis/ocis.go b/pkg/storage/fs/ocis/ocis.go deleted 
file mode 100644 index 252c351bc2..0000000000 --- a/pkg/storage/fs/ocis/ocis.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package ocis - -import ( - "context" - "path" - - "github.com/cs3org/reva/pkg/storage" - "github.com/cs3org/reva/pkg/storage/fs/ocis/blobstore" - "github.com/cs3org/reva/pkg/storage/fs/registry" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" -) - -func init() { - registry.Register("ocis", New) -} - -// New returns an implementation to of the storage.FS interface that talk to -// a local filesystem. 
-func New(ctx context.Context, m map[string]interface{}) (storage.FS, error) { - o, err := options.New(m) - if err != nil { - return nil, err - } - - bs, err := blobstore.New(path.Join(o.Root, "blobs")) - if err != nil { - return nil, err - } - - return decomposedfs.NewDefault(m, bs) -} diff --git a/pkg/storage/fs/ocis/ocis_suite_test.go b/pkg/storage/fs/ocis/ocis_suite_test.go deleted file mode 100644 index 21cb044f2d..0000000000 --- a/pkg/storage/fs/ocis/ocis_suite_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package ocis_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestOcis(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Ocis Suite") -} diff --git a/pkg/storage/fs/ocis/ocis_test.go b/pkg/storage/fs/ocis/ocis_test.go deleted file mode 100644 index be7e255882..0000000000 --- a/pkg/storage/fs/ocis/ocis_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package ocis_test - -import ( - "context" - "os" - - "github.com/cs3org/reva/pkg/storage/fs/ocis" - "github.com/cs3org/reva/tests/helpers" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -var _ = Describe("Ocis", func() { - var ( - options map[string]interface{} - tmpRoot string - ) - - BeforeEach(func() { - tmpRoot, err := helpers.TempDir("reva-unit-tests-*-root") - Expect(err).ToNot(HaveOccurred()) - - options = map[string]interface{}{ - "root": tmpRoot, - "enable_home": true, - "share_folder": "/Shares", - } - }) - - AfterEach(func() { - if tmpRoot != "" { - os.RemoveAll(tmpRoot) - } - }) - - Describe("New", func() { - It("returns a new instance", func() { - _, err := ocis.New(context.Background(), options) - Expect(err).ToNot(HaveOccurred()) - }) - }) -}) diff --git a/pkg/storage/fs/owncloud/owncloud.go b/pkg/storage/fs/owncloud/owncloud.go deleted file mode 100644 index eb23d12257..0000000000 --- a/pkg/storage/fs/owncloud/owncloud.go +++ /dev/null @@ -1,2395 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package owncloud - -import ( - "context" - "encoding/hex" - "fmt" - "io" - iofs "io/fs" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" - "github.com/cs3org/reva/internal/grpc/services/storageprovider" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/logger" - "github.com/cs3org/reva/pkg/mime" - "github.com/cs3org/reva/pkg/rgrpc/todo/pool" - "github.com/cs3org/reva/pkg/sharedconf" - "github.com/cs3org/reva/pkg/storage" - "github.com/cs3org/reva/pkg/storage/fs/registry" - "github.com/cs3org/reva/pkg/storage/utils/ace" - "github.com/cs3org/reva/pkg/storage/utils/chunking" - "github.com/cs3org/reva/pkg/storage/utils/templates" - "github.com/cs3org/reva/pkg/utils/cfg" - "github.com/gomodule/redigo/redis" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -const ( - // Currently,extended file attributes have four separated - // namespaces (user, trusted, security and system) followed by a dot. 
- // A non root user can only manipulate the user. namespace, which is what - // we will use to store ownCloud specific metadata. To prevent name - // collisions with other apps We are going to introduce a sub namespace - // "user.oc.". - ocPrefix string = "user.oc." - - // idAttribute is the name of the filesystem extended attribute that is used to store the uuid in. - idAttribute string = ocPrefix + "id" - - // SharePrefix is the prefix for sharing related extended attributes. - sharePrefix string = ocPrefix + "grant." // grants are similar to acls, but they are not propagated down the tree when being changed - trashOriginPrefix string = ocPrefix + "o" - mdPrefix string = ocPrefix + "md." // arbitrary metadata - favPrefix string = ocPrefix + "fav." // favorite flag, per user - etagPrefix string = ocPrefix + "etag." // allow overriding a calculated etag with one from the extended attributes - checksumPrefix string = ocPrefix + "cs." - checksumsKey string = "http://owncloud.org/ns/checksums" - favoriteKey string = "http://owncloud.org/ns/favorite" -) - -var defaultPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ - // no permissions -} -var ownerPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ - // all permissions - AddGrant: true, - CreateContainer: true, - Delete: true, - GetPath: true, - GetQuota: true, - InitiateFileDownload: true, - InitiateFileUpload: true, - ListContainer: true, - ListFileVersions: true, - ListGrants: true, - ListRecycle: true, - Move: true, - PurgeRecycle: true, - RemoveGrant: true, - RestoreFileVersion: true, - RestoreRecycleItem: true, - Stat: true, - UpdateGrant: true, -} - -func init() { - registry.Register("owncloud", New) -} - -type config struct { - DataDirectory string `mapstructure:"datadirectory"` - UploadInfoDir string `mapstructure:"upload_info_dir"` - DeprecatedShareDirectory string `mapstructure:"sharedirectory"` - ShareFolder string `mapstructure:"share_folder"` - UserLayout 
string `mapstructure:"user_layout"` - Redis string `mapstructure:"redis"` - EnableHome bool `mapstructure:"enable_home"` - Scan bool `mapstructure:"scan"` - UserProviderEndpoint string `mapstructure:"userprovidersvc"` -} - -func (c *config) ApplyDefaults() { - if c.Redis == "" { - c.Redis = ":6379" - } - if c.UserLayout == "" { - c.UserLayout = "{{.Id.OpaqueId}}" - } - if c.UploadInfoDir == "" { - c.UploadInfoDir = "/var/tmp/reva/uploadinfo" - } - // fallback for old config - if c.DeprecatedShareDirectory != "" { - c.ShareFolder = c.DeprecatedShareDirectory - } - if c.ShareFolder == "" { - c.ShareFolder = "/Shares" - } - // ensure share folder always starts with slash - c.ShareFolder = filepath.Join("/", c.ShareFolder) - - if !c.Scan { - // TODO: check if it was set in the config - c.Scan = true - } - - c.UserProviderEndpoint = sharedconf.GetGatewaySVC(c.UserProviderEndpoint) -} - -// New returns an implementation to of the storage.FS interface that talk to -// a local filesystem. -func New(ctx context.Context, m map[string]interface{}) (storage.FS, error) { - var c config - if err := cfg.Decode(m, &c); err != nil { - return nil, err - } - - // c.DataDirectory should never end in / unless it is the root? - c.DataDirectory = filepath.Clean(c.DataDirectory) - - // create datadir if it does not exist - err := os.MkdirAll(c.DataDirectory, 0700) - if err != nil { - logger.New().Error().Err(err). - Str("path", c.DataDirectory). - Msg("could not create datadir") - } - - err = os.MkdirAll(c.UploadInfoDir, 0700) - if err != nil { - logger.New().Error().Err(err). - Str("path", c.UploadInfoDir). 
- Msg("could not create uploadinfo dir") - } - - pool := &redis.Pool{ - - MaxIdle: 3, - IdleTimeout: 240 * time.Second, - - Dial: func() (redis.Conn, error) { - c, err := redis.Dial("tcp", c.Redis) - if err != nil { - return nil, err - } - return c, err - }, - - TestOnBorrow: func(c redis.Conn, t time.Time) error { - _, err := c.Do("PING") - return err - }, - } - - return &ocfs{ - c: &c, - pool: pool, - chunkHandler: chunking.NewChunkHandler(c.UploadInfoDir), - }, nil -} - -type ocfs struct { - c *config - pool *redis.Pool - chunkHandler *chunking.ChunkHandler -} - -func (fs *ocfs) Shutdown(ctx context.Context) error { - return fs.pool.Close() -} - -// scan files and add uuid to path mapping to kv store. -func (fs *ocfs) scanFiles(ctx context.Context, conn redis.Conn) { - if fs.c.Scan { - fs.c.Scan = false // TODO ... in progress use mutex ? - log := appctx.GetLogger(ctx) - log.Debug().Str("path", fs.c.DataDirectory).Msg("scanning data directory") - err := filepath.Walk(fs.c.DataDirectory, func(path string, info os.FileInfo, err error) error { - if err != nil { - log.Error().Str("path", path).Err(err).Msg("error accessing path") - return filepath.SkipDir - } - // TODO(jfd) skip versions folder only if direct in users home dir - // we need to skip versions, otherwise a lookup by id might resolve to a version - if strings.Contains(path, "files_versions") { - log.Debug().Str("path", path).Err(err).Msg("skipping versions") - return filepath.SkipDir - } - - // reuse connection to store file ids - id := readOrCreateID(context.Background(), path, nil) - _, err = conn.Do("SET", id, path) - if err != nil { - log.Error().Str("path", path).Err(err).Msg("error caching id") - // continue scanning - return nil - } - - log.Debug().Str("path", path).Str("id", id).Msg("scanned path") - return nil - }) - if err != nil { - log.Error().Err(err).Str("path", fs.c.DataDirectory).Msg("error scanning data directory") - } - } -} - -// owncloud stores files in the files subfolder -// the 
incoming path starts with /, so we need to insert the files subfolder into the path -// and prefix the data directory -// TODO the path handed to a storage provider should not contain the username. -func (fs *ocfs) toInternalPath(ctx context.Context, sp string) (ip string) { - if fs.c.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - // The inner filepath.Join prevents the path from breaking out of - // //files/ - ip = filepath.Join(fs.c.DataDirectory, layout, "files", filepath.Join("/", sp)) - } else { - // trim all / - sp = strings.Trim(sp, "/") - // p = "" or - // p = or - // p = /foo/bar.txt - segments := strings.SplitN(sp, "/", 2) - - if len(segments) == 1 && segments[0] == "" { - ip = fs.c.DataDirectory - return - } - - // parts[0] contains the username or userid. - u, err := fs.getUser(ctx, segments[0]) - if err != nil { - // TODO return invalid internal path? - return - } - layout := templates.WithUser(u, fs.c.UserLayout) - - if len(segments) == 1 { - // parts = "" - ip = filepath.Join(fs.c.DataDirectory, layout, "files") - } else { - // parts = "", "foo/bar.txt" - ip = filepath.Join(fs.c.DataDirectory, layout, "files", segments[1]) - } - } - return -} - -func (fs *ocfs) toInternalShadowPath(ctx context.Context, sp string) (internal string) { - if fs.c.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - internal = filepath.Join(fs.c.DataDirectory, layout, "shadow_files", sp) - } else { - // trim all / - sp = strings.Trim(sp, "/") - // p = "" or - // p = or - // p = /foo/bar.txt - segments := strings.SplitN(sp, "/", 2) - - if len(segments) == 1 && segments[0] == "" { - internal = fs.c.DataDirectory - return - } - - // parts[0] contains the username or userid. - u, err := fs.getUser(ctx, segments[0]) - if err != nil { - // TODO return invalid internal path? 
- return - } - layout := templates.WithUser(u, fs.c.UserLayout) - - if len(segments) == 1 { - // parts = "" - internal = filepath.Join(fs.c.DataDirectory, layout, "shadow_files") - } else { - // parts = "", "foo/bar.txt" - internal = filepath.Join(fs.c.DataDirectory, layout, "shadow_files", segments[1]) - } - } - return -} - -// ownloud stores versions in the files_versions subfolder -// the incoming path starts with /, so we need to insert the files subfolder into the path -// and prefix the data directory -// TODO the path handed to a storage provider should not contain the username. -func (fs *ocfs) getVersionsPath(ctx context.Context, ip string) string { - // ip = /path/to/data//files/foo/bar.txt - // remove data dir - if fs.c.DataDirectory != "/" { - // fs.c.DataDirectory is a clean path, so it never ends in / - ip = strings.TrimPrefix(ip, fs.c.DataDirectory) - } - // ip = //files/foo/bar.txt - parts := strings.SplitN(ip, "/", 4) - - // parts[1] contains the username or userid. - u, err := fs.getUser(ctx, parts[1]) - if err != nil { - // TODO return invalid internal path? - return "" - } - layout := templates.WithUser(u, fs.c.UserLayout) - - switch len(parts) { - case 3: - // parts = "", "" - return filepath.Join(fs.c.DataDirectory, layout, "files_versions") - case 4: - // parts = "", "", "foo/bar.txt" - return filepath.Join(fs.c.DataDirectory, layout, "files_versions", filepath.Join("/", parts[3])) - default: - return "" // TODO Must not happen? - } -} - -// owncloud stores trashed items in the files_trashbin subfolder of a users home. 
-func (fs *ocfs) getRecyclePath(ctx context.Context) (string, error) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") - return "", err - } - layout := templates.WithUser(u, fs.c.UserLayout) - return filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/files"), nil -} - -func (fs *ocfs) getVersionRecyclePath(ctx context.Context) (string, error) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") - return "", err - } - layout := templates.WithUser(u, fs.c.UserLayout) - return filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/files_versions"), nil -} - -func (fs *ocfs) toStoragePath(ctx context.Context, ip string) (sp string) { - if fs.c.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - trim := filepath.Join(fs.c.DataDirectory, layout, "files") - sp = strings.TrimPrefix(ip, trim) - // root directory - if sp == "" { - sp = "/" - } - } else { - // ip = /data//files/foo/bar.txt - // remove data dir - if fs.c.DataDirectory != "/" { - // fs.c.DataDirectory is a clean path, so it never ends in / - ip = strings.TrimPrefix(ip, fs.c.DataDirectory) - // ip = //files/foo/bar.txt - } - - segments := strings.SplitN(ip, "/", 4) - // parts = "", "", "files", "foo/bar.txt" - switch len(segments) { - case 1: - sp = "/" - case 2: - sp = filepath.Join("/", segments[1]) - case 3: - sp = filepath.Join("/", segments[1]) - default: - sp = filepath.Join("/", segments[1], segments[3]) - } - } - log := appctx.GetLogger(ctx) - log.Debug().Str("driver", "ocfs").Str("ipath", ip).Str("spath", sp).Msg("toStoragePath") - return -} - -func (fs *ocfs) toStorageShadowPath(ctx context.Context, ip string) (sp string) { - if fs.c.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - trim := 
filepath.Join(fs.c.DataDirectory, layout, "shadow_files") - sp = strings.TrimPrefix(ip, trim) - } else { - // ip = /data//shadow_files/foo/bar.txt - // remove data dir - if fs.c.DataDirectory != "/" { - // fs.c.DataDirectory is a clean path, so it never ends in / - ip = strings.TrimPrefix(ip, fs.c.DataDirectory) - // ip = //shadow_files/foo/bar.txt - } - - segments := strings.SplitN(ip, "/", 4) - // parts = "", "", "shadow_files", "foo/bar.txt" - switch len(segments) { - case 1: - sp = "/" - case 2: - sp = filepath.Join("/", segments[1]) - case 3: - sp = filepath.Join("/", segments[1]) - default: - sp = filepath.Join("/", segments[1], segments[3]) - } - } - appctx.GetLogger(ctx).Debug().Str("driver", "ocfs").Str("ipath", ip).Str("spath", sp).Msg("toStorageShadowPath") - return -} - -// TODO the owner needs to come from a different place. -func (fs *ocfs) getOwner(ip string) string { - ip = strings.TrimPrefix(ip, fs.c.DataDirectory) - parts := strings.SplitN(ip, "/", 3) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// TODO cache user lookup. -func (fs *ocfs) getUser(ctx context.Context, usernameOrID string) (id *userpb.User, err error) { - u := ctxpkg.ContextMustGetUser(ctx) - // check if username matches and id is set - if u.Username == usernameOrID && u.Id != nil && u.Id.OpaqueId != "" { - return u, nil - } - // check if userid matches and username is set - if u.Id != nil && u.Id.OpaqueId == usernameOrID && u.Username != "" { - return u, nil - } - // look up at the userprovider - - // parts[0] contains the username or userid. use user service to look up id - c, err := pool.GetUserProviderServiceClient(pool.Endpoint(fs.c.UserProviderEndpoint)) - if err != nil { - appctx.GetLogger(ctx). - Error().Err(err). - Str("userprovidersvc", fs.c.UserProviderEndpoint). - Str("usernameOrID", usernameOrID). 
- Msg("could not get user provider client") - return nil, err - } - res, err := c.GetUser(ctx, &userpb.GetUserRequest{ - UserId: &userpb.UserId{OpaqueId: usernameOrID}, - }) - if err != nil { - appctx.GetLogger(ctx). - Error().Err(err). - Str("userprovidersvc", fs.c.UserProviderEndpoint). - Str("usernameOrID", usernameOrID). - Msg("could not get user") - return nil, err - } - - if res.Status.Code == rpc.Code_CODE_NOT_FOUND { - appctx.GetLogger(ctx). - Error(). - Str("userprovidersvc", fs.c.UserProviderEndpoint). - Str("usernameOrID", usernameOrID). - Interface("status", res.Status). - Msg("user not found") - return nil, fmt.Errorf("user not found") - } - - if res.Status.Code != rpc.Code_CODE_OK { - appctx.GetLogger(ctx). - Error(). - Str("userprovidersvc", fs.c.UserProviderEndpoint). - Str("usernameOrID", usernameOrID). - Interface("status", res.Status). - Msg("user lookup failed") - return nil, fmt.Errorf("user lookup failed") - } - return res.User, nil -} - -// permissionSet returns the permission set for the current user. 
-func (fs *ocfs) permissionSet(ctx context.Context, owner *userpb.UserId) *provider.ResourcePermissions { - if owner == nil { - return &provider.ResourcePermissions{ - Stat: true, - } - } - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - return &provider.ResourcePermissions{ - // no permissions - } - } - if u.Id == nil { - return &provider.ResourcePermissions{ - // no permissions - } - } - if u.Id.OpaqueId == owner.OpaqueId && u.Id.Idp == owner.Idp { - return &provider.ResourcePermissions{ - // owner has all permissions - AddGrant: true, - CreateContainer: true, - Delete: true, - GetPath: true, - GetQuota: true, - InitiateFileDownload: true, - InitiateFileUpload: true, - ListContainer: true, - ListFileVersions: true, - ListGrants: true, - ListRecycle: true, - Move: true, - PurgeRecycle: true, - RemoveGrant: true, - RestoreFileVersion: true, - RestoreRecycleItem: true, - Stat: true, - UpdateGrant: true, - } - } - // TODO fix permissions for share recipients by traversing reading acls up to the root? cache acls for the parent node and reuse it - return &provider.ResourcePermissions{ - AddGrant: true, - CreateContainer: true, - Delete: true, - GetPath: true, - GetQuota: true, - InitiateFileDownload: true, - InitiateFileUpload: true, - ListContainer: true, - ListFileVersions: true, - ListGrants: true, - ListRecycle: true, - Move: true, - PurgeRecycle: true, - RemoveGrant: true, - RestoreFileVersion: true, - RestoreRecycleItem: true, - Stat: true, - UpdateGrant: true, - } -} -func (fs *ocfs) convertToResourceInfo(ctx context.Context, fi os.FileInfo, ip string, sp string, c redis.Conn, mdKeys []string) *provider.ResourceInfo { - id := readOrCreateID(ctx, ip, c) - - etag := calcEtag(ctx, fi) - - if val, err := xattr.Get(ip, etagPrefix+etag); err == nil { - appctx.GetLogger(ctx).Debug(). - Str("ipath", ip). - Str("calcetag", etag). - Str("etag", string(val)). 
- Msg("overriding calculated etag") - etag = string(val) - } - - mdKeysMap := make(map[string]struct{}) - for _, k := range mdKeys { - mdKeysMap[k] = struct{}{} - } - - var returnAllKeys bool - if _, ok := mdKeysMap["*"]; len(mdKeys) == 0 || ok { - returnAllKeys = true - } - - metadata := map[string]string{} - - if _, ok := mdKeysMap[favoriteKey]; returnAllKeys || ok { - favorite := "" - if u, ok := ctxpkg.ContextGetUser(ctx); ok { - // the favorite flag is specific to the user, so we need to incorporate the userid - if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) - if val, err := xattr.Get(ip, fa); err == nil { - appctx.GetLogger(ctx).Debug(). - Str("ipath", ip). - Str("favorite", string(val)). - Str("username", u.GetUsername()). - Msg("found favorite flag") - favorite = string(val) - } - } else { - appctx.GetLogger(ctx).Error().Err(errtypes.UserRequired("userrequired")).Msg("user has no id") - } - } else { - appctx.GetLogger(ctx).Error().Err(errtypes.UserRequired("userrequired")).Msg("error getting user from ctx") - } - metadata[favoriteKey] = favorite - } - - list, err := xattr.List(ip) - if err == nil { - for _, entry := range list { - // filter out non-custom properties - if !strings.HasPrefix(entry, mdPrefix) { - continue - } - if val, err := xattr.Get(ip, entry); err == nil { - k := entry[len(mdPrefix):] - if _, ok := mdKeysMap[k]; returnAllKeys || ok { - metadata[k] = string(val) - } - } else { - appctx.GetLogger(ctx).Error().Err(err). - Str("entry", entry). 
- Msg("error retrieving xattr metadata") - } - } - } else { - appctx.GetLogger(ctx).Error().Err(err).Msg("error getting list of extended attributes") - } - - ri := &provider.ResourceInfo{ - Id: &provider.ResourceId{OpaqueId: id}, - Path: sp, - Type: getResourceType(fi.IsDir()), - Etag: etag, - MimeType: mime.Detect(fi.IsDir(), ip), - Size: uint64(fi.Size()), - Mtime: &types.Timestamp{ - Seconds: uint64(fi.ModTime().Unix()), - // TODO read nanos from where? Nanos: fi.MTimeNanos, - }, - ArbitraryMetadata: &provider.ArbitraryMetadata{ - Metadata: metadata, - }, - } - - if owner, err := fs.getUser(ctx, fs.getOwner(ip)); err == nil { - ri.Owner = owner.Id - } else { - appctx.GetLogger(ctx).Error().Err(err).Msg("error getting owner") - } - - ri.PermissionSet = fs.permissionSet(ctx, ri.Owner) - - // checksums - if !fi.IsDir() { - if _, checksumRequested := mdKeysMap[checksumsKey]; returnAllKeys || checksumRequested { - // TODO which checksum was requested? sha1 adler32 or md5? for now hardcode sha1? - readChecksumIntoResourceChecksum(ctx, ip, storageprovider.XSSHA1, ri) - readChecksumIntoOpaque(ctx, ip, storageprovider.XSMD5, ri) - readChecksumIntoOpaque(ctx, ip, storageprovider.XSAdler32, ri) - } - } - - return ri -} -func getResourceType(isDir bool) provider.ResourceType { - if isDir { - return provider.ResourceType_RESOURCE_TYPE_CONTAINER - } - return provider.ResourceType_RESOURCE_TYPE_FILE -} - -// CreateStorageSpace creates a storage space. 
-func (fs *ocfs) CreateStorageSpace(ctx context.Context, req *provider.CreateStorageSpaceRequest) (*provider.CreateStorageSpaceResponse, error) { - return nil, fmt.Errorf("unimplemented: CreateStorageSpace") -} - -func readOrCreateID(ctx context.Context, ip string, conn redis.Conn) string { - log := appctx.GetLogger(ctx) - - // read extended file attribute for id - // generate if not present - var id []byte - var err error - if id, err = xattr.Get(ip, idAttribute); err != nil { - log.Warn().Err(err).Str("driver", "owncloud").Str("ipath", ip).Msg("error reading file id") - - uuid := uuid.New() - // store uuid - id = uuid[:] - if err := xattr.Set(ip, idAttribute, id); err != nil { - log.Error().Err(err).Str("driver", "owncloud").Str("ipath", ip).Msg("error storing file id") - } - // TODO cache path for uuid in redis - // TODO reuse conn? - if conn != nil { - _, err := conn.Do("SET", uuid.String(), ip) - if err != nil { - log.Error().Err(err).Str("driver", "owncloud").Str("ipath", ip).Msg("error caching id") - // continue - } - } - } - // todo sign metadata - var uid uuid.UUID - if uid, err = uuid.FromBytes(id); err != nil { - log.Error().Err(err).Msg("error parsing uuid") - return "" - } - return uid.String() -} - -func (fs *ocfs) getPath(ctx context.Context, id *provider.ResourceId) (string, error) { - log := appctx.GetLogger(ctx) - c := fs.pool.Get() - defer c.Close() - fs.scanFiles(ctx, c) - ip, err := redis.String(c.Do("GET", id.OpaqueId)) - if err != nil { - return "", errtypes.NotFound(id.OpaqueId) - } - - idFromXattr, err := xattr.Get(ip, idAttribute) - if err != nil { - return "", errtypes.NotFound(id.OpaqueId) - } - - uid, err := uuid.FromBytes(idFromXattr) - if err != nil { - log.Error().Err(err).Msg("error parsing uuid") - } - - if uid.String() != id.OpaqueId { - if _, err := c.Do("DEL", id.OpaqueId); err != nil { - return "", err - } - return "", errtypes.NotFound(id.OpaqueId) - } - - return ip, nil -} - -// GetPathByID returns the storage relative path 
for the file id, without the internal namespace. -func (fs *ocfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) { - ip, err := fs.getPath(ctx, id) - if err != nil { - return "", err - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.GetPath { - return "", errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return "", errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return "", errors.Wrap(err, "ocfs: error reading permissions") - } - - return fs.toStoragePath(ctx, ip), nil -} - -// resolve takes in a request path or request id and converts it to an internal path. -func (fs *ocfs) resolve(ctx context.Context, ref *provider.Reference) (string, error) { - // if storage id is set look up that - if ref.ResourceId != nil { - ip, err := fs.getPath(ctx, ref.ResourceId) - if err != nil { - return "", err - } - return filepath.Join("/", ip, filepath.Join("/", ref.Path)), nil - } - - // use a path - return fs.toInternalPath(ctx, ref.Path), nil -} - -func (fs *ocfs) DenyGrant(ctx context.Context, ref *provider.Reference, g *provider.Grantee) error { - return errtypes.NotSupported("ocfs: deny grant not supported") -} - -func (fs *ocfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.AddGrant { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - e := ace.FromGrant(g) - principal, value := e.Marshal() - if err := xattr.Set(ip, sharePrefix+principal, value); err != nil { - return err - } - return fs.propagate(ctx, ip) -} - -// extractACEsFromAttrs reads ACEs in the list of attrs from the file. 
-func extractACEsFromAttrs(ctx context.Context, ip string, attrs []string) (entries []*ace.ACE) { - log := appctx.GetLogger(ctx) - entries = []*ace.ACE{} - for i := range attrs { - if strings.HasPrefix(attrs[i], sharePrefix) { - var value []byte - var err error - if value, err = xattr.Get(ip, attrs[i]); err != nil { - log.Error().Err(err).Str("attr", attrs[i]).Msg("could not read attribute") - continue - } - var e *ace.ACE - principal := attrs[i][len(sharePrefix):] - if e, err = ace.Unmarshal(principal, value); err != nil { - log.Error().Err(err).Str("principal", principal).Str("attr", attrs[i]).Msg("could not unmarshal ace") - continue - } - entries = append(entries, e) - } - } - return -} - -// TODO if user is owner but no acls found he can do everything? -// The owncloud driver does not integrate with the os so, for now, the owner can do everything, see ownerPermissions. -// Should this change we can store an acl for the owner in every node. -// We could also add default acls that can only the admin can set, eg for a read only storage? -// Someone needs to write to provide the content that should be read only, so this would likely be an acl for a group anyway. -// We need the storage relative path so we can calculate the permissions -// for the node based on all acls in the tree up to the root. 
-func (fs *ocfs) readPermissions(ctx context.Context, ip string) (p *provider.ResourcePermissions, err error) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - appctx.GetLogger(ctx).Debug().Str("ipath", ip).Msg("no user in context, returning default permissions") - return defaultPermissions, nil - } - // check if the current user is the owner - if fs.getOwner(ip) == u.Id.OpaqueId { - appctx.GetLogger(ctx).Debug().Str("ipath", ip).Msg("user is owner, returning owner permissions") - return ownerPermissions, nil - } - - // for non owners this is a little more complicated: - aggregatedPermissions := &provider.ResourcePermissions{} - // add default permissions - addPermissions(aggregatedPermissions, defaultPermissions) - - // determine root - rp := fs.toInternalPath(ctx, "") - // TODO rp will be the datadir ... be we don't want to go up that high. The users home is far enough - np := ip - - // for an efficient group lookup convert the list of groups to a map - // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! 
- groupsMap := make(map[string]bool, len(u.Groups)) - for i := range u.Groups { - groupsMap[u.Groups[i]] = true - } - - var e *ace.ACE - // for all segments, starting at the leaf - for np != rp { - var attrs []string - if attrs, err = xattr.List(np); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("ipath", np).Msg("error listing attributes") - return nil, err - } - - userace := sharePrefix + "u:" + u.Id.OpaqueId - userFound := false - for i := range attrs { - // we only need the find the user once per node - switch { - case !userFound && attrs[i] == userace: - e, err = fs.readACE(ctx, np, "u:"+u.Id.OpaqueId) - case strings.HasPrefix(attrs[i], sharePrefix+"g:"): - g := strings.TrimPrefix(attrs[i], sharePrefix+"g:") - if groupsMap[g] { - e, err = fs.readACE(ctx, np, "g:"+g) - } else { - // no need to check attribute - continue - } - default: - // no need to check attribute - continue - } - - switch { - case err == nil: - addPermissions(aggregatedPermissions, e.Grant().GetPermissions()) - appctx.GetLogger(ctx).Debug().Str("ipath", np).Str("principal", strings.TrimPrefix(attrs[i], sharePrefix)).Interface("permissions", aggregatedPermissions).Msg("adding permissions") - case isNoData(err): - err = nil - appctx.GetLogger(ctx).Error().Str("ipath", np).Str("principal", strings.TrimPrefix(attrs[i], sharePrefix)).Interface("attrs", attrs).Msg("no permissions found on node, but they were listed") - default: - appctx.GetLogger(ctx).Error().Err(err).Str("ipath", np).Str("principal", strings.TrimPrefix(attrs[i], sharePrefix)).Msg("error reading permissions") - return nil, err - } - } - - np = filepath.Dir(np) - } - - // 3. read user permissions until one is found? - // what if, when checking /a/b/c/d, /a/b has write permission, but /a/b/c has not? - // those are two shares one read only, and a higher one rw, - // should the higher one be used? - // or, since we did find a matching ace in a lower node use that because it matches the principal? 
- // this would allow ai user to share a folder rm but take away the write capability for eg a docs folder inside it. - // 4. read group permissions until all groups of the user are matched? - // same as for user permission, but we need to keep going further up the tree until all groups of the user were matched. - // what if a user has thousands of groups? - // we will always have to walk to the root. - // but the same problem occurs for a user with 2 groups but where only one group was used to share. - // in any case we need to iterate the aces, not the number of groups of the user. - // listing the aces can be used to match the principals, we do not need to fully real all aces - // what if, when checking /a/b/c/d, /a/b has write permission for group g, but /a/b/c has an ace for another group h the user is also a member of? - // it would allow restricting a users permissions by resharing something with him with lower permission? - // so if you have reshare permissions you could accidentially restrict users access to a subfolder of a rw share to ro by sharing it to another group as ro when they are part of both groups - // it makes more sense to have explicit negative permissions - - // TODO we need to read all parents ... until we find a matching ace? - appctx.GetLogger(ctx).Debug().Interface("permissions", aggregatedPermissions).Str("ipath", ip).Msg("returning aggregated permissions") - return aggregatedPermissions, nil -} - -func isNoData(err error) bool { - if xerr, ok := err.(*xattr.Error); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENODATA - } - } - return false -} - -// The os not exists error is buried inside the xattr error, -// so we cannot just use os.IsNotExists(). 
-func isNotFound(err error) bool { - if xerr, ok := err.(*xattr.Error); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENOENT - } - } - return false -} - -func (fs *ocfs) readACE(ctx context.Context, ip string, principal string) (e *ace.ACE, err error) { - var b []byte - if b, err = xattr.Get(ip, sharePrefix+principal); err != nil { - return nil, err - } - if e, err = ace.Unmarshal(principal, b); err != nil { - return nil, err - } - return -} - -// additive merging of permissions only. -func addPermissions(p1 *provider.ResourcePermissions, p2 *provider.ResourcePermissions) { - p1.AddGrant = p1.AddGrant || p2.AddGrant - p1.CreateContainer = p1.CreateContainer || p2.CreateContainer - p1.Delete = p1.Delete || p2.Delete - p1.GetPath = p1.GetPath || p2.GetPath - p1.GetQuota = p1.GetQuota || p2.GetQuota - p1.InitiateFileDownload = p1.InitiateFileDownload || p2.InitiateFileDownload - p1.InitiateFileUpload = p1.InitiateFileUpload || p2.InitiateFileUpload - p1.ListContainer = p1.ListContainer || p2.ListContainer - p1.ListFileVersions = p1.ListFileVersions || p2.ListFileVersions - p1.ListGrants = p1.ListGrants || p2.ListGrants - p1.ListRecycle = p1.ListRecycle || p2.ListRecycle - p1.Move = p1.Move || p2.Move - p1.PurgeRecycle = p1.PurgeRecycle || p2.PurgeRecycle - p1.RemoveGrant = p1.RemoveGrant || p2.RemoveGrant - p1.RestoreFileVersion = p1.RestoreFileVersion || p2.RestoreFileVersion - p1.RestoreRecycleItem = p1.RestoreRecycleItem || p2.RestoreRecycleItem - p1.Stat = p1.Stat || p2.Stat - p1.UpdateGrant = p1.UpdateGrant || p2.UpdateGrant -} - -func (fs *ocfs) ListGrants(ctx context.Context, ref *provider.Reference) (grants []*provider.Grant, err error) { - log := appctx.GetLogger(ctx) - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListGrants { - return nil, 
errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - var attrs []string - if attrs, err = xattr.List(ip); err != nil { - // TODO err might be a not exists - log.Error().Err(err).Msg("error listing attributes") - return nil, err - } - - log.Debug().Interface("attrs", attrs).Msg("read attributes") - - aces := extractACEsFromAttrs(ctx, ip, attrs) - - grants = make([]*provider.Grant, 0, len(aces)) - for i := range aces { - grants = append(grants, aces[i].Grant()) - } - - return grants, nil -} - -func (fs *ocfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - var attr string - if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP { - attr = sharePrefix + "g:" + g.Grantee.GetGroupId().OpaqueId - } else { - attr = sharePrefix + "u:" + g.Grantee.GetUserId().OpaqueId - } - - if err = xattr.Remove(ip, attr); err != nil { - return - } - - return fs.propagate(ctx, ip) -} - -func (fs *ocfs) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.UpdateGrant { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return 
errors.Wrap(err, "ocfs: error reading permissions") - } - - e := ace.FromGrant(g) - principal, value := e.Marshal() - if err := xattr.Set(ip, sharePrefix+principal, value); err != nil { - return err - } - return fs.propagate(ctx, ip) -} - -func (fs *ocfs) CreateHome(ctx context.Context) error { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") - return err - } - layout := templates.WithUser(u, fs.c.UserLayout) - - homePaths := []string{ - filepath.Join(fs.c.DataDirectory, layout, "files"), - filepath.Join(fs.c.DataDirectory, layout, "files_trashbin"), - filepath.Join(fs.c.DataDirectory, layout, "files_versions"), - filepath.Join(fs.c.DataDirectory, layout, "uploads"), - filepath.Join(fs.c.DataDirectory, layout, "shadow_files"), - } - - for _, v := range homePaths { - if err := os.MkdirAll(v, 0700); err != nil { - return errors.Wrap(err, "ocfs: error creating home path: "+v) - } - } - - return nil -} - -// If home is enabled, the relative home is always the empty string. 
-func (fs *ocfs) GetHome(ctx context.Context) (string, error) { - if !fs.c.EnableHome { - return "", errtypes.NotSupported("ocfs: get home not supported") - } - return "", nil -} - -func (fs *ocfs) CreateDir(ctx context.Context, ref *provider.Reference) (err error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return err - } - - // check permissions of parent dir - if perm, err := fs.readPermissions(ctx, filepath.Dir(ip)); err == nil { - if !perm.CreateContainer { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(ref.Path) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - if err = os.Mkdir(ip, 0700); err != nil { - if os.IsNotExist(err) { - return errtypes.NotFound(ref.Path) - } - // FIXME we also need already exists error, webdav expects 405 MethodNotAllowed - return errors.Wrap(err, "ocfs: error creating dir "+ref.Path) - } - return fs.propagate(ctx, ip) -} - -// TouchFile as defined in the storage.FS interface. -func (fs *ocfs) TouchFile(ctx context.Context, ref *provider.Reference) error { - return fmt.Errorf("unimplemented: TouchFile") -} - -func (fs *ocfs) isShareFolderChild(sp string) bool { - return strings.HasPrefix(sp, fs.c.ShareFolder) -} - -func (fs *ocfs) isShareFolderRoot(sp string) bool { - return sp == fs.c.ShareFolder -} - -func (fs *ocfs) CreateReference(ctx context.Context, sp string, targetURI *url.URL) error { - if !fs.isShareFolderChild(sp) { - return errtypes.PermissionDenied("ocfs: cannot create references outside the share folder: share_folder=" + "/Shares" + " path=" + sp) - } - - ip := fs.toInternalShadowPath(ctx, sp) - // TODO check permission? 
- - dir, _ := filepath.Split(ip) - if err := os.MkdirAll(dir, 0700); err != nil { - return errors.Wrapf(err, "ocfs: error creating shadow path %s", dir) - } - - f, err := os.Create(ip) - if err != nil { - return errors.Wrapf(err, "ocfs: error creating shadow file %s", ip) - } - - err = xattr.FSet(f, mdPrefix+"target", []byte(targetURI.String())) - if err != nil { - return errors.Wrapf(err, "ocfs: error setting the target %s on the shadow file %s", targetURI.String(), ip) - } - return nil -} - -func (fs *ocfs) setMtime(ctx context.Context, ip string, mtime string) error { - log := appctx.GetLogger(ctx) - if mt, err := parseMTime(mtime); err == nil { - // updating mtime also updates atime - if err := os.Chtimes(ip, mt, mt); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Time("mtime", mt). - Msg("could not set mtime") - return errors.Wrap(err, "could not set mtime") - } - } else { - log.Error().Err(err). - Str("ipath", ip). - Str("mtime", mtime). - Msg("could not parse mtime") - return errors.Wrap(err, "could not parse mtime") - } - return nil -} -func (fs *ocfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) (err error) { - log := appctx.GetLogger(ctx) - - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.InitiateFileUpload { // TODO add dedicated permission? 
- return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - var fi os.FileInfo - fi, err = os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error stating "+ip) - } - - errs := []error{} - - if md.Metadata != nil { - if val, ok := md.Metadata["mtime"]; ok { - err := fs.setMtime(ctx, ip, val) - if err != nil { - errs = append(errs, errors.Wrap(err, "could not set mtime")) - } - // remove from metadata - delete(md.Metadata, "mtime") - } - // TODO(jfd) special handling for atime? - // TODO(jfd) allow setting birth time (btime)? - // TODO(jfd) any other metadata that is interesting? fileid? - if val, ok := md.Metadata["etag"]; ok { - etag := calcEtag(ctx, fi) - val = fmt.Sprintf("\"%s\"", strings.Trim(val, "\"")) - if etag == val { - log.Debug(). - Str("ipath", ip). - Str("etag", val). - Msg("ignoring request to update identical etag") - } else - // etag is only valid until the calculated etag changes - // TODO(jfd) cleanup in a batch job - if err := xattr.Set(ip, etagPrefix+etag, []byte(val)); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Str("calcetag", etag). - Str("etag", val). - Msg("could not set etag") - errs = append(errs, errors.Wrap(err, "could not set etag")) - } - delete(md.Metadata, "etag") - } - if val, ok := md.Metadata["http://owncloud.org/ns/favorite"]; ok { - // TODO we should not mess with the user here ... the favorites is now a user specific property for a file - // that cannot be mapped to extended attributes without leaking who has marked a file as a favorite - // it is a specific case of a tag, which is user individual as well - // TODO there are different types of tags - // 1. public that are managed by everyone - // 2. private tags that are only visible to the user - // 3. 
system tags that are only visible to the system - // 4. group tags that are only visible to a group ... - // urgh ... well this can be solved using different namespaces - // 1. public = p: - // 2. private = u:: for user specific - // 3. system = s: for system - // 4. group = g:: - // 5. app? = a:: for apps? - // obviously this only is secure when the u/s/g/a namespaces are not accessible by users in the filesystem - // public tags can be mapped to extended attributes - if u, ok := ctxpkg.ContextGetUser(ctx); ok { - // the favorite flag is specific to the user, so we need to incorporate the userid - if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) - if err := xattr.Set(ip, fa, []byte(val)); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Interface("user", u). - Str("key", fa). - Msg("could not set favorite flag") - errs = append(errs, errors.Wrap(err, "could not set favorite flag")) - } - } else { - log.Error(). - Str("ipath", ip). - Interface("user", u). - Msg("user has no id") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) - } - } else { - log.Error(). - Str("ipath", ip). - Interface("user", u). - Msg("error getting user from ctx") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) - } - // remove from metadata - delete(md.Metadata, "http://owncloud.org/ns/favorite") - } - } - for k, v := range md.Metadata { - if err := xattr.Set(ip, mdPrefix+k, []byte(v)); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Str("key", k). - Str("val", v). - Msg("could not set metadata") - errs = append(errs, errors.Wrap(err, "could not set metadata")) - } - } - switch len(errs) { - case 0: - return fs.propagate(ctx, ip) - case 1: - return errs[0] - default: - // TODO how to return multiple errors? 
- return errors.New("multiple errors occurred, see log for details") - } -} - -func parseMTime(v string) (t time.Time, err error) { - p := strings.SplitN(v, ".", 2) - var sec, nsec int64 - if sec, err = strconv.ParseInt(p[0], 10, 64); err == nil { - if len(p) > 1 { - nsec, err = strconv.ParseInt(p[1], 10, 64) - } - } - return time.Unix(sec, nsec), err -} - -func (fs *ocfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) (err error) { - log := appctx.GetLogger(ctx) - - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.InitiateFileUpload { // TODO add dedicated permission? - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - _, err = os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error stating "+ip) - } - - errs := []error{} - for _, k := range keys { - switch k { - case "http://owncloud.org/ns/favorite": - if u, ok := ctxpkg.ContextGetUser(ctx); ok { - // the favorite flag is specific to the user, so we need to incorporate the userid - if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) - if err := xattr.Remove(ip, fa); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Interface("user", u). - Str("key", fa). - Msg("could not unset favorite flag") - errs = append(errs, errors.Wrap(err, "could not unset favorite flag")) - } - } else { - log.Error(). - Str("ipath", ip). - Interface("user", u). - Msg("user has no id") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) - } - } else { - log.Error(). 
- Str("ipath", ip). - Interface("user", u). - Msg("error getting user from ctx") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) - } - default: - if err = xattr.Remove(ip, mdPrefix+k); err != nil { - // a non-existing attribute will return an error, which we can ignore - // (using string compare because the error type is syscall.Errno and not wrapped/recognizable) - if e, ok := err.(*xattr.Error); !ok || !(e.Err.Error() == "no data available" || - // darwin - e.Err.Error() == "attribute not found") { - log.Error().Err(err). - Str("ipath", ip). - Str("key", k). - Msg("could not unset metadata") - errs = append(errs, errors.Wrap(err, "could not unset metadata")) - } - } - } - } - - switch len(errs) { - case 0: - return fs.propagate(ctx, ip) - case 1: - return errs[0] - default: - // TODO how to return multiple errors? - return errors.New("multiple errors occurred, see log for details") - } -} - -// GetLock returns an existing lock on the given reference. -func (fs *ocfs) GetLock(ctx context.Context, ref *provider.Reference) (*provider.Lock, error) { - return nil, errtypes.NotSupported("unimplemented") -} - -// SetLock puts a lock on the given reference. -func (fs *ocfs) SetLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error { - return errtypes.NotSupported("unimplemented") -} - -// RefreshLock refreshes an existing lock on the given reference. -func (fs *ocfs) RefreshLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock, existingLockID string) error { - return errtypes.NotSupported("unimplemented") -} - -// Unlock removes an existing lock from the given reference. -func (fs *ocfs) Unlock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error { - return errtypes.NotSupported("unimplemented") -} - -// Delete is actually only a move to trash -// -// This is a first optimistic approach. 
-// When a file has versions and we want to delete the file it could happen that -// the service crashes before all moves are finished. -// That would result in invalid state like the main files was moved but the -// versions were not. -// We will live with that compromise since this storage driver will be -// deprecated soon. -func (fs *ocfs) Delete(ctx context.Context, ref *provider.Reference) (err error) { - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.Delete { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - _, err = os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error stating "+ip) - } - - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving recycle path") - } - - if err := os.MkdirAll(rp, 0700); err != nil { - return errors.Wrap(err, "ocfs: error creating trashbin dir "+rp) - } - - // ip is the path on disk ... 
we need only the path relative to root - origin := filepath.Dir(fs.toStoragePath(ctx, ip)) - - err = fs.trash(ctx, ip, rp, origin) - if err != nil { - return errors.Wrapf(err, "ocfs: error deleting file %s", ip) - } - err = fs.trashVersions(ctx, ip, origin) - if err != nil { - return errors.Wrapf(err, "ocfs: error deleting versions of file %s", ip) - } - return nil -} - -func (fs *ocfs) trash(ctx context.Context, ip string, rp string, origin string) error { - // set origin location in metadata - if err := xattr.Set(ip, trashOriginPrefix, []byte(origin)); err != nil { - return err - } - - // move to trash location - dtime := time.Now().Unix() - tgt := filepath.Join(rp, fmt.Sprintf("%s.d%d", filepath.Base(ip), dtime)) - - // The condition reads: "if the file exists" - // I know this check is hard to read because of the double negation - // but this way we avoid to duplicate the code following the if block. - // If two deletes happen fast consecutively they will have the same `dtime`, - // therefore we have to increase the 'dtime' to avoid collisions. - if _, err := os.Stat(tgt); !errors.Is(err, os.ErrNotExist) { - // timestamp collision, try again with higher value: - dtime++ - tgt = filepath.Join(rp, fmt.Sprintf("%s.d%d", filepath.Base(ip), dtime)) - } - if err := os.Rename(ip, tgt); err != nil { - return errors.Wrap(err, "ocfs: could not move item to trash") - } - - return fs.propagate(ctx, filepath.Dir(ip)) -} - -func (fs *ocfs) trashVersions(ctx context.Context, ip string, origin string) error { - vp := fs.getVersionsPath(ctx, ip) - vrp, err := fs.getVersionRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "error resolving versions recycle path") - } - - if err := os.MkdirAll(vrp, 0700); err != nil { - return errors.Wrap(err, "ocfs: error creating trashbin dir "+vrp) - } - - // Ignore error since the only possible error is malformed pattern. 
- versions, _ := filepath.Glob(vp + ".v*") - for _, v := range versions { - err := fs.trash(ctx, v, vrp, origin) - if err != nil { - return errors.Wrap(err, "ocfs: error deleting file "+v) - } - } - return nil -} - -func (fs *ocfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (err error) { - var oldIP string - if oldIP, err = fs.resolve(ctx, oldRef); err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, oldIP); err == nil { - if !perm.Move { // TODO add dedicated permission? - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(oldIP))) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - var newIP string - if newIP, err = fs.resolve(ctx, newRef); err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // TODO check target permissions ... if it exists - - if err = os.Rename(oldIP, newIP); err != nil { - return errors.Wrap(err, "ocfs: error moving "+oldIP+" to "+newIP) - } - - log := appctx.GetLogger(ctx) - conn := fs.pool.Get() - defer conn.Close() - // Ideally if we encounter an error here we should rollback the Move/Rename. - // But since the owncloud storage driver is not being actively used by anyone other - // than the acceptance tests we should be fine by ignoring the errors. 
- _ = filepath.Walk(newIP, func(path string, info os.FileInfo, err error) error { - if err != nil { - // TODO(c0rby): rollback the move in case of an error - log.Error().Str("path", path).Err(err).Msg("error caching id") - return nil - } - id := readOrCreateID(context.Background(), path, nil) - _, err = conn.Do("SET", id, path) - if err != nil { - // TODO(c0rby): rollback the move in case of an error - log.Error().Str("path", path).Err(err).Msg("error caching id") - } - return nil - }) - if err := fs.propagate(ctx, newIP); err != nil { - return err - } - if err := fs.propagate(ctx, filepath.Dir(oldIP)); err != nil { - return err - } - return nil -} - -func (fs *ocfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (*provider.ResourceInfo, error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - // TODO return correct errtype - if _, ok := err.(errtypes.IsNotFound); ok { - return nil, err - } - return nil, errors.Wrap(err, "ocfs: error resolving reference") - } - p := fs.toStoragePath(ctx, ip) - - if fs.c.EnableHome { - if fs.isShareFolderChild(p) { - return fs.getMDShareFolder(ctx, p, mdKeys) - } - } - - // If GetMD is called for a path shared with the user then the path is - // already wrapped. 
(fs.resolve wraps the path) - if strings.HasPrefix(p, fs.c.DataDirectory) { - ip = p - } - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.Stat { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - md, err := os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return nil, errors.Wrap(err, "ocfs: error stating "+ip) - } - c := fs.pool.Get() - defer c.Close() - m := fs.convertToResourceInfo(ctx, md, ip, fs.toStoragePath(ctx, ip), c, mdKeys) - - return m, nil -} - -func (fs *ocfs) getMDShareFolder(ctx context.Context, sp string, mdKeys []string) (*provider.ResourceInfo, error) { - ip := fs.toInternalShadowPath(ctx, sp) - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.Stat { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - md, err := os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStorageShadowPath(ctx, ip)) - } - return nil, errors.Wrapf(err, "ocfs: error stating %s", ip) - } - c := fs.pool.Get() - defer c.Close() - m := fs.convertToResourceInfo(ctx, md, ip, fs.toStorageShadowPath(ctx, ip), c, mdKeys) - if !fs.isShareFolderRoot(sp) { - m.Type = provider.ResourceType_RESOURCE_TYPE_REFERENCE - ref, err := xattr.Get(ip, mdPrefix+"target") - if err != nil { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStorageShadowPath(ctx, ip)) - } - return nil, err - } - m.Target = string(ref) - } - - return m, nil -} - -func (fs *ocfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string) 
([]*provider.ResourceInfo, error) { - log := appctx.GetLogger(ctx) - - ip, err := fs.resolve(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving reference") - } - sp := fs.toStoragePath(ctx, ip) - - if fs.c.EnableHome { - log.Debug().Msg("home enabled") - if strings.HasPrefix(sp, "/") { - // permissions checked in listWithHome - return fs.listWithHome(ctx, "/", sp, mdKeys) - } - } - - log.Debug().Msg("list with nominal home") - // permissions checked in listWithNominalHome - return fs.listWithNominalHome(ctx, sp, mdKeys) -} - -func (fs *ocfs) listWithNominalHome(ctx context.Context, ip string, mdKeys []string) ([]*provider.ResourceInfo, error) { - // If a user wants to list a folder shared with him the path will already - // be wrapped with the files directory path of the share owner. - // In that case we don't want to wrap the path again. - if !strings.HasPrefix(ip, fs.c.DataDirectory) { - ip = fs.toInternalPath(ctx, ip) - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - entries, err := os.ReadDir(ip) - if err != nil { - return nil, errors.Wrapf(err, "ocfs: error listing %s", ip) - } - mds := make([]iofs.FileInfo, 0, len(entries)) - for _, entry := range entries { - info, err := entry.Info() - if err != nil { - return nil, err - } - mds = append(mds, info) - } - c := fs.pool.Get() - defer c.Close() - finfos := []*provider.ResourceInfo{} - for _, md := range mds { - cp := filepath.Join(ip, md.Name()) - m := fs.convertToResourceInfo(ctx, md, cp, fs.toStoragePath(ctx, cp), c, mdKeys) - finfos = append(finfos, m) - } - return finfos, nil -} - -func (fs *ocfs) listWithHome(ctx context.Context, home, p string, mdKeys []string) 
([]*provider.ResourceInfo, error) { - log := appctx.GetLogger(ctx) - if p == home { - log.Debug().Msg("listing home") - return fs.listHome(ctx, home, mdKeys) - } - - if fs.isShareFolderRoot(p) { - log.Debug().Msg("listing share folder root") - return fs.listShareFolderRoot(ctx, p, mdKeys) - } - - if fs.isShareFolderChild(p) { - return nil, errtypes.PermissionDenied("ocfs: error listing folders inside the shared folder, only file references are stored inside") - } - - log.Debug().Msg("listing nominal home") - return fs.listWithNominalHome(ctx, p, mdKeys) -} - -func (fs *ocfs) listHome(ctx context.Context, home string, mdKeys []string) ([]*provider.ResourceInfo, error) { - // list files - ip := fs.toInternalPath(ctx, home) - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - entries, err := os.ReadDir(ip) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error listing files") - } - mds := make([]iofs.FileInfo, 0, len(entries)) - for _, entry := range entries { - info, err := entry.Info() - if err != nil { - return nil, err - } - mds = append(mds, info) - } - c := fs.pool.Get() - defer c.Close() - - finfos := []*provider.ResourceInfo{} - for _, md := range mds { - cp := filepath.Join(ip, md.Name()) - m := fs.convertToResourceInfo(ctx, md, cp, fs.toStoragePath(ctx, cp), c, mdKeys) - finfos = append(finfos, m) - } - - // list shadow_files - ip = fs.toInternalShadowPath(ctx, home) - entries, err = os.ReadDir(ip) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error listing shadow_files") - } - mds = make([]iofs.FileInfo, 0, len(entries)) - for _, entry := range entries { - info, err := entry.Info() - if err != nil { - return nil, err - } - mds = append(mds, info) - } - 
for _, md := range mds { - cp := filepath.Join(ip, md.Name()) - m := fs.convertToResourceInfo(ctx, md, cp, fs.toStorageShadowPath(ctx, cp), c, mdKeys) - finfos = append(finfos, m) - } - return finfos, nil -} - -func (fs *ocfs) listShareFolderRoot(ctx context.Context, sp string, mdKeys []string) ([]*provider.ResourceInfo, error) { - ip := fs.toInternalShadowPath(ctx, sp) - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - entries, err := os.ReadDir(ip) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error listing shadow_files") - } - mds := make([]iofs.FileInfo, 0, len(entries)) - for _, entry := range entries { - info, err := entry.Info() - if err != nil { - return nil, err - } - mds = append(mds, info) - } - c := fs.pool.Get() - defer c.Close() - - finfos := []*provider.ResourceInfo{} - for _, md := range mds { - cp := filepath.Join(ip, md.Name()) - m := fs.convertToResourceInfo(ctx, md, cp, fs.toStorageShadowPath(ctx, cp), c, mdKeys) - m.Type = provider.ResourceType_RESOURCE_TYPE_REFERENCE - ref, err := xattr.Get(cp, mdPrefix+"target") - if err != nil { - return nil, err - } - m.Target = string(ref) - finfos = append(finfos, m) - } - - return finfos, nil -} - -func (fs *ocfs) archiveRevision(ctx context.Context, vbp string, ip string) error { - // move existing file to versions dir - vp := fmt.Sprintf("%s.v%d", vbp, time.Now().Unix()) - if err := os.MkdirAll(filepath.Dir(vp), 0700); err != nil { - return errors.Wrap(err, "ocfs: error creating versions dir "+vp) - } - - // TODO(jfd): make sure rename is atomic, missing fsync ... 
- if err := os.Rename(ip, vp); err != nil { - return errors.Wrap(err, "ocfs: error renaming from "+ip+" to "+vp) - } - - return nil -} - -func (fs *ocfs) copyMD(s string, t string) (err error) { - var attrs []string - if attrs, err = xattr.List(s); err != nil { - return err - } - for i := range attrs { - if strings.HasPrefix(attrs[i], ocPrefix) { - var d []byte - if d, err = xattr.Get(s, attrs[i]); err != nil { - return err - } - if err = xattr.Set(t, attrs[i], d); err != nil { - return err - } - } - } - return nil -} - -func (fs *ocfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.InitiateFileDownload { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - r, err := os.Open(ip) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return nil, errors.Wrap(err, "ocfs: error reading "+ip) - } - return r, nil -} - -func (fs *ocfs) ListRevisions(ctx context.Context, ref *provider.Reference) ([]*provider.FileVersion, error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListFileVersions { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - vp := fs.getVersionsPath(ctx, ip) - - bn := filepath.Base(ip) - - revisions := []*provider.FileVersion{} - 
entries, err := os.ReadDir(filepath.Dir(vp)) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error reading"+filepath.Dir(vp)) - } - mds := make([]iofs.FileInfo, 0, len(entries)) - for _, entry := range entries { - info, err := entry.Info() - if err != nil { - return nil, err - } - mds = append(mds, info) - } - for i := range mds { - rev := fs.filterAsRevision(ctx, bn, mds[i]) - if rev != nil { - revisions = append(revisions, rev) - } - } - return revisions, nil -} - -func (fs *ocfs) filterAsRevision(ctx context.Context, bn string, md os.FileInfo) *provider.FileVersion { - if strings.HasPrefix(md.Name(), bn) { - // versions have filename.ext.v12345678 - version := md.Name()[len(bn)+2:] // truncate ".v" to get version mtime - mtime, err := strconv.Atoi(version) - if err != nil { - log := appctx.GetLogger(ctx) - log.Error().Err(err).Str("path", md.Name()).Msg("invalid version mtime") - return nil - } - // TODO(jfd) trashed versions are in the files_trashbin/versions folder ... not relevant here - return &provider.FileVersion{ - Key: version, - Size: uint64(md.Size()), - Mtime: uint64(mtime), - Etag: calcEtag(ctx, md), - } - } - return nil -} - -func (fs *ocfs) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (io.ReadCloser, error) { - return nil, errtypes.NotSupported("download revision") -} - -func (fs *ocfs) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) error { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.RestoreFileVersion { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - vp := fs.getVersionsPath(ctx, ip) - rp := vp + ".v" + revisionKey - - // check 
revision exists - rs, err := os.Stat(rp) - if err != nil { - return err - } - - if !rs.Mode().IsRegular() { - return fmt.Errorf("%s is not a regular file", rp) - } - - source, err := os.Open(rp) - if err != nil { - return err - } - defer source.Close() - - // destination should be available, otherwise we could not have navigated to its revisions - if err := fs.archiveRevision(ctx, fs.getVersionsPath(ctx, ip), ip); err != nil { - return err - } - - destination, err := os.Create(ip) - if err != nil { - // TODO(jfd) bring back revision in case sth goes wrong? - return err - } - defer destination.Close() - - _, err = io.Copy(destination, source) - - if err != nil { - return err - } - - // TODO(jfd) bring back revision in case sth goes wrong? - return fs.propagate(ctx, ip) -} - -func (fs *ocfs) PurgeRecycleItem(ctx context.Context, basePath, key, relativePath string) error { - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving recycle path") - } - ip := filepath.Join(rp, filepath.Clean(key)) - // TODO check permission? - - // check permissions - /* are they stored in the trash? - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.unwrap(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - */ - - err = os.Remove(ip) - if err != nil { - return errors.Wrap(err, "ocfs: error deleting recycle item") - } - err = os.RemoveAll(filepath.Join(filepath.Dir(rp), "versions", filepath.Clean(key))) - if err != nil { - return errors.Wrap(err, "ocfs: error deleting recycle item versions") - } - // TODO delete keyfiles, keys, share-keys - return nil -} - -func (fs *ocfs) EmptyRecycle(ctx context.Context) error { - // TODO check permission? on what? 
user must be the owner - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving recycle path") - } - err = os.RemoveAll(rp) - if err != nil { - return errors.Wrap(err, "ocfs: error deleting recycle files") - } - err = os.RemoveAll(filepath.Join(filepath.Dir(rp), "versions")) - if err != nil { - return errors.Wrap(err, "ocfs: error deleting recycle files versions") - } - // TODO delete keyfiles, keys, share-keys ... or just everything? - return nil -} - -func (fs *ocfs) convertToRecycleItem(ctx context.Context, rp string, md os.FileInfo) *provider.RecycleItem { - // trashbin items have filename.ext.d12345678 - suffix := filepath.Ext(md.Name()) - if len(suffix) == 0 || !strings.HasPrefix(suffix, ".d") { - log := appctx.GetLogger(ctx) - log.Error().Str("path", md.Name()).Msg("invalid trash item suffix") - return nil - } - trashtime := suffix[2:] // truncate "d" to get trashbin time - ttime, err := strconv.Atoi(trashtime) - if err != nil { - log := appctx.GetLogger(ctx) - log.Error().Err(err).Str("path", md.Name()).Msg("invalid trash time") - return nil - } - var v []byte - if v, err = xattr.Get(filepath.Join(rp, md.Name()), trashOriginPrefix); err != nil { - log := appctx.GetLogger(ctx) - log.Error().Err(err).Str("path", md.Name()).Msg("could not read origin") - return nil - } - // ownCloud 10 stores the parent dir of the deleted item as the location in the oc_files_trashbin table - // we use extended attributes for original location, but also only the parent location, which is why - // we need to join and trim the path when listing it - originalPath := filepath.Join(string(v), strings.TrimSuffix(filepath.Base(md.Name()), suffix)) - - return &provider.RecycleItem{ - Type: getResourceType(md.IsDir()), - Key: md.Name(), - // TODO do we need to prefix the path? it should be relative to this storage root, right? 
- Ref: &provider.Reference{ - Path: originalPath, - }, - Size: uint64(md.Size()), - DeletionTime: &types.Timestamp{ - Seconds: uint64(ttime), - // no nanos available - }, - } -} - -func (fs *ocfs) ListRecycle(ctx context.Context, basePath, key, relativePath string) ([]*provider.RecycleItem, error) { - // TODO check permission? on what? user must be the owner? - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving recycle path") - } - - // list files folder - entries, err := os.ReadDir(filepath.Join(rp, key)) - if err != nil { - log := appctx.GetLogger(ctx) - log.Debug().Err(err).Str("path", rp).Msg("trash not readable") - // TODO jfd only ignore not found errors - return []*provider.RecycleItem{}, nil - } - mds := make([]iofs.FileInfo, 0, len(entries)) - for _, entry := range entries { - info, err := entry.Info() - if err != nil { - // TODO jfd only ignore not found errors - return []*provider.RecycleItem{}, nil - } - mds = append(mds, info) - } - // TODO (jfd) limit and offset - items := []*provider.RecycleItem{} - for i := range mds { - ri := fs.convertToRecycleItem(ctx, rp, mds[i]) - if ri != nil { - items = append(items, ri) - } - } - return items, nil -} - -func (fs *ocfs) RestoreRecycleItem(ctx context.Context, basePath, key, relativePath string, restoreRef *provider.Reference) error { - // TODO check permission? on what? user must be the owner? 
- log := appctx.GetLogger(ctx) - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving recycle path") - } - src := filepath.Join(rp, filepath.Clean(key)) - - suffix := filepath.Ext(src) - if len(suffix) == 0 || !strings.HasPrefix(suffix, ".d") { - log.Error().Str("key", key).Str("path", src).Msg("invalid trash item suffix") - return nil - } - - if restoreRef == nil { - restoreRef = &provider.Reference{} - } - if restoreRef.Path == "" { - v, err := xattr.Get(src, trashOriginPrefix) - if err != nil { - log.Error().Err(err).Str("key", key).Str("path", src).Msg("could not read origin") - } - restoreRef.Path = filepath.Join("/", filepath.Clean(string(v)), strings.TrimSuffix(filepath.Base(src), suffix)) - } - tgt := fs.toInternalPath(ctx, restoreRef.Path) - // move back to original location - if err := os.Rename(src, tgt); err != nil { - log.Error().Err(err).Str("key", key).Str("restorePath", restoreRef.Path).Str("src", src).Str("tgt", tgt).Msg("could not restore item") - return errors.Wrap(err, "ocfs: could not restore item") - } - // unset trash origin location in metadata - if err := xattr.Remove(tgt, trashOriginPrefix); err != nil { - // just a warning, will be overwritten next time it is deleted - log.Warn().Err(err).Str("key", key).Str("tgt", tgt).Msg("could not unset origin") - } - // TODO(jfd) restore versions - - return fs.propagate(ctx, tgt) -} - -func (fs *ocfs) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter) ([]*provider.StorageSpace, error) { - return nil, errtypes.NotSupported("list storage spaces") -} - -// UpdateStorageSpace updates a storage space. 
-func (fs *ocfs) UpdateStorageSpace(ctx context.Context, req *provider.UpdateStorageSpaceRequest) (*provider.UpdateStorageSpaceResponse, error) { - return nil, errtypes.NotSupported("update storage space") -} - -func (fs *ocfs) propagate(ctx context.Context, leafPath string) error { - var root string - if fs.c.EnableHome { - root = fs.toInternalPath(ctx, "/") - } else { - owner := fs.getOwner(leafPath) - root = fs.toInternalPath(ctx, owner) - } - if !strings.HasPrefix(leafPath, root) { - err := errors.New("internal path outside root") - appctx.GetLogger(ctx).Error(). - Err(err). - Str("leafPath", leafPath). - Str("root", root). - Msg("could not propagate change") - return err - } - - fi, err := os.Stat(leafPath) - if err != nil { - appctx.GetLogger(ctx).Error(). - Err(err). - Str("leafPath", leafPath). - Str("root", root). - Msg("could not propagate change") - return err - } - - parts := strings.Split(strings.TrimPrefix(leafPath, root), "/") - // root never ends in / so the split returns an empty first element, which we can skip - // we do not need to chmod the last element because it is the leaf path (< and not <= comparison) - for i := 1; i < len(parts); i++ { - appctx.GetLogger(ctx).Debug(). - Str("leafPath", leafPath). - Str("root", root). - Int("i", i). - Interface("parts", parts). - Msg("propagating change") - if err := os.Chtimes(root, fi.ModTime(), fi.ModTime()); err != nil { - appctx.GetLogger(ctx).Error(). - Err(err). - Str("leafPath", leafPath). - Str("root", root). - Msg("could not propagate change") - return err - } - root = filepath.Join(root, parts[i]) - } - return nil -} - -func readChecksumIntoResourceChecksum(ctx context.Context, nodePath, algo string, ri *provider.ResourceInfo) { - v, err := xattr.Get(nodePath, checksumPrefix+algo) - log := appctx.GetLogger(ctx). - Debug(). - Err(err). - Str("nodepath", nodePath). 
- Str("algorithm", algo) - switch { - case err == nil: - ri.Checksum = &provider.ResourceChecksum{ - Type: storageprovider.PKG2GRPCXS(algo), - Sum: hex.EncodeToString(v), - } - case isNoData(err): - log.Msg("checksum not set") - case isNotFound(err): - log.Msg("file not found") - default: - log.Msg("could not read checksum") - } -} - -func readChecksumIntoOpaque(ctx context.Context, nodePath, algo string, ri *provider.ResourceInfo) { - v, err := xattr.Get(nodePath, checksumPrefix+algo) - log := appctx.GetLogger(ctx). - Debug(). - Err(err). - Str("nodepath", nodePath). - Str("algorithm", algo) - switch { - case err == nil: - if ri.Opaque == nil { - ri.Opaque = &types.Opaque{ - Map: map[string]*types.OpaqueEntry{}, - } - } - ri.Opaque.Map[algo] = &types.OpaqueEntry{ - Decoder: "plain", - Value: []byte(hex.EncodeToString(v)), - } - case isNoData(err): - log.Msg("checksum not set") - case isNotFound(err): - log.Msg("file not found") - default: - log.Msg("could not read checksum") - } -} - -// TODO propagate etag and mtime or append event to history? propagate on disk ... -// - but propagation is a separate task. only if upload was successful ... diff --git a/pkg/storage/fs/owncloud/owncloud_unix.go b/pkg/storage/fs/owncloud/owncloud_unix.go deleted file mode 100644 index f2ecd58899..0000000000 --- a/pkg/storage/fs/owncloud/owncloud_unix.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -//go:build !windows -// +build !windows - -package owncloud - -import ( - "context" - "crypto/md5" - "encoding/binary" - "fmt" - "os" - "strings" - "syscall" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" -) - -// TODO(jfd) get rid of the differences between unix and windows. the inode and dev should never be used for the etag because it interferes with backups - -// calcEtag will create an etag based on the md5 of -// - mtime, -// - inode (if available), -// - device (if available) and -// - size. -// errors are logged, but an etag will still be returned. -func calcEtag(ctx context.Context, fi os.FileInfo) string { - log := appctx.GetLogger(ctx) - h := md5.New() - err := binary.Write(h, binary.BigEndian, fi.ModTime().UnixNano()) - if err != nil { - log.Error().Err(err).Msg("error writing mtime") - } - stat, ok := fi.Sys().(*syscall.Stat_t) - if ok { - // take device and inode into account - err = binary.Write(h, binary.BigEndian, stat.Ino) - if err != nil { - log.Error().Err(err).Msg("error writing inode") - } - err = binary.Write(h, binary.BigEndian, stat.Dev) - if err != nil { - log.Error().Err(err).Msg("error writing device") - } - } - err = binary.Write(h, binary.BigEndian, fi.Size()) - if err != nil { - log.Error().Err(err).Msg("error writing size") - } - etag := fmt.Sprintf(`"%x"`, h.Sum(nil)) - return fmt.Sprintf("\"%s\"", strings.Trim(etag, "\"")) -} - -func (fs *ocfs) GetQuota(ctx context.Context, ref *provider.Reference) (uint64, uint64, error) { - // TODO quota of which storage space? 
- // we could use the logged in user, but when a user has access to multiple storages this falls short - // for now return quota of root - stat := syscall.Statfs_t{} - err := syscall.Statfs(fs.toInternalPath(ctx, "/"), &stat) - if err != nil { - return 0, 0, err - } - total := stat.Blocks * uint64(stat.Bsize) // Total data blocks in filesystem - used := (stat.Blocks - stat.Bavail) * uint64(stat.Bsize) // Free blocks available to unprivileged user - return total, used, nil -} diff --git a/pkg/storage/fs/owncloud/owncloud_windows.go b/pkg/storage/fs/owncloud/owncloud_windows.go deleted file mode 100644 index 0eca194c6d..0000000000 --- a/pkg/storage/fs/owncloud/owncloud_windows.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -//go:build windows -// +build windows - -package owncloud - -import ( - "context" - "crypto/md5" - "encoding/binary" - "fmt" - "os" - "strings" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "golang.org/x/sys/windows" -) - -// calcEtag will create an etag based on the md5 of -// - mtime, -// - inode (if available), -// - device (if available) and -// - size. 
-// errors are logged, but an etag will still be returned -func calcEtag(ctx context.Context, fi os.FileInfo) string { - log := appctx.GetLogger(ctx) - h := md5.New() - err := binary.Write(h, binary.BigEndian, fi.ModTime().UnixNano()) - if err != nil { - log.Error().Err(err).Msg("error writing mtime") - } - // device and inode have no meaning on windows - err = binary.Write(h, binary.BigEndian, fi.Size()) - if err != nil { - log.Error().Err(err).Msg("error writing size") - } - etag := fmt.Sprintf(`"%x"`, h.Sum(nil)) - return fmt.Sprintf("\"%s\"", strings.Trim(etag, "\"")) -} - -func (fs *ocfs) GetQuota(ctx context.Context, ref *provider.Reference) (uint64, uint64, error) { - // TODO quota of which storage space? - // we could use the logged in user, but when a user has access to multiple storages this falls short - // for now return quota of root - var free, total, avail uint64 - - pathPtr, err := windows.UTF16PtrFromString(fs.toInternalPath(ctx, "/")) - if err != nil { - return 0, 0, err - } - err = windows.GetDiskFreeSpaceEx(pathPtr, &avail, &total, &free) - if err != nil { - return 0, 0, err - } - - used := total - free - return total, used, nil -} diff --git a/pkg/storage/fs/owncloud/upload.go b/pkg/storage/fs/owncloud/upload.go deleted file mode 100644 index 04df416185..0000000000 --- a/pkg/storage/fs/owncloud/upload.go +++ /dev/null @@ -1,554 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package owncloud - -import ( - "context" - "crypto/md5" - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "hash/adler32" - "io" - "os" - "path/filepath" - "strings" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/logger" - "github.com/cs3org/reva/pkg/storage/utils/chunking" - "github.com/cs3org/reva/pkg/storage/utils/templates" - "github.com/cs3org/reva/pkg/utils" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/pkg/xattr" - "github.com/rs/zerolog" - tusd "github.com/tus/tusd/pkg/handler" -) - -var defaultFilePerm = os.FileMode(0664) - -func (fs *ocfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) error { - upload, err := fs.GetUpload(ctx, ref.GetPath()) - if err != nil { - return errors.Wrap(err, "ocfs: error retrieving upload") - } - - uploadInfo := upload.(*fileUpload) - - p := uploadInfo.info.Storage["InternalDestination"] - ok, err := chunking.IsChunked(p) - if err != nil { - return errors.Wrap(err, "ocfs: error checking path") - } - if ok { - var assembledFile string - p, assembledFile, err = fs.chunkHandler.WriteChunk(p, r) - if err != nil { - return err - } - if p == "" { - if err = uploadInfo.Terminate(ctx); err != nil { - return errors.Wrap(err, "ocfs: error removing auxiliary files") - } - return errtypes.PartialContent(ref.String()) - } - uploadInfo.info.Storage["InternalDestination"] = p - fd, err := os.Open(assembledFile) - if err != nil { - return errors.Wrap(err, "ocfs: error opening assembled file") - } - defer fd.Close() - defer 
os.RemoveAll(assembledFile) - r = fd - } - - if _, err := uploadInfo.WriteChunk(ctx, 0, r); err != nil { - return errors.Wrap(err, "ocfs: error writing to binary file") - } - - return uploadInfo.FinishUpload(ctx) -} - -// InitiateUpload returns upload ids corresponding to different protocols it supports -// TODO read optional content for small files in this request. -func (fs *ocfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving reference") - } - - // permissions are checked in NewUpload below - - p := fs.toStoragePath(ctx, ip) - - info := tusd.FileInfo{ - MetaData: tusd.MetaData{ - "filename": filepath.Base(p), - "dir": filepath.Dir(p), - }, - Size: uploadLength, - } - - if metadata != nil { - if metadata["mtime"] != "" { - info.MetaData["mtime"] = metadata["mtime"] - } - if _, ok := metadata["sizedeferred"]; ok { - info.SizeIsDeferred = true - } - } - - upload, err := fs.NewUpload(ctx, info) - if err != nil { - return nil, err - } - - info, _ = upload.GetInfo(ctx) - - return map[string]string{ - "simple": info.ID, - "tus": info.ID, - }, nil -} - -// UseIn tells the tus upload middleware which extensions it supports. 
-func (fs *ocfs) UseIn(composer *tusd.StoreComposer) { - composer.UseCore(fs) - composer.UseTerminater(fs) - composer.UseConcater(fs) - composer.UseLengthDeferrer(fs) -} - -// To implement the core tus.io protocol as specified in https://tus.io/protocols/resumable-upload.html#core-protocol -// - the storage needs to implement NewUpload and GetUpload -// - the upload needs to implement the tusd.Upload interface: WriteChunk, GetInfo, GetReader and FinishUpload - -func (fs *ocfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { - log := appctx.GetLogger(ctx) - log.Debug().Interface("info", info).Msg("ocfs: NewUpload") - - if info.MetaData["filename"] == "" { - return nil, errors.New("ocfs: missing filename in metadata") - } - info.MetaData["filename"] = filepath.Clean(info.MetaData["filename"]) - - dir := info.MetaData["dir"] - if dir == "" { - return nil, errors.New("ocfs: missing dir in metadata") - } - info.MetaData["dir"] = filepath.Clean(info.MetaData["dir"]) - - ip := fs.toInternalPath(ctx, filepath.Join(info.MetaData["dir"], info.MetaData["filename"])) - - // check permissions - var perm *provider.ResourcePermissions - var perr error - // if destination exists - if _, err := os.Stat(ip); err == nil { - // check permissions of file to be overwritten - perm, perr = fs.readPermissions(ctx, ip) - } else { - // check permissions of parent folder - perm, perr = fs.readPermissions(ctx, filepath.Dir(ip)) - } - if perr == nil { - if !perm.InitiateFileUpload { - return nil, errtypes.PermissionDenied("") - } - } else { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - log.Debug().Interface("info", info).Msg("ocfs: resolved filename") - - info.ID = uuid.New().String() - - binPath, err := fs.getUploadPath(ctx, info.ID) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving upload path") - } - usr 
:= ctxpkg.ContextMustGetUser(ctx) - info.Storage = map[string]string{ - "Type": "OwnCloudStore", - "BinPath": binPath, - "InternalDestination": ip, - - "Idp": usr.Id.Idp, - "UserId": usr.Id.OpaqueId, - "UserType": utils.UserTypeToString(usr.Id.Type), - "UserName": usr.Username, - - "LogLevel": log.GetLevel().String(), - } - // Create binary file in the upload folder with no content - log.Debug().Interface("info", info).Msg("ocfs: built storage info") - file, err := os.OpenFile(binPath, os.O_CREATE|os.O_WRONLY, defaultFilePerm) - if err != nil { - return nil, err - } - defer file.Close() - - u := &fileUpload{ - info: info, - binPath: binPath, - infoPath: filepath.Join(fs.c.UploadInfoDir, info.ID+".info"), - fs: fs, - ctx: ctx, - } - - // writeInfo creates the file by itself if necessary - err = u.writeInfo() - if err != nil { - return nil, err - } - - return u, nil -} - -func (fs *ocfs) getUploadPath(ctx context.Context, uploadID string) (string, error) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") - return "", err - } - layout := templates.WithUser(u, fs.c.UserLayout) - return filepath.Join(fs.c.DataDirectory, layout, "uploads", uploadID), nil -} - -// GetUpload returns the Upload for the given upload id. 
-func (fs *ocfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) { - infoPath := filepath.Join(fs.c.UploadInfoDir, filepath.Join("/", id+".info")) - - info := tusd.FileInfo{} - data, err := os.ReadFile(infoPath) - if err != nil { - return nil, err - } - if err := json.Unmarshal(data, &info); err != nil { - return nil, err - } - - stat, err := os.Stat(info.Storage["BinPath"]) - if err != nil { - return nil, err - } - - info.Offset = stat.Size() - - u := &userpb.User{ - Id: &userpb.UserId{ - Idp: info.Storage["Idp"], - OpaqueId: info.Storage["UserId"], - Type: utils.UserTypeMap(info.Storage["UserType"]), - }, - Username: info.Storage["UserName"], - } - - ctx = ctxpkg.ContextSetUser(ctx, u) - // TODO configure the logger the same way ... store and add traceid in file info - - var opts []logger.Option - opts = append(opts, logger.WithLevel(info.Storage["LogLevel"])) - opts = append(opts, logger.WithWriter(os.Stderr, logger.ConsoleMode)) - l := logger.New(opts...) - - sub := l.With().Int("pid", os.Getpid()).Logger() - - ctx = appctx.WithLogger(ctx, &sub) - - return &fileUpload{ - info: info, - binPath: info.Storage["BinPath"], - infoPath: infoPath, - fs: fs, - ctx: ctx, - }, nil -} - -type fileUpload struct { - // info stores the current information about the upload - info tusd.FileInfo - // infoPath is the path to the .info file - infoPath string - // binPath is the path to the binary file (which has no extension) - binPath string - // only fs knows how to handle metadata and versions - fs *ocfs - // a context with a user - // TODO add logger as well? - ctx context.Context -} - -// GetInfo returns the FileInfo. -func (upload *fileUpload) GetInfo(ctx context.Context) (tusd.FileInfo, error) { - return upload.info, nil -} - -// WriteChunk writes the stream from the reader to the given offset of the upload. 
-func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) { - file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) - if err != nil { - return 0, err - } - defer file.Close() - - n, err := io.Copy(file, src) - - // If the HTTP PATCH request gets interrupted in the middle (e.g. because - // the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF. - // However, for OwnCloudStore it's not important whether the stream has ended - // on purpose or accidentally. - if err != nil { - if err != io.ErrUnexpectedEOF { - return n, err - } - } - - upload.info.Offset += n - err = upload.writeInfo() // TODO info is written here ... we need to truncate in DiscardChunk - - return n, err -} - -// GetReader returns an io.Reader for the upload. -func (upload *fileUpload) GetReader(ctx context.Context) (io.Reader, error) { - return os.Open(upload.binPath) -} - -// writeInfo updates the entire information. Everything will be overwritten. -func (upload *fileUpload) writeInfo() error { - data, err := json.Marshal(upload.info) - if err != nil { - return err - } - return os.WriteFile(upload.infoPath, data, defaultFilePerm) -} - -// FinishUpload finishes an upload and moves the file to the internal destination. 
-func (upload *fileUpload) FinishUpload(ctx context.Context) error { - log := appctx.GetLogger(upload.ctx) - - sha1Sum := make([]byte, 0, 32) - md5Sum := make([]byte, 0, 32) - adler32Sum := make([]byte, 0, 32) - { - sha1h := sha1.New() - md5h := md5.New() - adler32h := adler32.New() - f, err := os.Open(upload.binPath) - if err != nil { - log.Err(err).Msg("Decomposedfs: could not open file for checksumming") - // we can continue if no oc checksum header is set - } - defer f.Close() - - r1 := io.TeeReader(f, sha1h) - r2 := io.TeeReader(r1, md5h) - - if _, err := io.Copy(adler32h, r2); err != nil { - log.Err(err).Msg("Decomposedfs: could not copy bytes for checksumming") - } - - sha1Sum = sha1h.Sum(sha1Sum) - md5Sum = md5h.Sum(md5Sum) - adler32Sum = adler32h.Sum(adler32Sum) - } - - if upload.info.MetaData["checksum"] != "" { - parts := strings.SplitN(upload.info.MetaData["checksum"], " ", 2) - if len(parts) != 2 { - return errtypes.BadRequest("invalid checksum format. must be '[algorithm] [checksum]'") - } - var err error - switch parts[0] { - case "sha1": - err = upload.checkHash(parts[1], sha1Sum) - case "md5": - err = upload.checkHash(parts[1], md5Sum) - case "adler32": - err = upload.checkHash(parts[1], adler32Sum) - default: - err = errtypes.BadRequest("unsupported checksum algorithm: " + parts[0]) - } - if err != nil { - return err - } - } - - ip := upload.info.Storage["InternalDestination"] - - // if destination exists - // TODO check etag with If-Match header - if _, err := os.Stat(ip); err == nil { - // copy attributes of existing file to tmp file - if err := upload.fs.copyMD(ip, upload.binPath); err != nil { - return errors.Wrap(err, "ocfs: error copying metadata from "+ip+" to "+upload.binPath) - } - // create revision - if err := upload.fs.archiveRevision(upload.ctx, upload.fs.getVersionsPath(upload.ctx, ip), ip); err != nil { - return err - } - } - - err := os.Rename(upload.binPath, ip) - if err != nil { - log.Err(err).Interface("info", upload.info). 
- Str("binPath", upload.binPath). - Str("ipath", ip). - Msg("ocfs: could not rename") - return err - } - - // only delete the upload if it was successfully written to the storage - if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - log.Err(err).Interface("info", upload.info).Msg("ocfs: could not delete upload info") - return err - } - } - - if upload.info.MetaData["mtime"] != "" { - err := upload.fs.setMtime(ctx, ip, upload.info.MetaData["mtime"]) - if err != nil { - log.Err(err).Interface("info", upload.info).Msg("ocfs: could not set mtime metadata") - return err - } - } - - // now try write all checksums - tryWritingChecksum(log, ip, "sha1", sha1Sum) - tryWritingChecksum(log, ip, "md5", md5Sum) - tryWritingChecksum(log, ip, "adler32", adler32Sum) - - return upload.fs.propagate(upload.ctx, ip) -} - -// To implement the termination extension as specified in https://tus.io/protocols/resumable-upload.html#termination -// - the storage needs to implement AsTerminatableUpload -// - the upload needs to implement Terminate - -// AsTerminatableUpload returns a TerminatableUpload. -func (fs *ocfs) AsTerminatableUpload(upload tusd.Upload) tusd.TerminatableUpload { - return upload.(*fileUpload) -} - -// Terminate terminates the upload. -func (upload *fileUpload) Terminate(ctx context.Context) error { - if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - return err - } - } - if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { - return err - } - } - return nil -} - -// To implement the creation-defer-length extension as specified in https://tus.io/protocols/resumable-upload.html#creation -// - the storage needs to implement AsLengthDeclarableUpload -// - the upload needs to implement DeclareLength - -// AsLengthDeclarableUpload returns a LengthDeclarableUpload. 
-func (fs *ocfs) AsLengthDeclarableUpload(upload tusd.Upload) tusd.LengthDeclarableUpload { - return upload.(*fileUpload) -} - -// DeclareLength updates the upload length information. -func (upload *fileUpload) DeclareLength(ctx context.Context, length int64) error { - upload.info.Size = length - upload.info.SizeIsDeferred = false - return upload.writeInfo() -} - -// To implement the concatenation extension as specified in https://tus.io/protocols/resumable-upload.html#concatenation -// - the storage needs to implement AsConcatableUpload -// - the upload needs to implement ConcatUploads - -// AsConcatableUpload returns a ConcatableUpload. -func (fs *ocfs) AsConcatableUpload(upload tusd.Upload) tusd.ConcatableUpload { - return upload.(*fileUpload) -} - -// ConcatUploads concatenates multiple uploads. -func (upload *fileUpload) ConcatUploads(ctx context.Context, uploads []tusd.Upload) (err error) { - file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) - if err != nil { - return err - } - defer file.Close() - - for _, partialUpload := range uploads { - fileUpload := partialUpload.(*fileUpload) - - src, err := os.Open(fileUpload.binPath) - if err != nil { - return err - } - - if _, err := io.Copy(file, src); err != nil { - return err - } - } - - return -} - -func (upload *fileUpload) checkHash(expected string, h []byte) error { - if expected != hex.EncodeToString(h) { - upload.discardChunk() - return errtypes.ChecksumMismatch(fmt.Sprintf("invalid checksum: expected %s got %x", upload.info.MetaData["checksum"], h)) - } - return nil -} - -func (upload *fileUpload) discardChunk() { - if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { - appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("binPath", upload.binPath).Interface("info", upload.info).Msg("Decomposedfs: could not discard chunk") - return - } - } - if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - 
appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("infoPath", upload.infoPath).Interface("info", upload.info).Msg("Decomposedfs: could not discard chunk info") - return - } - } -} - -func tryWritingChecksum(log *zerolog.Logger, path, algo string, h []byte) { - if err := xattr.Set(path, checksumPrefix+algo, h); err != nil { - log.Err(err). - Str("csType", algo). - Bytes("hash", h). - Msg("ocfs: could not write checksum") - } -} diff --git a/pkg/storage/fs/owncloudsql/filecache/filecache.go b/pkg/storage/fs/owncloudsql/filecache/filecache.go deleted file mode 100644 index ff4117a0e6..0000000000 --- a/pkg/storage/fs/owncloudsql/filecache/filecache.go +++ /dev/null @@ -1,697 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package filecache - -import ( - "crypto/md5" - "database/sql" - "encoding/hex" - "fmt" - "path/filepath" - "regexp" - "strconv" - "strings" - "time" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - conversions "github.com/cs3org/reva/internal/http/services/owncloud/ocs/conversions" - "github.com/pkg/errors" - "github.com/rs/zerolog/log" -) - -// Cache represents a oc10-style file cache. 
-type Cache struct { - driver string - db *sql.DB -} - -// NewMysql returns a new Cache instance connecting to a MySQL database. -func NewMysql(dsn string) (*Cache, error) { - sqldb, err := sql.Open("mysql", dsn) - if err != nil { - return nil, errors.Wrap(err, "error connecting to the database") - } - sqldb.SetConnMaxLifetime(time.Minute * 3) - sqldb.SetMaxOpenConns(10) - sqldb.SetMaxIdleConns(10) - - err = sqldb.Ping() - if err != nil { - return nil, errors.Wrap(err, "error connecting to the database") - } - - return New("mysql", sqldb) -} - -// New returns a new Cache instance connecting to the given sql.DB. -func New(driver string, sqldb *sql.DB) (*Cache, error) { - return &Cache{ - driver: driver, - db: sqldb, - }, nil -} - -// GetNumericStorageID returns the database id for the given storage. -func (c *Cache) GetNumericStorageID(id string) (int, error) { - row := c.db.QueryRow("SELECT numeric_id FROM oc_storages WHERE id = ?", id) - var nid int - switch err := row.Scan(&nid); err { - case nil: - return nid, nil - default: - return -1, err - } -} - -// CreateStorage creates a new storage and returns its numeric id. -func (c *Cache) CreateStorage(id string) (int, error) { - tx, err := c.db.Begin() - if err != nil { - return -1, err - } - defer func() { _ = tx.Rollback() }() - - stmt, err := tx.Prepare("INSERT INTO oc_storages(id) VALUES(?)") - if err != nil { - return -1, err - } - defer stmt.Close() - - res, err := stmt.Exec(id) - if err != nil { - return -1, err - } - insertedID, err := res.LastInsertId() - if err != nil { - return -1, err - } - - data := map[string]interface{}{ - "path": "", - "etag": "", - "mimetype": "httpd/unix-directory", - } - _, err = c.doInsertOrUpdate(tx, int(insertedID), data, true) - if err != nil { - return -1, err - } - - err = tx.Commit() - if err != nil { - return -1, err - } - - return int(insertedID), err -} - -// GetStorageOwner returns the username of the owner of the given storage. 
-func (c *Cache) GetStorageOwner(numericID interface{}) (string, error) { - numericID, err := toIntID(numericID) - if err != nil { - return "", err - } - row := c.db.QueryRow("SELECT id FROM oc_storages WHERE numeric_id = ?", numericID) - var id string - switch err := row.Scan(&id); err { - case nil: - return strings.TrimPrefix(id, "home::"), nil - default: - return "", err - } -} - -// GetStorageOwnerByFileID returns the username of the owner of the given entry. -func (c *Cache) GetStorageOwnerByFileID(numericID interface{}) (string, error) { - numericID, err := toIntID(numericID) - if err != nil { - return "", err - } - row := c.db.QueryRow("SELECT id FROM oc_storages storages, oc_filecache cache WHERE storages.numeric_id = cache.storage AND cache.fileid = ?", numericID) - var id string - switch err := row.Scan(&id); err { - case nil: - return strings.TrimPrefix(id, "home::"), nil - default: - return "", err - } -} - -// File represents an entry of the file cache. -type File struct { - ID int - Storage int - Parent int - MimePart int - MimeType int - MimeTypeString string - Size int - MTime int - StorageMTime int - UnencryptedSize int - Permissions int - Encrypted bool - Path string - Name string - Etag string - Checksum string -} - -// TrashItem represents a trash item of the file cache. -type TrashItem struct { - ID int - Name string - User string - Path string - Timestamp int -} - -// Scannable describes the interface providing a Scan method. 
-type Scannable interface { - Scan(...interface{}) error -} - -func (c *Cache) rowToFile(row Scannable) (*File, error) { - var fileid, storage, parent, mimetype, mimepart, size, mtime, storageMtime, encrypted, unencryptedSize int - var permissions sql.NullInt32 - var path, name, etag, checksum, mimetypestring sql.NullString - err := row.Scan(&fileid, &storage, &path, &parent, &permissions, &mimetype, &mimepart, &mimetypestring, &size, &mtime, &storageMtime, &encrypted, &unencryptedSize, &name, &etag, &checksum) - if err != nil { - return nil, err - } - - return &File{ - ID: fileid, - Storage: storage, - Path: path.String, - Parent: parent, - Permissions: int(permissions.Int32), - MimeType: mimetype, - MimeTypeString: mimetypestring.String, - MimePart: mimepart, - Size: size, - MTime: mtime, - StorageMTime: storageMtime, - Encrypted: encrypted == 1, - UnencryptedSize: unencryptedSize, - Name: name.String, - Etag: etag.String, - Checksum: checksum.String, - }, nil -} - -// Get returns the cache entry for the specified storage/path. -func (c *Cache) Get(s interface{}, p string) (*File, error) { - storageID, err := toIntID(s) - if err != nil { - return nil, err - } - - phashBytes := md5.Sum([]byte(p)) - phash := hex.EncodeToString(phashBytes[:]) - - row := c.db.QueryRow(` - SELECT - fc.fileid, fc.storage, fc.path, fc.parent, fc.permissions, fc.mimetype, fc.mimepart, - mt.mimetype, fc.size, fc.mtime, fc.storage_mtime, fc.encrypted, fc.unencrypted_size, - fc.name, fc.etag, fc.checksum - FROM oc_filecache fc - LEFT JOIN oc_mimetypes mt ON fc.mimetype = mt.id - WHERE path_hash = ? AND storage = ?`, phash, storageID) - return c.rowToFile(row) -} - -// Path returns the path for the specified entry. 
-func (c *Cache) Path(id interface{}) (string, error) { - id, err := toIntID(id) - if err != nil { - return "", err - } - - row := c.db.QueryRow("SELECT path FROM oc_filecache WHERE fileid = ?", id) - var path string - err = row.Scan(&path) - if err != nil { - return "", err - } - return path, nil -} - -// List returns the list of entries below the given path. -func (c *Cache) List(storage interface{}, p string) ([]*File, error) { - storageID, err := toIntID(storage) - if err != nil { - return nil, err - } - - var rows *sql.Rows - phash := fmt.Sprintf("%x", md5.Sum([]byte(strings.Trim(p, "/")))) - rows, err = c.db.Query(` - SELECT - fc.fileid, fc.storage, fc.path, fc.parent, fc.permissions, fc.mimetype, fc.mimepart, - mt.mimetype, fc.size, fc.mtime, fc.storage_mtime, fc.encrypted, fc.unencrypted_size, - fc.name, fc.etag, fc.checksum - FROM oc_filecache fc - LEFT JOIN oc_mimetypes mt ON fc.mimetype = mt.id - WHERE storage = ? AND parent = (SELECT fileid FROM oc_filecache WHERE storage = ? AND path_hash=?) AND name IS NOT NULL - `, storageID, storageID, phash) - if err != nil { - return nil, err - } - defer rows.Close() - entries := []*File{} - for rows.Next() { - entry, err := c.rowToFile(rows) - if err != nil { - return nil, err - } - entries = append(entries, entry) - } - - return entries, nil -} - -// Permissions returns the permissions for the specified storage/path. -func (c *Cache) Permissions(storage interface{}, p string) (*provider.ResourcePermissions, error) { - entry, err := c.Get(storage, p) - if err != nil { - return nil, err - } - - perms, err := conversions.NewPermissions(entry.Permissions) - if err != nil { - return nil, err - } - - return conversions.RoleFromOCSPermissions(perms).CS3ResourcePermissions(), nil -} - -// InsertOrUpdate creates or updates a cache entry. 
-func (c *Cache) InsertOrUpdate(storage interface{}, data map[string]interface{}, allowEmptyParent bool) (int, error) { - tx, err := c.db.Begin() - if err != nil { - return -1, err - } - defer func() { _ = tx.Rollback() }() - - id, err := c.doInsertOrUpdate(tx, storage, data, allowEmptyParent) - if err != nil { - return -1, err - } - - err = tx.Commit() - if err != nil { - return -1, err - } - - return id, err -} - -func (c *Cache) doInsertOrUpdate(tx *sql.Tx, storage interface{}, data map[string]interface{}, allowEmptyParent bool) (int, error) { - storageID, err := toIntID(storage) - if err != nil { - return -1, err - } - - columns := []string{"storage"} - placeholders := []string{"?"} - values := []interface{}{storage} - - for _, key := range []string{"path", "mimetype", "etag"} { - if _, exists := data[key]; !exists { - return -1, fmt.Errorf("missing required data") - } - } - - path := data["path"].(string) - data["name"] = filepath.Base(path) - if data["name"] == "." { - data["name"] = "" - } - - parentPath := strings.TrimRight(filepath.Dir(path), "/") - if parentPath == "." 
{ - parentPath = "" - } - if path == "" { - data["parent"] = -1 - } else { - parent, err := c.Get(storageID, parentPath) - if err == nil { - data["parent"] = parent.ID - } else { - if allowEmptyParent { - data["parent"] = -1 - } else { - return -1, fmt.Errorf("could not find parent %s, %s, %v, %w", parentPath, path, parent, err) - } - } - } - - if _, exists := data["checksum"]; !exists { - data["checksum"] = "" - } - - for k, v := range data { - switch k { - case "path": - phashBytes := md5.Sum([]byte(v.(string))) - phash := hex.EncodeToString(phashBytes[:]) - columns = append(columns, "path_hash") - values = append(values, phash) - placeholders = append(placeholders, "?") - case "storage_mtime": - if _, exists := data["mtime"]; !exists { - columns = append(columns, "mtime") - values = append(values, v) - placeholders = append(placeholders, "?") - } - case "mimetype": - parts := strings.Split(v.(string), "/") - columns = append(columns, "mimetype") - values = append(values, v) - placeholders = append(placeholders, "(SELECT id FROM oc_mimetypes WHERE mimetype=?)") - columns = append(columns, "mimepart") - values = append(values, parts[0]) - placeholders = append(placeholders, "(SELECT id FROM oc_mimetypes WHERE mimetype=?)") - continue - } - - columns = append(columns, k) - values = append(values, v) - placeholders = append(placeholders, "?") - } - - err = c.insertMimetype(tx, data["mimetype"].(string)) - if err != nil { - return -1, err - } - - query := "INSERT INTO oc_filecache( " + strings.Join(columns, ", ") + ") VALUES(" + strings.Join(placeholders, ",") + ")" - - updates := []string{} - for i, column := range columns { - if column != "path" && column != "path_hash" && column != "storage" { - updates = append(updates, column+"="+placeholders[i]) - values = append(values, values[i]) - } - } - if c.driver == "mysql" { // mysql upsert - query += " ON DUPLICATE KEY UPDATE " - } else { // sqlite3 upsert - query += " ON CONFLICT(storage,path_hash) DO UPDATE SET " - } 
- query += strings.Join(updates, ",") - - stmt, err := tx.Prepare(query) - if err != nil { - return -1, err - } - - res, err := stmt.Exec(values...) - if err != nil { - log.Err(err).Msg("could not store filecache item") - return -1, err - } - id, err := res.LastInsertId() - if err != nil { - return -1, err - } - return int(id), nil -} - -// Copy creates a copy of the specified entry at the target path. -func (c *Cache) Copy(storage interface{}, sourcePath, targetPath string) (int, error) { - storageID, err := toIntID(storage) - if err != nil { - return -1, err - } - source, err := c.Get(storageID, sourcePath) - if err != nil { - return -1, errors.Wrap(err, "could not find source") - } - - row := c.db.QueryRow("SELECT mimetype FROM oc_mimetypes WHERE id=?", source.MimeType) - var mimetype string - err = row.Scan(&mimetype) - if err != nil { - return -1, errors.Wrap(err, "could not find source mimetype") - } - - data := map[string]interface{}{ - "path": targetPath, - "checksum": source.Checksum, - "mimetype": mimetype, - "permissions": source.Permissions, - "etag": source.Etag, - "size": source.Size, - "mtime": source.MTime, - "storage_mtime": source.StorageMTime, - "encrypted": source.Encrypted, - "unencrypted_size": source.UnencryptedSize, - } - return c.InsertOrUpdate(storage, data, false) -} - -// Move moves the specified entry to the target path. 
-func (c *Cache) Move(storage interface{}, sourcePath, targetPath string) error { - storageID, err := toIntID(storage) - if err != nil { - return err - } - source, err := c.Get(storageID, sourcePath) - if err != nil { - return errors.Wrap(err, "could not find source") - } - newParentPath := strings.TrimRight(filepath.Dir(targetPath), "/") - newParent, err := c.Get(storageID, newParentPath) - if err != nil { - return errors.Wrap(err, "could not find new parent") - } - - tx, err := c.db.Begin() - if err != nil { - return err - } - defer func() { _ = tx.Rollback() }() - stmt, err := tx.Prepare("UPDATE oc_filecache SET parent=?, path=?, name=?, path_hash=? WHERE storage = ? AND fileid=?") - if err != nil { - return err - } - defer stmt.Close() - phashBytes := md5.Sum([]byte(targetPath)) - _, err = stmt.Exec(newParent.ID, targetPath, filepath.Base(targetPath), hex.EncodeToString(phashBytes[:]), storageID, source.ID) - if err != nil { - return err - } - - childRows, err := tx.Query("SELECT fileid, path FROM oc_filecache WHERE parent = ?", source.ID) - if err != nil { - return err - } - defer childRows.Close() - children := map[int]string{} - for childRows.Next() { - var ( - id int - path string - ) - err = childRows.Scan(&id, &path) - if err != nil { - return err - } - - children[id] = path - } - for id, path := range children { - path = strings.ReplaceAll(path, sourcePath, targetPath) - phashBytes = md5.Sum([]byte(path)) - _, err = stmt.Exec(source.ID, path, filepath.Base(path), hex.EncodeToString(phashBytes[:]), storageID, id) - if err != nil { - return err - } - } - - return tx.Commit() -} - -// Purge removes the specified storage/path from the cache without putting it into the trash. -func (c *Cache) Purge(storage interface{}, path string) error { - storageID, err := toIntID(storage) - if err != nil { - return err - } - phashBytes := md5.Sum([]byte(path)) - phash := hex.EncodeToString(phashBytes[:]) - _, err = c.db.Exec("DELETE FROM oc_filecache WHERE storage = ? 
and path_hash = ?", storageID, phash) - return err -} - -// Delete removes the specified storage/path from the cache. -func (c *Cache) Delete(storage interface{}, user, path, trashPath string) error { - err := c.Move(storage, path, trashPath) - if err != nil { - return err - } - - re := regexp.MustCompile(`(.*)\.d(\d+)$`) - parts := re.FindStringSubmatch(filepath.Base(trashPath)) - - query := "INSERT INTO oc_files_trash(user,id,timestamp,location) VALUES(?,?,?,?)" - stmt, err := c.db.Prepare(query) - if err != nil { - return err - } - - relativeLocation, err := filepath.Rel("files/", filepath.Dir(path)) - if err != nil { - return err - } - _, err = stmt.Exec(user, filepath.Base(parts[1]), parts[2], relativeLocation) - if err != nil { - log.Err(err).Msg("could not store filecache item") - return err - } - - return nil -} - -// GetRecycleItem returns the specified recycle item. -func (c *Cache) GetRecycleItem(user, path string, timestamp int) (*TrashItem, error) { - row := c.db.QueryRow("SELECT auto_id, id, location FROM oc_files_trash WHERE id = ? AND user = ? AND timestamp = ?", path, user, timestamp) - var autoID int - var id, location string - err := row.Scan(&autoID, &id, &location) - if err != nil { - return nil, err - } - - return &TrashItem{ - ID: autoID, - Name: id, - User: user, - Path: location, - Timestamp: timestamp, - }, nil -} - -// EmptyRecycle clears the recycle bin for the given user. -func (c *Cache) EmptyRecycle(user string) error { - _, err := c.db.Exec("DELETE FROM oc_files_trash WHERE user = ?", user) - if err != nil { - return err - } - - storage, err := c.GetNumericStorageID("home::" + user) - if err != nil { - return err - } - - _, err = c.db.Exec("DELETE FROM oc_filecache WHERE storage = ? AND PATH LIKE ?", storage, "files_trashbin/%") - return err -} - -// DeleteRecycleItem deletes the specified item from the trash. 
-func (c *Cache) DeleteRecycleItem(user, path string, timestamp int) error { - _, err := c.db.Exec("DELETE FROM oc_files_trash WHERE id = ? AND user = ? AND timestamp = ?", path, user, timestamp) - return err -} - -// PurgeRecycleItem deletes the specified item from the filecache and the trash. -func (c *Cache) PurgeRecycleItem(user, path string, timestamp int, isVersionFile bool) error { - row := c.db.QueryRow("SELECT auto_id, location FROM oc_files_trash WHERE id = ? AND user = ? AND timestamp = ?", path, user, timestamp) - var autoID int - var location string - err := row.Scan(&autoID, &location) - if err != nil { - return err - } - - _, err = c.db.Exec("DELETE FROM oc_files_trash WHERE auto_id=?", autoID) - if err != nil { - return err - } - - storage, err := c.GetNumericStorageID("home::" + user) - if err != nil { - return err - } - trashType := "files" - if isVersionFile { - trashType = "versions" - } - item, err := c.Get(storage, filepath.Join("files_trashbin", trashType, path+".d"+strconv.Itoa(timestamp))) - if err != nil { - return err - } - _, err = c.db.Exec("DELETE FROM oc_filecache WHERE fileid=? OR parent=?", item.ID, item.ID) - - return err -} - -// SetEtag set a new etag for the specified item. -func (c *Cache) SetEtag(storage interface{}, path, etag string) error { - storageID, err := toIntID(storage) - if err != nil { - return err - } - source, err := c.Get(storageID, path) - if err != nil { - return errors.Wrap(err, "could not find source") - } - stmt, err := c.db.Prepare("UPDATE oc_filecache SET etag=? WHERE storage = ? 
AND fileid=?") - if err != nil { - return err - } - _, err = stmt.Exec(etag, storageID, source.ID) - return err -} - -func (c *Cache) insertMimetype(tx *sql.Tx, mimetype string) error { - insertPart := func(v string) error { - stmt, err := tx.Prepare("INSERT INTO oc_mimetypes(mimetype) VALUES(?)") - if err != nil { - return err - } - _, err = stmt.Exec(v) - if err != nil { - if strings.Contains(err.Error(), "UNIQUE") || strings.Contains(err.Error(), "Error 1062") { - return nil // Already exists - } - return err - } - return nil - } - parts := strings.Split(mimetype, "/") - err := insertPart(parts[0]) - if err != nil { - return err - } - return insertPart(mimetype) -} - -func toIntID(rid interface{}) (int, error) { - switch t := rid.(type) { - case int: - return t, nil - case string: - return strconv.Atoi(t) - default: - return -1, fmt.Errorf("invalid type") - } -} diff --git a/pkg/storage/fs/owncloudsql/filecache/filecache_suite_test.go b/pkg/storage/fs/owncloudsql/filecache/filecache_suite_test.go deleted file mode 100644 index 828a46f29a..0000000000 --- a/pkg/storage/fs/owncloudsql/filecache/filecache_suite_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package filecache_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestFilecache(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Filecache Suite") -} diff --git a/pkg/storage/fs/owncloudsql/filecache/filecache_test.go b/pkg/storage/fs/owncloudsql/filecache/filecache_test.go deleted file mode 100644 index a3157fca8a..0000000000 --- a/pkg/storage/fs/owncloudsql/filecache/filecache_test.go +++ /dev/null @@ -1,571 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package filecache_test - -import ( - "database/sql" - "os" - "strconv" - - "github.com/cs3org/reva/pkg/storage/fs/owncloudsql/filecache" - _ "github.com/mattn/go-sqlite3" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Filecache", func() { - var ( - cache *filecache.Cache - testDBFile *os.File - sqldb *sql.DB - ) - - BeforeEach(func() { - var err error - testDBFile, err = os.CreateTemp("", "example") - Expect(err).ToNot(HaveOccurred()) - - dbData, err := os.ReadFile("test.db") - Expect(err).ToNot(HaveOccurred()) - - _, err = testDBFile.Write(dbData) - Expect(err).ToNot(HaveOccurred()) - err = testDBFile.Close() - Expect(err).ToNot(HaveOccurred()) - - sqldb, err = sql.Open("sqlite3", testDBFile.Name()) - Expect(err).ToNot(HaveOccurred()) - - cache, err = filecache.New("sqlite3", sqldb) - Expect(err).ToNot(HaveOccurred()) - }) - - AfterEach(func() { - os.Remove(testDBFile.Name()) - }) - - Describe("GetNumericStorageID", func() { - It("returns the proper storage id", func() { - nid, err := cache.GetNumericStorageID("home::admin") - Expect(err).ToNot(HaveOccurred()) - Expect(nid).To(Equal(1)) - }) - }) - - Describe("GetStorageOwner", func() { - It("returns the owner", func() { - owner, err := cache.GetStorageOwner("1") - Expect(err).ToNot(HaveOccurred()) - Expect(owner).To(Equal("admin")) - }) - }) - - Describe("CreateStorage", func() { - It("creates the storage and a root item", func() { - id, err := cache.CreateStorage("bar") - Expect(err).ToNot(HaveOccurred()) - Expect(id > 0).To(BeTrue()) - - owner, err := cache.GetStorageOwner(id) - Expect(err).ToNot(HaveOccurred()) - Expect(owner).To(Equal("bar")) - - file, err := cache.Get(1, "") - Expect(err).ToNot(HaveOccurred()) - Expect(file).ToNot(BeNil()) - }) - }) - Describe("GetStorageOwnerByFileID", func() { - It("returns the owner", func() { - owner, err := cache.GetStorageOwnerByFileID("10") - Expect(err).ToNot(HaveOccurred()) - Expect(owner).To(Equal("admin")) - }) - }) - - Describe("Get", func() { - It("gets existing files", func() { - path := "files/Photos/Portugal.jpg" - file, err := cache.Get(1, path) - Expect(err).ToNot(HaveOccurred()) - Expect(file).ToNot(BeNil()) - 
Expect(file.ID).To(Equal(10)) - Expect(file.Storage).To(Equal(1)) - Expect(file.Path).To(Equal(path)) - Expect(file.Parent).To(Equal(9)) - Expect(file.MimeType).To(Equal(6)) - Expect(file.MimePart).To(Equal(5)) - Expect(file.MimeTypeString).To(Equal("image/jpeg")) - Expect(file.Size).To(Equal(243733)) - Expect(file.MTime).To(Equal(1619007009)) - Expect(file.StorageMTime).To(Equal(1619007009)) - Expect(file.Encrypted).To(BeFalse()) - Expect(file.UnencryptedSize).To(Equal(0)) - Expect(file.Name).To(Equal("Portugal.jpg")) - Expect(file.Etag).To(Equal("13cf411aefccd7183d3b117ccd0ac5f8")) - Expect(file.Checksum).To(Equal("SHA1:872adcabcb4e06bea6265200c0d71b12defe2df1 MD5:01b38c622feac31652d738a94e15e86b ADLER32:6959358d")) - }) - }) - - Describe("List", func() { - It("lists all entries", func() { - list, err := cache.List(1, "") - Expect(err).ToNot(HaveOccurred()) - Expect(len(list)).To(Equal(3)) - }) - - It("filters", func() { - list, err := cache.List(1, "files_trashbin/") - Expect(err).ToNot(HaveOccurred()) - Expect(len(list)).To(Equal(3)) - }) - - It("filters deep", func() { - list, err := cache.List(1, "files/Photos/") - Expect(err).ToNot(HaveOccurred()) - Expect(len(list)).To(Equal(3)) - }) - }) - - Describe("Path", func() { - It("returns the path", func() { - path, err := cache.Path(10) - Expect(err).ToNot(HaveOccurred()) - Expect(path).To(Equal("files/Photos/Portugal.jpg")) - }) - - It("returns the path when given a string id", func() { - path, err := cache.Path("10") - Expect(err).ToNot(HaveOccurred()) - Expect(path).To(Equal("files/Photos/Portugal.jpg")) - }) - }) - - Describe("InsertOrUpdate", func() { - Context("when inserting a new recored", func() { - It("checks for required fields", func() { - data := map[string]interface{}{ - "mimetype": "httpd/unix-directory", - "etag": "abcdefg", - } - _, err := cache.InsertOrUpdate(3, data, false) - Expect(err).To(MatchError("missing required data")) - - data = map[string]interface{}{ - "path": "files/Photos/foo.jpg", 
- "etag": "abcdefg", - } - _, err = cache.InsertOrUpdate(3, data, false) - Expect(err).To(MatchError("missing required data")) - - data = map[string]interface{}{ - "path": "files/Photos/foo.jpg", - "mimetype": "httpd/unix-directory", - } - _, err = cache.InsertOrUpdate(3, data, false) - Expect(err).To(MatchError("missing required data")) - }) - - It("inserts a new minimal entry", func() { - data := map[string]interface{}{ - "path": "files/Photos/foo.jpg", - "mimetype": "httpd/unix-directory", - "etag": "abcdefg", - } - id, err := cache.InsertOrUpdate(1, data, false) - Expect(err).ToNot(HaveOccurred()) - Expect(id).To(Equal(18)) - - entry, err := cache.Get(1, "files/Photos/foo.jpg") - Expect(err).ToNot(HaveOccurred()) - Expect(entry.Path).To(Equal("files/Photos/foo.jpg")) - Expect(entry.Name).To(Equal("foo.jpg")) - Expect(entry.MimeType).To(Equal(2)) - Expect(entry.MimePart).To(Equal(1)) - Expect(entry.Etag).To(Equal("abcdefg")) - }) - - It("inserts a complete entry", func() { - data := map[string]interface{}{ - "path": "files/Photos/foo.jpg", - "checksum": "SHA1: abcdefg", - "etag": "abcdefg", - "size": 1234, - "mimetype": "image/jpeg", - "mtime": 1617702482, - "storage_mtime": 1617702483, - "encrypted": true, - "unencrypted_size": 2000, - } - _, err := cache.InsertOrUpdate(1, data, false) - Expect(err).ToNot(HaveOccurred()) - - entry, err := cache.Get(1, "files/Photos/foo.jpg") - Expect(err).ToNot(HaveOccurred()) - Expect(entry.Path).To(Equal("files/Photos/foo.jpg")) - Expect(entry.Name).To(Equal("foo.jpg")) - Expect(entry.Checksum).To(Equal("SHA1: abcdefg")) - Expect(entry.Etag).To(Equal("abcdefg")) - Expect(entry.Size).To(Equal(1234)) - Expect(entry.MimeType).To(Equal(6)) - Expect(entry.MimePart).To(Equal(5)) - Expect(entry.MTime).To(Equal(1617702482)) - Expect(entry.StorageMTime).To(Equal(1617702483)) - Expect(entry.Encrypted).To(BeTrue()) - Expect(entry.UnencryptedSize).To(Equal(2000)) - }) - - It("sets the parent", func() { - data := map[string]interface{}{ - 
"path": "files/Photos/foo.jpg", - "mimetype": "httpd/unix-directory", - "etag": "abcdefg", - } - - _, err := cache.InsertOrUpdate(1, data, false) - Expect(err).ToNot(HaveOccurred()) - - entry, err := cache.Get(1, "files/Photos/foo.jpg") - Expect(err).ToNot(HaveOccurred()) - Expect(entry.Parent).To(Equal(9)) - }) - - It("sets the mtime storage_mtime if not set", func() { - data := map[string]interface{}{ - "path": "files/Photos/foo.jpg", - "mimetype": "httpd/unix-directory", - "etag": "abcdefg", - "storage_mtime": 1617702483, - } - - _, err := cache.InsertOrUpdate(1, data, false) - Expect(err).ToNot(HaveOccurred()) - - entry, err := cache.Get(1, "files/Photos/foo.jpg") - Expect(err).ToNot(HaveOccurred()) - Expect(entry.MTime).To(Equal(1617702483)) - }) - - It("sets the mimetype and part ids from the mimetype string", func() { - data := map[string]interface{}{ - "path": "files/Photos/foo.jpg", - "checksum": "SHA1: abcdefg", - "etag": "abcdefg", - "mimetype": "image/jpeg", - } - - _, err := cache.InsertOrUpdate(1, data, false) - Expect(err).ToNot(HaveOccurred()) - - entry, err := cache.Get(1, "files/Photos/foo.jpg") - Expect(err).ToNot(HaveOccurred()) - Expect(entry.MimeType).To(Equal(6)) - Expect(entry.MimePart).To(Equal(5)) - }) - - It("adds unknown mimetypes to the database", func() { - data := map[string]interface{}{ - "path": "files/Photos/foo.tiff", - "checksum": "SHA1: abcdefg", - "etag": "abcdefg", - "mimetype": "image/tiff", - } - - _, err := cache.InsertOrUpdate(1, data, false) - Expect(err).ToNot(HaveOccurred()) - - entry, err := cache.Get(1, "files/Photos/foo.tiff") - Expect(err).ToNot(HaveOccurred()) - Expect(entry.MimeType).To(Equal(9)) - Expect(entry.MimePart).To(Equal(5)) - }) - - It("does not add a . 
as the name for root entries", func() { - data := map[string]interface{}{ - "path": "", - "checksum": "SHA1: abcdefg", - "etag": "abcdefg", - "mimetype": "image/tiff", - } - - _, err := cache.InsertOrUpdate(1, data, false) - Expect(err).ToNot(HaveOccurred()) - - file, err := cache.Get(1, "") - Expect(err).ToNot(HaveOccurred()) - Expect(file).ToNot(BeNil()) - Expect(file.Name).To(Equal("")) - }) - }) - - Context("when updating an existing record", func() { - var ( - data map[string]interface{} - ) - - BeforeEach(func() { - data = map[string]interface{}{ - "path": "files/Photos/foo.jpg", - "mimetype": "httpd/unix-directory", - "etag": "abcdefg", - } - _, err := cache.InsertOrUpdate(1, data, false) - Expect(err).ToNot(HaveOccurred()) - }) - - It("updates the record", func() { - recordBefore, err := cache.Get(1, data["path"].(string)) - Expect(err).ToNot(HaveOccurred()) - - data["etag"] = "12345" - id, err := cache.InsertOrUpdate(1, data, false) - Expect(err).ToNot(HaveOccurred()) - Expect(id).To(Equal(recordBefore.ID)) - - recordAfter, err := cache.Get(1, data["path"].(string)) - Expect(err).ToNot(HaveOccurred()) - - Expect(recordBefore.Etag).To(Equal("abcdefg")) - Expect(recordAfter.Etag).To(Equal("12345")) - }) - - }) - }) - - Describe("Move", func() { - It("moves a file", func() { - err := cache.Move(1, "files/Photos/Portugal.jpg", "files/Documents/Portugal.jpg") - Expect(err).ToNot(HaveOccurred()) - - _, err = cache.Get(1, "files/Photos/Portugal.jpg") - Expect(err).To(HaveOccurred()) - - newEntry, err := cache.Get(1, "files/Documents/Portugal.jpg") - Expect(err).ToNot(HaveOccurred()) - Expect(newEntry.Path).To(Equal("files/Documents/Portugal.jpg")) - }) - - It("moves a file while changing its name", func() { - err := cache.Move(1, "files/Photos/Portugal.jpg", "files/Documents/Spain.jpg") - Expect(err).ToNot(HaveOccurred()) - - _, err = cache.Get(1, "files/Photos/Portugal.jpg") - Expect(err).To(HaveOccurred()) - - newEntry, err := cache.Get(1, 
"files/Documents/Spain.jpg") - Expect(err).ToNot(HaveOccurred()) - Expect(newEntry.Path).To(Equal("files/Documents/Spain.jpg")) - Expect(newEntry.Name).To(Equal("Spain.jpg")) - }) - - It("moves a directory", func() { - err := cache.Move(1, "files/Photos", "files/Foo") - Expect(err).ToNot(HaveOccurred()) - - _, err = cache.Get(1, "files/Photos") - Expect(err).To(HaveOccurred()) - - _, err = cache.Get(1, "files/Photos/Portugal.jpg") - Expect(err).To(HaveOccurred()) - newEntry, err := cache.Get(1, "files/Foo/Portugal.jpg") - Expect(err).ToNot(HaveOccurred()) - Expect(newEntry.Path).To(Equal("files/Foo/Portugal.jpg")) - }) - }) - - Describe("SetEtag", func() { - It("updates the etag", func() { - entry, err := cache.Get(1, "files/Photos/Portugal.jpg") - Expect(err).ToNot(HaveOccurred()) - Expect(entry.Etag).To(Equal("13cf411aefccd7183d3b117ccd0ac5f8")) - - err = cache.SetEtag(1, "files/Photos/Portugal.jpg", "foo") - Expect(err).ToNot(HaveOccurred()) - - entry, err = cache.Get(1, "files/Photos/Portugal.jpg") - Expect(err).ToNot(HaveOccurred()) - Expect(entry.Etag).To(Equal("foo")) - }) - }) - - Context("trash", func() { - var ( - filePath = "files/Photos/Portugal.jpg" - - data = map[string]interface{}{ - "path": "files_trashbin/files/Photos", - "mimetype": "httpd/unix-directory", - "etag": "abcdefg", - } - trashPathBase = "Portugal.jpg" - trashPathTimestamp = 1619007109 - trashPath = "files_trashbin/files/" + trashPathBase + ".d" + strconv.Itoa(trashPathTimestamp) - ) - - BeforeEach(func() { - _, err := cache.InsertOrUpdate(1, data, false) - Expect(err).ToNot(HaveOccurred()) - }) - - Describe("Delete", func() { - It("deletes an item", func() { - err := cache.Delete(1, "admin", filePath, trashPath) - Expect(err).ToNot(HaveOccurred()) - - _, err = cache.Get(1, "files/Photos/Portugal.jpg") - Expect(err).To(HaveOccurred()) - _, err = cache.Get(1, "files_trashbin/files/Portugal.jpg.d1619007109") - Expect(err).ToNot(HaveOccurred()) - }) - - It("creates an entry in the trash 
table", func() { - _, err := cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) - Expect(err).To(HaveOccurred()) - - err = cache.Delete(1, "admin", filePath, trashPath) - Expect(err).ToNot(HaveOccurred()) - - item, err := cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) - Expect(err).ToNot(HaveOccurred()) - Expect(item.Path).To(Equal("Photos")) - }) - - It("rewrites the path of the children", func() { - err := cache.Delete(1, "admin", "files/Photos", "files_trashbin/files/Photos.d1619007109") - Expect(err).ToNot(HaveOccurred()) - }) - }) - - Describe("EmptyRecycle", func() { - It("clears the recycle bin", func() { - err := cache.Delete(1, "admin", filePath, trashPath) - Expect(err).ToNot(HaveOccurred()) - - err = cache.EmptyRecycle("admin") - Expect(err).ToNot(HaveOccurred()) - - _, err = cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) - Expect(err).To(HaveOccurred()) - }) - }) - - Describe("DeleteRecycleItem", func() { - It("removes the item from the trash", func() { - err := cache.Delete(1, "admin", filePath, trashPath) - Expect(err).ToNot(HaveOccurred()) - - err = cache.DeleteRecycleItem("admin", trashPathBase, trashPathTimestamp) - Expect(err).ToNot(HaveOccurred()) - - _, err = cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) - Expect(err).To(HaveOccurred()) - }) - - It("does not remove the item from the file cache", func() { - err := cache.Delete(1, "admin", filePath, trashPath) - Expect(err).ToNot(HaveOccurred()) - - err = cache.DeleteRecycleItem("admin", trashPathBase, trashPathTimestamp) - Expect(err).ToNot(HaveOccurred()) - - _, err = cache.Get(1, trashPath) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - Describe("PurgeRecycleItem", func() { - It("removes the item from the database", func() { - err := cache.Delete(1, "admin", filePath, trashPath) - Expect(err).ToNot(HaveOccurred()) - - _, err = cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) - 
Expect(err).ToNot(HaveOccurred()) - - err = cache.PurgeRecycleItem("admin", trashPathBase, trashPathTimestamp, false) - Expect(err).ToNot(HaveOccurred()) - - _, err = cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) - Expect(err).To(HaveOccurred()) - }) - - It("removes the item from the filecache table", func() { - err := cache.Delete(1, "admin", filePath, trashPath) - Expect(err).ToNot(HaveOccurred()) - - err = cache.PurgeRecycleItem("admin", trashPathBase, trashPathTimestamp, false) - Expect(err).ToNot(HaveOccurred()) - - _, err = cache.Get(1, trashPath) - Expect(err).To(HaveOccurred()) - }) - - It("removes children from the filecache table", func() { - err := cache.Delete(1, "admin", "files/Photos", "files_trashbin/files/Photos.d1619007109") - Expect(err).ToNot(HaveOccurred()) - - _, err = cache.Get(1, "files_trashbin/files/Photos.d1619007109/Portugal.jpg") - Expect(err).ToNot(HaveOccurred()) - - err = cache.PurgeRecycleItem("admin", "Photos", 1619007109, false) - Expect(err).ToNot(HaveOccurred()) - - _, err = cache.Get(1, "files_trashbin/files/Photos.d1619007109/Portugal.jpg") - Expect(err).To(HaveOccurred()) - }) - }) - }) - - Describe("Copy", func() { - It("copies the entry", func() { - for _, dir := range []string{"files_versions", "files_versions/Photos"} { - parentData := map[string]interface{}{ - "path": dir, - "mimetype": "httpd/unix-directory", - "etag": "abcdefg", - } - _, err := cache.InsertOrUpdate(1, parentData, false) - Expect(err).ToNot(HaveOccurred()) - } - - existingEntry, err := cache.Get(1, "files/Photos/Portugal.jpg") - Expect(err).ToNot(HaveOccurred()) - _, err = cache.Copy(1, "files/Photos/Portugal.jpg", "files_versions/Photos/Portugal.jpg.v1619528083") - Expect(err).ToNot(HaveOccurred()) - - newEntry, err := cache.Get(1, "files_versions/Photos/Portugal.jpg.v1619528083") - Expect(err).ToNot(HaveOccurred()) - Expect(newEntry.ID).ToNot(Equal(existingEntry.ID)) - Expect(newEntry.MimeType).To(Equal(existingEntry.MimeType)) - }) 
- }) - - Describe("Permissions", func() { - It("returns the permissions", func() { - perms, err := cache.Permissions(1, "files/Photos/Portugal.jpg") - Expect(err).ToNot(HaveOccurred()) - Expect(perms).ToNot(BeNil()) - Expect(perms.InitiateFileUpload).To(BeTrue()) - - perms, err = cache.Permissions(1, "files/Photos/Teotihuacan.jpg") - Expect(err).ToNot(HaveOccurred()) - Expect(perms).ToNot(BeNil()) - Expect(perms.InitiateFileUpload).To(BeFalse()) - }) - }) -}) diff --git a/pkg/storage/fs/owncloudsql/filecache/test.db b/pkg/storage/fs/owncloudsql/filecache/test.db deleted file mode 100644 index 913b70f86074a302f4c7b5cc5e42e40c176bcbdc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 655360 zcmeFa3z!?nb>|BVFoQRm6gh-QiX=#i8bTb-bmI+5v@{$JC5q&bqDV=!Bm<2`W2Q*} z4Wb)Ej{MS)Qer#y=C#Qtn@w)Axz6iuvb)L6%Spbyabi2KyLYomZr+89d0O)y8lqHh>Llma_)TvXaes!vGmuBP_g%$J#eU_Q_M74s?P&zL_p zglptS5(t0*2!H?xfB*=900@8p2!H?xfWVprUR@cZbV*yapMQzoo_vDdvPbD{-z2?l z-A->?m>9j&q8~31009sH0T2KI5C8!X009sH0T2LzdzgT>{@)b;QxEeu%%3nHWBwEK zJsFJAoBLTxZJz>+Bkx$?oVynh?l#xxNhJ^2`i>iD@Y>jO3`wnTA_*$(aDoRzB zuP7t;`+Nn>2ZvBt4bheR}o4nb_vs3kh%Km2I!>);^e8 zu89p%6>Ii~S@n;r_M~M+Y}8+P=RPj8@5sIvr{!y{*@oP$CC~7!F5j4H*XsM;{n86B zxg{*BYWv9abVH`^*DlJ6dZfq}*=fGrR(czQEEDv9ZZJt@s8QzkIX`SJ@MOl@V7pG72SH#1!^!;a*Zwcbm<@SPs(nxnn-XKsT zzut(^mB}&7>5H+=kGu}2xjv^ai1f|v#V#-Kts##G(r(?|$jU>bmb+&uPG5(+Y@fU5 zrA+HN5d=>CViG0PscWwnztO2 zMcs|#ve0dc^yT{(r{}Ko&2~eal548rmX+pA+Mt(KUftc`N_fO_w-DRB_tvQu`rRGy zaD!gy&IDAi<;&jK=GdiMMwcgFbO$z8tX}FEe*gasn$lnf1V8`;KmY_l00ck)1V8`; zKmY{pU;@VP|9#TKeDV%<60U&&2!H?xfB*=900@8p2!H?xfB*<=fWZCUF%K6Ch0LD< zJfeTNYHCTSYwQ0J=CX(RPI|)&1V8`;KmY_l00ck)1V8`;KmY_l;5G>y9tn6#yLNd( z?T*wEq&DABrn~g}{EG2I{Pe{em7db42))0eH{dzCefxX84s~T&QN`x8@yGf8jGmBm zJpV6wZBY^ylZ$errX=}BBdHgalI{+QnI0s#;J0T2KI z5C8!X009sH0T2KI5V*4l?Dv;;Irjz}TM6bC!pJ`V(e2Ld2ABN_eVpK&qRJP@;#AvN6tmEk@4`S!~akC$HVUn*TW~mJ3?Ox{b}f@L;pIo6gnMB zg(AT}4gN~-$AjM-l!NaG&IIod{8Qlf13wyA4wM6>z^<{cjQ!5o&yW4vu?9JX7YKj= 
z2!O!dLSW}pel8K~w&^jmVvBy6sV)h;D$$nqa$8g;Px`s(#Bid%8Qo}zwek{ehm$rt zm7d~~Px`r?TZXm87GEXZWl>2y;ZG%Ev?JbbP;p{SQt8)~w1N(v@TaB|{Ru9GWT)5% z=KQIhTlzKa0(Knt?@jD9W=)k}P;}!Bd0|0nEj&2u-#eYK(_HLlrr7(B`SBL|jmy&Z+ z?C!_>hj(rn)V0gs^)~-RVyCl>`l>f^)ITwu=u37jC%yAg|HRHMeTsG=%p?BoiCB~G zToF~hYRM!1?b8WE;9@U7#qKHjw;$SWXy`vtRHol_Tav_UVuzjLC@!`Z{o8kLF%;~Q z5(O<5>QaYbbz9e@m-$Z3h=sfs3;hX0C36&T>;pM17Wy^q0`_L9fW3xCy)ZR^=_399 z%fXCMR6|tENLE~mqOf=%Z4~9ybQHzeMX4QLS*%J%2XYj}+C`ZvLq)Mur77-#X}c&> z(=KZJ6h$Fn(;QL7>-3{`RwsJ+uoi5CNkfICr>H)=C$(T3)U}IA9-?6D!BZA_x&gMJ zH;$u14^zBtH!SothKi#0r!KoQr3GA{rd`ne2mOc4`dD$J7xnOj|BzXLmEu+e#lo(K z{D;gUEM2>h9S8i=W+6=?#v9?O7jx)A|FlWXkv3EiH^ok6_w4sio8>r^?UMHHvm-%k zolc8y41(KhM}ny`R2n-~ph!sCkzi`tMcu#0pXfEwKnZ(x`xCt?4H@R9a#QIoyZnh> zHEeskAMhtQqnU(`+`24R%L@HSUs-Rs9Lw(ANn>lOF4n}3Jwxzab&*EC-lXy14sD3F zRfbBZfS@6EyEeqyTDA)l_xp2HC8G$jrkCs*d{YU3&MK%+(WOWlTla1A=d2?8^zFj8 zjr+GG_SX2N0h+tE`nOCcNbX{qhQG~Q{9ATzp=8@q_CEjk#3@1Ui0y8*AqnMcVwDu= zpZ4RGQmZacFLBdGnvpcsUTjZp_KzPp>d2*6zpvZ$@@U$7Hc=USHSGqksK%sAb2yjh zN^CKmE97=C{_(?``xVZMO1GiD@Y0UBe|(pfS!?ZHzC6qAi}}a*AJXz!_3fEu^Q8he zm1m2&15y8YVV9<(55DD|ra6XkTJ&nYCE_2?jB9FDUbv!7j;$Jnl~QiXHBl>dPuPDT zL4LIA(gMMvi-7_54~6^(rV~yD%PF%k?b3tY5%eF}xy7kxx5fhj|E|P-L2l~5PpB8r zyCRsR7TGuE-=&pcq`FwA!E)QEf0t(6(6KG=@YBGyxvy?oV^9^j!cHHRx|cp5AT7}~ z!a%k5jcA3Mi9>~^rzozrd9^}K9lKEN{{K_KfQR`j=Kp3s!hA3705q6O%;U^1W-R{c z_^-u(GXDMX_s2W&7vppBTzqFd7W=E%FUJ0T>>Fa2V;5t^*h8^!^h?n{iGC#debJlI z7ov|v_ecIE^5>CXjeI!rp-4M&GBOp}68>EHcfubDe_QxnVLtq5cz<|P=nJ9W3wU9#ZR> ze=I>OVQ=4diSF*(_S5>eUp{2`=ytk5?APg8eRLD;vv?i*ERQ@u`z$@dkewqtXp5zn zXeu!{(NUZkTfzHWP*nvkt?iQqSL(w5wDSAwX=@n-~rka8B7_nJh78DLk6{awjbI_ zJ0ZO^#|DOO{lEn6gbb$**?({sZGsHz^-9?9XiIa`LN~w9(UxYyknO#WwltM`hLa5K zcNoR$_V(Xjhb~g;k@b1O~`gmmTNa*%=>f@0u94cgoJ|5W$ zy^^-?rcDg*s+$=1KTKN}uE|3MB$Bji;i}pzYFmu9EWEUe)g2z+P8${4*^r^F4DC^9 zDZPAKw$bK+S6i6(ZBX1dPCF1zu^xk)E#uLmL^ptQ3uQd@JoK z=n~5{UXAUhO$1%&x`_~dfHn{G=JmpK5D0``vtLAgHQi-qDYqd}Ub<~dHxmqebg z=fzT?#~{oX@6`Gi`bp;d5g3l%!7QGKHy3 
zK6&uelNZjMo=i5RD`N5qQMe+flCz5)xhYPUSnY!g*$jPxA)S1NuhS==dbz4qs$)8x zDF}rEeaIqHNR#vRLV+)(^HlAO$Yr=3n=R$)Rg%^?malR-fzPFjd`Zk!t9gOTSF^=@ zfzRX%wQQzdVDmLLSKzrqHJ{EGvP2TLkp%c0k%H@b4L z2Ld1f0w4eaAOHd&00JNY0w4eaAaExU!217AY7O`T0w4eaAOHd&00JNY0w4eaAOHei z9Rk|t|8f8StHTynK>!3m00ck)1V8`;KmY_l00cnbP9YGB{|^u24SASi{FBj6{0HOh z&>sc4KzQ`qN1yXw@ux>#8hMbCf0_P!U9Z{AV~OWDuSaUt#Oun{hNOyRddi2apO)L+!CGx+i6hAXsP2&GWH9;=lp#*DaV# ztu&`ze2Al^b~^;M%9;eVpYAX#WRaYh(Cbc*?~q#M7T*+YeVzDf8QQ#}T$4Lwz9G|t zjOd4DEt6Wy>P8K7R=R0QPwO?B=^On<$?DE&KkR9@$#5N5qYCt#kh1y9o*Psb7U`!- z*44c@fAZ;zbIF4h!|7BwRfw(^4ehq^Q>sQZm}jvSDWXTi&2cLl%rq?;%;V^AG?+=f zXwaT|<7nGVdUbq;BUi*_yJ^=71w(y_Z*&Q|)p%xKG8@;~a}QPwZzd~tQ3UB|cr5Ys zp|!e9tws-aQ>wCjMKL%z1d$t8LxIGhL*Cc6Tc{k#8ZF0Zco>)?^)`A?o1W7pNNu9A z{$Ny-6nYBZvP&--q_>fwttKi$M`~-&yt5nbc1J-gl>6#6sry4sc7vWclPDCtH|@d7 z#%Rw04)Q%=Xf6Ido!e-I4Zy#JnZV)x>zfR29t;E$sg(EYB35Yjo=Q}1UezMgJ_`|m$60GYHpWLyTYE+%jgksoZEQ&27-w0h+2reWQJ{zBX%Anv1Jyag*sA@5R^J3;DJk9RWin`+T2rmfclf4aD`)AK zv>Z;RUfVJjNbKM5eSdSXsT}PTShbPN(bjCU*U?HFZz1ZFa)%ymwQke6G|TV^Gt?YI zDjRLNHU-z}F|dL5>ZzD*Wlbu5u|#pK@C$?2A45*})P`&gDQds8Zu@P-#gI}%YSH{} zR&Es~x-6%FEN_H*Pt`R{%);4l>gGFrdQ^V>Q9CLHdX}52U19Trap-r6N+abqMp985 zZ0s7l&nH?})(0dL%y1WM$n-GM@&*)plcL+Tr3u8mrl?%)$|`R=r(M(x>1aRwUe+&H z*X<_SPsZybomq{cL1~eGpw2FjW7CdHWPL!Qp;aXfbLCg$WVfy9H__01&ZOs&j$1Bt_jy*CD#3rX*xpLI3MY=sx1|J^)!-iBdAR~ z&TiK~QE3i28oIK#wgpb{gpK_MxEBlW!+j z>?>MlytsGmidGsu1e8W4ZPuzb60g1NE_QyeQLKsegmBPmL=A_ z>^@5=aL z)djgBJFb!49oC+7-d5$BOjot0;UQObCBDJaFuSgns`A33{-oA5R96X5vsd&@Xnl=j ztsT#fk0peo-qmL{lR{rO>Fa*il>1*jdY`^m@;+;NvGi!}u#U%9kK5ab)xJ8e6Ns)M z=H9p)=mNK}kiCU10u$F)BVx4`&dYL(I*oN@KNPU^6reV#&=sydyrtc`7CmmPrO?$q zsoLRchGBodvaYcD8p{gE!j^S5=}acZ=96`{?N;{dX<^Wx-Bnn#+K^I7nn#wR)k?i}c7MaKWxJQb*=B2~TO*6<|768kqIE{*Xg&98{eEMQ zX^Yj>dq}iC-aFj6D!1F{)y6p4w|8juL<^d64Wi2Jrc6ucTgW0TN)6hfZ&4j+Yr+oS z)rz2PPq{O&O|k~$ilM*3CaGBaL#t0ROJb*L_erg2v!_{sBT$TMTk6h@)78VQw_mD* z*jNr{m~1HMZy_*KH!dUsiA=`(>RBt$w6#*1p43YBE>)>_gSKC^V>s{zGTlZ1uCtYH z@i(u7J2kDjtOi)7sveGOa&5@f;n= 
zExtj!82aD++%bAC;PGeYo~6%%PdFHrJ%Ce%0&1vJ$tlXlzU23u_ zb=1WgzpT(FQZ+%@66`-guh|)=Y3=*}Bg~I^nEy_1c!2;2fB*=900@8p2!H?xfB*=9 z00`Ux1orz&yS(n(25xOGZ;xPTO8~$Be+M)xTmS(O009sH0T2KI5C8!X z009sHfv++F{Qmz}88FO)00@8p2!H?xfB*=900@8p2!OyHKtQ|y?_~-e`iBuJ)sEQvlYI89{z#2vb}BQKo1UJV4__oL{iguJC+2$xy}ysp zD?EGp?8TnK;|-a9DRq&4%koH(EwbUc`QyEUk83|6&nu#le`-mnS6LqKP&qf+q|axtR-d$qO^bPR}JP{fU+2!EiEJ=_+EUl3e0D!Xn=}n9rt?^Jgz4=P#Z< z9iF6QNw-{=)>LvhP)RoA)`BWtSChx*PRv|9ZQD9`{^XgN^OusR<}M`<8s$zp?We-2 zvpdET!cnhBYSqN+%GHLXieY+elV#0fK!sKixQhD~t^bln`tp{1ERgzLm6&J)#(&C$R z0-3pZ;q1wIvUFx{{(??kbIQ(THi|`+)s?P@%WE{Asy5cJaG29fT%14o^u;-95X}@d z!O4fm5-%0jB1Xa1;Z?ClZJ0GWq#w`MtEy6H&a4c~tvjzr=aIw?k8)dnzN^;A9Fese7 z5t|4kG8ym9856;v3IP}t9U7|sphmCqM$xXdUnjVla<`?rX52tnE%A-6=meTy1*lcZ zq#0c|PCpb#aGdwm*&!afvNnKOl{F*}uTH~I=)tfGvwE5t2^(V3ra?xvE5)UY{gtXH zs@&lh#B!Avu81v1Kn;zVG-54oK&{=8%^ny_ zJjZP$N(H%9mlpJq(&B5#Qa2(G>TTi@qlSjFO$^Dw)!W2S=(UM);u>cgYLr#`_1fd! zK!9f1(#9k7)c&zVxwH|ae7oI{1Z@uP$7;(bZtUL|NR&$6oBU9?+geV%w)COZ5!*^V z%(mdGfO?hK(=HAMIBOljj9l{q!Wvw4Gn2h6lNGDC(pX#CyUrwCxrM?5Gm?XK=R#`(4@F=`Q@Ok#Oy6q@IoCHm@!Y;I}N82qz{CJcggkExy?|hdC|^ ztRNo9G7#=V7ZMfI9rrF{ndQQ%ncZWFi_>e2GFo(AmaAn&<<+inanUc@yYil0dNbTS zG4FvH0|GS};;3Eit00@8p2!H?xfB*=9 z00@8p2;3e4-2cBle#i|1AOHd&00JNY0w4eaAOHd&00MUd0o?z;8(IlIf&d7B00@8p z2!H?xfB*=900@AG9s%6{zde4)4FVtl0w4eaAOHd& z00JNY0w4eacLM?3|Gyhr2|j`V2!H?xfB*=900@8p2!H?xfWYk$!2SQ*ZJ00JNY z0w4eaAOHd&00JNY0w8cV5YX=bZyx&@5Azeu+v9&1|8V?V>>p#_68&&=E^;Zn5R!uz z1D^@J8rVT9c!2;2fB*=9z*|nBvU#&7lsV%a_lAOge=s=Vjn$=wsFc+XuPj!j*7Ox| zSt)U)oKO(z0#_~O3-xrm#0!O@K&N@0t%{krrZiG{W!r1o|9Q5^a`|E|Q?Il9p2ba6 z!XszAyH+h>N$e<++){*mrc~py*;=N~v0RqTaUz%HOU0_l3%LTDi<^qWB~`QO;vSKq zl5%If32(^fUKhP;8M4h63Yv3IcL`YQzE0kkJf!nhf zr(#lPys6cSsaNy)n!xAk=>lKR*SVa))wo)b6Z7?AEnTeo`w6dJ(v3vyK2NB7#(T=^ z)oXOdyYftb=4t(OTE5nrZOGkP@(kbV@{Or>tv*%b@?42!3mjX@^Z6QE;4{@iny*z$ zoKPzW`E0r_*2Qd|%~qLp@&|nejlD!r_suC}y~HHCrs!b6Jir=IFhUsnz*RhOZT~*<$j{@!XMgp_EOF>1r`s z7rBBUW(tL3y;dmYs+=I!1THyq{Pf)UO!`PQU9WLMDZSE-5}8wHyxbs}>2r&+Dl5~c 
z`77e#S&Ax^Zwcbm<@N$aP_9Z*nWe_zO5}!EsAp=0YDO&ZTrSIphO>-~21{?#>a6`= zU<=top;YDS`Fu8CWD7Z-70J6&zFrdxRodani_)_YOKH&1&YIZwVu!9S*yk+jz*evj$4t!RO83acn_>r<8yLH z?JiKzQyZ5`Y@x)lY3h3UOp)-{Sqgc+kj<3@w#sEfPGzd=&y1UO|3M}OzJ~gUIx>o1(y0Tm~Qxx)P z>Sw%=;YhJo$Q1cfR^)PGF<-UnnlI%_nOw2qiyhwC7E|U@2 zx|k|e4eLZ&C#G*%W54uU#zF8 zlV@`RU(EV-Ep39-$1KxXHl455_MG;U(bySp%tlhn(!in(({<{)X*MfTe`iZHY76yZ zHeC?EGNK&#%%F=@v3xBd`ZjyKsWTy(2Td18jLAv7u8N%&-w@ITwnP&iT?>db!qaqA zqBg5nizS)|3nJgItvS6z``>VyCW}m6%~{@s%Mj3u(5H(C+_lj$QY} z|DJg~_RGwU_@?-a@qFxh{6_41;{P@NLrgvP|IyyS2bn*3%P;xh00@8p2!H?xfB*=9 z00@8p2;7wf4){Z7yz6aGZ`QTe+g{$JYpu6C%;;L{ZSlr+t@XBDW4hLQ`;yU~*4i6+ z5nXG&{jadDwcbWjNY`3#_a~@pt+$C1(6!dv4jD6St+Tl?s%x#cM)yDD_nz^Nthf2# zGt}1GLl`+g)-k>Lw z*voi4E}xF$YfY(Tf52`k7GCgd_I}6{8w*bbeC?$`=TDxQ zIe#g6YVJ~U=Hi93C+Eq?nYsB3;YrG(sB(v25X){^b%l;BtE;DW6|tkIF7X{T+Auahk_(8zyQdy?&_8o(a3c z>0HjPZ}RxuiJ6P1FC-@>G(=X_t!z}|!jZAWsRL`(*mTpVw0ZW}%E3Y)F)`u2!Pxa) z=e8H6q^{FRga)rrelvR4 zYRn4XSY0&%K8*<)mcBwtx{<{@hzzJBivZxY-)|3 zZ`4|M&rxmtJb&Z*v|jJmta>}XT0r%7oMndU?MUp^TboB*C%AP61!vo*bB^h3Aj0(- zZ9{#bqS7c%pZjk*N8PkN=nfig$bvqu+s1R;8tU5IXRFdsZFYCXN}i8=(5mj}3)D zUve)9oU+#w41F_Y#?c5mS8WioxjIKfJ=xLh=HNXvBpZ`~T)iv`YPr89hwSyVoB=wd zqjYssYk1YuD7X0zPZv(~U8hR2A-5J(`joUw=!l9!*OC49`hrKh+NAK-v^Hq7A+@g9 z290TS$Rsc5mo{tpCJ8c4ujXXrVkh<5wZr66`- zrCZ*RyQ0c3*d((wbC_@}kZrNklob8Tg?7D6Ed!x`NvL$2wx!|mnxes4T=c3>SKxMh z(Des?|Njk|mS6@1KmY_l00ck)1V8`;KmY_l00iz}0{H#^JGgP+8VG;@2!H?xfB*=9 z00@8p2!H?xyg>wv@Be?^!+ib?G6*vu00JNY0w4eaAOHd&00JNY0w4eaZ#{vS*Oy4> zU!>FDwEO>FTKr@E|JFARJOBX@009sH0T2KI5C8!X009sHfj5jm+@DBf9_Ck>_c2c~Bk|vf|J(TU@kH!1vHuY3 z#-^j6js8$njvgcxyg&d1KmY_l00ck)1V8`;?tB8Xn}eP!<9>fI_~O~w7iYTaB0cY| z_qfCtFUVKK){Do*I^S)m`pIlVkqk;h{}k7$){7`f1)C*@V8ma7px zv1egKvv52b^fU)7JVVdDmRbu`E8X{%|PhDcRY@nrJ)-_{C?ITY~9f zuc*g@L62bR&d@U!mn3!Bl~FHI5)~a(&%omWC+gJ*Ws;cjUx=y}d^;rPt6 z1EuS6dafhWv+~q|!VirGJ&*Kdo#i_<&4f!;PxynLtCq4J6mBJT+vl`!Q&j3an$l@m zxMI|J#R#rPeL>Gl8?ob>`>_!xa5pddTll0ZHqGK+)>=2`9g3-eYG`4p8#X9EGOuOR 
z*8ejh(Zl=&^J~n1Wd1d?%)Ennf?=8Mv={K1_;1I5A^rpLZ;W4$^YN$RnfL?o(b(r= zzZ?6%Vm}=Fme@OEwb+?hKDH+ojD9})htZEk|6}w!qc@|?=yTDT=%MI%V)@4(Z)T zT+?$@0LSHX+1;h#bgEr8o#)b-4Ar3Ml3pO&IgZWbbA{an*L12sm&=w|c6Z(_oh@bv zQ98rscIVvFO#r*IYot^C_hc+UVsDve5ahCKj?1RA2hzS=s&1^Q#F|dZcIzQq%I4E)wv;QSxvdXd(gIsb7qrmH=C-CB(gN8iWyq7Q2OZKJ zwK7X~(_1H;Qu3q3X?7oSNVQrN(?youdcYy23X&H&F1_`^0V$U$vAM1L9a4%n3hi8` zxOLxvG^a_&_xi?Dg4_{xXyh2ju|=*hp0vc|7qR8?Y4UG;k1Z~89C?{tW1Fep%f6xjUu4yQQ7l{hv{9btTXpO|F1 z9Ce-X`<-HnbuPmdGr94EEiRCOyyjw#9pB~zpGPBFzbIi2Cy z@nD~rBE`@S42Y?YRMG8YzFjH4w|hdBDN<8V2A0cYw~r1bQB&uroZ@zWf07m$M2Iq$ zw)+N?sJ+QrjxB5-=}*d2^{D6Sxx8*k`qZF({(mNPKwJO+D)XbvH#1%45;Mc3n9cEj zivMZ+SL6RV{+;oaxD*aI~n6*+hgA7r=!0a{Ydot zqaTcRqwk155oM#>qa%^eM1Gsr{yz};#>hhCu}C8Ph462OelIBspkTq8EkF26#E#r^4DQN3ka~)rDRd6iii>oMDEBS)Eg1wZ_ zuclzG<#TH)5QnUTl>2HvGvUt>?RA&)>9zD6>v?V?dgK|q&A%n(9a_;(_ld0~{gf?s zuIUdu#EwP%q*H9K>JK@@j%EGBePU}}pXw7^3;TopVsm9b(I>W+_7C-mt+oAuA+f%= zf3Qz%t?u^^iS_0EzJz}~6cqe{N4R>YmL8aSgtSfALx_Y ztNfk)a%-8tV?b`M^SAfQt%d&mPPt>HpXig@OZ{#Aa(k`cFSi!^Tl?kKYJW?=+*yF9~zLG z3;*Dd+*tVscKUavyzWc?F_$!R?LRu0=2-muhtsUpzi%+jvHTy|LB50>z8Gox`d_>M zA7q~MFz;vD%-=BI%G8;iWS;lX+W)s`-T$r32binO0txT}0T2KI5C8!X009sH0T2KI5C8!X zSeHQDx7Ta0|HneUgmF50zc1%?TmO$}Kgp*j9P+*3b*#|WTK@aD_zv~y((*s>fbVFZ z%(?szGQR!JM4$2ezt4D>&#YS|D1ZP6fB*=900@8p2!H?xfB*=900`Xq1h#m`JRxsl zr=PDirPj3ZZan|*3m)bRcYX!nAqao~2!H?xfB*=900@8p2!H?xfWR#f2>TP>kiP!+ z8~6Ve52M^deyD-~2!H?xfB*=900@8p2!H?xfB*=rM_?uFWjyqIj)%4%-W=P!!fcK; z_2*#f3-Ss*KhgMwR{ALak|bUeJM0vfp5nUtBR9+Z5z^;+eL~5MTzc)~qI`n|QZtI%# zGT*5wi}E#gzjrh-zH35H7UYhoN=@-)xh2kZJM0@H2A+@yo|0B$=dzeqt8(ts) z0w4eaAOHd&00JNY0w4eaAOHe)I)PoSwAn!LRnelJ7QV9 zu8N%&-zalKqBCE2b}o?Ewafd#tE%>clBRSH>&R@u6`B6m9i0WdST*UGTWcNRn{ ze{EIm7d^|SRkKBgT-CMI0fRX%?LxFIcAAo+NODWD>uxo%X}Q-<+GmQ^)#3#~Y^$PM z#PpjJueYTRuTs&KWQ|w#Pb>{Lxhh`~Nx{UDajQg=ekHZ6h^_U@G@`P_YxOWqYOQE` zW`zTA2ubb{DSS4YhxK#tqNX`Zl6BFK>?=^Y1wU4%SR7uo8+b1W7 z2-|5cXsu{po2=HUoa3PFnZ~GB!g7CT>D(>*W{~wpCo{_uHI^eB`|{|+n5nqVV9F7q zhmQpksg(Cs#zrJ9QQy*de$O6*a(A}+huc%7lYt88?DrXMM`qoJ?jR_OF# 
zkt-Y2TnneuYU+`hvBcSwclAlN-4W{|%?g63=rgM0{KSn1A2&$*Y|@stPTD$~b0KXd z_DE~UG}O*9X|=c^XiinF6RoBy$gR4x(8q4AIm=M*0dybrDAt|b?o3=($rg~AGW^I13e%-{@UB#7D!|=-Vf}zXb-4p$OlBT!`ufn zTm}OhrL&sDmpA$p>+Bq66X-owzeXw3OelA$Voo(H05;ChB*S(oWZn|bA~goE{4N{&bhHHyf+3a}P&-M{LTfW3JM#Db2ik5EvSI$eUx?D&zWL&bUZ7d>e>Q z=P0GyZS#t9P3{Z~k>-qQb*43@U){_=XB#SJ^{GqG-qiYYo0eUU3-v6k_1HcLw%U6A zX~P%G+69&(@GZN44UHsLC(|o2SsC<#hGX3Sf4!X$`9J^!KmY_l00ck)1V8`;KmY_l z;O-%S_5a<|R`3)AKmY_l00ck)1V8`;KmY_l00dqy0d4*7Wj^Mie|Uia2!H?xfB*=9 z00@8p2!H?xfB*>G$plK?t)5^ak?@Rf_IUSt{Xvh1{x$iIB8u|3AX~HxKhUdcz9@KmY_l00ck)1V8`;KmY_l z00cnbt|0Kh$mk=UkcXa}!Z(f_nQpe!Y3)fr)7s-Ol<7_0(fyRVC^y9;NAzz6X!rj| znBVa*pQksxKmY_l00ck)1V8`;KmY_l00ck)1nwRJJAIFMSJ?p=iTn149P594|Nri3 zR(J{mAOHd&00JNY0w4eaAOHd&00OU<0KWhKddVRl2!H?xfB*=900@8p2!H?xfB*>G zJp{D-|J%HrXXNKbUX6b^-iUpF>>bgm$nS=ahrSY;41PKAm9fu{{qLjyJoyYgPCRTHmEwd-YBDD$;iM^u!m zEME~8`PPD{I7L#etSq+#RlXv&T$O9H=jUcF%q36GAD??Zx$l;Z?n|DXPwsOyy)SvN z;_yzYRVFKzGnLee_r*ZsiIjI`kFz@Zw?)cohgTNMT}AAa^+dAoNDy3ob|^SSwGk4DyrP!HKv^7NdrWB$PEpsilewVkbF&)=)^EJAd-b%=t^nQ*)P+GZ!zMJ*kyx!9gH6Ju0E741V#Ki;N zl|#CG?tX&8v%+ zcl|o=aCNa?$Hk82U%#eC&{=3Db?Vu%M7gxa_^>4K)Gy`Ea#@wja`mz(sQOUhB7f;d zJ2O8g>%5LM+~!1vwFWb9p8wqy4;nH!?vWbnf3HFpf7=ICfw*6GRc z@YvOAy@C1|-fl?KtgLM}oy(W_#i*F9Jw&}Y$8&ul7qFj?Z4v4E(b|@+|cc}em zpB_s*$F0FY?Q|7Yr0W;4vt-g3kUf8M)44!`#u(shlkvlg_9e$8g}{EBdWG?I+u4-R(Z9n ztZ_w<*8SA3(P%iMD@NO(UYo%%ur3O$no+x6JUf<XcSJ3B~BgKP*Ca@`X)@nxr@8x{5T!OAD=ytRbw} z)E%C5dNWzE%A2e>EK-Ex^Z$3^CIfr|0T2KI5C8!X009sH0T2KI5CDNUmH@CgJ!00ck)1V8`;KmY_l z00ck)1m0Kzxc~pg@`N=I009sH0T2KI5C8!X009sH0T8$o3268KN0^3(`6+tC3j{y_ z1V8`;KmY_l00ck)1V8`;K;X6sJUKF&@T`QId`A+wcRiS1evo4yY${FV>3OkrPMvyE zs6Bn+-094f(`TEPj&aYYGi|Y2U1py?bL`3I-(GKAR-S(D<@V|4pFR7eSGUC1no`TM z^zw4|&=CiFD|z( zJ)_5C8!X009sH0T2KI5C8!X0D<)gVEw-yA*6!<2!H?xfB*=900@8p2!H?x zfWX~P0Kflt_qQQx00JNY0w4eaAOHd&00JNY0w4ea>k+{Ee?3A-2LTWO0T2KI5C8!X z009sH0T2LzyPtrz{(ma+Z64+?ncrkS&U}RVG3MVh-_Cr1c@NWJ>df1jdFB|CXC7vD zGxsrL@h`ELX#y${JWABJP z8B50=h>b=6Ci(}_UyJ@i^gl=cUGxLd<>*56rRX!!lhMbbx#*$j-e@8ki~1t}6#47O 
zUqt>e@|%%QM1CRi6OkW|{BPtGULXJhAOHd&00JNY0wD0UKtTJQE_JyrDo!RZ zoA9=~)rKULuZh(fzjWUw-vLunml~q7SyO57irQ`0cvYnTH*NJDc1X^PO1Ggh5#LT- zcv-IcqrP4GvCxrQamKgbE437rZ#1;rv2owDeO_J?JBlQ?PP$4@xJqYTrH{KxkGe{a zxJvV`(u}K=b(J1=l|Jk$op6;t=qlaoD&6fW-Qg-txJtLWN<*&FF<0rxc3;6;mpY1C z?zOSmqR}nBLzF@hRBqSmOOkj^?3A=15nFtfBH>BL@touMnB({{$MK_%>T9FK;5J43ztZu4Ncb||Dlc4F=*ZnxjS`h|Q?7{-WBq^1!~8w-DdrEDUtxZZb^yMQ`6lMQOp6hj z7nx_6r(u-00@8p2!H?xfB*>GwFI;)Z)5S= zl2xfL3A{=x&ZpePC*8$Q-seksRk7J_&>C2*Nvhm=Vpw!ySTq;+?e!|l3guP#1?71E z>1_Y$F>Pht?#N4$wkDQZ3uV47&A164cN0FQE!CZMRNgiuIXWbHH0;~1{rdP7QGLWd zJYpY~?8Bl~L4CQ`>9(#(FY}#RVOW$O7Uc|)wi@iM_?7IiC^IZd+ak5YD~nZnu-dSQ z9TrV%gpBp4Dqg3J3$4wj+=PeSgp(R4BfGLl4~A{n4vUi7hLCPf-%Qevc3ZMt*3ko6xYos* z*wMSt&i>OK{ioaS_vK7wv8LNJK;FMvLSnUqZ2{jFFKteZ_x^6}{oNAuje7;TBi?5o zY&H)znFow{5H}BE=0Vgvh-jN$g50W03mvg7chvBpAT%flYI|IQ+-!<1RS7sx#+)ak zT8Q+m@Ab_wUQneaNnQ47y5`A9#J4MCo@ncTFY{>+{lg0cKmY_l00ck)1V8`;KmY_l z00cnbZYQwKmkG(&TC)wgTT7ndTV1{}MIV?j*8hL&VgB}RuOR#f0T2KI5C8!X009sH z0T2KI5C8!XunEL`dqZ5FE3s^WV@t;R|BD{xi?#-iKmY_l00ck)1V8`;KmY_l00ck) z1nvm}L0=-o*P2qxSpR?7!+iOks23^$0w4eaAOHd&00JNY0w4eaAOHd&&`-ec4~0V7 z`rosuKLKYT00JNY0w4eaAOHd&00JNY0w4ea_Z9)H|L?8#L~TF-1V8`;KmY_l00ck) z1V8`;K){Uv*8gr)AOisq009sH0T2KI5C8!X009sHfqRPp*8lfbd!jZV00JNY0w4ea zAOHd&00JNY0wCZ3umZC+u|1 z2g5jd@%ZIHLO$$Wx$5+yC96_h5_naTTT1!l@#o82{#cPMWQ!a(dn|t}Gn3AwkLU8~ zW5lRWx#Q|vpURr{EO+~|XdcVe%c7u`)#bKGP1-w?YE*@NQXo;LD$H%H3NyLm`MG>% zZqW6sx9GZQXLXlVsVOQd-)xhUyA};6AK93b#p9*S@qD&4=w$mAoiy#N?qn|-A6PK_ zD{aib*;0P4z|Ni+^iRG;|4cip`^UF6e4%tm2!c;9|kyo28}^1QIWh zb0^nwuEDF+ZOV$+s=2s#Wkc=_+E~j$hprZB)j(pFT+FQHVsCnIP`kV#xB8|9M{bi9 zU$4_3C)RqMvcd-v?>*|hvDxX1AUB&dWhv#3$k)nEzH>$F(Bx%G4MEu)AhijY+Sf{U zW?FRh*;Umx;9}o`>zZ}V!8`UTPijL~Jny_3NIXH86nmT~ltsQHmW}solDcShe_gcf zT0QqXRu%MQ&1KtaCDmvL5=W`<$Z+ARd_`>amwSDUavkc!Me2!_)O%#QFrd=K+))G68!Jk-8mJPqpI74SJZ(m8J%vs_kXa~!f|#=vA>*V-wT zw&+?-7sawI>oaRr)=syK_l6^??PR4FTYD8+8Bo0U#g;SPjSe>)@b7eWbgi70XS$N+ zi_tOk638P*%ugCghL7**tU4w*4D@%$mpAC0-gav{mcsr2Tj&;01pyEM0T2KI5C8!X 
z009sH0T2LzyOaRd|95G_!EF!#0T2KI5C8!X009sH0T2KI5V!>bSpVMwEU1D22!H?x zfB*=900@8p2!H?xfWTc!0PFv|wBg`32!H?xfB*=900@8p2!H?xfB*>G0s*Z5Zvhrm zK>!3m00ck)1V8`;KmY_l00cnbE+wF?|GmsVc<3KqAOHd&00JNY0w4eaAOHd&00JNY z0{0YwkZ*fPf8L8~tpC60VZL}zRS=Z{0T2KI5C8!X009sH0T2KI5C8!X@OXm0M2N37 zrIxY&|3?q=kH7)~AOHd&00JNY0w4eaAOHd&00JOzuMr6N{GrM#+jbc1|4(|DPu^=a zM14R21V8`;KmY_l00ck)1V8`;K;UbQz;^$k?V-BV5S4PfR$r3DYhs6;;?h%`w*L2Q z`dZ_G`hoxmfB*=900@8p2!H?xfB*=9z}FT5tpC5Z+8eb60T2KI5C8!X009sH0T2KI z5ct|8V0`}n(;nv2Uz_zutw8_;KmY_l00ck)1V8`;KmY_l;GQC|%^%`I$9a{n@``vw z|Lwo2C82Jt|MC0(_f%V>G9Ul~AOHd&00JNY0w4eaAOHd&U=RrU$AkJ00vPN6FMF6T z0}Tj(00@8p2!H?xfB*=900@8p2!OynNWky&hT_Kh|DQd~Ki`8jM3q1Q1V8`;KmY_l z00ck)1V8`;KmY_hKA$Iq&;KJ3KmY_l00ck)1V8`;KmY_l00cnbo+S|UB|=TUBZ=T!Ahb^AOHd&00JNY0w4ea zAOHd&00JNY0u}-7{=c93xQF>7^I7Ik=m0Me009sH0T2KI5C8!X009sH0T2LzJCwlv zzMPk@HKo?$4O!qDi?X5~DY8Y@AM@=o5+0$KFXT%YXCvD8|Md*U`v1>7%s()H&3u~q zvpckca0>)L00ck)1V8`;KmY_l00ck)1VCU-0%N`{p^GPvkNOzzyl>36*?VGsMB50M zJ$?4#alf(t|13TKk3IqLx6EhPG=)SE009sH0T2KI5C8!X009sH0T2LzJD-5x7xMbG zH=p)4;`4>{=l_M6|Kwr*k?#M0lKErix0zpOKFa(oiSPme5C8!X009sH0T2KI5C8!X z009tqiwFdLesATKZLfuT2YZ9&LBKp1GY>}11HXCTGY>`rK7WYp?A7l7hnSZ=bnpMq zY3=_p=BJn+WWIxWKZ)=H0T2KI5C8!X009sH0T2KI5C8!XxNQP)f5LBk8(&|~_nqzy z_MZm&PsjRCNBd9x{inYE(~%gJYkc?LUjHBWFn`7TF5UP4QCj=oWLnH6=J;(_5C%X1 z1V8`;KmY_l00ck)1V8`;K;WJtuyr(HF6H&#)YI4VF2cP*H(|g{IOZlCbrbsCgg!Un z$QI2{W9e`D*}MOb_5VE;kf;m@fB*=900@8p2!H?xfB*=9z*mKUW3gSA8lp0|{x^kv z>win=T>o1_=lb6gI@kY}(7FD%gwFN9C3LR;EnzJD8P68)=RAxO|F7}M*n7eYpCcut;$LNgvj9AThghWQB2hc3G|}WqKP2ZI?Bb@;q>7H+Jb!bqn z&y3~PpqA#GUJ%76d6-!71`=lujI2E2^iUh&1YTGa%XOu!sB(v25X*}6vPj-pX`T`0%|89nQgzpMtau0>xi+`1YfY&sQg_hcS;jr@-?)2b{`Fim z^|a`J@2xneF1P#Hz3kS?)zA2U$d1bMW+}E$~++cop z=L{`sRIMEHtN89fLf}SL7My<3Gk3&RjqeDHd}~3ZakO_%ol{wE395WWY>|7WxTog% zI#(wLw5(iCW0tQ2%33&@B8otYNV9lyeapK7iBmN8&pKV_1yx#-)a9~uAXhJoR>X=ShujGUHlrd)H%NGj`%J87QjePrJ|1BqwIt4nKpMV*4i<1*iFJ3YI(LC@^m zuHNzWI#pS$=>zX%g$f~bD)mT3<(q9nxA)~h;^_@LSzUJec;nW5tS)cF#h#~5A9pMp zKAu_|q&{ljo1OP{X?a5) 
z&aCg@65r^G){0`##dqI=iw;X``l##CWD-_spiX&LCY(+h*WUVl`LgqvF7Sz<6`Q*^inw63&DdYS9-{xVyKu`bsW9B#M7XW^iz6J1o%(uPXm_|Mj009sH z0T2KI5C8!X009sH0T2Lz4H1a=6JD=&7f6YP9LGWXI2y2zBV+b)c+@@)`R(JN&pr-} zgvowLw?C$>|2>;F+ zdGsz_-LINwJU{3x%ckX1Bk$4GY1MqfT6G?rlkEjo({`eau?bzhUp0=e%O983ox@%& zD30msd)3036)*7GrqI>BYT=k?cjBLA8+YsKl&Z%YuLi#SspAE=S&k!0o|OLpAfwac zpS$LF>E=}O&jabNaJ4br+@o4+R$%$Qy~26Z2|3{}-QA#J}Vf0N)XRlD`4? zo%pr*x%i3v9N_!nJMy!DH^gP}MOlO!0tg_000IagfB*srAboJ>qrEtcY>FqR~vNiq^AdNEFN6G<|jBqB-5Nn$36ktBsA$tQ`P%>Q4CzW@Kb z_^bG{_@nr}`~=`P;uqql;z!~w@m=vPc{YI81gB1UcOy(~1Q0*~0R#|0009ILKmY** z1_k8Y-&Av2lH}yq^ro475Rgwk0mvtx0OXTT0JP-O0j-$VO;eqSm*eqL#LIHLG~=Za zFAMQ9A1}M}|8t7?r}&$k{r^G!0^nzI-v2%EP4T+8Bwi5D2~RAGPl$s#i44WVJZ+*w|RIs{LU1$VhkfGjENKCacPepDh;CRl_jyrVMFf zB1y-`lQf^_{C{`+-UtbwIB2q1s}0tg_000IagfB*srWC_IQ|10GcdCvbg zMMr#6JSHZ}Z%T3Y^;Vtc;xXr<)u{*ZgSomRFC5o;+`BOGAd#I;UY{;Cr!&aF81YSO|$IT1w^wf-@;?zF2< zbuUP_I8bQu_y>XVKaTVO0R#|0009ILKmY**5I_Kd41s~`|Ec1N zBJbP~KmY**5I_I{1Q0*~0R#|0URc#c!&T32q1s}0tg_000IagfWQtEVE(@Y2bC@%fB*srAb_z25I_I{1Q0*~0R#|0009IL=n51yU0n(vozIoz6XS&!74c;G)zZI9 zD`S5dJ3ac_(eop}8aZkH*nG5j$#~niM;73Q00IagfWWO12=n1RM&;CjT=+h>S##Fi z+IrQqs~6q6Q>}aUYCB$5gP>lG^9S;jug`t_`0Ux^lP69eJN}8ueOp!DH#vWLa^FA` z`z9Y)l8rA-g&zrb8an=LJZ(8-L)oRsU+qxj*>RSyqT26J%s&^dOYDQV* z&CCchN@lm3Mx0S24qa}4>CjRff$YNv!@G@&E&H(WKlGvDb(+C-!hhrc2|sRdtH|R* zRktRio(gvv6?cCwobQj?>I5r+x9T*jn`?Ee9aJqlaM#>mBi$j=eC2DI;imaFZYhm8 zEguOFY;tMp`S32Ia&m7j+}j^(+?jT@oA{m=q`@W6DX(WncD;IOC`o}Rj-=6us~Aog zl_zBg2d^JOJMesK*%^-FwVQ}yplU`SX}*l*`EYzojZWL~hvRtlCgRwvmk~sg8EF(k zAw!Tm&w+l8HQNfDWzXNJrYmu_+|AQ0<*S)dr1?Wt(Z4j5Y2x7BZb^uZS($uqI$*9cAjql3dc*m`!W01zLyt2i%(^}b^mf3Ou?hVI` z%EELme58NV>G8_;nit*WYB!1Z=l)|&^1SkLW+XRiBn@e(P&6pfGNL`9X;jY1h)(oJWP6Q<6LF*~@(}B7zG!<-D__ox zsh6EuJ&meYDgsKHkN4a|-3RfmbTlj)mGd&ZjT?vOU3kW^HxIGBrj;+ou!^+i==})o4l9gP|ecz+~L#ZX9>uwHls$2Wwfr^p-oQysyhw zL)w8$H`#&AhBA9{P06Jx&i{X_{+Oj*1Q0*~0R#|0009ILKmY**ZXW^W|F_Rr5D5YZ vAb, so we need to insert the files subfolder into the path -// and prefix the data directory -// TODO the 
path handed to a storage provider should not contain the username. -func (fs *owncloudsqlfs) toInternalPath(ctx context.Context, sp string) (ip string) { - if fs.c.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - ip = filepath.Join(fs.c.DataDirectory, layout, "files", sp) - } else { - // trim all / - sp = strings.Trim(sp, "/") - // p = "" or - // p = or - // p = /foo/bar.txt - segments := strings.SplitN(sp, "/", 2) - - if len(segments) == 1 && segments[0] == "" { - ip = fs.c.DataDirectory - return - } - - // parts[0] contains the username or userid. - u, err := fs.getUser(ctx, segments[0]) - if err != nil { - // TODO return invalid internal path? - return - } - layout := templates.WithUser(u, fs.c.UserLayout) - - if len(segments) == 1 { - // parts = "" - ip = filepath.Join(fs.c.DataDirectory, layout, "files") - } else { - // parts = "", "foo/bar.txt" - ip = filepath.Join(fs.c.DataDirectory, layout, "files", segments[1]) - } - } - return -} - -// owncloudsql stores versions in the files_versions subfolder -// the incoming path starts with /, so we need to insert the files subfolder into the path -// and prefix the data directory -// TODO the path handed to a storage provider should not contain the username. -func (fs *owncloudsqlfs) getVersionsPath(ctx context.Context, ip string) string { - // ip = /path/to/data//files/foo/bar.txt - // remove data dir - if fs.c.DataDirectory != "/" { - // fs.c.DataDirectory is a clean path, so it never ends in / - ip = strings.TrimPrefix(ip, fs.c.DataDirectory) - } - // ip = //files/foo/bar.txt - parts := strings.SplitN(ip, "/", 4) - - // parts[1] contains the username or userid. - u, err := fs.getUser(ctx, parts[1]) - if err != nil { - // TODO return invalid internal path? 
- return "" - } - layout := templates.WithUser(u, fs.c.UserLayout) - - switch len(parts) { - case 3: - // parts = "", "" - return filepath.Join(fs.c.DataDirectory, layout, "files_versions") - case 4: - // parts = "", "", "foo/bar.txt" - return filepath.Join(fs.c.DataDirectory, layout, "files_versions", parts[3]) - default: - return "" // TODO Must not happen? - } -} - -// owncloudsql stores trashed items in the files_trashbin subfolder of a users home. -func (fs *owncloudsqlfs) getRecyclePath(ctx context.Context) (string, error) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") - return "", err - } - layout := templates.WithUser(u, fs.c.UserLayout) - return fs.getRecyclePathForUser(layout) -} - -func (fs *owncloudsqlfs) getRecyclePathForUser(user string) (string, error) { - return filepath.Join(fs.c.DataDirectory, user, "files_trashbin/files"), nil -} - -func (fs *owncloudsqlfs) getVersionRecyclePath(ctx context.Context) (string, error) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") - return "", err - } - layout := templates.WithUser(u, fs.c.UserLayout) - return filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/versions"), nil -} - -func (fs *owncloudsqlfs) toDatabasePath(ip string) string { - owner := fs.getOwner(ip) - trim := filepath.Join(fs.c.DataDirectory, owner) - p := strings.TrimPrefix(ip, trim) - p = strings.TrimPrefix(p, "/") - return p -} - -func (fs *owncloudsqlfs) toStoragePath(ctx context.Context, ip string) (sp string) { - if fs.c.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - trim := filepath.Join(fs.c.DataDirectory, layout, "files") - sp = strings.TrimPrefix(ip, trim) - // root directory - if sp == "" { - sp = "/" - } - } else { - // ip = /data//files/foo/bar.txt - // remove data dir - if 
fs.c.DataDirectory != "/" { - // fs.c.DataDirectory is a clean path, so it never ends in / - ip = strings.TrimPrefix(ip, fs.c.DataDirectory) - // ip = //files/foo/bar.txt - } - - segments := strings.SplitN(ip, "/", 4) - // parts = "", "", "files", "foo/bar.txt" - switch len(segments) { - case 1: - sp = "/" - case 2: - sp = filepath.Join("/", segments[1]) - case 3: - sp = filepath.Join("/", segments[1]) - default: - sp = filepath.Join(segments[1], segments[3]) - } - } - log := appctx.GetLogger(ctx) - log.Debug().Str("driver", "owncloudsql").Str("ipath", ip).Str("spath", sp).Msg("toStoragePath") - return -} - -// TODO the owner needs to come from a different place. -func (fs *owncloudsqlfs) getOwner(ip string) string { - ip = strings.TrimPrefix(ip, fs.c.DataDirectory) - parts := strings.SplitN(ip, "/", 3) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// TODO cache user lookup. -func (fs *owncloudsqlfs) getUser(ctx context.Context, usernameOrID string) (id *userpb.User, err error) { - u := ctxpkg.ContextMustGetUser(ctx) - // check if username matches and id is set - if u.Username == usernameOrID && u.Id != nil && u.Id.OpaqueId != "" { - return u, nil - } - // check if userid matches and username is set - if u.Id != nil && u.Id.OpaqueId == usernameOrID && u.Username != "" { - return u, nil - } - // look up at the userprovider - - // parts[0] contains the username or userid. use user service to look up id - c, err := pool.GetUserProviderServiceClient(pool.Endpoint(fs.c.UserProviderEndpoint)) - if err != nil { - appctx.GetLogger(ctx). - Error().Err(err). - Str("userprovidersvc", fs.c.UserProviderEndpoint). - Str("usernameOrID", usernameOrID). - Msg("could not get user provider client") - return nil, err - } - res, err := c.GetUser(ctx, &userpb.GetUserRequest{ - UserId: &userpb.UserId{OpaqueId: usernameOrID}, - }) - if err != nil { - appctx.GetLogger(ctx). - Error().Err(err). - Str("userprovidersvc", fs.c.UserProviderEndpoint). 
- Str("usernameOrID", usernameOrID). - Msg("could not get user") - return nil, err - } - - if res.Status.Code == rpc.Code_CODE_NOT_FOUND { - appctx.GetLogger(ctx). - Error(). - Str("userprovidersvc", fs.c.UserProviderEndpoint). - Str("usernameOrID", usernameOrID). - Interface("status", res.Status). - Msg("user not found") - return nil, fmt.Errorf("user not found") - } - - if res.Status.Code != rpc.Code_CODE_OK { - appctx.GetLogger(ctx). - Error(). - Str("userprovidersvc", fs.c.UserProviderEndpoint). - Str("usernameOrID", usernameOrID). - Interface("status", res.Status). - Msg("user lookup failed") - return nil, fmt.Errorf("user lookup failed") - } - return res.User, nil -} - -// permissionSet returns the permission set for the current user. -func (fs *owncloudsqlfs) permissionSet(ctx context.Context, owner *userpb.UserId) *provider.ResourcePermissions { - if owner == nil { - return &provider.ResourcePermissions{ - Stat: true, - } - } - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - return &provider.ResourcePermissions{ - // no permissions - } - } - if u.Id == nil { - return &provider.ResourcePermissions{ - // no permissions - } - } - if u.Id.OpaqueId == owner.OpaqueId && u.Id.Idp == owner.Idp { - return &provider.ResourcePermissions{ - // owner has all permissions - AddGrant: true, - CreateContainer: true, - Delete: true, - GetPath: true, - GetQuota: true, - InitiateFileDownload: true, - InitiateFileUpload: true, - ListContainer: true, - ListFileVersions: true, - ListGrants: true, - ListRecycle: true, - Move: true, - PurgeRecycle: true, - RemoveGrant: true, - RestoreFileVersion: true, - RestoreRecycleItem: true, - Stat: true, - UpdateGrant: true, - } - } - // TODO fix permissions for share recipients by traversing reading acls up to the root? 
cache acls for the parent node and reuse it - return &provider.ResourcePermissions{ - AddGrant: true, - CreateContainer: true, - Delete: true, - GetPath: true, - GetQuota: true, - InitiateFileDownload: true, - InitiateFileUpload: true, - ListContainer: true, - ListFileVersions: true, - ListGrants: true, - ListRecycle: true, - Move: true, - PurgeRecycle: true, - RemoveGrant: true, - RestoreFileVersion: true, - RestoreRecycleItem: true, - Stat: true, - UpdateGrant: true, - } -} - -func (fs *owncloudsqlfs) getStorage(ip string) (int, error) { - return fs.filecache.GetNumericStorageID("home::" + fs.getOwner(ip)) -} - -func (fs *owncloudsqlfs) getUserStorage(user string) (int, error) { - id, err := fs.filecache.GetNumericStorageID("home::" + user) - if err != nil { - id, err = fs.filecache.CreateStorage("home::" + user) - } - return id, err -} - -// CreateStorageSpace creates a storage space. -func (fs *owncloudsqlfs) CreateStorageSpace(ctx context.Context, req *provider.CreateStorageSpaceRequest) (*provider.CreateStorageSpaceResponse, error) { - return nil, fmt.Errorf("unimplemented: CreateStorageSpace") -} - -func (fs *owncloudsqlfs) convertToResourceInfo(ctx context.Context, entry *filecache.File, ip string, mdKeys []string) (*provider.ResourceInfo, error) { - mdKeysMap := make(map[string]struct{}) - for _, k := range mdKeys { - mdKeysMap[k] = struct{}{} - } - - var returnAllKeys bool - if _, ok := mdKeysMap["*"]; len(mdKeys) == 0 || ok { - returnAllKeys = true - } - - isDir := entry.MimeTypeString == "httpd/unix-directory" - ri := &provider.ResourceInfo{ - Id: &provider.ResourceId{OpaqueId: strconv.Itoa(entry.ID)}, - Path: fs.toStoragePath(ctx, ip), - Type: getResourceType(isDir), - Etag: entry.Etag, - MimeType: entry.MimeTypeString, - Size: uint64(entry.Size), - Mtime: &types.Timestamp{ - Seconds: uint64(entry.MTime), - }, - ArbitraryMetadata: &provider.ArbitraryMetadata{ - Metadata: map[string]string{}, // TODO aduffeck: which metadata needs to go in here? 
- }, - } - - if owner, err := fs.getUser(ctx, fs.getOwner(ip)); err == nil { - ri.Owner = owner.Id - } else { - appctx.GetLogger(ctx).Error().Err(err).Msg("error getting owner") - } - - ri.PermissionSet = fs.permissionSet(ctx, ri.Owner) - - // checksums - if !isDir { - if _, checksumRequested := mdKeysMap[checksumsKey]; returnAllKeys || checksumRequested { - // TODO which checksum was requested? sha1 adler32 or md5? for now hardcode sha1? - readChecksumIntoResourceChecksum(ctx, entry.Checksum, storageprovider.XSSHA1, ri) - readChecksumIntoOpaque(ctx, entry.Checksum, storageprovider.XSMD5, ri) - readChecksumIntoOpaque(ctx, ip, storageprovider.XSAdler32, ri) - } - } - - return ri, nil -} - -// GetPathByID returns the storage relative path for the file id, without the internal namespace. -func (fs *owncloudsqlfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) { - ip, err := fs.filecache.Path(id.OpaqueId) - if err != nil { - return "", err - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.GetPath { - return "", errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return "", errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return "", errors.Wrap(err, "owncloudsql: error reading permissions") - } - - return fs.toStoragePath(ctx, ip), nil -} - -// resolve takes in a request path or request id and converts it to an internal path. 
-func (fs *owncloudsqlfs) resolve(ctx context.Context, ref *provider.Reference) (string, error) { - if ref.GetResourceId() != nil { - p, err := fs.filecache.Path(ref.GetResourceId().OpaqueId) - if err != nil { - return "", err - } - p = strings.TrimPrefix(p, "files/") - if !fs.c.EnableHome { - owner, err := fs.filecache.GetStorageOwnerByFileID(ref.GetResourceId().OpaqueId) - if err != nil { - return "", err - } - p = filepath.Join(owner, p) - } - return fs.toInternalPath(ctx, p), nil - } - - if ref.GetPath() != "" { - return fs.toInternalPath(ctx, ref.GetPath()), nil - } - - // reference is invalid - return "", fmt.Errorf("invalid reference %+v", ref) -} - -func (fs *owncloudsqlfs) DenyGrant(ctx context.Context, ref *provider.Reference, g *provider.Grantee) error { - return errtypes.NotSupported("owncloudsqlfs: deny grant not supported") -} - -func (fs *owncloudsqlfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { - return errtypes.NotSupported("owncloudsqlfs: add grant not supported") -} - -func (fs *owncloudsqlfs) readPermissions(ctx context.Context, ip string) (p *provider.ResourcePermissions, err error) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - appctx.GetLogger(ctx).Debug().Str("ipath", ip).Msg("no user in context, returning default permissions") - return defaultPermissions, nil - } - // check if the current user is the owner - owner := fs.getOwner(ip) - if owner == u.Username { - appctx.GetLogger(ctx).Debug().Str("ipath", ip).Msg("user is owner, returning owner permissions") - return ownerPermissions, nil - } - - // otherwise this is a share - ownerStorageID, err := fs.filecache.GetNumericStorageID("home::" + owner) - if err != nil { - return nil, err - } - entry, err := fs.filecache.Get(ownerStorageID, fs.toDatabasePath(ip)) - if err != nil { - return nil, err - } - perms, err := conversions.NewPermissions(entry.Permissions) - if err != nil { - return nil, err - } - return 
conversions.RoleFromOCSPermissions(perms).CS3ResourcePermissions(), nil -} - -// The os not exists error is buried inside the xattr error, -// so we cannot just use os.IsNotExists(). -func isNotFound(err error) bool { - if xerr, ok := err.(*xattr.Error); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENOENT - } - } - return false -} - -func (fs *owncloudsqlfs) ListGrants(ctx context.Context, ref *provider.Reference) (grants []*provider.Grant, err error) { - return []*provider.Grant{}, nil // nop -} - -func (fs *owncloudsqlfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { - return nil // nop -} - -func (fs *owncloudsqlfs) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { - return nil // nop -} - -func (fs *owncloudsqlfs) CreateHome(ctx context.Context) error { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") - return err - } - return fs.createHomeForUser(ctx, templates.WithUser(u, fs.c.UserLayout)) -} - -func (fs *owncloudsqlfs) createHomeForUser(ctx context.Context, user string) error { - homePaths := []string{ - filepath.Join(fs.c.DataDirectory, user), - filepath.Join(fs.c.DataDirectory, user, "files"), - filepath.Join(fs.c.DataDirectory, user, "files_trashbin"), - filepath.Join(fs.c.DataDirectory, user, "files_trashbin/files"), - filepath.Join(fs.c.DataDirectory, user, "files_trashbin/versions"), - filepath.Join(fs.c.DataDirectory, user, "uploads"), - } - - storageID, err := fs.getUserStorage(user) - if err != nil { - return err - } - for _, v := range homePaths { - if err := os.MkdirAll(v, 0755); err != nil { - return errors.Wrap(err, "owncloudsql: error creating home path: "+v) - } - - fi, err := os.Stat(v) - if err != nil { - return err - } - data := map[string]interface{}{ - "path": fs.toDatabasePath(v), - "etag": calcEtag(ctx, fi), - "mimetype": 
"httpd/unix-directory", - "permissions": 31, // 1: READ, 2: UPDATE, 4: CREATE, 8: DELETE, 16: SHARE - } - - allowEmptyParent := v == filepath.Join(fs.c.DataDirectory, user) // the root doesn't have a parent - _, err = fs.filecache.InsertOrUpdate(storageID, data, allowEmptyParent) - if err != nil { - return err - } - } - return nil -} - -// If home is enabled, the relative home is always the empty string. -func (fs *owncloudsqlfs) GetHome(ctx context.Context) (string, error) { - if !fs.c.EnableHome { - return "", errtypes.NotSupported("owncloudsql: get home not supported") - } - return "", nil -} - -func (fs *owncloudsqlfs) CreateDir(ctx context.Context, ref *provider.Reference) (err error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return err - } - - // check permissions of parent dir - if perm, err := fs.readPermissions(ctx, filepath.Dir(ip)); err == nil { - if !perm.CreateContainer { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(ref.Path) - } - return errors.Wrap(err, "owncloudsql: error reading permissions") - } - - if err = os.Mkdir(ip, 0700); err != nil { - if os.IsNotExist(err) { - return errtypes.NotFound(ref.Path) - } - // FIXME we also need already exists error, webdav expects 405 MethodNotAllowed - return errors.Wrap(err, "owncloudsql: error creating dir "+fs.toStoragePath(ctx, filepath.Dir(ip))) - } - - fi, err := os.Stat(ip) - if err != nil { - return err - } - mtime := time.Now().Unix() - - permissions := 31 // 1: READ, 2: UPDATE, 4: CREATE, 8: DELETE, 16: SHARE - if perm, err := fs.readPermissions(ctx, filepath.Dir(ip)); err == nil { - permissions = int(conversions.RoleFromResourcePermissions(perm).OCSPermissions()) // inherit permissions of parent - } - data := map[string]interface{}{ - "path": fs.toDatabasePath(ip), - "etag": calcEtag(ctx, fi), - "mimetype": "httpd/unix-directory", - "permissions": permissions, - "mtime": mtime, - "storage_mtime": mtime, - } - storageID, err := 
fs.getStorage(ip) - if err != nil { - return err - } - _, err = fs.filecache.InsertOrUpdate(storageID, data, false) - if err != nil { - if err != nil { - return err - } - } - - return fs.propagate(ctx, filepath.Dir(ip)) -} - -// TouchFile as defined in the storage.FS interface. -func (fs *owncloudsqlfs) TouchFile(ctx context.Context, ref *provider.Reference) error { - return fmt.Errorf("unimplemented: TouchFile") -} - -func (fs *owncloudsqlfs) CreateReference(ctx context.Context, sp string, targetURI *url.URL) error { - return errtypes.NotSupported("owncloudsql: operation not supported") -} - -func (fs *owncloudsqlfs) setMtime(ctx context.Context, ip string, mtime string) error { - log := appctx.GetLogger(ctx) - if mt, err := parseMTime(mtime); err == nil { - // updating mtime also updates atime - if err := os.Chtimes(ip, mt, mt); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Time("mtime", mt). - Msg("could not set mtime") - return errors.Wrap(err, "could not set mtime") - } - } else { - log.Error().Err(err). - Str("ipath", ip). - Str("mtime", mtime). - Msg("could not parse mtime") - return errors.Wrap(err, "could not parse mtime") - } - return nil -} -func (fs *owncloudsqlfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) (err error) { - log := appctx.GetLogger(ctx) - - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.InitiateFileUpload { // TODO add dedicated permission? 
- return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return errors.Wrap(err, "owncloudsql: error reading permissions") - } - - var fi os.FileInfo - fi, err = os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "owncloudsql: error stating "+ip) - } - - errs := []error{} - - if md.Metadata != nil { - if val, ok := md.Metadata["mtime"]; ok { - err := fs.setMtime(ctx, ip, val) - if err != nil { - errs = append(errs, errors.Wrap(err, "could not set mtime")) - } - // remove from metadata - delete(md.Metadata, "mtime") - } - // TODO(jfd) special handling for atime? - // TODO(jfd) allow setting birth time (btime)? - // TODO(jfd) any other metadata that is interesting? fileid? - if val, ok := md.Metadata["etag"]; ok { - etag := calcEtag(ctx, fi) - val = fmt.Sprintf("\"%s\"", strings.Trim(val, "\"")) - if etag == val { - log.Debug(). - Str("ipath", ip). - Str("etag", val). - Msg("ignoring request to update identical etag") - } else - // etag is only valid until the calculated etag changes - // TODO(jfd) cleanup in a batch job - if err := xattr.Set(ip, etagPrefix+etag, []byte(val)); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Str("calcetag", etag). - Str("etag", val). - Msg("could not set etag") - errs = append(errs, errors.Wrap(err, "could not set etag")) - } - delete(md.Metadata, "etag") - } - if val, ok := md.Metadata["http://owncloud.org/ns/favorite"]; ok { - // TODO we should not mess with the user here ... the favorites is now a user specific property for a file - // that cannot be mapped to extended attributes without leaking who has marked a file as a favorite - // it is a specific case of a tag, which is user individual as well - // TODO there are different types of tags - // 1. public that are managed by everyone - // 2. 
private tags that are only visible to the user - // 3. system tags that are only visible to the system - // 4. group tags that are only visible to a group ... - // urgh ... well this can be solved using different namespaces - // 1. public = p: - // 2. private = u:: for user specific - // 3. system = s: for system - // 4. group = g:: - // 5. app? = a:: for apps? - // obviously this only is secure when the u/s/g/a namespaces are not accessible by users in the filesystem - // public tags can be mapped to extended attributes - if u, ok := ctxpkg.ContextGetUser(ctx); ok { - // the favorite flag is specific to the user, so we need to incorporate the userid - if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) - if err := xattr.Set(ip, fa, []byte(val)); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Interface("user", u). - Str("key", fa). - Msg("could not set favorite flag") - errs = append(errs, errors.Wrap(err, "could not set favorite flag")) - } - } else { - log.Error(). - Str("ipath", ip). - Interface("user", u). - Msg("user has no id") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) - } - } else { - log.Error(). - Str("ipath", ip). - Interface("user", u). - Msg("error getting user from ctx") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) - } - // remove from metadata - delete(md.Metadata, "http://owncloud.org/ns/favorite") - } - } - for k, v := range md.Metadata { - if err := xattr.Set(ip, mdPrefix+k, []byte(v)); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Str("key", k). - Str("val", v). - Msg("could not set metadata") - errs = append(errs, errors.Wrap(err, "could not set metadata")) - } - } - switch len(errs) { - case 0: - return fs.propagate(ctx, ip) - case 1: - return errs[0] - default: - // TODO how to return multiple errors? 
- return errors.New("multiple errors occurred, see log for details") - } -} - -func parseMTime(v string) (t time.Time, err error) { - p := strings.SplitN(v, ".", 2) - var sec, nsec int64 - if sec, err = strconv.ParseInt(p[0], 10, 64); err == nil { - if len(p) > 1 { - nsec, err = strconv.ParseInt(p[1], 10, 64) - } - } - return time.Unix(sec, nsec), err -} - -func (fs *owncloudsqlfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) (err error) { - log := appctx.GetLogger(ctx) - - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.InitiateFileUpload { // TODO add dedicated permission? - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "owncloudsql: error reading permissions") - } - - _, err = os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "owncloudsql: error stating "+ip) - } - - errs := []error{} - for _, k := range keys { - switch k { - case "http://owncloud.org/ns/favorite": - if u, ok := ctxpkg.ContextGetUser(ctx); ok { - // the favorite flag is specific to the user, so we need to incorporate the userid - if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) - if err := xattr.Remove(ip, fa); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Interface("user", u). - Str("key", fa). - Msg("could not unset favorite flag") - errs = append(errs, errors.Wrap(err, "could not unset favorite flag")) - } - } else { - log.Error(). - Str("ipath", ip). - Interface("user", u). 
- Msg("user has no id") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) - } - } else { - log.Error(). - Str("ipath", ip). - Interface("user", u). - Msg("error getting user from ctx") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) - } - default: - if err = xattr.Remove(ip, mdPrefix+k); err != nil { - // a non-existing attribute will return an error, which we can ignore - // (using string compare because the error type is syscall.Errno and not wrapped/recognizable) - if e, ok := err.(*xattr.Error); !ok || !(e.Err.Error() == "no data available" || - // darwin - e.Err.Error() == "attribute not found") { - log.Error().Err(err). - Str("ipath", ip). - Str("key", k). - Msg("could not unset metadata") - errs = append(errs, errors.Wrap(err, "could not unset metadata")) - } - } - } - } - - switch len(errs) { - case 0: - return fs.propagate(ctx, ip) - case 1: - return errs[0] - default: - // TODO how to return multiple errors? - return errors.New("multiple errors occurred, see log for details") - } -} - -// GetLock returns an existing lock on the given reference. -func (fs *owncloudsqlfs) GetLock(ctx context.Context, ref *provider.Reference) (*provider.Lock, error) { - return nil, errtypes.NotSupported("unimplemented") -} - -// SetLock puts a lock on the given reference. -func (fs *owncloudsqlfs) SetLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error { - return errtypes.NotSupported("unimplemented") -} - -// RefreshLock refreshes an existing lock on the given reference. -func (fs *owncloudsqlfs) RefreshLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock, existingLockID string) error { - return errtypes.NotSupported("unimplemented") -} - -// Unlock removes an existing lock from the given reference. 
-func (fs *owncloudsqlfs) Unlock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error { - return errtypes.NotSupported("unimplemented") -} - -// Delete is actually only a move to trash -// -// This is a first optimistic approach. -// When a file has versions and we want to delete the file it could happen that -// the service crashes before all moves are finished. -// That would result in invalid state like the main files was moved but the -// versions were not. -// We will live with that compromise since this storage driver will be -// deprecated soon. -func (fs *owncloudsqlfs) Delete(ctx context.Context, ref *provider.Reference) (err error) { - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.Delete { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return errors.Wrap(err, "owncloudsql: error reading permissions") - } - - _, err = os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "owncloudsql: error stating "+ip) - } - - // Delete file into the owner's trash, not the user's (in case of shares) - rp, err := fs.getRecyclePathForUser(fs.getOwner(ip)) - if err != nil { - return errors.Wrap(err, "owncloudsql: error resolving recycle path") - } - - if err := os.MkdirAll(rp, 0700); err != nil { - return errors.Wrap(err, "owncloudsql: error creating trashbin dir "+rp) - } - - // ip is the path on disk ... 
we need only the path relative to root - origin := filepath.Dir(fs.toStoragePath(ctx, ip)) - - err = fs.trash(ctx, ip, rp, origin) - if err != nil { - return errors.Wrapf(err, "owncloudsql: error deleting file %s", ip) - } - return nil -} - -func (fs *owncloudsqlfs) trash(ctx context.Context, ip string, rp string, origin string) error { - // move to trash location - dtime := time.Now().Unix() - tgt := filepath.Join(rp, fmt.Sprintf("%s.d%d", filepath.Base(ip), dtime)) - if err := os.Rename(ip, tgt); err != nil { - if os.IsExist(err) { - // timestamp collision, try again with higher value: - dtime++ - tgt := filepath.Join(rp, fmt.Sprintf("%s.d%d", filepath.Base(ip), dtime)) - if err := os.Rename(ip, tgt); err != nil { - return errors.Wrap(err, "owncloudsql: could not move item to trash") - } - } - } - - storage, err := fs.getStorage(ip) - if err != nil { - return err - } - - tryDelete := func() error { - return fs.filecache.Delete(storage, fs.getOwner(ip), fs.toDatabasePath(ip), fs.toDatabasePath(tgt)) - } - err = tryDelete() - if err != nil { - err = fs.createHomeForUser(ctx, fs.getOwner(ip)) // Try setting up the owner's home (incl. trash) to fix the problem - if err != nil { - return err - } - err = tryDelete() - if err != nil { - return err - } - } - - err = fs.trashVersions(ctx, ip, origin, dtime) - if err != nil { - return errors.Wrapf(err, "owncloudsql: error deleting versions of file %s", ip) - } - - return fs.propagate(ctx, filepath.Dir(ip)) -} - -func (fs *owncloudsqlfs) trashVersions(ctx context.Context, ip string, origin string, dtime int64) error { - vp := fs.getVersionsPath(ctx, ip) - vrp, err := fs.getVersionRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "error resolving versions recycle path") - } - - if err := os.MkdirAll(vrp, 0700); err != nil { - return errors.Wrap(err, "owncloudsql: error creating trashbin dir "+vrp) - } - - // Ignore error since the only possible error is malformed pattern. 
- versions, _ := filepath.Glob(vp + ".v*") - storage, err := fs.getStorage(ip) - if err != nil { - return err - } - for _, v := range versions { - tgt := filepath.Join(vrp, fmt.Sprintf("%s.d%d", filepath.Base(v), dtime)) - if err := os.Rename(v, tgt); err != nil { - if os.IsExist(err) { - // timestamp collision, try again with higher value: - dtime++ - tgt := filepath.Join(vrp, fmt.Sprintf("%s.d%d", filepath.Base(ip), dtime)) - if err := os.Rename(ip, tgt); err != nil { - return errors.Wrap(err, "owncloudsql: could not move item to trash") - } - } - } - if err != nil { - return errors.Wrap(err, "owncloudsql: error deleting file "+v) - } - err = fs.filecache.Move(storage, fs.toDatabasePath(v), fs.toDatabasePath(tgt)) - if err != nil { - return errors.Wrap(err, "owncloudsql: error deleting file "+v) - } - } - return nil -} - -func (fs *owncloudsqlfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (err error) { - var oldIP string - if oldIP, err = fs.resolve(ctx, oldRef); err != nil { - return errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, oldIP); err == nil { - if !perm.Move { // TODO add dedicated permission? - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(oldIP))) - } - return errors.Wrap(err, "owncloudsql: error reading permissions") - } - - var newIP string - if newIP, err = fs.resolve(ctx, newRef); err != nil { - return errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // TODO check target permissions ... 
if it exists - storage, err := fs.getStorage(oldIP) - if err != nil { - return err - } - err = fs.filecache.Move(storage, fs.toDatabasePath(oldIP), fs.toDatabasePath(newIP)) - if err != nil { - return err - } - if err = os.Rename(oldIP, newIP); err != nil { - return errors.Wrap(err, "owncloudsql: error moving "+oldIP+" to "+newIP) - } - - if err := fs.propagate(ctx, newIP); err != nil { - return err - } - if filepath.Dir(newIP) != filepath.Dir(oldIP) { - if err := fs.propagate(ctx, filepath.Dir(oldIP)); err != nil { - return err - } - } - return nil -} - -func (fs *owncloudsqlfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (*provider.ResourceInfo, error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - // TODO return correct errtype - if _, ok := err.(errtypes.IsNotFound); ok { - return nil, err - } - return nil, errors.Wrap(err, "owncloudsql: error resolving reference") - } - p := fs.toStoragePath(ctx, ip) - - // If GetMD is called for a path shared with the user then the path is - // already wrapped. 
(fs.resolve wraps the path) - if strings.HasPrefix(p, fs.c.DataDirectory) { - ip = p - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.Stat { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "owncloudsql: error reading permissions") - } - - ownerStorageID, err := fs.filecache.GetNumericStorageID("home::" + fs.getOwner(ip)) - if err != nil { - return nil, err - } - entry, err := fs.filecache.Get(ownerStorageID, fs.toDatabasePath(ip)) - switch { - case err == sql.ErrNoRows: - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - case err != nil: - return nil, err - } - - return fs.convertToResourceInfo(ctx, entry, ip, mdKeys) -} - -func (fs *owncloudsqlfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string) ([]*provider.ResourceInfo, error) { - log := appctx.GetLogger(ctx) - - ip, err := fs.resolve(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "owncloudsql: error resolving reference") - } - sp := fs.toStoragePath(ctx, ip) - - if fs.c.EnableHome { - log.Debug().Msg("home enabled") - if strings.HasPrefix(sp, "/") { - // permissions checked in listWithHome - return fs.listWithHome(ctx, "/", sp, mdKeys) - } - } - - log.Debug().Msg("list with nominal home") - // permissions checked in listWithNominalHome - return fs.listWithNominalHome(ctx, sp, mdKeys) -} - -func (fs *owncloudsqlfs) listWithNominalHome(ctx context.Context, ip string, mdKeys []string) ([]*provider.ResourceInfo, error) { - // If a user wants to list a folder shared with him the path will already - // be wrapped with the files directory path of the share owner. - // In that case we don't want to wrap the path again. 
- if !strings.HasPrefix(ip, fs.c.DataDirectory) { - ip = fs.toInternalPath(ctx, ip) - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "owncloudsql: error reading permissions") - } - - storage, err := fs.getStorage(ip) - if err != nil { - return nil, err - } - entries, err := fs.filecache.List(storage, fs.toDatabasePath(ip)+"/") - if err != nil { - return nil, errors.Wrapf(err, "owncloudsql: error listing %s", ip) - } - owner := fs.getOwner(ip) - finfos := []*provider.ResourceInfo{} - for _, entry := range entries { - cp := filepath.Join(fs.c.DataDirectory, owner, entry.Path) - if err != nil { - return nil, err - } - m, err := fs.convertToResourceInfo(ctx, entry, cp, mdKeys) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("path", cp).Msg("error converting to a resource info") - } - finfos = append(finfos, m) - } - return finfos, nil -} - -func (fs *owncloudsqlfs) listWithHome(ctx context.Context, home, p string, mdKeys []string) ([]*provider.ResourceInfo, error) { - log := appctx.GetLogger(ctx) - if p == home { - log.Debug().Msg("listing home") - return fs.listHome(ctx, home, mdKeys) - } - - log.Debug().Msg("listing nominal home") - return fs.listWithNominalHome(ctx, p, mdKeys) -} - -func (fs *owncloudsqlfs) listHome(ctx context.Context, home string, mdKeys []string) ([]*provider.ResourceInfo, error) { - // list files - ip := fs.toInternalPath(ctx, home) - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "owncloudsql: error reading permissions") - } - - 
storage, err := fs.getStorage(ip) - if err != nil { - return nil, err - } - entries, err := fs.filecache.List(storage, fs.toDatabasePath(ip)+"/") - if err != nil { - return nil, errors.Wrapf(err, "owncloudsql: error listing %s", ip) - } - owner := fs.getOwner(ip) - finfos := []*provider.ResourceInfo{} - for _, entry := range entries { - cp := filepath.Join(fs.c.DataDirectory, owner, entry.Path) - m, err := fs.convertToResourceInfo(ctx, entry, cp, mdKeys) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("path", cp).Msg("error converting to a resource info") - } - finfos = append(finfos, m) - } - return finfos, nil -} - -func (fs *owncloudsqlfs) archiveRevision(ctx context.Context, vbp string, ip string) error { - // move existing file to versions dir - vp := fmt.Sprintf("%s.v%d", vbp, time.Now().Unix()) - if err := os.MkdirAll(filepath.Dir(vp), 0700); err != nil { - return errors.Wrap(err, "owncloudsql: error creating versions dir "+vp) - } - - // TODO(jfd): make sure rename is atomic, missing fsync ... 
- if err := os.Rename(ip, vp); err != nil { - return errors.Wrap(err, "owncloudsql: error renaming from "+ip+" to "+vp) - } - - storage, err := fs.getStorage(ip) - if err != nil { - return err - } - - vdp := fs.toDatabasePath(vp) - basePath := strings.TrimSuffix(vp, vdp) - parts := strings.Split(filepath.Dir(vdp), "/") - walkPath := "" - for i := 0; i < len(parts); i++ { - walkPath = filepath.Join(walkPath, parts[i]) - _, err := fs.filecache.Get(storage, walkPath) - if err == nil { - continue - } - - fi, err := os.Stat(filepath.Join(basePath, walkPath)) - if err != nil { - return errors.Wrap(err, "could not stat parent version directory") - } - data := map[string]interface{}{ - "path": walkPath, - "mimetype": "httpd/unix-directory", - "etag": calcEtag(ctx, fi), - "permissions": 31, // 1: READ, 2: UPDATE, 4: CREATE, 8: DELETE, 16: SHARE - } - - _, err = fs.filecache.InsertOrUpdate(storage, data, false) - if err != nil { - return errors.Wrap(err, "could not create parent version directory") - } - } - _, err = fs.filecache.Copy(storage, fs.toDatabasePath(ip), vdp) - return err -} - -func (fs *owncloudsqlfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.InitiateFileDownload { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "owncloudsql: error reading permissions") - } - - r, err := os.Open(ip) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return nil, errors.Wrap(err, "owncloudsql: error reading "+ip) - } - return r, nil -} - -func (fs *owncloudsqlfs) ListRevisions(ctx context.Context, ref *provider.Reference) 
([]*provider.FileVersion, error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListFileVersions { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "owncloudsql: error reading permissions") - } - - vp := fs.getVersionsPath(ctx, ip) - bn := filepath.Base(ip) - storageID, err := fs.getStorage(ip) - if err != nil { - return nil, err - } - entries, err := fs.filecache.List(storageID, filepath.Dir(fs.toDatabasePath(vp))+"/") - if err != nil { - return nil, err - } - revisions := []*provider.FileVersion{} - for _, entry := range entries { - if strings.HasPrefix(entry.Name, bn) { - // versions have filename.ext.v12345678 - version := entry.Name[len(bn)+2:] // truncate ".v" to get version mtime - mtime, err := strconv.Atoi(version) - if err != nil { - log := appctx.GetLogger(ctx) - log.Error().Err(err).Str("path", entry.Name).Msg("invalid version mtime") - return nil, err - } - revisions = append(revisions, &provider.FileVersion{ - Key: version, - Size: uint64(entry.Size), - Mtime: uint64(mtime), - Etag: entry.Etag, - }) - } - } - - return revisions, nil -} - -func (fs *owncloudsqlfs) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (io.ReadCloser, error) { - return nil, errtypes.NotSupported("download revision") -} - -func (fs *owncloudsqlfs) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) error { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.RestoreFileVersion { - return errtypes.PermissionDenied("") - } - } else { - 
if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return errors.Wrap(err, "owncloudsql: error reading permissions") - } - - vp := fs.getVersionsPath(ctx, ip) - rp := vp + ".v" + revisionKey - - // check revision exists - rs, err := os.Stat(rp) - if err != nil { - return err - } - - if !rs.Mode().IsRegular() { - return fmt.Errorf("%s is not a regular file", rp) - } - - source, err := os.Open(rp) - if err != nil { - return err - } - defer source.Close() - - // destination should be available, otherwise we could not have navigated to its revisions - if err := fs.archiveRevision(ctx, fs.getVersionsPath(ctx, ip), ip); err != nil { - return err - } - - destination, err := os.Create(ip) - if err != nil { - // TODO(jfd) bring back revision in case sth goes wrong? - return err - } - defer destination.Close() - - _, err = io.Copy(destination, source) - if err != nil { - return err - } - - sha1h, md5h, adler32h, err := fs.HashFile(ip) - if err != nil { - log.Err(err).Msg("owncloudsql: could not open file for checksumming") - } - fi, err := os.Stat(ip) - if err != nil { - return err - } - mtime := time.Now().Unix() - data := map[string]interface{}{ - "path": fs.toDatabasePath(ip), - "checksum": fmt.Sprintf("SHA1:%032x MD5:%032x ADLER32:%032x", sha1h, md5h, adler32h), - "etag": calcEtag(ctx, fi), - "size": fi.Size(), - "mimetype": mime.Detect(false, ip), - "mtime": mtime, - "storage_mtime": mtime, - } - storageID, err := fs.getStorage(ip) - if err != nil { - return err - } - _, err = fs.filecache.InsertOrUpdate(storageID, data, false) - if err != nil { - return err - } - - // TODO(jfd) bring back revision in case sth goes wrong? 
- return fs.propagate(ctx, ip) -} - -func (fs *owncloudsqlfs) PurgeRecycleItem(ctx context.Context, basePath, key, relativePath string) error { - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "owncloudsql: error resolving recycle path") - } - vp := filepath.Join(filepath.Dir(rp), "versions") - ip := filepath.Join(rp, filepath.Clean(key)) - // TODO check permission? - - // check permissions - /* are they stored in the trash? - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.unwrap(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "owncloudsql: error reading permissions") - } - */ - - err = os.RemoveAll(ip) - if err != nil { - return errors.Wrap(err, "owncloudsql: error deleting recycle item") - } - base, ttime, err := splitTrashKey(key) - if err != nil { - return err - } - err = fs.filecache.PurgeRecycleItem(ctxpkg.ContextMustGetUser(ctx).Username, base, ttime, false) - if err != nil { - return err - } - - versionsGlob := filepath.Join(vp, base+".v*.d"+strconv.Itoa(ttime)) - versionFiles, err := filepath.Glob(versionsGlob) - if err != nil { - return errors.Wrap(err, "owncloudsql: error listing recycle item versions") - } - storageID, err := fs.getStorage(ip) - if err != nil { - return err - } - for _, versionFile := range versionFiles { - err = os.Remove(versionFile) - if err != nil { - return errors.Wrap(err, "owncloudsql: error deleting recycle item versions") - } - err = fs.filecache.Purge(storageID, fs.toDatabasePath(versionFile)) - if err != nil { - return err - } - } - - // TODO delete keyfiles, keys, share-keys - return nil -} - -func (fs *owncloudsqlfs) EmptyRecycle(ctx context.Context) error { - // TODO check permission? on what? 
user must be the owner - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "owncloudsql: error resolving recycle path") - } - err = os.RemoveAll(rp) - if err != nil { - return errors.Wrap(err, "owncloudsql: error deleting recycle files") - } - err = os.RemoveAll(filepath.Join(filepath.Dir(rp), "versions")) - if err != nil { - return errors.Wrap(err, "owncloudsql: error deleting recycle files versions") - } - - u := ctxpkg.ContextMustGetUser(ctx) - err = fs.filecache.EmptyRecycle(u.Username) - if err != nil { - return errors.Wrap(err, "owncloudsql: error deleting recycle items from the database") - } - - // TODO delete keyfiles, keys, share-keys ... or just everything? - return nil -} - -func splitTrashKey(key string) (string, int, error) { - // trashbin items have filename.ext.d12345678 - suffix := filepath.Ext(key) - if len(suffix) == 0 || !strings.HasPrefix(suffix, ".d") { - return "", -1, fmt.Errorf("invalid suffix") - } - trashtime := suffix[2:] // truncate "d" to get trashbin time - ttime, err := strconv.Atoi(trashtime) - if err != nil { - return "", -1, fmt.Errorf("invalid suffix") - } - return strings.TrimSuffix(filepath.Base(key), suffix), ttime, nil -} - -func (fs *owncloudsqlfs) convertToRecycleItem(ctx context.Context, md os.FileInfo) *provider.RecycleItem { - base, ttime, err := splitTrashKey(md.Name()) - if err != nil { - log := appctx.GetLogger(ctx) - log.Error().Str("path", md.Name()).Msg("invalid trash item key") - } - - u := ctxpkg.ContextMustGetUser(ctx) - item, err := fs.filecache.GetRecycleItem(u.Username, base, ttime) - if err != nil { - log := appctx.GetLogger(ctx) - log.Error().Err(err).Str("path", md.Name()).Msg("could not get trash item") - return nil - } - - // ownCloud 10 stores the parent dir of the deleted item as the location in the oc_files_trashbin table - // we use extended attributes for original location, but also only the parent location, which is why - // we need to join and trim the path when listing 
it - originalPath := filepath.Join(item.Path, base) - - return &provider.RecycleItem{ - Type: getResourceType(md.IsDir()), - Key: md.Name(), - // TODO do we need to prefix the path? it should be relative to this storage root, right? - Ref: &provider.Reference{Path: originalPath}, - Size: uint64(md.Size()), - DeletionTime: &types.Timestamp{ - Seconds: uint64(ttime), - // no nanos available - }, - } -} - -func (fs *owncloudsqlfs) ListRecycle(ctx context.Context, basePath, key, relativePath string) ([]*provider.RecycleItem, error) { - // TODO check permission? on what? user must be the owner? - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return nil, errors.Wrap(err, "owncloudsql: error resolving recycle path") - } - - // list files folder - entries, err := os.ReadDir(rp) - if err != nil { - log := appctx.GetLogger(ctx) - log.Debug().Err(err).Str("path", rp).Msg("trash not readable") - // TODO jfd only ignore not found errors - return []*provider.RecycleItem{}, nil - } - mds := make([]iofs.FileInfo, 0, len(entries)) - for _, entry := range entries { - info, err := entry.Info() - if err != nil { - log := appctx.GetLogger(ctx) - log.Debug().Err(err).Str("path", rp).Msg("trash not readable") - // TODO jfd only ignore not found errors - return []*provider.RecycleItem{}, nil - } - mds = append(mds, info) - } - // TODO (jfd) limit and offset - items := []*provider.RecycleItem{} - for i := range mds { - ri := fs.convertToRecycleItem(ctx, mds[i]) - if ri != nil { - items = append(items, ri) - } - } - return items, nil -} - -func (fs *owncloudsqlfs) RestoreRecycleItem(ctx context.Context, basePath, key, relativePath string, restoreRef *provider.Reference) error { - log := appctx.GetLogger(ctx) - - base, ttime, err := splitTrashKey(key) - if err != nil { - log.Error().Str("path", key).Msg("invalid trash item key") - return fmt.Errorf("invalid trash item suffix") - } - - recyclePath, err := fs.getRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "owncloudsql: 
error resolving recycle path") - } - src := filepath.Join(recyclePath, filepath.Clean(key)) - - if restoreRef.Path == "" { - u := ctxpkg.ContextMustGetUser(ctx) - item, err := fs.filecache.GetRecycleItem(u.Username, base, ttime) - if err != nil { - log := appctx.GetLogger(ctx) - log.Error().Err(err).Str("path", key).Msg("could not get trash item") - return nil - } - restoreRef.Path = filepath.Join(item.Path, item.Name) - } - - tgt := fs.toInternalPath(ctx, restoreRef.Path) - // move back to original location - if err := os.Rename(src, tgt); err != nil { - log.Error().Err(err).Str("key", key).Str("restorePath", restoreRef.Path).Str("src", src).Str("tgt", tgt).Msg("could not restore item") - return errors.Wrap(err, "owncloudsql: could not restore item") - } - - storage, err := fs.getStorage(src) - if err != nil { - return err - } - err = fs.filecache.Move(storage, fs.toDatabasePath(src), fs.toDatabasePath(tgt)) - if err != nil { - return err - } - err = fs.filecache.DeleteRecycleItem(ctxpkg.ContextMustGetUser(ctx).Username, base, ttime) - if err != nil { - return err - } - err = fs.RestoreRecycleItemVersions(ctx, key, tgt) - if err != nil { - return err - } - - return fs.propagate(ctx, tgt) -} - -func (fs *owncloudsqlfs) RestoreRecycleItemVersions(ctx context.Context, key, target string) error { - base, ttime, err := splitTrashKey(key) - if err != nil { - return fmt.Errorf("invalid trash item suffix") - } - storage, err := fs.getStorage(target) - if err != nil { - return err - } - - recyclePath, err := fs.getRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "owncloudsql: error resolving recycle path") - } - versionsRecyclePath := filepath.Join(filepath.Dir(recyclePath), "versions") - - // Restore versions - deleteSuffix := ".d" + strconv.Itoa(ttime) - versionsGlob := filepath.Join(versionsRecyclePath, base+".v*"+deleteSuffix) - versionFiles, err := filepath.Glob(versionsGlob) - versionsRoot := filepath.Dir(fs.getVersionsPath(ctx, target)) - - if err != nil 
{ - return errors.Wrap(err, "owncloudsql: error listing recycle item versions") - } - for _, versionFile := range versionFiles { - versionBase := strings.TrimSuffix(filepath.Base(versionFile), deleteSuffix) - versionsRestorePath := filepath.Join(versionsRoot, versionBase) - if err = os.Rename(versionFile, versionsRestorePath); err != nil { - return errors.Wrap(err, "owncloudsql: could not restore version file") - } - err = fs.filecache.Move(storage, fs.toDatabasePath(versionFile), fs.toDatabasePath(versionsRestorePath)) - if err != nil { - return err - } - } - return nil -} - -func (fs *owncloudsqlfs) propagate(ctx context.Context, leafPath string) error { - var root string - if fs.c.EnableHome { - root = filepath.Clean(fs.toInternalPath(ctx, "/")) - } else { - owner := fs.getOwner(leafPath) - root = filepath.Clean(fs.toInternalPath(ctx, owner)) - } - versionsRoot := filepath.Join(filepath.Dir(root), "files_versions") - if !strings.HasPrefix(leafPath, root) { - err := errors.New("internal path outside root") - appctx.GetLogger(ctx).Error(). - Err(err). - Str("leafPath", leafPath). - Str("root", root). - Msg("could not propagate change") - return err - } - - fi, err := os.Stat(leafPath) - if err != nil { - appctx.GetLogger(ctx).Error(). - Err(err). - Str("leafPath", leafPath). - Str("root", root). - Msg("could not propagate change") - return err - } - - storageID, err := fs.getStorage(leafPath) - if err != nil { - return err - } - - currentPath := filepath.Clean(leafPath) - for currentPath != root && currentPath != versionsRoot { - appctx.GetLogger(ctx).Debug(). - Str("leafPath", leafPath). - Str("currentPath", currentPath). - Msg("propagating change") - parentFi, err := os.Stat(currentPath) - if err != nil { - return err - } - if fi.ModTime().UnixNano() > parentFi.ModTime().UnixNano() { - if err := os.Chtimes(currentPath, fi.ModTime(), fi.ModTime()); err != nil { - appctx.GetLogger(ctx).Error(). - Err(err). - Str("leafPath", leafPath). 
- Str("currentPath", currentPath). - Msg("could not propagate change") - return err - } - } - fi, err = os.Stat(currentPath) - if err != nil { - return err - } - etag := calcEtag(ctx, fi) - if err := fs.filecache.SetEtag(storageID, fs.toDatabasePath(currentPath), etag); err != nil { - appctx.GetLogger(ctx).Error(). - Err(err). - Str("leafPath", leafPath). - Str("currentPath", currentPath). - Msg("could not set etag") - return err - } - - currentPath = filepath.Dir(currentPath) - } - return nil -} - -func (fs *owncloudsqlfs) HashFile(path string) (string, string, string, error) { - sha1h := sha1.New() - md5h := md5.New() - adler32h := adler32.New() - { - f, err := os.Open(path) - if err != nil { - return "", "", "", errors.Wrap(err, "owncloudsql: could not copy bytes for checksumming") - } - defer f.Close() - - r1 := io.TeeReader(f, sha1h) - r2 := io.TeeReader(r1, md5h) - - if _, err := io.Copy(adler32h, r2); err != nil { - return "", "", "", errors.Wrap(err, "owncloudsql: could not copy bytes for checksumming") - } - - return string(sha1h.Sum(nil)), string(md5h.Sum(nil)), string(adler32h.Sum(nil)), nil - } -} - -func (fs *owncloudsqlfs) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter) ([]*provider.StorageSpace, error) { - // TODO(corby): Implement - return nil, errtypes.NotSupported("list storage spaces") -} - -// UpdateStorageSpace updates a storage space. -func (fs *owncloudsqlfs) UpdateStorageSpace(ctx context.Context, req *provider.UpdateStorageSpaceRequest) (*provider.UpdateStorageSpaceResponse, error) { - return nil, errtypes.NotSupported("update storage space") -} - -func readChecksumIntoResourceChecksum(ctx context.Context, checksums, algo string, ri *provider.ResourceInfo) { - re := regexp.MustCompile(strings.ToUpper(algo) + `:(.*)`) - matches := re.FindStringSubmatch(checksums) - if len(matches) < 2 { - appctx.GetLogger(ctx). - Debug(). - Str("nodepath", checksums). - Str("algorithm", algo). 
- Msg("checksum not set") - } else { - ri.Checksum = &provider.ResourceChecksum{ - Type: storageprovider.PKG2GRPCXS(algo), - Sum: matches[1], - } - } -} - -func readChecksumIntoOpaque(ctx context.Context, checksums, algo string, ri *provider.ResourceInfo) { - re := regexp.MustCompile(strings.ToUpper(algo) + `:(.*)`) - matches := re.FindStringSubmatch(checksums) - if len(matches) < 2 { - appctx.GetLogger(ctx). - Debug(). - Str("nodepath", checksums). - Str("algorithm", algo). - Msg("checksum not set") - } else { - if ri.Opaque == nil { - ri.Opaque = &types.Opaque{ - Map: map[string]*types.OpaqueEntry{}, - } - } - ri.Opaque.Map[algo] = &types.OpaqueEntry{ - Decoder: "plain", - Value: []byte(matches[1]), - } - } -} - -func getResourceType(isDir bool) provider.ResourceType { - if isDir { - return provider.ResourceType_RESOURCE_TYPE_CONTAINER - } - return provider.ResourceType_RESOURCE_TYPE_FILE -} - -// TODO propagate etag and mtime or append event to history? propagate on disk ... -// - but propagation is a separate task. only if upload was successful ... diff --git a/pkg/storage/fs/owncloudsql/owncloudsql_unix.go b/pkg/storage/fs/owncloudsql/owncloudsql_unix.go deleted file mode 100644 index f4b08e8a94..0000000000 --- a/pkg/storage/fs/owncloudsql/owncloudsql_unix.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -//go:build !windows -// +build !windows - -package owncloudsql - -import ( - "context" - "crypto/md5" - "encoding/binary" - "fmt" - "os" - "strings" - "syscall" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" -) - -// TODO(jfd) get rid of the differences between unix and windows. the inode and dev should never be used for the etag because it interferes with backups - -// calcEtag will create an etag based on the md5 of -// - mtime, -// - inode (if available), -// - device (if available) and -// - size. -// errors are logged, but an etag will still be returned. -func calcEtag(ctx context.Context, fi os.FileInfo) string { - log := appctx.GetLogger(ctx) - h := md5.New() - err := binary.Write(h, binary.BigEndian, fi.ModTime().UnixNano()) - if err != nil { - log.Error().Err(err).Msg("error writing mtime") - } - stat, ok := fi.Sys().(*syscall.Stat_t) - if ok { - // take device and inode into account - err = binary.Write(h, binary.BigEndian, stat.Ino) - if err != nil { - log.Error().Err(err).Msg("error writing inode") - } - err = binary.Write(h, binary.BigEndian, stat.Dev) - if err != nil { - log.Error().Err(err).Msg("error writing device") - } - } - err = binary.Write(h, binary.BigEndian, fi.Size()) - if err != nil { - log.Error().Err(err).Msg("error writing size") - } - etag := fmt.Sprintf("%x", h.Sum(nil)) - return strings.Trim(etag, "\"") -} - -func (fs *owncloudsqlfs) GetQuota(ctx context.Context, ref *provider.Reference) (uint64, uint64, error) { - // TODO quota of which storage space? 
- // we could use the logged in user, but when a user has access to multiple storages this falls short - // for now return quota of root - stat := syscall.Statfs_t{} - err := syscall.Statfs(fs.toInternalPath(ctx, "/"), &stat) - if err != nil { - return 0, 0, err - } - total := stat.Blocks * uint64(stat.Bsize) // Total data blocks in filesystem - used := (stat.Blocks - stat.Bavail) * uint64(stat.Bsize) // Free blocks available to unprivileged user - return total, used, nil -} diff --git a/pkg/storage/fs/owncloudsql/owncloudsql_windows.go b/pkg/storage/fs/owncloudsql/owncloudsql_windows.go deleted file mode 100644 index bdb22c1a03..0000000000 --- a/pkg/storage/fs/owncloudsql/owncloudsql_windows.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -//go:build windows -// +build windows - -package owncloudsql - -import ( - "context" - "crypto/md5" - "encoding/binary" - "fmt" - "os" - "strings" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "golang.org/x/sys/windows" -) - -// calcEtag will create an etag based on the md5 of -// - mtime, -// - inode (if available), -// - device (if available) and -// - size. 
-// errors are logged, but an etag will still be returned -func calcEtag(ctx context.Context, fi os.FileInfo) string { - log := appctx.GetLogger(ctx) - h := md5.New() - err := binary.Write(h, binary.BigEndian, fi.ModTime().UnixNano()) - if err != nil { - log.Error().Err(err).Msg("error writing mtime") - } - // device and inode have no meaning on windows - err = binary.Write(h, binary.BigEndian, fi.Size()) - if err != nil { - log.Error().Err(err).Msg("error writing size") - } - etag := fmt.Sprintf(`"%x"`, h.Sum(nil)) - return fmt.Sprintf("\"%s\"", strings.Trim(etag, "\"")) -} - -func (fs *owncloudsqlfs) GetQuota(ctx context.Context, ref *provider.Reference) (uint64, uint64, error) { - // TODO quota of which storage space? - // we could use the logged in user, but when a user has access to multiple storages this falls short - // for now return quota of root - var free, total, avail uint64 - - pathPtr, err := windows.UTF16PtrFromString(fs.toInternalPath(ctx, "/")) - if err != nil { - return 0, 0, err - } - err = windows.GetDiskFreeSpaceEx(pathPtr, &avail, &total, &free) - if err != nil { - return 0, 0, err - } - - used := total - free - return total, used, nil -} diff --git a/pkg/storage/fs/owncloudsql/upload.go b/pkg/storage/fs/owncloudsql/upload.go deleted file mode 100644 index 5bcfa25c6d..0000000000 --- a/pkg/storage/fs/owncloudsql/upload.go +++ /dev/null @@ -1,492 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package owncloudsql - -import ( - "context" - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - conversions "github.com/cs3org/reva/internal/http/services/owncloud/ocs/conversions" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/logger" - "github.com/cs3org/reva/pkg/mime" - "github.com/cs3org/reva/pkg/storage/utils/chunking" - "github.com/cs3org/reva/pkg/storage/utils/templates" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/rs/zerolog/log" - tusd "github.com/tus/tusd/pkg/handler" -) - -var defaultFilePerm = os.FileMode(0664) - -func (fs *owncloudsqlfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) error { - upload, err := fs.GetUpload(ctx, ref.GetPath()) - if err != nil { - return errors.Wrap(err, "owncloudsql: error retrieving upload") - } - - uploadInfo := upload.(*fileUpload) - - p := uploadInfo.info.Storage["InternalDestination"] - ok, err := chunking.IsChunked(p) - if err != nil { - return errors.Wrap(err, "owncloudsql: error checking path") - } - if ok { - var assembledFile string - p, assembledFile, err = fs.chunkHandler.WriteChunk(p, r) - if err != nil { - return err - } - if p == "" { - if err = uploadInfo.Terminate(ctx); err != nil { - return errors.Wrap(err, "owncloudsql: error removing auxiliary files") - } - return errtypes.PartialContent(ref.String()) - } - uploadInfo.info.Storage["InternalDestination"] = p - fd, err := os.Open(assembledFile) - if err != nil { - return errors.Wrap(err, "owncloudsql: error opening assembled file") - } - defer 
fd.Close() - defer os.RemoveAll(assembledFile) - r = fd - } - - if _, err := uploadInfo.WriteChunk(ctx, 0, r); err != nil { - return errors.Wrap(err, "owncloudsql: error writing to binary file") - } - - return uploadInfo.FinishUpload(ctx) -} - -// InitiateUpload returns upload ids corresponding to different protocols it supports -// TODO read optional content for small files in this request. -func (fs *owncloudsqlfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // permissions are checked in NewUpload below - - p := fs.toStoragePath(ctx, ip) - - info := tusd.FileInfo{ - MetaData: tusd.MetaData{ - "filename": filepath.Base(p), - "dir": filepath.Dir(p), - }, - Size: uploadLength, - } - - if metadata != nil { - if metadata["mtime"] != "" { - info.MetaData["mtime"] = metadata["mtime"] - } - if _, ok := metadata["sizedeferred"]; ok { - info.SizeIsDeferred = true - } - } - - upload, err := fs.NewUpload(ctx, info) - if err != nil { - return nil, err - } - - info, _ = upload.GetInfo(ctx) - - return map[string]string{ - "simple": info.ID, - "tus": info.ID, - }, nil -} - -// UseIn tells the tus upload middleware which extensions it supports. 
-func (fs *owncloudsqlfs) UseIn(composer *tusd.StoreComposer) { - composer.UseCore(fs) - composer.UseTerminater(fs) - composer.UseConcater(fs) - composer.UseLengthDeferrer(fs) -} - -// To implement the core tus.io protocol as specified in https://tus.io/protocols/resumable-upload.html#core-protocol -// - the storage needs to implement NewUpload and GetUpload -// - the upload needs to implement the tusd.Upload interface: WriteChunk, GetInfo, GetReader and FinishUpload - -func (fs *owncloudsqlfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { - log := appctx.GetLogger(ctx) - log.Debug().Interface("info", info).Msg("owncloudsql: NewUpload") - - if info.MetaData["filename"] == "" { - return nil, errors.New("owncloudsql: missing filename in metadata") - } - info.MetaData["filename"] = filepath.Clean(info.MetaData["filename"]) - - dir := info.MetaData["dir"] - if dir == "" { - return nil, errors.New("owncloudsql: missing dir in metadata") - } - info.MetaData["dir"] = filepath.Clean(info.MetaData["dir"]) - - ip := fs.toInternalPath(ctx, filepath.Join(info.MetaData["dir"], info.MetaData["filename"])) - - // check permissions - var perm *provider.ResourcePermissions - var perr error - // if destination exists - if _, err := os.Stat(ip); err == nil { - // check permissions of file to be overwritten - perm, perr = fs.readPermissions(ctx, ip) - } else { - // check permissions of parent folder - perm, perr = fs.readPermissions(ctx, filepath.Dir(ip)) - } - if perr == nil { - if !perm.InitiateFileUpload { - return nil, errtypes.PermissionDenied("") - } - } else { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "owncloudsql: error reading permissions") - } - - log.Debug().Interface("info", info).Msg("owncloudsql: resolved filename") - - info.ID = uuid.New().String() - - binPath, err := fs.getUploadPath(ctx, info.ID) - if err != nil { - return nil, 
errors.Wrap(err, "owncloudsql: error resolving upload path") - } - usr := ctxpkg.ContextMustGetUser(ctx) - storageID, err := fs.getStorage(ip) - if err != nil { - return nil, err - } - info.Storage = map[string]string{ - "Type": "OwnCloudStore", - "BinPath": binPath, - "InternalDestination": ip, - "Permissions": strconv.Itoa((int)(conversions.RoleFromResourcePermissions(perm).OCSPermissions())), - - "Idp": usr.Id.Idp, - "UserId": usr.Id.OpaqueId, - "UserName": usr.Username, - - "LogLevel": log.GetLevel().String(), - - "StorageId": strconv.Itoa(storageID), - } - // Create binary file in the upload folder with no content - log.Debug().Interface("info", info).Msg("owncloudsql: built storage info") - file, err := os.OpenFile(binPath, os.O_CREATE|os.O_WRONLY, defaultFilePerm) - if err != nil { - return nil, err - } - defer file.Close() - - u := &fileUpload{ - info: info, - binPath: binPath, - infoPath: filepath.Join(fs.c.UploadInfoDir, info.ID+".info"), - fs: fs, - ctx: ctx, - } - - // writeInfo creates the file by itself if necessary - err = u.writeInfo() - if err != nil { - return nil, err - } - - return u, nil -} - -func (fs *owncloudsqlfs) getUploadPath(ctx context.Context, uploadID string) (string, error) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") - return "", err - } - layout := templates.WithUser(u, fs.c.UserLayout) - return filepath.Join(fs.c.DataDirectory, layout, "uploads", uploadID), nil -} - -// GetUpload returns the Upload for the given upload id. 
-func (fs *owncloudsqlfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) { - infoPath := filepath.Join(fs.c.UploadInfoDir, id+".info") - - info := tusd.FileInfo{} - data, err := os.ReadFile(infoPath) - if err != nil { - if os.IsNotExist(err) { - // Interpret os.ErrNotExist as 404 Not Found - err = tusd.ErrNotFound - } - return nil, err - } - if err := json.Unmarshal(data, &info); err != nil { - return nil, err - } - - stat, err := os.Stat(info.Storage["BinPath"]) - if err != nil { - return nil, err - } - - info.Offset = stat.Size() - - u := &userpb.User{ - Id: &userpb.UserId{ - Idp: info.Storage["Idp"], - OpaqueId: info.Storage["UserId"], - }, - Username: info.Storage["UserName"], - } - - ctx = ctxpkg.ContextSetUser(ctx, u) - // TODO configure the logger the same way ... store and add traceid in file info - - var opts []logger.Option - opts = append(opts, logger.WithLevel(info.Storage["LogLevel"])) - opts = append(opts, logger.WithWriter(os.Stderr, logger.ConsoleMode)) - l := logger.New(opts...) - - sub := l.With().Int("pid", os.Getpid()).Logger() - - ctx = appctx.WithLogger(ctx, &sub) - - return &fileUpload{ - info: info, - binPath: info.Storage["BinPath"], - infoPath: infoPath, - fs: fs, - ctx: ctx, - }, nil -} - -type fileUpload struct { - // info stores the current information about the upload - info tusd.FileInfo - // infoPath is the path to the .info file - infoPath string - // binPath is the path to the binary file (which has no extension) - binPath string - // only fs knows how to handle metadata and versions - fs *owncloudsqlfs - // a context with a user - // TODO add logger as well? - ctx context.Context -} - -// GetInfo returns the FileInfo. -func (upload *fileUpload) GetInfo(ctx context.Context) (tusd.FileInfo, error) { - return upload.info, nil -} - -// WriteChunk writes the stream from the reader to the given offset of the upload. 
-func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) { - file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) - if err != nil { - return 0, err - } - defer file.Close() - - n, err := io.Copy(file, src) - - // If the HTTP PATCH request gets interrupted in the middle (e.g. because - // the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF. - // However, for OwnCloudStore it's not important whether the stream has ended - // on purpose or accidentally. - if err != nil { - if err != io.ErrUnexpectedEOF { - return n, err - } - } - - upload.info.Offset += n - err = upload.writeInfo() // TODO info is written here ... we need to truncate in DiscardChunk - - return n, err -} - -// GetReader returns an io.Reader for the upload. -func (upload *fileUpload) GetReader(ctx context.Context) (io.Reader, error) { - return os.Open(upload.binPath) -} - -// writeInfo updates the entire information. Everything will be overwritten. -func (upload *fileUpload) writeInfo() error { - log.Debug().Str("path", upload.infoPath).Msg("Writing info file") - data, err := json.Marshal(upload.info) - if err != nil { - return err - } - return os.WriteFile(upload.infoPath, data, defaultFilePerm) -} - -// FinishUpload finishes an upload and moves the file to the internal destination. 
-func (upload *fileUpload) FinishUpload(ctx context.Context) error { - ip := upload.info.Storage["InternalDestination"] - - // if destination exists - // TODO check etag with If-Match header - if _, err := os.Stat(ip); err == nil { - // create revision - if err := upload.fs.archiveRevision(upload.ctx, upload.fs.getVersionsPath(upload.ctx, ip), ip); err != nil { - return err - } - } - - sha1h, md5h, adler32h, err := upload.fs.HashFile(upload.binPath) - if err != nil { - log.Err(err).Msg("owncloudsql: could not open file for checksumming") - } - - err = os.Rename(upload.binPath, ip) - if err != nil { - log.Err(err).Interface("info", upload.info). - Str("binPath", upload.binPath). - Str("ipath", ip). - Msg("owncloudsql: could not rename") - return err - } - - var fi os.FileInfo - fi, err = os.Stat(ip) - if err != nil { - return err - } - - perms, err := strconv.Atoi(upload.info.Storage["Permissions"]) - if err != nil { - return err - } - data := map[string]interface{}{ - "path": upload.fs.toDatabasePath(ip), - "checksum": fmt.Sprintf("SHA1:%032x MD5:%032x ADLER32:%032x", sha1h, md5h, adler32h), - "etag": calcEtag(upload.ctx, fi), - "size": upload.info.Size, - "mimetype": mime.Detect(false, ip), - "permissions": perms, - "mtime": upload.info.MetaData["mtime"], - "storage_mtime": upload.info.MetaData["mtime"], - } - _, err = upload.fs.filecache.InsertOrUpdate(upload.info.Storage["StorageId"], data, false) - if err != nil { - return err - } - - // only delete the upload if it was successfully written to the storage - if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - log.Err(err).Interface("info", upload.info).Msg("owncloudsql: could not delete upload info") - return err - } - } - - return upload.fs.propagate(upload.ctx, ip) -} - -// To implement the termination extension as specified in https://tus.io/protocols/resumable-upload.html#termination -// - the storage needs to implement AsTerminatableUpload -// - the upload needs to implement 
Terminate - -// AsTerminatableUpload returns a TerminatableUpload. -func (fs *owncloudsqlfs) AsTerminatableUpload(upload tusd.Upload) tusd.TerminatableUpload { - return upload.(*fileUpload) -} - -// Terminate terminates the upload. -func (upload *fileUpload) Terminate(ctx context.Context) error { - if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - return err - } - } - if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { - return err - } - } - return nil -} - -// To implement the creation-defer-length extension as specified in https://tus.io/protocols/resumable-upload.html#creation -// - the storage needs to implement AsLengthDeclarableUpload -// - the upload needs to implement DeclareLength - -// AsLengthDeclarableUpload returns a LengthDeclarableUpload. -func (fs *owncloudsqlfs) AsLengthDeclarableUpload(upload tusd.Upload) tusd.LengthDeclarableUpload { - return upload.(*fileUpload) -} - -// DeclareLength updates the upload length information. -func (upload *fileUpload) DeclareLength(ctx context.Context, length int64) error { - upload.info.Size = length - upload.info.SizeIsDeferred = false - return upload.writeInfo() -} - -// To implement the concatenation extension as specified in https://tus.io/protocols/resumable-upload.html#concatenation -// - the storage needs to implement AsConcatableUpload -// - the upload needs to implement ConcatUploads - -// AsConcatableUpload returns a ConcatableUpload. -func (fs *owncloudsqlfs) AsConcatableUpload(upload tusd.Upload) tusd.ConcatableUpload { - return upload.(*fileUpload) -} - -// ConcatUploads concatenates multiple uploads. 
-func (upload *fileUpload) ConcatUploads(ctx context.Context, uploads []tusd.Upload) (err error) { - file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) - if err != nil { - return err - } - defer file.Close() - - for _, partialUpload := range uploads { - fileUpload := partialUpload.(*fileUpload) - - src, err := os.Open(fileUpload.binPath) - if err != nil { - return err - } - - if _, err := io.Copy(file, src); err != nil { - return err - } - } - - return -} diff --git a/pkg/storage/fs/s3/s3.go b/pkg/storage/fs/s3/s3.go index 3b4983ee3b..f07d5c74b5 100644 --- a/pkg/storage/fs/s3/s3.go +++ b/pkg/storage/fs/s3/s3.go @@ -323,7 +323,7 @@ func (fs *s3FS) CreateDir(ctx context.Context, ref *provider.Reference) error { result, err := fs.client.PutObject(input) if err != nil { - log.Error().Err(err) + log.Error().Err(err).Send() if aerr, ok := err.(awserr.Error); ok { if aerr.Code() == s3.ErrCodeNoSuchBucket { return errtypes.NotFound(ref.Path) @@ -333,7 +333,7 @@ func (fs *s3FS) CreateDir(ctx context.Context, ref *provider.Reference) error { return errors.Wrap(err, "s3fs: error creating dir "+ref.Path) } - log.Debug().Interface("result", result) // todo cache etag? + log.Debug().Interface("result", result).Send() // todo cache etag? 
return nil } @@ -357,7 +357,7 @@ func (fs *s3FS) Delete(ctx context.Context, ref *provider.Reference) error { Key: aws.String(fn), }) if err != nil { - log.Error().Err(err) + log.Error().Err(err).Send() if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case s3.ErrCodeNoSuchBucket: @@ -384,7 +384,7 @@ func (fs *s3FS) Delete(ctx context.Context, ref *provider.Reference) error { Key: aws.String(fn), }) if err != nil { - log.Error().Err(err) + log.Error().Err(err).Send() if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case s3.ErrCodeNoSuchBucket: @@ -395,7 +395,7 @@ func (fs *s3FS) Delete(ctx context.Context, ref *provider.Reference) error { return errors.Wrap(err, "s3fs: error deleting "+fn) } - log.Debug().Interface("result", result) + log.Debug().Interface("result", result).Send() return nil } @@ -457,7 +457,7 @@ func (fs *s3FS) Move(ctx context.Context, oldRef, newRef *provider.Reference) er Key: aws.String(fn), }) if err != nil { - log.Error().Err(err) + log.Error().Err(err).Send() if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case s3.ErrCodeNoSuchBucket: @@ -525,7 +525,7 @@ func (fs *s3FS) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []str } output, err := fs.client.HeadObject(input) if err != nil { - log.Error().Err(err) + log.Error().Err(err).Send() if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case s3.ErrCodeNoSuchBucket: @@ -622,7 +622,7 @@ func (fs *s3FS) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCl result, err := uploader.Upload(upParams) if err != nil { - log.Error().Err(err) + log.Error().Err(err).Send() if aerr, ok := err.(awserr.Error); ok { if aerr.Code() == s3.ErrCodeNoSuchBucket { return errtypes.NotFound(fn) @@ -631,7 +631,7 @@ func (fs *s3FS) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCl return errors.Wrap(err, "s3fs: error creating object "+fn) } - log.Debug().Interface("result", result) // todo cache etag? 
+ log.Debug().Interface("result", result).Send() // todo cache etag? return nil } @@ -651,7 +651,7 @@ func (fs *s3FS) Download(ctx context.Context, ref *provider.Reference) (io.ReadC Key: aws.String(fn), }) if err != nil { - log.Error().Err(err) + log.Error().Err(err).Send() if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case s3.ErrCodeNoSuchBucket: diff --git a/pkg/storage/fs/s3ng/blobstore/blobstore.go b/pkg/storage/fs/s3ng/blobstore/blobstore.go deleted file mode 100644 index b35bd67962..0000000000 --- a/pkg/storage/fs/s3ng/blobstore/blobstore.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package blobstore - -import ( - "context" - "io" - "net/url" - "os" - - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/pkg/errors" -) - -// Blobstore provides an interface to an s3 compatible blobstore. -type Blobstore struct { - client *minio.Client - - bucket string -} - -// New returns a new Blobstore. 
-func New(endpoint, region, bucket, accessKey, secretKey string) (*Blobstore, error) { - u, err := url.Parse(endpoint) - if err != nil { - return nil, errors.Wrap(err, "failed to parse s3 endpoint") - } - - useSSL := u.Scheme != "http" - client, err := minio.New(u.Host, &minio.Options{ - Region: region, - Creds: credentials.NewStaticV4(accessKey, secretKey, ""), - Secure: useSSL, - }) - if err != nil { - return nil, errors.Wrap(err, "failed to setup s3 client") - } - - return &Blobstore{ - client: client, - bucket: bucket, - }, nil -} - -// Upload stores some data in the blobstore under the given key. -func (bs *Blobstore) Upload(key string, reader io.Reader) error { - size := int64(-1) - if file, ok := reader.(*os.File); ok { - info, err := file.Stat() - if err != nil { - return errors.Wrapf(err, "could not determine file size for object '%s'", key) - } - size = info.Size() - } - - _, err := bs.client.PutObject(context.Background(), bs.bucket, key, reader, size, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - - if err != nil { - return errors.Wrapf(err, "could not store object '%s' into bucket '%s'", key, bs.bucket) - } - return nil -} - -// Download retrieves a blob from the blobstore for reading. -func (bs *Blobstore) Download(key string) (io.ReadCloser, error) { - reader, err := bs.client.GetObject(context.Background(), bs.bucket, key, minio.GetObjectOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "could not download object '%s' from bucket '%s'", key, bs.bucket) - } - return reader, nil -} - -// Delete deletes a blob from the blobstore. 
-func (bs *Blobstore) Delete(key string) error { - err := bs.client.RemoveObject(context.Background(), bs.bucket, key, minio.RemoveObjectOptions{}) - if err != nil { - return errors.Wrapf(err, "could not delete object '%s' from bucket '%s'", key, bs.bucket) - } - return nil -} diff --git a/pkg/storage/fs/s3ng/option.go b/pkg/storage/fs/s3ng/option.go deleted file mode 100644 index af2d9c7268..0000000000 --- a/pkg/storage/fs/s3ng/option.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package s3ng - -import ( - "github.com/mitchellh/mapstructure" - "github.com/pkg/errors" -) - -// Option defines a single option function. -type Option func(o *Options) - -// Options defines the available options for this package. 
-type Options struct { - - // Endpoint of the s3 blobstore - S3Endpoint string `mapstructure:"s3.endpoint"` - - // Region of the s3 blobstore - S3Region string `mapstructure:"s3.region"` - - // Bucket of the s3 blobstore - S3Bucket string `mapstructure:"s3.bucket"` - - // Access key for the s3 blobstore - S3AccessKey string `mapstructure:"s3.access_key"` - - // Secret key for the s3 blobstore - S3SecretKey string `mapstructure:"s3.secret_key"` -} - -// S3ConfigComplete return true if all required s3 fields are set. -func (o *Options) S3ConfigComplete() bool { - return o.S3Endpoint != "" && - o.S3Region != "" && - o.S3Bucket != "" && - o.S3AccessKey != "" && - o.S3SecretKey != "" -} - -func parseConfig(m map[string]interface{}) (*Options, error) { - o := &Options{} - if err := mapstructure.Decode(m, o); err != nil { - err = errors.Wrap(err, "error decoding conf") - return nil, err - } - return o, nil -} diff --git a/pkg/storage/fs/s3ng/option_test.go b/pkg/storage/fs/s3ng/option_test.go deleted file mode 100644 index 46d1d60642..0000000000 --- a/pkg/storage/fs/s3ng/option_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package s3ng_test - -import ( - "github.com/cs3org/reva/pkg/storage/fs/s3ng" - "github.com/mitchellh/mapstructure" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -var _ = Describe("Options", func() { - var ( - o *s3ng.Options - raw map[string]interface{} - ) - - BeforeEach(func() { - raw := map[string]interface{}{ - "s3.endpoint": "http://1.2.3.4:5000", - "s3.region": "default", - "s3.bucket": "the-bucket", - "s3.access_key": "foo", - "s3.secret_key": "bar", - } - o = &s3ng.Options{} - err := mapstructure.Decode(raw, o) - Expect(err).ToNot(HaveOccurred()) - }) - - It("parses s3 configuration", func() { - Expect(o.S3Endpoint).To(Equal("http://1.2.3.4:5000")) - Expect(o.S3Region).To(Equal("default")) - Expect(o.S3AccessKey).To(Equal("foo")) - Expect(o.S3SecretKey).To(Equal("bar")) - }) - - Describe("S3ConfigComplete", func() { - It("returns true", func() { - Expect(o.S3ConfigComplete()).To(BeTrue()) - }) - - It("returns false", func() { - fields := []string{"s3.endpoint", "s3.region", "s3.bucket", "s3.access_key", "s3.secret_key"} - for _, f := range fields { - delete(raw, f) - o = &s3ng.Options{} - err := mapstructure.Decode(raw, o) - Expect(err).ToNot(HaveOccurred()) - - Expect(o.S3ConfigComplete()).To(BeFalse(), "failed to return false on missing '%s' field", f) - } - }) - }) -}) diff --git a/pkg/storage/fs/s3ng/s3ng.go b/pkg/storage/fs/s3ng/s3ng.go deleted file mode 100644 index bda1bac61e..0000000000 --- a/pkg/storage/fs/s3ng/s3ng.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package s3ng - -import ( - "context" - "fmt" - - "github.com/cs3org/reva/pkg/storage" - "github.com/cs3org/reva/pkg/storage/fs/registry" - "github.com/cs3org/reva/pkg/storage/fs/s3ng/blobstore" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" -) - -func init() { - registry.Register("s3ng", New) -} - -// New returns an implementation to of the storage.FS interface that talk to -// a local filesystem. -func New(ctx context.Context, m map[string]interface{}) (storage.FS, error) { - o, err := parseConfig(m) - if err != nil { - return nil, err - } - - if !o.S3ConfigComplete() { - return nil, fmt.Errorf("s3 configuration incomplete") - } - - bs, err := blobstore.New(o.S3Endpoint, o.S3Region, o.S3Bucket, o.S3AccessKey, o.S3SecretKey) - if err != nil { - return nil, err - } - - return decomposedfs.NewDefault(m, bs) -} diff --git a/pkg/storage/fs/s3ng/s3ng_suite_test.go b/pkg/storage/fs/s3ng/s3ng_suite_test.go deleted file mode 100644 index 00293b99b9..0000000000 --- a/pkg/storage/fs/s3ng/s3ng_suite_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package s3ng_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestS3ng(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "S3ng Suite") -} diff --git a/pkg/storage/fs/s3ng/s3ng_test.go b/pkg/storage/fs/s3ng/s3ng_test.go deleted file mode 100644 index 107b25c0f7..0000000000 --- a/pkg/storage/fs/s3ng/s3ng_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package s3ng_test - -import ( - "context" - "os" - - "github.com/cs3org/reva/pkg/storage/fs/s3ng" - "github.com/cs3org/reva/tests/helpers" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("S3ng", func() { - var ( - options map[string]interface{} - tmpRoot string - ) - - BeforeEach(func() { - tmpRoot, err := helpers.TempDir("reva-unit-tests-*-root") - Expect(err).ToNot(HaveOccurred()) - - options = map[string]interface{}{ - "root": tmpRoot, - "enable_home": true, - "share_folder": "/Shares", - "s3.endpoint": "http://1.2.3.4:5000", - "s3.region": "default", - "s3.bucket": "the-bucket", - "s3.access_key": "foo", - "s3.secret_key": "bar", - } - }) - - AfterEach(func() { - if tmpRoot != "" { - os.RemoveAll(tmpRoot) - } - }) - - Describe("New", func() { - It("fails on missing s3 configuration", func() { - _, err := s3ng.New(context.Background(), map[string]interface{}{}) - Expect(err).To(MatchError("s3 configuration incomplete")) - }) - - It("works with complete configuration", func() { - _, err := s3ng.New(context.Background(), options) - Expect(err).ToNot(HaveOccurred()) - }) - }) -}) diff --git a/pkg/storage/migrate/metadata.go b/pkg/storage/migrate/metadata.go deleted file mode 100644 index 361682d4ee..0000000000 --- a/pkg/storage/migrate/metadata.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package migrate - -import ( - "bufio" - "context" - "encoding/json" - "log" - "os" - "path" - "strconv" - "strings" - - gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" - rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" - storageprovider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" -) - -// metaData representation in the import data. -type metaData struct { - Type string `json:"type"` - Path string `json:"path"` - Etag string `json:"eTag"` - Permissions int `json:"permissions"` - MTime int `json:"mtime"` -} - -// ImportMetadata from a files.jsonl file in exportPath. The files must already be present on the storage -// Will set etag and mtime. -func ImportMetadata(ctx context.Context, client gateway.GatewayAPIClient, exportPath string, ns string) error { - filesJSONL, err := os.Open(path.Join(exportPath, "files.jsonl")) - if err != nil { - return err - } - jsonLines := bufio.NewScanner(filesJSONL) - filesJSONL.Close() - - for jsonLines.Scan() { - var fileData metaData - if err := json.Unmarshal(jsonLines.Bytes(), &fileData); err != nil { - log.Fatal(err) - return err - } - - m := make(map[string]string) - if fileData.Etag != "" { - // TODO sanitize etag? eg double quotes at beginning and end? - m["etag"] = fileData.Etag - } - if fileData.MTime != 0 { - m["mtime"] = strconv.Itoa(fileData.MTime) - } - // TODO permissions? is done via share? 
but this is owner permissions - - if len(m) > 0 { - resourcePath := path.Join(ns, path.Base(exportPath), strings.TrimPrefix(fileData.Path, "/files/")) - samReq := &storageprovider.SetArbitraryMetadataRequest{ - Ref: &storageprovider.Reference{Path: resourcePath}, - ArbitraryMetadata: &storageprovider.ArbitraryMetadata{ - Metadata: m, - }, - } - samResp, err := client.SetArbitraryMetadata(ctx, samReq) - if err != nil { - log.Fatal(err) - } - - if samResp.Status.Code == rpc.Code_CODE_NOT_FOUND { - log.Print("File does not exist on target system, skipping metadata import: " + resourcePath) - } - if samResp.Status.Code != rpc.Code_CODE_OK { - log.Print("Error importing metadata, skipping metadata import: " + resourcePath + ", " + samResp.Status.Message) - } - } else { - log.Print("no etag or mtime for : " + fileData.Path) - } - } - return nil -} diff --git a/pkg/storage/migrate/shares.go b/pkg/storage/migrate/shares.go deleted file mode 100644 index 4ef7023f9b..0000000000 --- a/pkg/storage/migrate/shares.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package migrate - -import ( - "bufio" - "context" - "encoding/json" - "log" - "os" - "path" - - gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" - user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" - collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" -) - -// share representation in the import metadata. -type share struct { - Path string `json:"path"` - ShareType string `json:"shareType"` - Type string `json:"type"` - Owner string `json:"owner"` - SharedBy string `json:"sharedBy"` - SharedWith string `json:"sharedWith"` - Permissions int `json:"permissions"` - ExpirationDate string `json:"expirationDate"` - Password string `json:"password"` - Name string `json:"name"` - Token string `json:"token"` -} - -// ImportShares from a shares.jsonl file in exportPath. The files must already be present on the storage. -func ImportShares(ctx context.Context, client gateway.GatewayAPIClient, exportPath string, ns string) error { - sharesJSONL, err := os.Open(path.Join(exportPath, "shares.jsonl")) - if err != nil { - return err - } - jsonLines := bufio.NewScanner(sharesJSONL) - sharesJSONL.Close() - - for jsonLines.Scan() { - var shareData share - if err := json.Unmarshal(jsonLines.Bytes(), &shareData); err != nil { - log.Fatal(err) - return err - } - - // Stat file, skip share creation if it does not exist on the target system - resourcePath := path.Join(ns, path.Base(exportPath), shareData.Path) - statReq := &provider.StatRequest{Ref: &provider.Reference{Path: resourcePath}} - statResp, err := client.Stat(ctx, statReq) - - if err != nil { - log.Fatal(err) - } - - if statResp.Status.Code == rpc.Code_CODE_NOT_FOUND { - log.Print("File does not exist on target system, skipping share import: " + resourcePath) - continue - } - - _, err = client.CreateShare(ctx, shareReq(statResp.Info, &shareData)) - if err != 
nil { - return err - } - } - return nil -} - -func shareReq(info *provider.ResourceInfo, share *share) *collaboration.CreateShareRequest { - return &collaboration.CreateShareRequest{ - ResourceInfo: info, - Grant: &collaboration.ShareGrant{ - Grantee: &provider.Grantee{ - Type: provider.GranteeType_GRANTEE_TYPE_USER, - Id: &provider.Grantee_UserId{UserId: &user.UserId{ - OpaqueId: share.SharedWith, - }}, - }, - Permissions: &collaboration.SharePermissions{ - Permissions: convertPermissions(share.Permissions), - }, - }, - } -} - -// Maps oc10 permissions to roles. -var ocPermToRole = map[int]string{ - 1: "viewer", - 15: "co-owner", - 31: "editor", -} - -// Create resource permission-set from ownCloud permissions int. -func convertPermissions(ocPermissions int) *provider.ResourcePermissions { - perms := &provider.ResourcePermissions{} - switch ocPermToRole[ocPermissions] { - case "viewer": - perms.Stat = true - perms.ListContainer = true - perms.InitiateFileDownload = true - perms.ListGrants = true - case "editor": - perms.Stat = true - perms.ListContainer = true - perms.InitiateFileDownload = true - - perms.CreateContainer = true - perms.InitiateFileUpload = true - perms.Delete = true - perms.Move = true - perms.ListGrants = true - case "co-owner": - perms.Stat = true - perms.ListContainer = true - perms.InitiateFileDownload = true - - perms.CreateContainer = true - perms.InitiateFileUpload = true - perms.Delete = true - perms.Move = true - - perms.ListGrants = true - perms.AddGrant = true - perms.RemoveGrant = true - perms.UpdateGrant = true - } - - return perms -} diff --git a/pkg/storage/utils/ace/ace.go b/pkg/storage/utils/ace/ace.go deleted file mode 100644 index 5260aa70e0..0000000000 --- a/pkg/storage/utils/ace/ace.go +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package ace - -import ( - "encoding/csv" - "fmt" - "strconv" - "strings" - - grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" -) - -// ACE represents an Access Control Entry, mimicing NFSv4 ACLs -// The difference is tht grant ACEs are not propagated down the tree when being set on a dir. -// The tradeoff is that every read has to check the permissions of all path segments up to the root, -// to determine the permissions. But reads can be scaled better than writes, so here we are. -// See https://github.com/cs3org/reva/pull/1170#issuecomment-700526118 for more details. -// -// The following is taken from the nfs4_acl man page, -// see https://linux.die.net/man/5/nfs4_acl: -// the extended attributes will look like this -// "user.oc.grant.:::" -// - *type* will be limited to A for now -// A: Allow - allow *principal* to perform actions requiring *permissions* -// In the future we can use: -// U: aUdit - log any attempted access by principal which requires -// permissions. 
-// L: aLarm - generate a system alarm at any attempted access by -// principal which requires permissions -// D: for Deny is not recommended -// - *flags* for now empty or g for group, no inheritance yet -// - d directory-inherit - newly-created subdirectories will inherit the -// ACE. -// - f file-inherit - newly-created files will inherit the ACE, minus its -// inheritance flags. Newly-created subdirectories -// will inherit the ACE; if directory-inherit is not -// also specified in the parent ACE, inherit-only will -// be added to the inherited ACE. -// - n no-propagate-inherit - newly-created subdirectories will inherit -// the ACE, minus its inheritance flags. -// - i inherit-only - the ACE is not considered in permissions checks, -// but it is heritable; however, the inherit-only -// flag is stripped from inherited ACEs. -// - *principal* a named user, group or special principal -// - the oidc sub@iss maps nicely to this -// - 'OWNER@', 'GROUP@', and 'EVERYONE@', which are, respectively, analogous to the POSIX user/group/other -// - *permissions* -// - r read-data (files) / list-directory (directories) -// - w write-data (files) / create-file (directories) -// - a append-data (files) / create-subdirectory (directories) -// - x execute (files) / change-directory (directories) -// - d delete - delete the file/directory. Some servers will allow a delete to occur if either this permission is set in the file/directory or if the delete-child permission is set in its parent directory. -// - D delete-child - remove a file or subdirectory from within the given directory (directories only) -// - t read-attributes - read the attributes of the file/directory. -// - T write-attributes - write the attributes of the file/directory. -// - n read-named-attributes - read the named attributes of the file/directory. -// - N write-named-attributes - write the named attributes of the file/directory. -// - c read-ACL - read the file/directory NFSv4 ACL. 
-// - C write-ACL - write the file/directory NFSv4 ACL. -// - o write-owner - change ownership of the file/directory. -// - y synchronize - allow clients to use synchronous I/O with the server. -// -// TODO implement OWNER@ as "user.oc.grant.A::OWNER@:rwaDxtTnNcCy" -// attribute names are limited to 255 chars by the linux kernel vfs, values to 64 kb -// ext3 extended attributes must fit inside a single filesystem block ... 4096 bytes -// that leaves us with "user.oc.grant.A::someonewithaslightlylongersubject@whateverissuer:rwaDxtTnNcCy" ~80 chars -// 4096/80 = 51 shares ... with luck we might move the actual permissions to the value, saving ~15 chars -// 4096/64 = 64 shares ... still meh ... we can do better by using ints instead of strings for principals -// -// "user.oc.grant.u:100000" is pretty neat, but we can still do better: base64 encode the int -// "user.oc.grant.u:6Jqg" but base64 always has at least 4 chars, maybe hex is better for smaller numbers -// well use 4 chars in addition to the ace: "user.oc.grant.u:////" = 65535 -> 18 chars -// -// 4096/18 = 227 shares -// still ... ext attrs for this are not infinite scale ... -// so .. attach shares via fileid. -// /metadata//shares, similar to /files -// /metadata//shares/u///A:fdi:rwaDxtTnNcCy permissions as filename to keep them in the stat cache? -// -// whatever ... 50 shares is good enough. If more is needed we can delegate to the metadata -// if "user.oc.grant.M" is present look inside the metadata app. -// - if we cannot set an ace we might get an io error. -// in that case convert all shares to metadata and try to set "user.oc.grant.m" -// -// what about metadata like share creator, share time, expiry? 
-// - creator is same as owner, but can be set -// - share date, or abbreviated st is a unix timestamp -// - expiry is a unix timestamp -// - can be put inside the value -// - we need to reorder the fields: -// "user.oc.grant.:" -> "kv:t=:f=:p=:st=:c=:e=:pw=:n=" -// "user.oc.grant.:" -> "v1::::::::" -// or the first byte determines the format -// 0x00 = key value -// 0x01 = v1 ... -type ACE struct { - // NFSv4 acls - _type string // t - flags string // f - principal string // im key - permissions string // p - - // sharing specific - shareTime int // s - creator string // c - expires int // e - password string // w passWord TODO h = hash - label string // l -} - -// FromGrant creates an ACE from a CS3 grant. -func FromGrant(g *provider.Grant) *ACE { - e := &ACE{ - _type: "A", - permissions: getACEPerm(g.Permissions), - // TODO creator ... - } - if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP { - e.flags = "g" - e.principal = "g:" + g.Grantee.GetGroupId().OpaqueId - } else { - e.principal = "u:" + g.Grantee.GetUserId().OpaqueId - } - return e -} - -// Principal returns the principal of the ACE, eg. `u:` or `g:`. -func (e *ACE) Principal() string { - return e.principal -} - -// Marshal renders a principal and byte[] that can be used to persist the ACE as an extended attribute. -func (e *ACE) Marshal() (string, []byte) { - // first byte will be replaced after converting to byte array - val := fmt.Sprintf("_t=%s:f=%s:p=%s", e._type, e.flags, e.permissions) - b := []byte(val) - b[0] = 0 // indicate key value - return e.principal, b -} - -// Unmarshal parses a principal string and byte[] into an ACE. 
-func Unmarshal(principal string, v []byte) (e *ACE, err error) { - // first byte indicates type of value - switch v[0] { - case 0: // = ':' separated key=value pairs - s := string(v[1:]) - if e, err = unmarshalKV(s); err == nil { - e.principal = principal - } - // check consistency of Flags and principal type - if strings.Contains(e.flags, "g") { - if principal[:1] != "g" { - return nil, fmt.Errorf("inconsistent ace: expected group") - } - } else { - if principal[:1] != "u" { - return nil, fmt.Errorf("inconsistent ace: expected user") - } - } - default: - return nil, fmt.Errorf("unknown ace encoding") - } - return -} - -// Grant returns a CS3 grant. -func (e *ACE) Grant() *provider.Grant { - g := &provider.Grant{ - Grantee: &provider.Grantee{ - Type: e.granteeType(), - }, - Permissions: e.grantPermissionSet(), - } - id := e.principal[2:] - if e.granteeType() == provider.GranteeType_GRANTEE_TYPE_GROUP { - g.Grantee.Id = &provider.Grantee_GroupId{GroupId: &grouppb.GroupId{OpaqueId: id}} - } else if e.granteeType() == provider.GranteeType_GRANTEE_TYPE_USER { - g.Grantee.Id = &provider.Grantee_UserId{UserId: &userpb.UserId{OpaqueId: id}} - } - return g -} - -// granteeType returns the CS3 grantee type. -func (e *ACE) granteeType() provider.GranteeType { - if strings.Contains(e.flags, "g") { - return provider.GranteeType_GRANTEE_TYPE_GROUP - } - return provider.GranteeType_GRANTEE_TYPE_USER -} - -// grantPermissionSet returns the set of CS3 resource permissions representing the ACE. -func (e *ACE) grantPermissionSet() *provider.ResourcePermissions { - p := &provider.ResourcePermissions{} - // r - if strings.Contains(e.permissions, "r") { - p.Stat = true - p.GetPath = true - p.InitiateFileDownload = true - p.ListContainer = true - } - // w - if strings.Contains(e.permissions, "w") { - p.InitiateFileUpload = true - if p.InitiateFileDownload { - p.Move = true - } - } - // a - if strings.Contains(e.permissions, "a") { - // TODO append data to file permission? 
- p.CreateContainer = true - } - // x - // if strings.Contains(e.Permissions, "x") { - // TODO execute file permission? - // TODO change directory permission? - // } - // d - if strings.Contains(e.permissions, "d") { - p.Delete = true - } - // D ? - - // sharing - if strings.Contains(e.permissions, "C") { - p.AddGrant = true - p.RemoveGrant = true - p.UpdateGrant = true - } - if strings.Contains(e.permissions, "c") { - p.ListGrants = true - } - - // trash - if strings.Contains(e.permissions, "u") { // u = undelete - p.ListRecycle = true - } - if strings.Contains(e.permissions, "U") { - p.RestoreRecycleItem = true - } - if strings.Contains(e.permissions, "P") { - p.PurgeRecycle = true - } - - // versions - if strings.Contains(e.permissions, "v") { - p.ListFileVersions = true - } - if strings.Contains(e.permissions, "V") { - p.RestoreFileVersion = true - } - - // ? - if strings.Contains(e.permissions, "q") { - p.GetQuota = true - } - // TODO set quota permission? - return p -} - -func unmarshalKV(s string) (*ACE, error) { - e := &ACE{} - r := csv.NewReader(strings.NewReader(s)) - r.Comma = ':' - r.Comment = 0 - r.FieldsPerRecord = -1 - r.LazyQuotes = false - r.TrimLeadingSpace = false - records, err := r.ReadAll() - if err != nil { - return nil, err - } - if len(records) != 1 { - return nil, fmt.Errorf("more than one row of ace kvs") - } - for i := range records[0] { - kv := strings.Split(records[0][i], "=") - switch kv[0] { - case "t": - e._type = kv[1] - case "f": - e.flags = kv[1] - case "p": - e.permissions = kv[1] - case "s": - v, err := strconv.Atoi(kv[1]) - if err != nil { - return nil, err - } - e.shareTime = v - case "c": - e.creator = kv[1] - case "e": - v, err := strconv.Atoi(kv[1]) - if err != nil { - return nil, err - } - e.expires = v - case "w": - e.password = kv[1] - case "l": - e.label = kv[1] - // TODO default ... log unknown keys? or add as opaque? hm we need that for tagged shares ... 
- } - } - return e, nil -} - -func getACEPerm(set *provider.ResourcePermissions) string { - var b strings.Builder - - if set.Stat || set.InitiateFileDownload || set.ListContainer || set.GetPath { - b.WriteString("r") - } - if set.InitiateFileUpload || set.Move { - b.WriteString("w") - } - if set.CreateContainer { - b.WriteString("a") - } - if set.Delete { - b.WriteString("d") - } - - // sharing - if set.AddGrant || set.RemoveGrant || set.UpdateGrant { - b.WriteString("C") - } - if set.ListGrants { - b.WriteString("c") - } - - // trash - if set.ListRecycle { - b.WriteString("u") - } - if set.RestoreRecycleItem { - b.WriteString("U") - } - if set.PurgeRecycle { - b.WriteString("P") - } - - // versions - if set.ListFileVersions { - b.WriteString("v") - } - if set.RestoreFileVersion { - b.WriteString("V") - } - - // quota - if set.GetQuota { - b.WriteString("q") - } - // TODO set quota permission? - // TODO GetPath - return b.String() -} diff --git a/pkg/storage/utils/ace/ace_suite_test.go b/pkg/storage/utils/ace/ace_suite_test.go deleted file mode 100644 index e65266ee7d..0000000000 --- a/pkg/storage/utils/ace/ace_suite_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package ace_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestAce(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Ace Suite") -} diff --git a/pkg/storage/utils/ace/ace_test.go b/pkg/storage/utils/ace/ace_test.go deleted file mode 100644 index 18fcfa06d3..0000000000 --- a/pkg/storage/utils/ace/ace_test.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package ace_test - -import ( - "fmt" - - grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1" - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/storage/utils/ace" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("ACE", func() { - - var ( - userGrant = &provider.Grant{ - Grantee: &provider.Grantee{ - Type: provider.GranteeType_GRANTEE_TYPE_USER, - Id: &provider.Grantee_UserId{ - UserId: &userpb.UserId{ - OpaqueId: "foo", - }, - }, - }, - Permissions: &provider.ResourcePermissions{}, - } - - groupGrant = &provider.Grant{ - Grantee: &provider.Grantee{ - Type: provider.GranteeType_GRANTEE_TYPE_GROUP, - Id: &provider.Grantee_GroupId{ - GroupId: &grouppb.GroupId{ - OpaqueId: "foo", - }, - }, - }, - Permissions: &provider.ResourcePermissions{}, - } - ) - - Describe("FromGrant", func() { - It("creates an ACE from a user grant", func() { - ace := ace.FromGrant(userGrant) - Expect(ace.Principal()).To(Equal("u:foo")) - }) - - It("creates an ACE from a group grant", func() { - ace := ace.FromGrant(groupGrant) - Expect(ace.Principal()).To(Equal("g:foo")) - }) - }) - - Describe("Grant", func() { - It("returns a proper Grant", func() { - ace := ace.FromGrant(userGrant) - grant := ace.Grant() - Expect(grant).To(Equal(userGrant)) - }) - }) - - Describe("marshalling", func() { - It("works", func() { - a := ace.FromGrant(userGrant) - - marshalled, principal := a.Marshal() - unmarshalled, err := ace.Unmarshal(marshalled, principal) - Expect(err).ToNot(HaveOccurred()) - - Expect(unmarshalled).To(Equal(a)) - }) - }) - - Describe("converting permissions", func() { - It("converts r", func() { - userGrant.Permissions.Stat = true - newGrant := ace.FromGrant(userGrant).Grant() - userGrant.Permissions.Stat = false - Expect(newGrant.Permissions.Stat).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - - userGrant.Permissions.ListContainer = true - newGrant = ace.FromGrant(userGrant).Grant() - userGrant.Permissions.ListContainer = false - Expect(newGrant.Permissions.ListContainer).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - - userGrant.Permissions.InitiateFileDownload = true - newGrant = 
ace.FromGrant(userGrant).Grant() - userGrant.Permissions.InitiateFileDownload = false - Expect(newGrant.Permissions.InitiateFileDownload).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - - userGrant.Permissions.GetPath = true - newGrant = ace.FromGrant(userGrant).Grant() - fmt.Println(newGrant.Permissions) - userGrant.Permissions.GetPath = false - Expect(newGrant.Permissions.GetPath).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - }) - - It("converts w", func() { - userGrant.Permissions.InitiateFileUpload = true - newGrant := ace.FromGrant(userGrant).Grant() - userGrant.Permissions.InitiateFileUpload = false - Expect(newGrant.Permissions.InitiateFileUpload).To(BeTrue()) - Expect(newGrant.Permissions.Move).To(BeFalse()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - - userGrant.Permissions.InitiateFileUpload = true - userGrant.Permissions.InitiateFileDownload = true - newGrant = ace.FromGrant(userGrant).Grant() - userGrant.Permissions.InitiateFileUpload = false - Expect(newGrant.Permissions.InitiateFileUpload).To(BeTrue()) - Expect(newGrant.Permissions.Move).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - }) - - It("converts a", func() { - userGrant.Permissions.CreateContainer = true - newGrant := ace.FromGrant(userGrant).Grant() - userGrant.Permissions.CreateContainer = false - Expect(newGrant.Permissions.CreateContainer).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - }) - - It("converts d", func() { - userGrant.Permissions.Delete = true - newGrant := ace.FromGrant(userGrant).Grant() - userGrant.Permissions.Delete = false - Expect(newGrant.Permissions.Delete).To(BeTrue()) - Expect(newGrant.Permissions.Move).To(BeFalse()) - }) - - It("converts C", func() { - userGrant.Permissions.AddGrant = true - newGrant := ace.FromGrant(userGrant).Grant() - userGrant.Permissions.AddGrant = false - Expect(newGrant.Permissions.AddGrant).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) 
- - userGrant.Permissions.RemoveGrant = true - newGrant = ace.FromGrant(userGrant).Grant() - userGrant.Permissions.RemoveGrant = false - Expect(newGrant.Permissions.RemoveGrant).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - - userGrant.Permissions.UpdateGrant = true - newGrant = ace.FromGrant(userGrant).Grant() - userGrant.Permissions.UpdateGrant = false - Expect(newGrant.Permissions.UpdateGrant).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - }) - - It("converts c", func() { - userGrant.Permissions.ListGrants = true - newGrant := ace.FromGrant(userGrant).Grant() - userGrant.Permissions.ListGrants = false - Expect(newGrant.Permissions.ListGrants).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - }) - - It("converts u", func() { - userGrant.Permissions.ListRecycle = true - newGrant := ace.FromGrant(userGrant).Grant() - userGrant.Permissions.ListRecycle = false - Expect(newGrant.Permissions.ListRecycle).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - }) - - It("converts U", func() { - userGrant.Permissions.RestoreRecycleItem = true - newGrant := ace.FromGrant(userGrant).Grant() - userGrant.Permissions.RestoreRecycleItem = false - Expect(newGrant.Permissions.RestoreRecycleItem).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - }) - - It("converts P", func() { - userGrant.Permissions.PurgeRecycle = true - newGrant := ace.FromGrant(userGrant).Grant() - userGrant.Permissions.PurgeRecycle = false - Expect(newGrant.Permissions.PurgeRecycle).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - }) - - It("converts v", func() { - userGrant.Permissions.ListFileVersions = true - newGrant := ace.FromGrant(userGrant).Grant() - userGrant.Permissions.ListFileVersions = false - Expect(newGrant.Permissions.ListFileVersions).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - }) - - It("converts V", func() { - userGrant.Permissions.RestoreFileVersion = true - 
newGrant := ace.FromGrant(userGrant).Grant() - userGrant.Permissions.RestoreFileVersion = false - Expect(newGrant.Permissions.RestoreFileVersion).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - }) - - It("converts q", func() { - userGrant.Permissions.GetQuota = true - newGrant := ace.FromGrant(userGrant).Grant() - userGrant.Permissions.GetQuota = false - Expect(newGrant.Permissions.GetQuota).To(BeTrue()) - Expect(newGrant.Permissions.Delete).To(BeFalse()) - }) - }) -}) diff --git a/pkg/storage/utils/decomposedfs/decomposedfs.go b/pkg/storage/utils/decomposedfs/decomposedfs.go deleted file mode 100644 index b9436c76f1..0000000000 --- a/pkg/storage/utils/decomposedfs/decomposedfs.go +++ /dev/null @@ -1,547 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package decomposedfs - -// go:generate mockery -name PermissionsChecker -// go:generate mockery -name Tree - -import ( - "context" - "fmt" - "io" - "net/url" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "syscall" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/logger" - "github.com/cs3org/reva/pkg/sharedconf" - "github.com/cs3org/reva/pkg/storage" - "github.com/cs3org/reva/pkg/storage/utils/chunking" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/cs3org/reva/pkg/storage/utils/templates" - rtrace "github.com/cs3org/reva/pkg/trace" - "github.com/cs3org/reva/pkg/utils" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -// PermissionsChecker defines an interface for checking permissions on a Node. -type PermissionsChecker interface { - AssemblePermissions(ctx context.Context, n *node.Node) (ap provider.ResourcePermissions, err error) - HasPermission(ctx context.Context, n *node.Node, check func(*provider.ResourcePermissions) bool) (can bool, err error) -} - -// Tree is used to manage a tree hierarchy. 
-type Tree interface { - Setup(owner *userpb.UserId, propagateToRoot bool) error - - GetMD(ctx context.Context, node *node.Node) (os.FileInfo, error) - ListFolder(ctx context.Context, node *node.Node) ([]*node.Node, error) - // CreateHome(owner *userpb.UserId) (n *node.Node, err error) - CreateDir(ctx context.Context, node *node.Node) (err error) - // CreateReference(ctx context.Context, node *node.Node, targetURI *url.URL) error - Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) (err error) - Delete(ctx context.Context, node *node.Node) (err error) - RestoreRecycleItemFunc(ctx context.Context, key, trashPath, restorePath string) (*node.Node, *node.Node, func() error, error) // FIXME REFERENCE use ref instead of path - PurgeRecycleItemFunc(ctx context.Context, key, purgePath string) (*node.Node, func() error, error) - - WriteBlob(key string, reader io.Reader) error - ReadBlob(key string) (io.ReadCloser, error) - DeleteBlob(key string) error - - Propagate(ctx context.Context, node *node.Node) (err error) -} - -// Decomposedfs provides the base for decomposed filesystem implementations. -type Decomposedfs struct { - lu *Lookup - tp Tree - o *options.Options - p PermissionsChecker - chunkHandler *chunking.ChunkHandler -} - -// NewDefault returns an instance with default components. -func NewDefault(m map[string]interface{}, bs tree.Blobstore) (storage.FS, error) { - o, err := options.New(m) - if err != nil { - return nil, err - } - - lu := &Lookup{} - p := node.NewPermissions(lu) - - lu.Options = o - - tp := tree.New(o.Root, o.TreeTimeAccounting, o.TreeSizeAccounting, lu, bs) - - o.GatewayAddr = sharedconf.GetGatewaySVC(o.GatewayAddr) - return New(o, lu, p, tp) -} - -// when enable home is false we want propagation to root if tree size or mtime accounting is enabled. 
-func enablePropagationForRoot(o *options.Options) bool { - return (!o.EnableHome && (o.TreeSizeAccounting || o.TreeTimeAccounting)) -} - -// New returns an implementation of the storage.FS interface that talks to -// a local filesystem. -func New(o *options.Options, lu *Lookup, p PermissionsChecker, tp Tree) (storage.FS, error) { - err := tp.Setup(&userpb.UserId{ - OpaqueId: o.Owner, - Idp: o.OwnerIDP, - Type: userpb.UserType(userpb.UserType_value[o.OwnerType]), - }, enablePropagationForRoot(o)) - if err != nil { - logger.New().Error().Err(err). - Msg("could not setup tree") - return nil, errors.Wrap(err, "could not setup tree") - } - - return &Decomposedfs{ - tp: tp, - lu: lu, - o: o, - p: p, - chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")), - }, nil -} - -// Shutdown shuts down the storage. -func (fs *Decomposedfs) Shutdown(ctx context.Context) error { - return nil -} - -// GetQuota returns the quota available -// TODO Document in the cs3 should we return quota or free space? 
-func (fs *Decomposedfs) GetQuota(ctx context.Context, ref *provider.Reference) (total uint64, inUse uint64, err error) { - var n *node.Node - if ref != nil { - if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return 0, 0, err - } - } else { - if n, err = fs.lu.HomeOrRootNode(ctx); err != nil { - return 0, 0, err - } - } - - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return 0, 0, err - } - - rp, err := fs.p.AssemblePermissions(ctx, n) - switch { - case err != nil: - return 0, 0, errtypes.InternalError(err.Error()) - case !rp.GetQuota: - return 0, 0, errtypes.PermissionDenied(n.ID) - } - - ri, err := n.AsResourceInfo(ctx, &rp, []string{"treesize", "quota"}, true) - if err != nil { - return 0, 0, err - } - - quotaStr := node.QuotaUnknown - if ri.Opaque != nil && ri.Opaque.Map != nil && ri.Opaque.Map["quota"] != nil && ri.Opaque.Map["quota"].Decoder == "plain" { - quotaStr = string(ri.Opaque.Map["quota"].Value) - } - - avail, err := node.GetAvailableSize(n.InternalPath()) - if err != nil { - return 0, 0, err - } - total = avail + ri.Size - - switch { - case quotaStr == node.QuotaUncalculated, quotaStr == node.QuotaUnknown, quotaStr == node.QuotaUnlimited: - // best we can do is return current total - // TODO indicate unlimited total? -> in opaque data? - default: - if quota, err := strconv.ParseUint(quotaStr, 10, 64); err == nil { - if total > quota { - total = quota - } - } - } - - return total, ri.Size, nil -} - -// CreateHome creates a new home node for the given user. 
-func (fs *Decomposedfs) CreateHome(ctx context.Context) (err error) { - if !fs.o.EnableHome || fs.o.UserLayout == "" { - return errtypes.NotSupported("Decomposedfs: CreateHome() home supported disabled") - } - - var n, h *node.Node - if n, err = fs.lu.RootNode(ctx); err != nil { - return - } - h, err = fs.lu.WalkPath(ctx, n, fs.lu.mustGetUserLayout(ctx), false, func(ctx context.Context, n *node.Node) error { - if !n.Exists { - if err := fs.tp.CreateDir(ctx, n); err != nil { - return err - } - } - return nil - }) - if err != nil { - return - } - - // update the owner - u := ctxpkg.ContextMustGetUser(ctx) - if err = h.WriteMetadata(u.Id); err != nil { - return - } - - if fs.o.TreeTimeAccounting || fs.o.TreeSizeAccounting { - homePath := h.InternalPath() - // mark the home node as the end of propagation - if err = xattr.Set(homePath, xattrs.PropagationAttr, []byte("1")); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", h).Msg("could not mark home as propagation root") - return - } - } - - if err := h.SetMetadata(xattrs.SpaceNameAttr, u.DisplayName); err != nil { - return err - } - - // add storage space - if err := fs.createStorageSpace(ctx, "personal", h.ID); err != nil { - return err - } - - return -} - -// The os not exists error is buried inside the xattr error, -// so we cannot just use os.IsNotExists(). -func isAlreadyExists(err error) bool { - if xerr, ok := err.(*os.LinkError); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.EEXIST - } - } - return false -} - -// GetHome is called to look up the home path for a user -// It is NOT supposed to return the internal path but the external path. 
-func (fs *Decomposedfs) GetHome(ctx context.Context) (string, error) { - if !fs.o.EnableHome || fs.o.UserLayout == "" { - return "", errtypes.NotSupported("Decomposedfs: GetHome() home supported disabled") - } - u := ctxpkg.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.o.UserLayout) - return filepath.Join(fs.o.Root, layout), nil // TODO use a namespace? -} - -// GetPathByID returns the fn pointed by the file id, without the internal namespace. -func (fs *Decomposedfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) { - node, err := fs.lu.NodeFromID(ctx, id) - if err != nil { - return "", err - } - - return fs.lu.Path(ctx, node) -} - -// CreateDir creates the specified directory. -func (fs *Decomposedfs) CreateDir(ctx context.Context, ref *provider.Reference) (err error) { - name := path.Base(ref.Path) - if name == "" || name == "." || name == "/" { - return errtypes.BadRequest("Invalid path") - } - ref.Path = path.Dir(ref.Path) - var n *node.Node - if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if n, err = n.Child(ctx, name); err != nil { - return - } - - if n.Exists { - return errtypes.AlreadyExists(ref.Path) - } - pn, err := n.Parent() - if err != nil { - return errors.Wrap(err, "decomposedfs: error getting parent "+n.ParentID) - } - ok, err := fs.p.HasPermission(ctx, pn, func(rp *provider.ResourcePermissions) bool { - return rp.CreateContainer - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - err = fs.tp.CreateDir(ctx, n) - - if fs.o.TreeTimeAccounting || fs.o.TreeSizeAccounting { - nodePath := n.InternalPath() - // mark the home node as the end of propagation - if err = xattr.Set(nodePath, xattrs.PropagationAttr, []byte("1")); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not mark node to propagate") - return - } - } - return -} - -// 
TouchFile as defined in the storage.FS interface. -func (fs *Decomposedfs) TouchFile(ctx context.Context, ref *provider.Reference) error { - return fmt.Errorf("unimplemented: TouchFile") -} - -// CreateReference creates a reference as a node folder with the target stored in extended attributes -// There is no difference between the /Shares folder and normal nodes because the storage is not supposed to be accessible without the storage provider. -// In effect everything is a shadow namespace. -// To mimic the eos end owncloud driver we only allow references as children of the "/Shares" folder -// TODO when home support is enabled should the "/Shares" folder still be listed? -func (fs *Decomposedfs) CreateReference(ctx context.Context, p string, targetURI *url.URL) (err error) { - p = strings.Trim(p, "/") - parts := strings.Split(p, "/") - - if len(parts) != 2 { - return errtypes.PermissionDenied("Decomposedfs: references must be a child of the share folder: share_folder=" + fs.o.ShareFolder + " path=" + p) - } - - if parts[0] != strings.Trim(fs.o.ShareFolder, "/") { - return errtypes.PermissionDenied("Decomposedfs: cannot create references outside the share folder: share_folder=" + fs.o.ShareFolder + " path=" + p) - } - - // create Shares folder if it does not exist - var n *node.Node - if n, err = fs.lu.NodeFromPath(ctx, fs.o.ShareFolder, false); err != nil { - return errtypes.InternalError(err.Error()) - } else if !n.Exists { - if err = fs.tp.CreateDir(ctx, n); err != nil { - return - } - } - - if n, err = n.Child(ctx, parts[1]); err != nil { - return errtypes.InternalError(err.Error()) - } - - if n.Exists { - // TODO append increasing number to mountpoint name - return errtypes.AlreadyExists(p) - } - - if err = fs.tp.CreateDir(ctx, n); err != nil { - return - } - - internal := n.InternalPath() - if err = xattr.Set(internal, xattrs.ReferenceAttr, []byte(targetURI.String())); err != nil { - return errors.Wrapf(err, "Decomposedfs: error setting the target %s on the 
reference file %s", targetURI.String(), internal) - } - return nil -} - -// Move moves a resource from one reference to another. -func (fs *Decomposedfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (err error) { - var oldNode, newNode *node.Node - if oldNode, err = fs.lu.NodeFromResource(ctx, oldRef); err != nil { - return - } - - if !oldNode.Exists { - err = errtypes.NotFound(filepath.Join(oldNode.ParentID, oldNode.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, oldNode, func(rp *provider.ResourcePermissions) bool { - return rp.Move - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(oldNode.ID) - } - - if newNode, err = fs.lu.NodeFromResource(ctx, newRef); err != nil { - return - } - if newNode.Exists { - err = errtypes.AlreadyExists(filepath.Join(newNode.ParentID, newNode.Name)) - return - } - - return fs.tp.Move(ctx, oldNode, newNode) -} - -// GetMD returns the metadata for the specified resource. -func (fs *Decomposedfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (ri *provider.ResourceInfo, err error) { - var node *node.Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - rp, err := fs.p.AssemblePermissions(ctx, node) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !rp.Stat: - return nil, errtypes.PermissionDenied(node.ID) - } - - return node.AsResourceInfo(ctx, &rp, mdKeys, utils.IsRelativeReference(ref)) -} - -// ListFolder returns a list of resources in the specified folder. 
-func (fs *Decomposedfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string) (finfos []*provider.ResourceInfo, err error) { - var n *node.Node - if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - - ctx, span := rtrace.Provider.Tracer("decomposedfs").Start(ctx, "ListFolder") - defer span.End() - - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return - } - - rp, err := fs.p.AssemblePermissions(ctx, n) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !rp.ListContainer: - return nil, errtypes.PermissionDenied(n.ID) - } - - var children []*node.Node - children, err = fs.tp.ListFolder(ctx, n) - if err != nil { - return - } - - for i := range children { - np := rp - // add this childs permissions - pset := n.PermissionSet(ctx) - node.AddPermissions(&np, &pset) - if ri, err := children[i].AsResourceInfo(ctx, &np, mdKeys, utils.IsRelativeReference(ref)); err == nil { - finfos = append(finfos, ri) - } - } - return -} - -// Delete deletes the specified resource. -func (fs *Decomposedfs) Delete(ctx context.Context, ref *provider.Reference) (err error) { - var node *node.Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { - return rp.Delete - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) - } - - return fs.tp.Delete(ctx, node) -} - -// Download returns a reader to the specified resource. 
-func (fs *Decomposedfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) { - node, err := fs.lu.NodeFromResource(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "decomposedfs: error resolving ref") - } - - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return nil, err - } - - ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { - return rp.InitiateFileDownload - }) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !ok: - return nil, errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) - } - - reader, err := fs.tp.ReadBlob(node.BlobID) - if err != nil { - return nil, errors.Wrap(err, "decomposedfs: error download blob '"+node.ID+"'") - } - return reader, nil -} - -// GetLock returns an existing lock on the given reference. -func (fs *Decomposedfs) GetLock(ctx context.Context, ref *provider.Reference) (*provider.Lock, error) { - return nil, errtypes.NotSupported("unimplemented") -} - -// SetLock puts a lock on the given reference. -func (fs *Decomposedfs) SetLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error { - return errtypes.NotSupported("unimplemented") -} - -// RefreshLock refreshes an existing lock on the given reference. -func (fs *Decomposedfs) RefreshLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock, existingLockID string) error { - return errtypes.NotSupported("unimplemented") -} - -// Unlock removes an existing lock from the given reference. 
-func (fs *Decomposedfs) Unlock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error { - return errtypes.NotSupported("unimplemented") -} diff --git a/pkg/storage/utils/decomposedfs/decomposedfs_concurrency_test.go b/pkg/storage/utils/decomposedfs/decomposedfs_concurrency_test.go deleted file mode 100644 index ccb92d145f..0000000000 --- a/pkg/storage/utils/decomposedfs/decomposedfs_concurrency_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package decomposedfs_test - -import ( - "context" - "os" - "path" - "sync" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/storage" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" - treemocks "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree/mocks" - "github.com/cs3org/reva/tests/helpers" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Decomposed", func() { - var ( - options map[string]interface{} - ctx context.Context - tmpRoot string - fs storage.FS - ) - - BeforeEach(func() { - tmpRoot, err := helpers.TempDir("reva-unit-tests-*-root") - Expect(err).ToNot(HaveOccurred()) - - options = map[string]interface{}{ - "root": tmpRoot, - "share_folder": "/Shares", - "enable_home": false, - "user_layout": "{{.Id.OpaqueId}}", - "owner": "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c", - } - u := &userpb.User{ - Id: &userpb.UserId{ - OpaqueId: "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c", - }, - Username: "test", - Mail: "marie@example.org", - DisplayName: "Marie Curie", - Groups: []string{ - "radium-lovers", - "polonium-lovers", - "physics-lovers", - }, - } - ctx = ctxpkg.ContextSetUser(context.Background(), u) - - bs := &treemocks.Blobstore{} - fs, err = decomposedfs.NewDefault(options, bs) - Expect(err).ToNot(HaveOccurred()) - }) - - AfterEach(func() { - if tmpRoot != "" { - os.RemoveAll(tmpRoot) - } - }) - - Describe("concurrent", func() { - Describe("Upload", func() { - var ( - r1 = []byte("test") - r2 = []byte("another run") - ) - - PIt("generates two revisions", func() { - // runtime.GOMAXPROCS(1) // uncomment to remove concurrency and see revisions working. - wg := &sync.WaitGroup{} - wg.Add(2) - - // upload file with contents: "test" - go func(wg *sync.WaitGroup) { - _ = helpers.Upload(ctx, fs, &provider.Reference{Path: "uploaded.txt"}, r1) - wg.Done() - }(wg) - - // upload file with contents: "another run" - go func(wg *sync.WaitGroup) { - _ = helpers.Upload(ctx, fs, &provider.Reference{Path: "uploaded.txt"}, r2) - wg.Done() - }(wg) - - // this test, by the way the oCIS storage is implemented, is non-deterministic, and the contents - // of uploaded.txt will change on each run depending on which of the 2 routines above makes it - // first into the scheduler. 
In order to make it deterministic, we have to consider the Upload impl- - // ementation and we can leverage concurrency and add locks only when the destination path are the - // same for 2 uploads. - - wg.Wait() - revisions, err := fs.ListRevisions(ctx, &provider.Reference{Path: "uploaded.txt"}) - Expect(err).ToNot(HaveOccurred()) - Expect(len(revisions)).To(Equal(1)) - - _, err = os.ReadFile(path.Join(tmpRoot, "nodes", "root", "uploaded.txt")) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - Describe("CreateDir", func() { - It("handle already existing directories", func() { - for i := 0; i < 10; i++ { - go func() { - defer GinkgoRecover() - err := fs.CreateDir(ctx, &provider.Reference{Path: "/fightforit"}) - if err != nil { - rinfo, err := fs.GetMD(ctx, &provider.Reference{Path: "/fightforit"}, nil) - Expect(err).ToNot(HaveOccurred()) - Expect(rinfo).ToNot(BeNil()) - } - }() - } - }) - }) - }) -}) diff --git a/pkg/storage/utils/decomposedfs/decomposedfs_suite_test.go b/pkg/storage/utils/decomposedfs/decomposedfs_suite_test.go deleted file mode 100644 index 17172c441c..0000000000 --- a/pkg/storage/utils/decomposedfs/decomposedfs_suite_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package decomposedfs_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestDecomposed(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Decomposed Suite") -} diff --git a/pkg/storage/utils/decomposedfs/decomposedfs_test.go b/pkg/storage/utils/decomposedfs/decomposedfs_test.go deleted file mode 100644 index f9be4a88be..0000000000 --- a/pkg/storage/utils/decomposedfs/decomposedfs_test.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package decomposedfs_test - -import ( - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" - helpers "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/testhelpers" - treemocks "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree/mocks" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/stretchr/testify/mock" -) - -var _ = Describe("Decomposed", func() { - var ( - env *helpers.TestEnv - - ref *provider.Reference - ) - - BeforeEach(func() { - ref = &provider.Reference{Path: "/dir1"} - }) - - JustBeforeEach(func() { - var err error - env, err = helpers.NewTestEnv() - Expect(err).ToNot(HaveOccurred()) - }) - - AfterEach(func() { - if env != nil { - env.Cleanup() - } - }) - - Describe("NewDefault", func() { - It("works", func() { - bs := &treemocks.Blobstore{} - _, err := decomposedfs.NewDefault(map[string]interface{}{ - "root": env.Root, - }, bs) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - Describe("Delete", func() { - Context("with insufficient permissions", func() { - It("returns an error", func() { - env.Permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - - err := env.Fs.Delete(env.Ctx, ref) - - Expect(err).To(MatchError(ContainSubstring("permission denied"))) - }) - }) - - Context("with sufficient permissions", func() { - JustBeforeEach(func() { - env.Permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) - }) - - It("does not (yet) delete the blob from the blobstore", func() { - err := env.Fs.Delete(env.Ctx, ref) - - Expect(err).ToNot(HaveOccurred()) - env.Blobstore.AssertNotCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) - }) - }) - }) -}) diff --git a/pkg/storage/utils/decomposedfs/grants.go b/pkg/storage/utils/decomposedfs/grants.go deleted file mode 100644 index bf61e3b6d9..0000000000 --- a/pkg/storage/utils/decomposedfs/grants.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package decomposedfs - -import ( - "context" - "path/filepath" - "strings" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/utils/ace" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/pkg/xattr" -) - -// SpaceGrant is the key used to signal not to create a new space when a grant is assigned to a storage space. -var SpaceGrant struct{} - -// DenyGrant denies access to a resource. -func (fs *Decomposedfs) DenyGrant(ctx context.Context, ref *provider.Reference, g *provider.Grantee) error { - return errtypes.NotSupported("decomposedfs: not supported") -} - -// AddGrant adds a grant to a resource. 
-func (fs *Decomposedfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { - log := appctx.GetLogger(ctx) - log.Debug().Interface("ref", ref).Interface("grant", g).Msg("AddGrant()") - var node *node.Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { - // TODO remove AddGrant or UpdateGrant grant from CS3 api, redundant? tracked in https://github.com/cs3org/cs3apis/issues/92 - return rp.AddGrant || rp.UpdateGrant - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) - } - - np := fs.lu.InternalPath(node.ID) - e := ace.FromGrant(g) - principal, value := e.Marshal() - if err := xattr.Set(np, xattrs.GrantPrefix+principal, value); err != nil { - return err - } - - // when a grant is added to a space, do not add a new space under "shares" - if spaceGrant := ctx.Value(SpaceGrant); spaceGrant == nil { - err := fs.createStorageSpace(ctx, "share", node.ID) - if err != nil { - return err - } - } - - return fs.tp.Propagate(ctx, node) -} - -// ListGrants lists the grants on the specified resource. 
-func (fs *Decomposedfs) ListGrants(ctx context.Context, ref *provider.Reference) (grants []*provider.Grant, err error) { - var node *node.Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { - return rp.ListGrants - }) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !ok: - return nil, errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) - } - - log := appctx.GetLogger(ctx) - np := fs.lu.InternalPath(node.ID) - var attrs []string - if attrs, err = xattr.List(np); err != nil { - log.Error().Err(err).Msg("error listing attributes") - return nil, err - } - - log.Debug().Interface("attrs", attrs).Msg("read attributes") - - aces := extractACEsFromAttrs(ctx, np, attrs) - - grants = make([]*provider.Grant, 0, len(aces)) - for i := range aces { - grants = append(grants, aces[i].Grant()) - } - - return grants, nil -} - -// RemoveGrant removes a grant from resource. 
-func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { - var node *node.Node - if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { - return rp.RemoveGrant - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) - } - - var attr string - if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP { - attr = xattrs.GrantPrefix + xattrs.GroupAcePrefix + g.Grantee.GetGroupId().OpaqueId - } else { - attr = xattrs.GrantPrefix + xattrs.UserAcePrefix + g.Grantee.GetUserId().OpaqueId - } - - np := fs.lu.InternalPath(node.ID) - if err = xattr.Remove(np, attr); err != nil { - return - } - - return fs.tp.Propagate(ctx, node) -} - -// UpdateGrant updates a grant on a resource. -func (fs *Decomposedfs) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { - // TODO remove AddGrant or UpdateGrant grant from CS3 api, redundant? tracked in https://github.com/cs3org/cs3apis/issues/92 - return fs.AddGrant(ctx, ref, g) -} - -// extractACEsFromAttrs reads ACEs in the list of attrs from the node. 
-func extractACEsFromAttrs(ctx context.Context, fsfn string, attrs []string) (entries []*ace.ACE) { - log := appctx.GetLogger(ctx) - entries = []*ace.ACE{} - for i := range attrs { - if strings.HasPrefix(attrs[i], xattrs.GrantPrefix) { - var value []byte - var err error - if value, err = xattr.Get(fsfn, attrs[i]); err != nil { - log.Error().Err(err).Str("attr", attrs[i]).Msg("could not read attribute") - continue - } - var e *ace.ACE - principal := attrs[i][len(xattrs.GrantPrefix):] - if e, err = ace.Unmarshal(principal, value); err != nil { - log.Error().Err(err).Str("principal", principal).Str("attr", attrs[i]).Msg("could not unmarshal ace") - continue - } - entries = append(entries, e) - } - } - return -} diff --git a/pkg/storage/utils/decomposedfs/grants_test.go b/pkg/storage/utils/decomposedfs/grants_test.go deleted file mode 100644 index ace54e7730..0000000000 --- a/pkg/storage/utils/decomposedfs/grants_test.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package decomposedfs_test - -import ( - "io/fs" - "os" - "path" - "path/filepath" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - helpers "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/testhelpers" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/pkg/xattr" - "github.com/stretchr/testify/mock" -) - -type testFS struct { - root string -} - -func (t testFS) Open(name string) (fs.File, error) { - return os.Open(filepath.Join(t.root, name)) -} - -var _ = Describe("Grants", func() { - var ( - env *helpers.TestEnv - ref *provider.Reference - grant *provider.Grant - tfs = &testFS{} - ) - - BeforeEach(func() { - ref = &provider.Reference{Path: "/dir1"} - - grant = &provider.Grant{ - Grantee: &provider.Grantee{ - Type: provider.GranteeType_GRANTEE_TYPE_USER, - Id: &provider.Grantee_UserId{ - UserId: &userpb.UserId{ - OpaqueId: "4c510ada-c86b-4815-8820-42cdf82c3d51", - }, - }, - }, - Permissions: &provider.ResourcePermissions{ - Stat: true, - Move: true, - Delete: false, - }, - } - }) - - JustBeforeEach(func() { - var err error - env, err = helpers.NewTestEnv() - Expect(err).ToNot(HaveOccurred()) - }) - - AfterEach(func() { - if env != nil { - env.Cleanup() - } - }) - - Context("with insufficient permissions", func() { - JustBeforeEach(func() { - env.Permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - }) - - Describe("AddGrant", func() { - It("adds grants", func() { - err := env.Fs.AddGrant(env.Ctx, ref, grant) - Expect(err).To(MatchError(ContainSubstring("permission denied"))) - }) - }) - }) - - Context("with sufficient permissions", func() { - JustBeforeEach(func() { - env.Permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) - }) - - Describe("AddGrant", func() { - It("adds grants", func() { - n, 
err := env.Lookup.NodeFromPath(env.Ctx, "/dir1", false) - Expect(err).ToNot(HaveOccurred()) - - err = env.Fs.AddGrant(env.Ctx, ref, grant) - Expect(err).ToNot(HaveOccurred()) - - localPath := path.Join(env.Root, "nodes", n.ID) - attr, err := xattr.Get(localPath, xattrs.GrantPrefix+xattrs.UserAcePrefix+grant.Grantee.GetUserId().OpaqueId) - Expect(err).ToNot(HaveOccurred()) - Expect(string(attr)).To(Equal("\x00t=A:f=:p=rw")) - }) - - It("creates a storage space per created grant", func() { - err := env.Fs.AddGrant(env.Ctx, ref, grant) - Expect(err).ToNot(HaveOccurred()) - - spacesPath := filepath.Join(env.Root, "spaces") - tfs.root = spacesPath - entries, err := fs.ReadDir(tfs, "share") - Expect(err).ToNot(HaveOccurred()) - Expect(len(entries)).To(BeNumerically(">=", 1)) - }) - }) - - Describe("ListGrants", func() { - It("lists existing grants", func() { - err := env.Fs.AddGrant(env.Ctx, ref, grant) - Expect(err).ToNot(HaveOccurred()) - - grants, err := env.Fs.ListGrants(env.Ctx, ref) - Expect(err).ToNot(HaveOccurred()) - Expect(len(grants)).To(Equal(1)) - - g := grants[0] - Expect(g.Grantee.GetUserId().OpaqueId).To(Equal(grant.Grantee.GetUserId().OpaqueId)) - Expect(g.Permissions.Stat).To(BeTrue()) - Expect(g.Permissions.Move).To(BeTrue()) - Expect(g.Permissions.Delete).To(BeFalse()) - }) - }) - - Describe("RemoveGrants", func() { - It("removes the grant", func() { - err := env.Fs.AddGrant(env.Ctx, ref, grant) - Expect(err).ToNot(HaveOccurred()) - - grants, err := env.Fs.ListGrants(env.Ctx, ref) - Expect(err).ToNot(HaveOccurred()) - Expect(len(grants)).To(Equal(1)) - - err = env.Fs.RemoveGrant(env.Ctx, ref, grant) - Expect(err).ToNot(HaveOccurred()) - - grants, err = env.Fs.ListGrants(env.Ctx, ref) - Expect(err).ToNot(HaveOccurred()) - Expect(len(grants)).To(Equal(0)) - }) - }) - }) -}) diff --git a/pkg/storage/utils/decomposedfs/lookup.go b/pkg/storage/utils/decomposedfs/lookup.go deleted file mode 100644 index acb7c7fa92..0000000000 --- 
a/pkg/storage/utils/decomposedfs/lookup.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package decomposedfs - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/cs3org/reva/pkg/storage/utils/templates" - "github.com/pkg/xattr" -) - -// Lookup implements transformations from filepath to node and back. -type Lookup struct { - Options *options.Options -} - -// NodeFromResource takes in a request path or request id and converts it to a Node. 
-func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) (*node.Node, error) { - if ref.ResourceId != nil { - // check if a storage space reference is used - // currently, the decomposed fs uses the root node id as the space id - n, err := lu.NodeFromID(ctx, ref.ResourceId) - if err != nil { - return nil, err - } - // is this a relative reference? - if ref.Path != "" { - p := filepath.Clean(ref.Path) - if p != "." { - // walk the relative path - n, err = lu.WalkPath(ctx, n, p, false, func(ctx context.Context, n *node.Node) error { - return nil - }) - if err != nil { - return nil, err - } - } - } - return n, nil - } - - if ref.Path != "" { - return lu.NodeFromPath(ctx, ref.GetPath(), false) - } - - // reference is invalid - return nil, fmt.Errorf("invalid reference %+v. at least resource_id or path must be set", ref) -} - -// NodeFromPath converts a filename into a Node. -func (lu *Lookup) NodeFromPath(ctx context.Context, fn string, followReferences bool) (*node.Node, error) { - log := appctx.GetLogger(ctx) - log.Debug().Interface("fn", fn).Msg("NodeFromPath()") - - root, err := lu.HomeOrRootNode(ctx) - if err != nil { - return nil, err - } - - n := root - // TODO collect permissions of the current user on every segment - fn = filepath.Clean(fn) - if fn != "/" && fn != "." { - n, err = lu.WalkPath(ctx, n, fn, followReferences, func(ctx context.Context, n *node.Node) error { - log.Debug().Interface("node", n).Msg("NodeFromPath() walk") - if n.SpaceRoot != nil && n.SpaceRoot != root { - root = n.SpaceRoot - } - return nil - }) - if err != nil { - return nil, err - } - } - n.SpaceRoot = root - return n, nil -} - -// NodeFromID returns the internal path for the id. 
-func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *node.Node, err error) { - if id == nil || id.OpaqueId == "" { - return nil, fmt.Errorf("invalid resource id %+v", id) - } - n, err = node.ReadNode(ctx, lu, id.OpaqueId) - if err != nil { - return nil, err - } - - return n, n.FindStorageSpaceRoot() -} - -// Path returns the path for node. -func (lu *Lookup) Path(ctx context.Context, n *node.Node) (p string, err error) { - var root *node.Node - if root, err = lu.HomeOrRootNode(ctx); err != nil { - return - } - for n.ID != root.ID { - p = filepath.Join(n.Name, p) - if n, err = n.Parent(); err != nil { - appctx.GetLogger(ctx). - Error().Err(err). - Str("path", p). - Interface("node", n). - Msg("Path()") - return - } - } - p = filepath.Join("/", p) - return -} - -// RootNode returns the root node of the storage. -func (lu *Lookup) RootNode(ctx context.Context) (*node.Node, error) { - n := node.New("root", "", "", 0, "", nil, lu) - n.Exists = true - return n, nil -} - -// HomeNode returns the home node of a user. -func (lu *Lookup) HomeNode(ctx context.Context) (node *node.Node, err error) { - if !lu.Options.EnableHome { - return nil, errtypes.NotSupported("Decomposedfs: home supported disabled") - } - - if node, err = lu.RootNode(ctx); err != nil { - return - } - node, err = lu.WalkPath(ctx, node, lu.mustGetUserLayout(ctx), false, nil) - return -} - -// WalkPath calls n.Child(segment) on every path segment in p starting at the node r. -// If a function f is given it will be executed for every segment node, but not the root node r. -// If followReferences is given the current visited reference node is replaced by the referenced node. 
-func (lu *Lookup) WalkPath(ctx context.Context, r *node.Node, p string, followReferences bool, f func(ctx context.Context, n *node.Node) error) (*node.Node, error) { - segments := strings.Split(strings.Trim(p, "/"), "/") - var err error - for i := range segments { - if r, err = r.Child(ctx, segments[i]); err != nil { - return r, err - } - - if followReferences { - if attrBytes, err := xattr.Get(r.InternalPath(), xattrs.ReferenceAttr); err == nil { - realNodeID := attrBytes - ref, err := xattrs.ReferenceFromAttr(realNodeID) - if err != nil { - return nil, err - } - - r, err = lu.NodeFromID(ctx, ref.ResourceId) - if err != nil { - return nil, err - } - } - } - if node.IsSpaceRoot(r) { - r.SpaceRoot = r - } - - if !r.Exists && i < len(segments)-1 { - return r, errtypes.NotFound(segments[i]) - } - if f != nil { - if err = f(ctx, r); err != nil { - return r, err - } - } - } - return r, nil -} - -// HomeOrRootNode returns the users home node when home support is enabled. -// it returns the storages root node otherwise. -func (lu *Lookup) HomeOrRootNode(ctx context.Context) (node *node.Node, err error) { - if lu.Options.EnableHome { - return lu.HomeNode(ctx) - } - return lu.RootNode(ctx) -} - -// InternalRoot returns the internal storage root directory. -func (lu *Lookup) InternalRoot() string { - return lu.Options.Root -} - -// InternalPath returns the internal path for a given ID. -func (lu *Lookup) InternalPath(id string) string { - return filepath.Join(lu.Options.Root, "nodes", id) -} - -func (lu *Lookup) mustGetUserLayout(ctx context.Context) string { - u := ctxpkg.ContextMustGetUser(ctx) - return templates.WithUser(u, lu.Options.UserLayout) -} - -// ShareFolder returns the internal storage root directory. 
-func (lu *Lookup) ShareFolder() string { - return lu.Options.ShareFolder -} diff --git a/pkg/storage/utils/decomposedfs/lookup_test.go b/pkg/storage/utils/decomposedfs/lookup_test.go deleted file mode 100644 index 1c9df693ce..0000000000 --- a/pkg/storage/utils/decomposedfs/lookup_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package decomposedfs_test - -import ( - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - helpers "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/testhelpers" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Lookup", func() { - var ( - env *helpers.TestEnv - ) - - JustBeforeEach(func() { - var err error - env, err = helpers.NewTestEnv() - Expect(err).ToNot(HaveOccurred()) - }) - - AfterEach(func() { - if env != nil { - env.Cleanup() - } - }) - - Describe("Node from path", func() { - It("returns the path including a leading slash", func() { - n, err := env.Lookup.NodeFromPath(env.Ctx, "/dir1/file1", false) - Expect(err).ToNot(HaveOccurred()) - - path, err := env.Lookup.Path(env.Ctx, n) - Expect(err).ToNot(HaveOccurred()) - Expect(path).To(Equal("/dir1/file1")) - }) - }) - - Describe("Node From Resource only by path", func() { - It("returns the path including a leading slash and the space root is set", func() { - n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{Path: "/dir1/subdir1/file2"}) - Expect(err).ToNot(HaveOccurred()) - - path, err := env.Lookup.Path(env.Ctx, n) - Expect(err).ToNot(HaveOccurred()) - Expect(path).To(Equal("/dir1/subdir1/file2")) - Expect(n.SpaceRoot.Name).To(Equal("userid")) - Expect(n.SpaceRoot.ParentID).To(Equal("root")) - }) - }) - - Describe("Node From Resource only by id", func() { - It("returns the path including a leading slash and the space root is set", func() { - // do a node lookup by path - nRef, err := env.Lookup.NodeFromPath(env.Ctx, "/dir1/file1", false) - Expect(err).ToNot(HaveOccurred()) - - // try to find the same node by id - n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ResourceId: &provider.ResourceId{OpaqueId: nRef.ID}}) - Expect(err).ToNot(HaveOccurred()) - - // Check if we got the right node and spaceRoot - path, err := env.Lookup.Path(env.Ctx, n) - Expect(err).ToNot(HaveOccurred()) - Expect(path).To(Equal("/dir1/file1")) - Expect(n.SpaceRoot.Name).To(Equal("userid")) - Expect(n.SpaceRoot.ParentID).To(Equal("root")) - }) - }) - - Describe("Node From Resource by id and relative path", func() { - It("returns the path including a leading 
slash and the space root is set", func() { - // do a node lookup by path for the parent - nRef, err := env.Lookup.NodeFromPath(env.Ctx, "/dir1", false) - Expect(err).ToNot(HaveOccurred()) - - // try to find the child node by parent id and relative path - n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ResourceId: &provider.ResourceId{OpaqueId: nRef.ID}, Path: "./file1"}) - Expect(err).ToNot(HaveOccurred()) - - // Check if we got the right node and spaceRoot - path, err := env.Lookup.Path(env.Ctx, n) - Expect(err).ToNot(HaveOccurred()) - Expect(path).To(Equal("/dir1/file1")) - Expect(n.SpaceRoot.Name).To(Equal("userid")) - Expect(n.SpaceRoot.ParentID).To(Equal("root")) - }) - }) - - Describe("Reference Parsing", func() { - It("parses a valid cs3 reference", func() { - in := []byte("cs3:bede11a0-ea3d-11eb-a78b-bf907adce8ed/c402d01c-ea3d-11eb-a0fc-c32f9d32528f") - ref, err := xattrs.ReferenceFromAttr(in) - - Expect(err).ToNot(HaveOccurred()) - Expect(ref.ResourceId.StorageId).To(Equal("bede11a0-ea3d-11eb-a78b-bf907adce8ed")) - Expect(ref.ResourceId.OpaqueId).To(Equal("c402d01c-ea3d-11eb-a0fc-c32f9d32528f")) - }) - }) -}) diff --git a/pkg/storage/utils/decomposedfs/metadata.go b/pkg/storage/utils/decomposedfs/metadata.go deleted file mode 100644 index be5d52a92d..0000000000 --- a/pkg/storage/utils/decomposedfs/metadata.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package decomposedfs - -import ( - "context" - "fmt" - "path/filepath" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/cs3org/reva/pkg/utils" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -// SetArbitraryMetadata sets the metadata on a resource. -func (fs *Decomposedfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) (err error) { - n, err := fs.lu.NodeFromResource(ctx, ref) - if err != nil { - return errors.Wrap(err, "decomposedfs: error resolving ref") - } - sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return err - } - - ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { - // TODO add explicit SetArbitraryMetadata grant to CS3 api, tracked in https://github.com/cs3org/cs3apis/issues/91 - return rp.InitiateFileUpload - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - nodePath := n.InternalPath() - - errs := []error{} - // TODO should we really continue updating when an error occurs? - if md.Metadata != nil { - if val, ok := md.Metadata["mtime"]; ok { - delete(md.Metadata, "mtime") - err := n.SetMtime(ctx, val) - if err != nil { - errs = append(errs, errors.Wrap(err, "could not set mtime")) - } - } - // TODO(jfd) special handling for atime? 
- // TODO(jfd) allow setting birth time (btime)? - // TODO(jfd) any other metadata that is interesting? fileid? - // TODO unset when file is updated - // TODO unset when folder is updated or add timestamp to etag? - if val, ok := md.Metadata["etag"]; ok { - delete(md.Metadata, "etag") - err := n.SetEtag(ctx, val) - if err != nil { - errs = append(errs, errors.Wrap(err, "could not set etag")) - } - } - if val, ok := md.Metadata[node.FavoriteKey]; ok { - delete(md.Metadata, node.FavoriteKey) - if u, ok := ctxpkg.ContextGetUser(ctx); ok { - if uid := u.GetId(); uid != nil { - if err := n.SetFavorite(uid, val); err != nil { - sublog.Error().Err(err). - Interface("user", u). - Msg("could not set favorite flag") - errs = append(errs, errors.Wrap(err, "could not set favorite flag")) - } - } else { - sublog.Error().Interface("user", u).Msg("user has no id") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) - } - } else { - sublog.Error().Interface("user", u).Msg("error getting user from ctx") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) - } - } - } - for k, v := range md.Metadata { - attrName := xattrs.MetadataPrefix + k - if err = xattr.Set(nodePath, attrName, []byte(v)); err != nil { - errs = append(errs, errors.Wrap(err, "decomposedfs: could not set metadata attribute "+attrName+" to "+k)) - } - } - - switch len(errs) { - case 0: - return fs.tp.Propagate(ctx, n) - case 1: - // TODO Propagate if anything changed - return errs[0] - default: - // TODO Propagate if anything changed - // TODO how to return multiple errors? - return errors.New("multiple errors occurred, see log for details") - } -} - -// UnsetArbitraryMetadata unsets the metadata on the given resource. 
-func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) (err error) { - n, err := fs.lu.NodeFromResource(ctx, ref) - if err != nil { - return errors.Wrap(err, "decomposedfs: error resolving ref") - } - sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return err - } - - ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { - // TODO use SetArbitraryMetadata grant to CS3 api, tracked in https://github.com/cs3org/cs3apis/issues/91 - return rp.InitiateFileUpload - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - nodePath := n.InternalPath() - errs := []error{} - for _, k := range keys { - switch k { - case node.FavoriteKey: - if u, ok := ctxpkg.ContextGetUser(ctx); ok { - // the favorite flag is specific to the user, so we need to incorporate the userid - if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s:%s:%s@%s", xattrs.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) - if err := xattr.Remove(nodePath, fa); err != nil { - sublog.Error().Err(err). - Interface("user", u). - Str("key", fa). - Msg("could not unset favorite flag") - errs = append(errs, errors.Wrap(err, "could not unset favorite flag")) - } - } else { - sublog.Error(). - Interface("user", u). - Msg("user has no id") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) - } - } else { - sublog.Error(). - Interface("user", u). 
- Msg("error getting user from ctx") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) - } - default: - if err = xattr.Remove(nodePath, xattrs.MetadataPrefix+k); err != nil { - // a non-existing attribute will return an error, which we can ignore - // (using string compare because the error type is syscall.Errno and not wrapped/recognizable) - if e, ok := err.(*xattr.Error); !ok || !(e.Err.Error() == "no data available" || - // darwin - e.Err.Error() == "attribute not found") { - sublog.Error().Err(err). - Str("key", k). - Msg("could not unset metadata") - errs = append(errs, errors.Wrap(err, "could not unset metadata")) - } - } - } - } - switch len(errs) { - case 0: - return fs.tp.Propagate(ctx, n) - case 1: - // TODO Propagate if anything changed - return errs[0] - default: - // TODO Propagate if anything changed - // TODO how to return multiple errors? - return errors.New("multiple errors occurred, see log for details") - } -} diff --git a/pkg/storage/utils/decomposedfs/mocks/PermissionsChecker.go b/pkg/storage/utils/decomposedfs/mocks/PermissionsChecker.go deleted file mode 100644 index 54e078220b..0000000000 --- a/pkg/storage/utils/decomposedfs/mocks/PermissionsChecker.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package mocks - -import ( - context "context" - - node "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - mock "github.com/stretchr/testify/mock" - - providerv1beta1 "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" -) - -// PermissionsChecker is an autogenerated mock type for the PermissionsChecker type -type PermissionsChecker struct { - mock.Mock -} - -// AssemblePermissions provides a mock function with given fields: ctx, n -func (_m *PermissionsChecker) AssemblePermissions(ctx context.Context, n *node.Node) (providerv1beta1.ResourcePermissions, error) { - ret := _m.Called(ctx, n) - - var r0 providerv1beta1.ResourcePermissions - if rf, ok := ret.Get(0).(func(context.Context, *node.Node) providerv1beta1.ResourcePermissions); ok { - r0 = rf(ctx, n) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(providerv1beta1.ResourcePermissions) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *node.Node) error); ok { - r1 = rf(ctx, n) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HasPermission provides a mock function with given fields: ctx, n, check -func (_m *PermissionsChecker) HasPermission(ctx context.Context, n *node.Node, check func(*providerv1beta1.ResourcePermissions) bool) (bool, error) { - ret := _m.Called(ctx, n, check) - - var r0 bool - if rf, ok := ret.Get(0).(func(context.Context, *node.Node, func(*providerv1beta1.ResourcePermissions) bool) bool); ok { - r0 = rf(ctx, n, check) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *node.Node, func(*providerv1beta1.ResourcePermissions) bool) error); ok { - r1 = rf(ctx, n, check) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} 
diff --git a/pkg/storage/utils/decomposedfs/mocks/Tree.go b/pkg/storage/utils/decomposedfs/mocks/Tree.go deleted file mode 100644 index 51acf3190a..0000000000 --- a/pkg/storage/utils/decomposedfs/mocks/Tree.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package mocks - -import ( - context "context" - io "io" - - mock "github.com/stretchr/testify/mock" - - node "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - - os "os" - - providerv1beta1 "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" -) - -// Tree is an autogenerated mock type for the Tree type -type Tree struct { - mock.Mock -} - -// CreateDir provides a mock function with given fields: ctx, _a1 -func (_m *Tree) CreateDir(ctx context.Context, _a1 *node.Node) error { - ret := _m.Called(ctx, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *node.Node) error); ok { - r0 = rf(ctx, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Delete provides a mock function with given fields: ctx, _a1 -func (_m *Tree) Delete(ctx context.Context, _a1 *node.Node) error { - ret := _m.Called(ctx, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *node.Node) error); ok { - r0 = rf(ctx, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteBlob provides a mock function with given fields: key -func (_m *Tree) DeleteBlob(key string) error { - ret := _m.Called(key) - - var r0 error - if rf, ok := ret.Get(0).(func(string) error); ok { - r0 = rf(key) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetMD provides a mock function with given fields: ctx, _a1 -func (_m *Tree) GetMD(ctx context.Context, _a1 *node.Node) (os.FileInfo, error) { - ret := _m.Called(ctx, _a1) - - var r0 os.FileInfo - if rf, ok := ret.Get(0).(func(context.Context, *node.Node) os.FileInfo); ok { - r0 = rf(ctx, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(os.FileInfo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *node.Node) error); ok { - r1 = rf(ctx, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetPathByID provides a mock function with given fields: ctx, id -func (_m *Tree) GetPathByID(ctx context.Context, id *providerv1beta1.Reference) (string, 
error) { - ret := _m.Called(ctx, id) - - var r0 string - if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta1.Reference) string); ok { - r0 = rf(ctx, id) - } else { - r0 = ret.Get(0).(string) - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *providerv1beta1.Reference) error); ok { - r1 = rf(ctx, id) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListFolder provides a mock function with given fields: ctx, _a1 -func (_m *Tree) ListFolder(ctx context.Context, _a1 *node.Node) ([]*node.Node, error) { - ret := _m.Called(ctx, _a1) - - var r0 []*node.Node - if rf, ok := ret.Get(0).(func(context.Context, *node.Node) []*node.Node); ok { - r0 = rf(ctx, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*node.Node) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *node.Node) error); ok { - r1 = rf(ctx, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Move provides a mock function with given fields: ctx, oldNode, newNode -func (_m *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) error { - ret := _m.Called(ctx, oldNode, newNode) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *node.Node, *node.Node) error); ok { - r0 = rf(ctx, oldNode, newNode) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Propagate provides a mock function with given fields: ctx, _a1 -func (_m *Tree) Propagate(ctx context.Context, _a1 *node.Node) error { - ret := _m.Called(ctx, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *node.Node) error); ok { - r0 = rf(ctx, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// PurgeRecycleItemFunc provides a mock function with given fields: ctx, key -func (_m *Tree) PurgeRecycleItemFunc(ctx context.Context, key string) (*node.Node, func() error, error) { - ret := _m.Called(ctx, key) - - var r0 *node.Node - if rf, ok := ret.Get(0).(func(context.Context, string) *node.Node); ok { - r0 = 
rf(ctx, key) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*node.Node) - } - } - - var r1 func() error - if rf, ok := ret.Get(1).(func(context.Context, string) func() error); ok { - r1 = rf(ctx, key) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(func() error) - } - } - - var r2 error - if rf, ok := ret.Get(2).(func(context.Context, string) error); ok { - r2 = rf(ctx, key) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// ReadBlob provides a mock function with given fields: key -func (_m *Tree) ReadBlob(key string) (io.ReadCloser, error) { - ret := _m.Called(key) - - var r0 io.ReadCloser - if rf, ok := ret.Get(0).(func(string) io.ReadCloser); ok { - r0 = rf(key) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(io.ReadCloser) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(key) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RestoreRecycleItemFunc provides a mock function with given fields: ctx, key -func (_m *Tree) RestoreRecycleItemFunc(ctx context.Context, key string) (*node.Node, func() error, error) { - ret := _m.Called(ctx, key) - - var r0 *node.Node - if rf, ok := ret.Get(0).(func(context.Context, string) *node.Node); ok { - r0 = rf(ctx, key) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*node.Node) - } - } - - var r1 func() error - if rf, ok := ret.Get(1).(func(context.Context, string) func() error); ok { - r1 = rf(ctx, key) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(func() error) - } - } - - var r2 error - if rf, ok := ret.Get(2).(func(context.Context, string) error); ok { - r2 = rf(ctx, key) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// Setup provides a mock function with given fields: owner -func (_m *Tree) Setup(owner string) error { - ret := _m.Called(owner) - - var r0 error - if rf, ok := ret.Get(0).(func(string) error); ok { - r0 = rf(owner) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WriteBlob provides a 
mock function with given fields: key, reader -func (_m *Tree) WriteBlob(key string, reader io.Reader) error { - ret := _m.Called(key, reader) - - var r0 error - if rf, ok := ret.Get(0).(func(string, io.Reader) error); ok { - r0 = rf(key, reader) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/pkg/storage/utils/decomposedfs/node/node.go b/pkg/storage/utils/decomposedfs/node/node.go deleted file mode 100644 index bc1b1317e2..0000000000 --- a/pkg/storage/utils/decomposedfs/node/node.go +++ /dev/null @@ -1,995 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package node - -import ( - "context" - "crypto/md5" - "encoding/hex" - "fmt" - "hash" - "io" - "io/fs" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" - "github.com/cs3org/reva/internal/grpc/services/storageprovider" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/mime" - "github.com/cs3org/reva/pkg/storage/utils/ace" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/cs3org/reva/pkg/utils" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -// Define keys and values used in the node metadata. -const ( - FavoriteKey = "http://owncloud.org/ns/favorite" - ShareTypesKey = "http://owncloud.org/ns/share-types" - ChecksumsKey = "http://owncloud.org/ns/checksums" - UserShareType = "0" - QuotaKey = "quota" - - QuotaUncalculated = "-1" - QuotaUnknown = "-2" - QuotaUnlimited = "-3" -) - -// Node represents a node in the tree and provides methods to get a Parent or Child instance. -type Node struct { - ParentID string - ID string - Name string - Blobsize int64 - BlobID string - owner *userpb.UserId - Exists bool - SpaceRoot *Node - - lu PathLookup -} - -// PathLookup defines the interface for the lookup component. -type PathLookup interface { - RootNode(ctx context.Context) (node *Node, err error) - HomeOrRootNode(ctx context.Context) (node *Node, err error) - - InternalRoot() string - InternalPath(ID string) string - Path(ctx context.Context, n *Node) (path string, err error) - ShareFolder() string -} - -// New returns a new instance of Node. 
-func New(id, parentID, name string, blobsize int64, blobID string, owner *userpb.UserId, lu PathLookup) *Node { - if blobID == "" { - blobID = uuid.New().String() - } - return &Node{ - ID: id, - ParentID: parentID, - Name: name, - Blobsize: blobsize, - owner: owner, - lu: lu, - BlobID: blobID, - } -} - -// ChangeOwner sets the owner of n to newOwner. -func (n *Node) ChangeOwner(new *userpb.UserId) (err error) { - nodePath := n.InternalPath() - n.owner = new - if err = xattr.Set(nodePath, xattrs.OwnerIDAttr, []byte(new.OpaqueId)); err != nil { - return errors.Wrap(err, "decomposedfs: could not reset owner id attribute") - } - if err = xattr.Set(nodePath, xattrs.OwnerIDPAttr, []byte(new.Idp)); err != nil { - return errors.Wrap(err, "decomposedfs: could not reset owner idp attribute") - } - if err = xattr.Set(nodePath, xattrs.OwnerTypeAttr, []byte(utils.UserTypeToString(new.Type))); err != nil { - return errors.Wrap(err, "decomposedfs: could not reset owner idp attribute") - } - - return -} - -// SetMetadata populates a given key with its value. -// Note that consumers should be aware of the metadata options on xattrs.go. -func (n *Node) SetMetadata(key string, val string) (err error) { - nodePath := n.InternalPath() - if err := xattr.Set(nodePath, key, []byte(val)); err != nil { - return errors.Wrap(err, "decomposedfs: could not set parentid attribute") - } - return nil -} - -// WriteMetadata writes the Node metadata to disk. 
-func (n *Node) WriteMetadata(owner *userpb.UserId) (err error) { - nodePath := n.InternalPath() - if err = xattr.Set(nodePath, xattrs.ParentidAttr, []byte(n.ParentID)); err != nil { - return errors.Wrap(err, "decomposedfs: could not set parentid attribute") - } - if err = xattr.Set(nodePath, xattrs.NameAttr, []byte(n.Name)); err != nil { - return errors.Wrap(err, "decomposedfs: could not set name attribute") - } - if err = xattr.Set(nodePath, xattrs.BlobIDAttr, []byte(n.BlobID)); err != nil { - return errors.Wrap(err, "decomposedfs: could not set blobid attribute") - } - if err = xattr.Set(nodePath, xattrs.BlobsizeAttr, []byte(fmt.Sprintf("%d", n.Blobsize))); err != nil { - return errors.Wrap(err, "decomposedfs: could not set blobsize attribute") - } - if owner == nil { - if err = xattr.Set(nodePath, xattrs.OwnerIDAttr, []byte("")); err != nil { - return errors.Wrap(err, "decomposedfs: could not set empty owner id attribute") - } - if err = xattr.Set(nodePath, xattrs.OwnerIDPAttr, []byte("")); err != nil { - return errors.Wrap(err, "decomposedfs: could not set empty owner idp attribute") - } - if err = xattr.Set(nodePath, xattrs.OwnerTypeAttr, []byte("")); err != nil { - return errors.Wrap(err, "decomposedfs: could not set empty owner type attribute") - } - } else { - if err = xattr.Set(nodePath, xattrs.OwnerIDAttr, []byte(owner.OpaqueId)); err != nil { - return errors.Wrap(err, "decomposedfs: could not set owner id attribute") - } - if err = xattr.Set(nodePath, xattrs.OwnerIDPAttr, []byte(owner.Idp)); err != nil { - return errors.Wrap(err, "decomposedfs: could not set owner idp attribute") - } - if err = xattr.Set(nodePath, xattrs.OwnerTypeAttr, []byte(utils.UserTypeToString(owner.Type))); err != nil { - return errors.Wrap(err, "decomposedfs: could not set owner idp attribute") - } - } - return -} - -// ReadNode creates a new instance from an id and checks if it exists. 
-func ReadNode(ctx context.Context, lu PathLookup, id string) (n *Node, err error) { - n = &Node{ - lu: lu, - ID: id, - } - - nodePath := n.InternalPath() - - // lookup parent id in extended attributes - var attrBytes []byte - attrBytes, err = xattr.Get(nodePath, xattrs.ParentidAttr) - switch { - case err == nil: - n.ParentID = string(attrBytes) - case isAttrUnset(err): - return nil, errtypes.InternalError(err.Error()) - case isNotFound(err): - return n, nil // swallow not found, the node defaults to exists = false - default: - return nil, errtypes.InternalError(err.Error()) - } - - // check if this is a space root - if _, err = xattr.Get(nodePath, xattrs.SpaceNameAttr); err == nil { - n.SpaceRoot = n - } - // lookup name in extended attributes - if attrBytes, err = xattr.Get(nodePath, xattrs.NameAttr); err == nil { - n.Name = string(attrBytes) - } else { - return - } - // lookup blobID in extended attributes - if attrBytes, err = xattr.Get(nodePath, xattrs.BlobIDAttr); err == nil { - n.BlobID = string(attrBytes) - } else { - return - } - // Lookup blobsize - var blobSize int64 - if blobSize, err = ReadBlobSizeAttr(nodePath); err == nil { - n.Blobsize = blobSize - } else { - return - } - - // Check if parent exists. Otherwise this node is part of a deleted subtree - _, err = os.Stat(lu.InternalPath(n.ParentID)) - if err != nil { - if isNotFound(err) { - return nil, errtypes.NotFound(err.Error()) - } - return nil, err - } - n.Exists = true - return -} - -// The os error is buried inside the fs.PathError error. -func isNotDir(err error) bool { - if perr, ok := err.(*fs.PathError); ok { - if serr, ok2 := perr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENOTDIR - } - } - return false -} - -// Child returns the child node with the given name. 
-func (n *Node) Child(ctx context.Context, name string) (*Node, error) { - link, err := os.Readlink(filepath.Join(n.InternalPath(), filepath.Join("/", name))) - if err != nil { - if os.IsNotExist(err) || isNotDir(err) { - c := &Node{ - lu: n.lu, - ParentID: n.ID, - Name: name, - SpaceRoot: n.SpaceRoot, - } - return c, nil // if the file does not exist we return a node that has Exists = false - } - - return nil, errors.Wrap(err, "decomposedfs: Wrap: readlink error") - } - - var c *Node - if strings.HasPrefix(link, "../") { - c, err = ReadNode(ctx, n.lu, filepath.Base(link)) - if err != nil { - return nil, errors.Wrap(err, "could not read child node") - } - c.SpaceRoot = n.SpaceRoot - } else { - return nil, fmt.Errorf("decomposedfs: expected '../ prefix, got' %+v", link) - } - - return c, nil -} - -// Parent returns the parent node. -func (n *Node) Parent() (p *Node, err error) { - if n.ParentID == "" { - return nil, fmt.Errorf("decomposedfs: root has no parent") - } - p = &Node{ - lu: n.lu, - ID: n.ParentID, - SpaceRoot: n.SpaceRoot, - } - - parentPath := n.lu.InternalPath(n.ParentID) - - // lookup parent id in extended attributes - var attrBytes []byte - if attrBytes, err = xattr.Get(parentPath, xattrs.ParentidAttr); err == nil { - p.ParentID = string(attrBytes) - } else { - return - } - // lookup name in extended attributes - if attrBytes, err = xattr.Get(parentPath, xattrs.NameAttr); err == nil { - p.Name = string(attrBytes) - } else { - return - } - - // check node exists - if _, err := os.Stat(parentPath); err == nil { - p.Exists = true - } - return -} - -// Owner returns the cached owner id or reads it from the extended attributes -// TODO can be private as only the AsResourceInfo uses it. -func (n *Node) Owner() (*userpb.UserId, error) { - if n.owner != nil { - return n.owner, nil - } - - owner := &userpb.UserId{} - - // FIXME ... do we return the owner of the reference or the owner of the target? - // we don't really know the owner of the target ... 
and as the reference may point anywhere we cannot really find out - // but what are the permissions? all? none? the gateway has to fill in? - // TODO what if this is a reference? - nodePath := n.InternalPath() - // lookup parent id in extended attributes - var attrBytes []byte - var err error - // lookup ID in extended attributes - attrBytes, err = xattr.Get(nodePath, xattrs.OwnerIDAttr) - switch { - case err == nil: - owner.OpaqueId = string(attrBytes) - case isAttrUnset(err), isNotFound(err): - fallthrough - default: - return nil, err - } - - // lookup IDP in extended attributes - attrBytes, err = xattr.Get(nodePath, xattrs.OwnerIDPAttr) - switch { - case err == nil: - owner.Idp = string(attrBytes) - case isAttrUnset(err), isNotFound(err): - fallthrough - default: - return nil, err - } - - // lookup type in extended attributes - attrBytes, err = xattr.Get(nodePath, xattrs.OwnerTypeAttr) - switch { - case err == nil: - owner.Type = utils.UserTypeMap(string(attrBytes)) - case isAttrUnset(err), isNotFound(err): - fallthrough - default: - // TODO the user type defaults to invalid, which is the case - err = nil - } - - n.owner = owner - return n.owner, err -} - -// PermissionSet returns the permission set for the current user -// the parent nodes are not taken into account. -func (n *Node) PermissionSet(ctx context.Context) provider.ResourcePermissions { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("no user in context, returning default permissions") - return NoPermissions() - } - if o, _ := n.Owner(); utils.UserEqual(u.Id, o) { - return OwnerPermissions() - } - // read the permissions for the current user from the acls of the current node - if np, err := n.ReadUserPermissions(ctx, u); err == nil { - return np - } - return NoPermissions() -} - -// InternalPath returns the internal path of the Node. 
-func (n *Node) InternalPath() string { - return n.lu.InternalPath(n.ID) -} - -// CalculateEtag returns a hash of fileid + tmtime (or mtime). -func CalculateEtag(nodeID string, tmTime time.Time) (string, error) { - return calculateEtag(nodeID, tmTime) -} - -// calculateEtag returns a hash of fileid + tmtime (or mtime). -func calculateEtag(nodeID string, tmTime time.Time) (string, error) { - h := md5.New() - if _, err := io.WriteString(h, nodeID); err != nil { - return "", err - } - if tb, err := tmTime.UTC().MarshalBinary(); err == nil { - if _, err := h.Write(tb); err != nil { - return "", err - } - } else { - return "", err - } - return fmt.Sprintf(`"%x"`, h.Sum(nil)), nil -} - -// SetMtime sets the mtime and atime of a node. -func (n *Node) SetMtime(ctx context.Context, mtime string) error { - sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - if mt, err := parseMTime(mtime); err == nil { - nodePath := n.lu.InternalPath(n.ID) - // updating mtime also updates atime - if err := os.Chtimes(nodePath, mt, mt); err != nil { - sublog.Error().Err(err). - Time("mtime", mt). - Msg("could not set mtime") - return errors.Wrap(err, "could not set mtime") - } - } else { - sublog.Error().Err(err). - Str("mtime", mtime). - Msg("could not parse mtime") - return errors.Wrap(err, "could not parse mtime") - } - return nil -} - -// SetEtag sets the temporary etag of a node if it differs from the current etag. 
-func (n *Node) SetEtag(ctx context.Context, val string) (err error) { - sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - nodePath := n.lu.InternalPath(n.ID) - var tmTime time.Time - if tmTime, err = n.GetTMTime(); err != nil { - // no tmtime, use mtime - var fi os.FileInfo - if fi, err = os.Lstat(nodePath); err != nil { - return - } - tmTime = fi.ModTime() - } - var etag string - if etag, err = calculateEtag(n.ID, tmTime); err != nil { - return - } - - // sanitize etag - val = fmt.Sprintf("\"%s\"", strings.Trim(val, "\"")) - if etag == val { - sublog.Debug(). - Str("etag", val). - Msg("ignoring request to update identical etag") - return nil - } - // etag is only valid until the calculated etag changes, is part of propagation - return xattr.Set(nodePath, xattrs.TmpEtagAttr, []byte(val)) -} - -// SetFavorite sets the favorite for the current user -// TODO we should not mess with the user here ... the favorites is now a user specific property for a file -// that cannot be mapped to extended attributes without leaking who has marked a file as a favorite -// it is a specific case of a tag, which is user individual as well -// TODO there are different types of tags -// 1. public that are managed by everyone -// 2. private tags that are only visible to the user -// 3. system tags that are only visible to the system -// 4. group tags that are only visible to a group ... -// urgh ... well this can be solved using different namespaces -// 1. public = p: -// 2. private = u:: for user specific -// 3. system = s: for system -// 4. group = g:: -// 5. app? = a:: for apps? -// obviously this only is secure when the u/s/g/a namespaces are not accessible by users in the filesystem -// public tags can be mapped to extended attributes. 
-func (n *Node) SetFavorite(uid *userpb.UserId, val string) error { - nodePath := n.lu.InternalPath(n.ID) - // the favorite flag is specific to the user, so we need to incorporate the userid - fa := fmt.Sprintf("%s:%s:%s@%s", xattrs.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) - return xattr.Set(nodePath, fa, []byte(val)) -} - -// AsResourceInfo return the node as CS3 ResourceInfo. -func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissions, mdKeys []string, returnBasename bool) (ri *provider.ResourceInfo, err error) { - sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - - var fn string - nodePath := n.lu.InternalPath(n.ID) - - var fi os.FileInfo - - nodeType := provider.ResourceType_RESOURCE_TYPE_INVALID - if fi, err = os.Lstat(nodePath); err != nil { - return - } - - var target []byte - switch { - case fi.IsDir(): - if target, err = xattr.Get(nodePath, xattrs.ReferenceAttr); err == nil { - nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE - } else { - nodeType = provider.ResourceType_RESOURCE_TYPE_CONTAINER - } - case fi.Mode().IsRegular(): - nodeType = provider.ResourceType_RESOURCE_TYPE_FILE - case fi.Mode()&os.ModeSymlink != 0: - nodeType = provider.ResourceType_RESOURCE_TYPE_SYMLINK - // TODO reference using ext attr on a symlink - // nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE - } - - id := &provider.ResourceId{OpaqueId: n.ID} - - if returnBasename { - fn = n.Name - } else { - fn, err = n.lu.Path(ctx, n) - if err != nil { - return nil, err - } - } - - ri = &provider.ResourceInfo{ - Id: id, - Path: fn, - Type: nodeType, - MimeType: mime.Detect(nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER, fn), - Size: uint64(n.Blobsize), - Target: string(target), - PermissionSet: rp, - } - - if nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER { - ts, err := n.GetTreeSize() - if err == nil { - ri.Size = ts - } else { - ri.Size = 0 // make dirs always 
return 0 if it is unknown - sublog.Debug().Err(err).Msg("could not read treesize") - } - } - - if ri.Owner, err = n.Owner(); err != nil { - sublog.Debug().Err(err).Msg("could not determine owner") - } - - // TODO make etag of files use fileid and checksum - - var tmTime time.Time - if tmTime, err = n.GetTMTime(); err != nil { - // no tmtime, use mtime - tmTime = fi.ModTime() - } - - // use temporary etag if it is set - if b, err := xattr.Get(nodePath, xattrs.TmpEtagAttr); err == nil { - ri.Etag = fmt.Sprintf(`"%x"`, string(b)) // TODO why do we convert string(b)? is the temporary etag stored as string? -> should we use bytes? use hex.EncodeToString? - } else if ri.Etag, err = calculateEtag(n.ID, tmTime); err != nil { - sublog.Debug().Err(err).Msg("could not calculate etag") - } - - // mtime uses tmtime if present - // TODO expose mtime and tmtime separately? - un := tmTime.UnixNano() - ri.Mtime = &types.Timestamp{ - Seconds: uint64(un / 1000000000), - Nanos: uint32(un % 1000000000), - } - - mdKeysMap := make(map[string]struct{}) - for _, k := range mdKeys { - mdKeysMap[k] = struct{}{} - } - - var returnAllKeys bool - if _, ok := mdKeysMap["*"]; len(mdKeys) == 0 || ok { - returnAllKeys = true - } - - metadata := map[string]string{} - - // read favorite flag for the current user - if _, ok := mdKeysMap[FavoriteKey]; returnAllKeys || ok { - favorite := "" - if u, ok := ctxpkg.ContextGetUser(ctx); ok { - // the favorite flag is specific to the user, so we need to incorporate the userid - if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s:%s:%s@%s", xattrs.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) - if val, err := xattr.Get(nodePath, fa); err == nil { - sublog.Debug(). - Str("favorite", fa). 
- Msg("found favorite flag") - favorite = string(val) - } - } else { - sublog.Error().Err(errtypes.UserRequired("userrequired")).Msg("user has no id") - } - } else { - sublog.Error().Err(errtypes.UserRequired("userrequired")).Msg("error getting user from ctx") - } - metadata[FavoriteKey] = favorite - } - - // share indicator - if _, ok := mdKeysMap[ShareTypesKey]; returnAllKeys || ok { - if n.hasUserShares(ctx) { - metadata[ShareTypesKey] = UserShareType - } - } - - // checksums - if _, ok := mdKeysMap[ChecksumsKey]; (nodeType == provider.ResourceType_RESOURCE_TYPE_FILE) && returnAllKeys || ok { - // TODO which checksum was requested? sha1 adler32 or md5? for now hardcode sha1? - readChecksumIntoResourceChecksum(ctx, nodePath, storageprovider.XSSHA1, ri) - readChecksumIntoOpaque(ctx, nodePath, storageprovider.XSMD5, ri) - readChecksumIntoOpaque(ctx, nodePath, storageprovider.XSAdler32, ri) - } - // quota - if _, ok := mdKeysMap[QuotaKey]; (nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER) && returnAllKeys || ok { - var quotaPath string - if n.SpaceRoot == nil { - root, err := n.lu.HomeOrRootNode(ctx) - if err == nil { - quotaPath = root.InternalPath() - } else { - sublog.Debug().Err(err).Msg("error determining the space root node for quota") - } - } else { - quotaPath = n.SpaceRoot.InternalPath() - } - if quotaPath != "" { - readQuotaIntoOpaque(ctx, quotaPath, ri) - } - } - - // only read the requested metadata attributes - attrs, err := xattr.List(nodePath) - if err != nil { - sublog.Error().Err(err).Msg("error getting list of extended attributes") - } else { - for i := range attrs { - // filter out non-custom properties - if !strings.HasPrefix(attrs[i], xattrs.MetadataPrefix) { - continue - } - // only read when key was requested - k := attrs[i][len(xattrs.MetadataPrefix):] - if _, ok := mdKeysMap[k]; returnAllKeys || ok { - if val, err := xattr.Get(nodePath, attrs[i]); err == nil { - metadata[k] = string(val) - } else { - sublog.Error().Err(err). 
- Str("entry", attrs[i]). - Msg("error retrieving xattr metadata") - } - } - } - } - ri.ArbitraryMetadata = &provider.ArbitraryMetadata{ - Metadata: metadata, - } - - sublog.Debug(). - Interface("ri", ri). - Msg("AsResourceInfo") - - return ri, nil -} - -func readChecksumIntoResourceChecksum(ctx context.Context, nodePath, algo string, ri *provider.ResourceInfo) { - v, err := xattr.Get(nodePath, xattrs.ChecksumPrefix+algo) - switch { - case err == nil: - ri.Checksum = &provider.ResourceChecksum{ - Type: storageprovider.PKG2GRPCXS(algo), - Sum: hex.EncodeToString(v), - } - case isAttrUnset(err): - appctx.GetLogger(ctx).Debug().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("checksum not set") - case isNotFound(err): - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("file not fount") - default: - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("could not read checksum") - } -} - -func readChecksumIntoOpaque(ctx context.Context, nodePath, algo string, ri *provider.ResourceInfo) { - v, err := xattr.Get(nodePath, xattrs.ChecksumPrefix+algo) - switch { - case err == nil: - if ri.Opaque == nil { - ri.Opaque = &types.Opaque{ - Map: map[string]*types.OpaqueEntry{}, - } - } - ri.Opaque.Map[algo] = &types.OpaqueEntry{ - Decoder: "plain", - Value: []byte(hex.EncodeToString(v)), - } - case isAttrUnset(err): - appctx.GetLogger(ctx).Debug().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("checksum not set") - case isNotFound(err): - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("file not fount") - default: - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("could not read checksum") - } -} - -// quota is always stored on the root node. 
-func readQuotaIntoOpaque(ctx context.Context, nodePath string, ri *provider.ResourceInfo) { - v, err := xattr.Get(nodePath, xattrs.QuotaAttr) - switch { - case err == nil: - // make sure we have a proper signed int - // we use the same magic numbers to indicate: - // -1 = uncalculated - // -2 = unknown - // -3 = unlimited - if _, err := strconv.ParseInt(string(v), 10, 64); err == nil { - if ri.Opaque == nil { - ri.Opaque = &types.Opaque{ - Map: map[string]*types.OpaqueEntry{}, - } - } - ri.Opaque.Map[QuotaKey] = &types.OpaqueEntry{ - Decoder: "plain", - Value: v, - } - } else { - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("quota", string(v)).Msg("malformed quota") - } - case isAttrUnset(err): - appctx.GetLogger(ctx).Debug().Err(err).Str("nodepath", nodePath).Msg("quota not set") - case isNotFound(err): - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("file not found when reading quota") - default: - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not read quota") - } -} - -// HasPropagation checks if the propagation attribute exists and is set to "1". -func (n *Node) HasPropagation() (propagation bool) { - if b, err := xattr.Get(n.lu.InternalPath(n.ID), xattrs.PropagationAttr); err == nil { - return string(b) == "1" - } - return false -} - -// GetTMTime reads the tmtime from the extended attributes. -func (n *Node) GetTMTime() (tmTime time.Time, err error) { - var b []byte - if b, err = xattr.Get(n.lu.InternalPath(n.ID), xattrs.TreeMTimeAttr); err != nil { - return - } - return time.Parse(time.RFC3339Nano, string(b)) -} - -// SetTMTime writes the tmtime to the extended attributes. -func (n *Node) SetTMTime(t time.Time) (err error) { - return xattr.Set(n.lu.InternalPath(n.ID), xattrs.TreeMTimeAttr, []byte(t.UTC().Format(time.RFC3339Nano))) -} - -// GetTreeSize reads the treesize from the extended attributes. 
-func (n *Node) GetTreeSize() (treesize uint64, err error) { - var b []byte - if b, err = xattr.Get(n.InternalPath(), xattrs.TreesizeAttr); err != nil { - return - } - return strconv.ParseUint(string(b), 10, 64) -} - -// SetTreeSize writes the treesize to the extended attributes. -func (n *Node) SetTreeSize(ts uint64) (err error) { - return xattr.Set(n.InternalPath(), xattrs.TreesizeAttr, []byte(strconv.FormatUint(ts, 10))) -} - -// SetChecksum writes the checksum with the given checksum type to the extended attributes. -func (n *Node) SetChecksum(csType string, h hash.Hash) (err error) { - return xattr.Set(n.lu.InternalPath(n.ID), xattrs.ChecksumPrefix+csType, h.Sum(nil)) -} - -// UnsetTempEtag removes the temporary etag attribute. -func (n *Node) UnsetTempEtag() (err error) { - if err = xattr.Remove(n.lu.InternalPath(n.ID), xattrs.TmpEtagAttr); err != nil { - if e, ok := err.(*xattr.Error); ok && (e.Err.Error() == "no data available" || - // darwin - e.Err.Error() == "attribute not found") { - return nil - } - } - return err -} - -// ReadUserPermissions will assemble the permissions for the current user on the given node without parent nodes. -func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap provider.ResourcePermissions, err error) { - // check if the current user is the owner - o, err := n.Owner() - if err != nil { - // TODO check if a parent folder has the owner set? - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not determine owner, returning default permissions") - return NoPermissions(), err - } - if o.OpaqueId == "" { - // this happens for root nodes in the storage. the extended attributes are set to emptystring to indicate: no owner - // TODO what if no owner is set but grants are present? 
- return NoOwnerPermissions(), nil - } - if utils.UserEqual(u.Id, o) { - appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("user is owner, returning owner permissions") - return OwnerPermissions(), nil - } - - ap = provider.ResourcePermissions{} - - // for an efficient group lookup convert the list of groups to a map - // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! - groupsMap := make(map[string]bool, len(u.Groups)) - for i := range u.Groups { - groupsMap[u.Groups[i]] = true - } - - var g *provider.Grant - - // we read all grantees from the node - var grantees []string - if grantees, err = n.ListGrantees(ctx); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("error listing grantees") - return NoPermissions(), err - } - - // instead of making n getxattr syscalls we are going to list the acls and filter them here - // we have two options here: - // 1. we can start iterating over the acls / grants on the node or - // 2. we can iterate over the number of groups - // The current implementation tries to be defensive for cases where users have hundreds or thousands of groups, so we iterate over the existing acls. 
- userace := xattrs.GrantPrefix + xattrs.UserAcePrefix + u.Id.OpaqueId - userFound := false - for i := range grantees { - switch { - // we only need to find the user once - case !userFound && grantees[i] == userace: - g, err = n.ReadGrant(ctx, grantees[i]) - case strings.HasPrefix(grantees[i], xattrs.GrantPrefix+xattrs.GroupAcePrefix): // only check group grantees - gr := strings.TrimPrefix(grantees[i], xattrs.GrantPrefix+xattrs.GroupAcePrefix) - if groupsMap[gr] { - g, err = n.ReadGrant(ctx, grantees[i]) - } else { - // no need to check attribute - continue - } - default: - // no need to check attribute - continue - } - - switch { - case err == nil: - AddPermissions(&ap, g.GetPermissions()) - case isAttrUnset(err): - err = nil - appctx.GetLogger(ctx).Error().Interface("node", n).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing") - // continue with next segment - default: - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Str("grant", grantees[i]).Msg("error reading permissions") - // continue with next segment - } - } - - appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Interface("node", n).Interface("user", u).Msg("returning aggregated permissions") - return ap, nil -} - -// ListGrantees lists the grantees of the current node -// We don't want to wast time and memory by creating grantee objects. -// The function will return a list of opaque strings that can be used to make a ReadGrant call. -func (n *Node) ListGrantees(ctx context.Context) (grantees []string, err error) { - var attrs []string - if attrs, err = xattr.List(n.InternalPath()); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("error listing attributes") - return nil, err - } - for i := range attrs { - if strings.HasPrefix(attrs[i], xattrs.GrantPrefix) { - grantees = append(grantees, attrs[i]) - } - } - return -} - -// ReadGrant reads a CS3 grant. 
-func (n *Node) ReadGrant(ctx context.Context, grantee string) (g *provider.Grant, err error) { - var b []byte - if b, err = xattr.Get(n.InternalPath(), grantee); err != nil { - return nil, err - } - var e *ace.ACE - if e, err = ace.Unmarshal(strings.TrimPrefix(grantee, xattrs.GrantPrefix), b); err != nil { - return nil, err - } - return e.Grant(), nil -} - -// ReadBlobSizeAttr reads the blobsize from the xattrs. -func ReadBlobSizeAttr(path string) (int64, error) { - attrBytes, err := xattr.Get(path, xattrs.BlobsizeAttr) - if err != nil { - return 0, errors.Wrapf(err, "error reading blobsize xattr") - } - blobSize, err := strconv.ParseInt(string(attrBytes), 10, 64) - if err != nil { - return 0, errors.Wrapf(err, "invalid blobsize xattr format") - } - return blobSize, nil -} - -func (n *Node) hasUserShares(ctx context.Context) bool { - g, err := n.ListGrantees(ctx) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Msg("hasUserShares: listGrantees") - return false - } - - for i := range g { - if strings.Contains(g[i], xattrs.GrantPrefix+xattrs.UserAcePrefix) { - return true - } - } - return false -} - -func parseMTime(v string) (t time.Time, err error) { - p := strings.SplitN(v, ".", 2) - var sec, nsec int64 - if sec, err = strconv.ParseInt(p[0], 10, 64); err == nil { - if len(p) > 1 { - nsec, err = strconv.ParseInt(p[1], 10, 64) - } - } - return time.Unix(sec, nsec), err -} - -// FindStorageSpaceRoot calls n.Parent() and climbs the tree -// until it finds the space root node and adds it to the node. -func (n *Node) FindStorageSpaceRoot() error { - var err error - // remember the node we ask for and use parent to climb the tree - parent := n - for parent.ParentID != "" { - if parent, err = parent.Parent(); err != nil { - return err - } - if IsSpaceRoot(parent) { - n.SpaceRoot = parent - break - } - } - return nil -} - -// IsSpaceRoot checks if the node is a space root. 
-func IsSpaceRoot(r *Node) bool { - path := r.InternalPath() - if spaceNameBytes, err := xattr.Get(path, xattrs.SpaceNameAttr); err == nil { - if string(spaceNameBytes) != "" { - return true - } - } - return false -} - -// CheckQuota checks if both disk space and available quota are sufficient. -var CheckQuota = func(spaceRoot *Node, fileSize uint64) (quotaSufficient bool, err error) { - used, _ := spaceRoot.GetTreeSize() - if !enoughDiskSpace(spaceRoot.InternalPath(), fileSize) { - return false, errtypes.InsufficientStorage("disk full") - } - quotaByte, _ := xattr.Get(spaceRoot.InternalPath(), xattrs.QuotaAttr) - var total uint64 - if quotaByte == nil { - // if quota is not set, it means unlimited - return true, nil - } - total, _ = strconv.ParseUint(string(quotaByte), 10, 64) - // if total is smaller than used, total-used could overflow and be bigger than fileSize - if fileSize > total-used || total < used { - return false, errtypes.InsufficientStorage("quota exceeded") - } - return true, nil -} - -func enoughDiskSpace(path string, fileSize uint64) bool { - avalB, err := GetAvailableSize(path) - if err != nil { - return false - } - return avalB > fileSize -} diff --git a/pkg/storage/utils/decomposedfs/node/node_suite_test.go b/pkg/storage/utils/decomposedfs/node/node_suite_test.go deleted file mode 100644 index 0030f54278..0000000000 --- a/pkg/storage/utils/decomposedfs/node/node_suite_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package node_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestNode(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Node Suite") -} diff --git a/pkg/storage/utils/decomposedfs/node/node_test.go b/pkg/storage/utils/decomposedfs/node/node_test.go deleted file mode 100644 index 3ece643caa..0000000000 --- a/pkg/storage/utils/decomposedfs/node/node_test.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package node_test - -import ( - "time" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - helpers "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/testhelpers" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Node", func() { - var ( - env *helpers.TestEnv - - id string - name string - ) - - BeforeEach(func() { - var err error - env, err = helpers.NewTestEnv() - Expect(err).ToNot(HaveOccurred()) - - id = "fooId" - name = "foo" - }) - - AfterEach(func() { - if env != nil { - env.Cleanup() - } - }) - - Describe("New", func() { - It("generates unique blob ids if none are given", func() { - n1 := node.New(id, "", name, 10, "", env.Owner.Id, env.Lookup) - n2 := node.New(id, "", name, 10, "", env.Owner.Id, env.Lookup) - - Expect(len(n1.BlobID)).To(Equal(36)) - Expect(n1.BlobID).ToNot(Equal(n2.BlobID)) - }) - }) - - Describe("ReadNode", func() { - It("reads the blobID from the xattrs", func() { - lookupNode, err := env.Lookup.NodeFromPath(env.Ctx, "/dir1/file1", false) - Expect(err).ToNot(HaveOccurred()) - - n, err := node.ReadNode(env.Ctx, env.Lookup, lookupNode.ID) - Expect(err).ToNot(HaveOccurred()) - Expect(n.BlobID).To(Equal("file1-blobid")) - }) - }) - - Describe("WriteMetadata", func() { - It("writes all xattrs", func() { - n, err := env.Lookup.NodeFromPath(env.Ctx, "/dir1/file1", false) - Expect(err).ToNot(HaveOccurred()) - - blobsize := 239485734 - n.Name = "TestName" - n.BlobID = "TestBlobID" - n.Blobsize = int64(blobsize) - owner := &userpb.UserId{ - Idp: "testidp", - OpaqueId: "testuserid", - Type: userpb.UserType_USER_TYPE_PRIMARY, - } - - err = n.WriteMetadata(owner) - Expect(err).ToNot(HaveOccurred()) - n2, err := env.Lookup.NodeFromPath(env.Ctx, "/dir1/file1", false) - Expect(err).ToNot(HaveOccurred()) - Expect(n2.Name).To(Equal("TestName")) - Expect(n2.BlobID).To(Equal("TestBlobID")) - Expect(n2.Blobsize).To(Equal(int64(blobsize))) - }) - }) - - Describe("Parent", func() { - It("returns the parent node", func() { - child, err := env.Lookup.NodeFromPath(env.Ctx, "/dir1/subdir1", false) - Expect(err).ToNot(HaveOccurred()) - Expect(child).ToNot(BeNil()) - - parent, err := child.Parent() - 
Expect(err).ToNot(HaveOccurred()) - Expect(parent).ToNot(BeNil()) - Expect(parent.ID).To(Equal(child.ParentID)) - }) - }) - - Describe("Child", func() { - var ( - parent *node.Node - ) - - BeforeEach(func() { - var err error - parent, err = env.Lookup.NodeFromPath(env.Ctx, "/dir1", false) - Expect(err).ToNot(HaveOccurred()) - Expect(parent).ToNot(BeNil()) - }) - - It("returns an empty node if the child does not exist", func() { - child, err := parent.Child(env.Ctx, "does-not-exist") - Expect(err).ToNot(HaveOccurred()) - Expect(child).ToNot(BeNil()) - Expect(child.Exists).To(BeFalse()) - }) - - It("returns a directory node with all metadata", func() { - child, err := parent.Child(env.Ctx, "subdir1") - Expect(err).ToNot(HaveOccurred()) - Expect(child).ToNot(BeNil()) - Expect(child.Exists).To(BeTrue()) - Expect(child.ParentID).To(Equal(parent.ID)) - Expect(child.Name).To(Equal("subdir1")) - Expect(child.Blobsize).To(Equal(int64(0))) - }) - - It("returns a file node with all metadata", func() { - child, err := parent.Child(env.Ctx, "file1") - Expect(err).ToNot(HaveOccurred()) - Expect(child).ToNot(BeNil()) - Expect(child.Exists).To(BeTrue()) - Expect(child.ParentID).To(Equal(parent.ID)) - Expect(child.Name).To(Equal("file1")) - Expect(child.Blobsize).To(Equal(int64(1234))) - }) - - It("handles (broken) links including file segments by returning an non-existent node", func() { - child, err := parent.Child(env.Ctx, "file1/broken") - Expect(err).ToNot(HaveOccurred()) - Expect(child).ToNot(BeNil()) - Expect(child.Exists).To(BeFalse()) - }) - }) - - Describe("AsResourceInfo", func() { - var ( - n *node.Node - ) - - BeforeEach(func() { - var err error - n, err = env.Lookup.NodeFromPath(env.Ctx, "dir1/file1", false) - Expect(err).ToNot(HaveOccurred()) - }) - - Describe("the Etag field", func() { - It("is set", func() { - perms := node.OwnerPermissions() - ri, err := n.AsResourceInfo(env.Ctx, &perms, []string{}, false) - Expect(err).ToNot(HaveOccurred()) - 
Expect(len(ri.Etag)).To(Equal(34)) - }) - - It("changes when the tmtime is set", func() { - perms := node.OwnerPermissions() - ri, err := n.AsResourceInfo(env.Ctx, &perms, []string{}, false) - Expect(err).ToNot(HaveOccurred()) - Expect(len(ri.Etag)).To(Equal(34)) - before := ri.Etag - - Expect(n.SetTMTime(time.Now().UTC())).To(Succeed()) - - ri, err = n.AsResourceInfo(env.Ctx, &perms, []string{}, false) - Expect(err).ToNot(HaveOccurred()) - Expect(len(ri.Etag)).To(Equal(34)) - Expect(ri.Etag).ToNot(Equal(before)) - }) - }) - }) -}) diff --git a/pkg/storage/utils/decomposedfs/node/node_windows.go b/pkg/storage/utils/decomposedfs/node/node_windows.go deleted file mode 100644 index db11894b29..0000000000 --- a/pkg/storage/utils/decomposedfs/node/node_windows.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -//go:build windows -// +build windows - -package node - -import "golang.org/x/sys/windows" - -// GetAvailableSize stats the filesystem and return the available bytes -func GetAvailableSize(path string) (uint64, error) { - var free, total, avail uint64 - pathPtr, err := windows.UTF16PtrFromString(path) - if err != nil { - return 0, err - } - err = windows.GetDiskFreeSpaceEx(pathPtr, &avail, &total, &free) - if err != nil { - return 0, err - } - return avail, nil -} diff --git a/pkg/storage/utils/decomposedfs/node/permissions.go b/pkg/storage/utils/decomposedfs/node/permissions.go deleted file mode 100644 index 66662c9740..0000000000 --- a/pkg/storage/utils/decomposedfs/node/permissions.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package node - -import ( - "context" - "strings" - "syscall" - - userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/cs3org/reva/pkg/utils" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -// NoPermissions represents an empty set of permissions. -func NoPermissions() provider.ResourcePermissions { - return provider.ResourcePermissions{} -} - -// NoOwnerPermissions defines permissions for nodes that don't have an owner set, eg the root node. -func NoOwnerPermissions() provider.ResourcePermissions { - return provider.ResourcePermissions{ - Stat: true, - } -} - -// ShareFolderPermissions defines permissions for the shared jail. -func ShareFolderPermissions() provider.ResourcePermissions { - return provider.ResourcePermissions{ - // read permissions - ListContainer: true, - Stat: true, - InitiateFileDownload: true, - GetPath: true, - GetQuota: true, - ListFileVersions: true, - } -} - -// OwnerPermissions defines permissions for nodes owned by the user. -func OwnerPermissions() provider.ResourcePermissions { - return provider.ResourcePermissions{ - // all permissions - AddGrant: true, - CreateContainer: true, - Delete: true, - GetPath: true, - GetQuota: true, - InitiateFileDownload: true, - InitiateFileUpload: true, - ListContainer: true, - ListFileVersions: true, - ListGrants: true, - ListRecycle: true, - Move: true, - PurgeRecycle: true, - RemoveGrant: true, - RestoreFileVersion: true, - RestoreRecycleItem: true, - Stat: true, - UpdateGrant: true, - } -} - -// Permissions implements permission checks. -type Permissions struct { - lu PathLookup -} - -// NewPermissions returns a new Permissions instance. 
-func NewPermissions(lu PathLookup) *Permissions { - return &Permissions{ - lu: lu, - } -} - -// AssemblePermissions will assemble the permissions for the current user on the given node, taking into account all parent nodes. -func (p *Permissions) AssemblePermissions(ctx context.Context, n *Node) (ap provider.ResourcePermissions, err error) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("no user in context, returning default permissions") - return NoPermissions(), nil - } - // check if the current user is the owner - o, err := n.Owner() - if err != nil { - // TODO check if a parent folder has the owner set? - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not determine owner, returning default permissions") - return NoPermissions(), err - } - if o.OpaqueId == "" { - // this happens for root nodes in the storage. the extended attributes are set to emptystring to indicate: no owner - // TODO what if no owner is set but grants are present? - return NoOwnerPermissions(), nil - } - if utils.UserEqual(u.Id, o) { - lp, err := n.lu.Path(ctx, n) - if err == nil && lp == n.lu.ShareFolder() { - return ShareFolderPermissions(), nil - } - appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("user is owner, returning owner permissions") - return OwnerPermissions(), nil - } - // determine root - var rn *Node - if rn, err = p.lu.RootNode(ctx); err != nil { - return NoPermissions(), err - } - - cn := n - - ap = provider.ResourcePermissions{} - - // for an efficient group lookup convert the list of groups to a map - // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! 
- groupsMap := make(map[string]bool, len(u.Groups)) - for i := range u.Groups { - groupsMap[u.Groups[i]] = true - } - - // for all segments, starting at the leaf - for cn.ID != rn.ID { - if np, err := cn.ReadUserPermissions(ctx, u); err == nil { - AddPermissions(&ap, &np) - } else { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", cn).Msg("error reading permissions") - // continue with next segment - } - if cn, err = cn.Parent(); err != nil { - return ap, errors.Wrap(err, "decomposedfs: error getting parent "+cn.ParentID) - } - } - - appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Interface("node", n).Interface("user", u).Msg("returning agregated permissions") - return ap, nil -} - -// AddPermissions merges a set of permissions into another -// TODO we should use a bitfield for this ... -func AddPermissions(l *provider.ResourcePermissions, r *provider.ResourcePermissions) { - l.AddGrant = l.AddGrant || r.AddGrant - l.CreateContainer = l.CreateContainer || r.CreateContainer - l.Delete = l.Delete || r.Delete - l.GetPath = l.GetPath || r.GetPath - l.GetQuota = l.GetQuota || r.GetQuota - l.InitiateFileDownload = l.InitiateFileDownload || r.InitiateFileDownload - l.InitiateFileUpload = l.InitiateFileUpload || r.InitiateFileUpload - l.ListContainer = l.ListContainer || r.ListContainer - l.ListFileVersions = l.ListFileVersions || r.ListFileVersions - l.ListGrants = l.ListGrants || r.ListGrants - l.ListRecycle = l.ListRecycle || r.ListRecycle - l.Move = l.Move || r.Move - l.PurgeRecycle = l.PurgeRecycle || r.PurgeRecycle - l.RemoveGrant = l.RemoveGrant || r.RemoveGrant - l.RestoreFileVersion = l.RestoreFileVersion || r.RestoreFileVersion - l.RestoreRecycleItem = l.RestoreRecycleItem || r.RestoreRecycleItem - l.Stat = l.Stat || r.Stat - l.UpdateGrant = l.UpdateGrant || r.UpdateGrant -} - -// HasPermission call check() for every node up to the root until check returns true. 
-func (p *Permissions) HasPermission(ctx context.Context, n *Node, check func(*provider.ResourcePermissions) bool) (can bool, err error) { - var u *userv1beta1.User - var perms *provider.ResourcePermissions - if u, perms = p.getUserAndPermissions(ctx, n); perms != nil { - return check(perms), nil - } - - // determine root - var rn *Node - if rn, err = p.lu.RootNode(ctx); err != nil { - return false, err - } - - cn := n - - // for an efficient group lookup convert the list of groups to a map - // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! - groupsMap := make(map[string]bool, len(u.Groups)) - for i := range u.Groups { - groupsMap[u.Groups[i]] = true - } - - var g *provider.Grant - // for all segments, starting at the leaf - for cn.ID != rn.ID { - var grantees []string - if grantees, err = cn.ListGrantees(ctx); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", cn).Msg("error listing grantees") - return false, err - } - - userace := xattrs.GrantPrefix + xattrs.UserAcePrefix + u.Id.OpaqueId - userFound := false - for i := range grantees { - // we only need the find the user once per node - switch { - case !userFound && grantees[i] == userace: - g, err = cn.ReadGrant(ctx, grantees[i]) - case strings.HasPrefix(grantees[i], xattrs.GrantPrefix+xattrs.GroupAcePrefix): - gr := strings.TrimPrefix(grantees[i], xattrs.GrantPrefix+xattrs.GroupAcePrefix) - if groupsMap[gr] { - g, err = cn.ReadGrant(ctx, grantees[i]) - } else { - // no need to check attribute - continue - } - default: - // no need to check attribute - continue - } - - switch { - case err == nil: - appctx.GetLogger(ctx).Debug().Interface("node", cn).Str("grant", grantees[i]).Interface("permissions", g.GetPermissions()).Msg("checking permissions") - if check(g.GetPermissions()) { - return true, nil - } - case isAttrUnset(err): - err = nil - appctx.GetLogger(ctx).Error().Interface("node", cn).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished 
from node after listing") - default: - appctx.GetLogger(ctx).Error().Err(err).Interface("node", cn).Str("grant", grantees[i]).Msg("error reading permissions") - return false, err - } - } - - if cn, err = cn.Parent(); err != nil { - return false, errors.Wrap(err, "decomposedfs: error getting parent "+cn.ParentID) - } - } - - appctx.GetLogger(ctx).Debug().Interface("permissions", NoPermissions()).Interface("node", n).Interface("user", u).Msg("no grant found, returning default permissions") - return false, nil -} - -func (p *Permissions) getUserAndPermissions(ctx context.Context, n *Node) (*userv1beta1.User, *provider.ResourcePermissions) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("no user in context, returning default permissions") - perms := NoPermissions() - return nil, &perms - } - // check if the current user is the owner - o, err := n.Owner() - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not determine owner, returning default permissions") - perms := NoPermissions() - return nil, &perms - } - if o.OpaqueId == "" { - // this happens for root nodes in the storage. the extended attributes are set to emptystring to indicate: no owner - // TODO what if no owner is set but grants are present? - perms := NoOwnerPermissions() - return nil, &perms - } - if utils.UserEqual(u.Id, o) { - appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("user is owner, returning owner permissions") - perms := OwnerPermissions() - return u, &perms - } - return u, nil -} - -// The os not exists error is buried inside the xattr error, -// so we cannot just use os.IsNotExists(). 
-func isNotFound(err error) bool { - if xerr, ok := err.(*xattr.Error); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENOENT - } - } - return false -} diff --git a/pkg/storage/utils/decomposedfs/node/permissions_unix.go b/pkg/storage/utils/decomposedfs/node/permissions_unix.go deleted file mode 100644 index 4260a3b002..0000000000 --- a/pkg/storage/utils/decomposedfs/node/permissions_unix.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -//go:build !darwin -// +build !darwin - -package node - -import ( - "syscall" - - "github.com/pkg/xattr" -) - -func isAttrUnset(err error) bool { - if xerr, ok := err.(*xattr.Error); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENODATA - } - } - return false -} diff --git a/pkg/storage/utils/decomposedfs/options/options.go b/pkg/storage/utils/decomposedfs/options/options.go deleted file mode 100644 index 66e981163e..0000000000 --- a/pkg/storage/utils/decomposedfs/options/options.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package options - -import ( - "path/filepath" - "strings" - - "github.com/mitchellh/mapstructure" - "github.com/pkg/errors" -) - -// Option defines a single option function. -type Option func(o *Options) - -// Options defines the available options for this package. -type Options struct { - // ocis fs works on top of a dir of uuid nodes - Root string `mapstructure:"root"` - - // UserLayout describes the relative path from the storage's root node to the users home node. - UserLayout string `mapstructure:"user_layout"` - - // TODO NodeLayout option to save nodes as eg. nodes/1d/d8/1dd84abf-9466-4e14-bb86-02fc4ea3abcf - ShareFolder string `mapstructure:"share_folder"` - - // EnableHome enables the creation of home directories. 
- EnableHome bool `mapstructure:"enable_home"` - - // propagate mtime changes as tmtime (tree modification time) to the parent directory when user.ocis.propagation=1 is set on a node - TreeTimeAccounting bool `mapstructure:"treetime_accounting"` - - // propagate size changes as treesize - TreeSizeAccounting bool `mapstructure:"treesize_accounting"` - - // set an owner for the root node - Owner string `mapstructure:"owner"` - OwnerIDP string `mapstructure:"owner_idp"` - OwnerType string `mapstructure:"owner_type"` - - GatewayAddr string `mapstructure:"gateway_addr"` -} - -// New returns a new Options instance for the given configuration. -func New(m map[string]interface{}) (*Options, error) { - o := &Options{} - if err := mapstructure.Decode(m, o); err != nil { - err = errors.Wrap(err, "error decoding conf") - return nil, err - } - - if o.UserLayout == "" { - o.UserLayout = "{{.Id.OpaqueId}}" - } - // ensure user layout has no starting or trailing / - o.UserLayout = strings.Trim(o.UserLayout, "/") - - if o.ShareFolder == "" { - o.ShareFolder = "/Shares" - } - // ensure share folder always starts with slash - o.ShareFolder = filepath.Join("/", o.ShareFolder) - - // c.DataDirectory should never end in / unless it is the root - o.Root = filepath.Clean(o.Root) - - return o, nil -} diff --git a/pkg/storage/utils/decomposedfs/options/options_suite_test.go b/pkg/storage/utils/decomposedfs/options/options_suite_test.go deleted file mode 100644 index 141f49fbdf..0000000000 --- a/pkg/storage/utils/decomposedfs/options/options_suite_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package options_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestOptions(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Options Suite") -} diff --git a/pkg/storage/utils/decomposedfs/options/options_test.go b/pkg/storage/utils/decomposedfs/options/options_test.go deleted file mode 100644 index 3f5faa6257..0000000000 --- a/pkg/storage/utils/decomposedfs/options/options_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package options_test - -import ( - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -var _ = Describe("Options", func() { - var ( - o *options.Options - config map[string]interface{} - ) - - BeforeEach(func() { - config = map[string]interface{}{} - }) - - Describe("New", func() { - JustBeforeEach(func() { - var err error - o, err = options.New(config) - Expect(err).ToNot(HaveOccurred()) - }) - - It("sets defaults", func() { - Expect(len(o.ShareFolder) > 0).To(BeTrue()) - Expect(len(o.UserLayout) > 0).To(BeTrue()) - }) - - Context("with unclean root path configuration", func() { - BeforeEach(func() { - config["root"] = "foo/" - }) - - It("sanitizes the root path", func() { - Expect(o.Root).To(Equal("foo")) - }) - }) - }) -}) diff --git a/pkg/storage/utils/decomposedfs/recycle.go b/pkg/storage/utils/decomposedfs/recycle.go deleted file mode 100644 index b1153674ff..0000000000 --- a/pkg/storage/utils/decomposedfs/recycle.go +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package decomposedfs - -import ( - "context" - "os" - "path" - "path/filepath" - "strings" - "time" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -// Recycle items are stored inside the node folder and start with the uuid of the deleted node. -// The `.T.` indicates it is a trash item and what follows is the timestamp of the deletion. -// The deleted file is kept in the same location/dir as the original node. This prevents deletes -// from triggering cross storage moves when the trash is accidentally stored on another partition, -// because the admin mounted a different partition there. -// TODO For an efficient listing of deleted nodes the ocis storages trash folder should have -// contain a directory with symlinks to trash files for every userid/"root" - -// ListRecycle returns the list of available recycle items. -func (fs *Decomposedfs) ListRecycle(ctx context.Context, basePath, key, relativePath string) ([]*provider.RecycleItem, error) { - log := appctx.GetLogger(ctx) - - items := make([]*provider.RecycleItem, 0) - - // TODO how do we check if the storage allows listing the recycle for the current user? check owner of the root of the storage? - // use permissions ReadUserPermissions? 
- if fs.o.EnableHome { - if !node.OwnerPermissions().ListContainer { - log.Debug().Msg("owner not allowed to list trash") - return items, errtypes.PermissionDenied("owner not allowed to list trash") - } - } else { - if !node.NoPermissions().ListContainer { - log.Debug().Msg("default permissions prevent listing trash") - return items, errtypes.PermissionDenied("default permissions prevent listing trash") - } - } - - if key == "" && relativePath == "/" { - return fs.listTrashRoot(ctx) - } - - trashRoot := fs.getRecycleRoot(ctx) - f, err := os.Open(filepath.Join(trashRoot, key, relativePath)) - if err != nil { - if os.IsNotExist(err) { - return items, nil - } - return nil, errors.Wrapf(err, "tree: error listing %s", trashRoot) - } - defer f.Close() - - parentNode, err := os.Readlink(filepath.Join(trashRoot, key)) - if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Msg("error reading trash link, skipping") - return nil, err - } - - if md, err := f.Stat(); err != nil { - return nil, err - } else if !md.IsDir() { - // this is the case when we want to directly list a file in the trashbin - item, err := fs.createTrashItem(ctx, parentNode, filepath.Dir(relativePath), filepath.Join(trashRoot, key, relativePath)) - if err != nil { - return items, err - } - items = append(items, item) - return items, err - } - - names, err := f.Readdirnames(0) - if err != nil { - return nil, err - } - for i := range names { - if item, err := fs.createTrashItem(ctx, parentNode, relativePath, filepath.Join(trashRoot, key, relativePath, names[i])); err == nil { - items = append(items, item) - } - } - return items, nil -} - -func (fs *Decomposedfs) createTrashItem(ctx context.Context, parentNode, intermediatePath, itemPath string) (*provider.RecycleItem, error) { - log := appctx.GetLogger(ctx) - trashRoot := fs.getRecycleRoot(ctx) - trashnode, err := os.Readlink(itemPath) - if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Msg("error reading trash link, skipping") 
- return nil, err - } - parts := strings.SplitN(filepath.Base(parentNode), ".T.", 2) - if len(parts) != 2 { - log.Error().Str("trashRoot", trashRoot).Str("trashnode", trashnode).Interface("parts", parts).Msg("malformed trash link, skipping") - return nil, errors.New("malformed trash link") - } - - nodePath := fs.lu.InternalPath(filepath.Base(trashnode)) - md, err := os.Stat(nodePath) - if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("trashnode", trashnode).Msg("could not stat trash item, skipping") - return nil, err - } - - item := &provider.RecycleItem{ - Type: getResourceType(md.IsDir()), - Size: uint64(md.Size()), - Key: path.Join(parts[0], intermediatePath, filepath.Base(itemPath)), - } - if deletionTime, err := time.Parse(time.RFC3339Nano, parts[1]); err == nil { - item.DeletionTime = &types.Timestamp{ - Seconds: uint64(deletionTime.Unix()), - // TODO nanos - } - } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("link", trashnode).Interface("parts", parts).Msg("could parse time format, ignoring") - } - - // lookup origin path in extended attributes - parentPath := fs.lu.InternalPath(filepath.Base(parentNode)) - if attrBytes, err := xattr.Get(parentPath, xattrs.TrashOriginAttr); err == nil { - item.Ref = &provider.Reference{Path: filepath.Join(string(attrBytes), intermediatePath, filepath.Base(itemPath))} - } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("link", trashnode).Msg("could not read origin path, skipping") - return nil, err - } - // TODO filter results by permission ... on the original parent? or the trashed node? - // if it were on the original parent it would be possible to see files that were trashed before the current user got access - // so -> check the trash node itself - // hmm listing trash currently lists the current users trash or the 'root' trash. from ocs only the home storage is queried for trash items. 
- // for now we can only really check if the current user is the owner - if attrBytes, err := xattr.Get(nodePath, xattrs.OwnerIDAttr); err == nil { - if fs.o.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - if u.Id.OpaqueId != string(attrBytes) { - log.Warn().Str("trashRoot", trashRoot).Str("link", trashnode).Msg("trash item not owned by current user, skipping") - // continue - return nil, errors.New("trash item not owned by current user") - } - } - } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("link", trashnode).Msg("could not read owner, skipping") - return nil, err - } - - return item, nil -} - -func (fs *Decomposedfs) listTrashRoot(ctx context.Context) ([]*provider.RecycleItem, error) { - log := appctx.GetLogger(ctx) - items := make([]*provider.RecycleItem, 0) - - trashRoot := fs.getRecycleRoot(ctx) - f, err := os.Open(trashRoot) - if err != nil { - if os.IsNotExist(err) { - return items, nil - } - return nil, errors.Wrap(err, "tree: error listing "+trashRoot) - } - defer f.Close() - - names, err := f.Readdirnames(0) - if err != nil { - return nil, err - } - - for i := range names { - trashnode, err := os.Readlink(filepath.Join(trashRoot, names[i])) - if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Msg("error reading trash link, skipping") - continue - } - parts := strings.SplitN(filepath.Base(trashnode), ".T.", 2) - if len(parts) != 2 { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("trashnode", trashnode).Interface("parts", parts).Msg("malformed trash link, skipping") - continue - } - - nodePath := fs.lu.InternalPath(filepath.Base(trashnode)) - md, err := os.Stat(nodePath) - if err != nil { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("trashnode", trashnode). 
/*.Interface("parts", parts)*/ Msg("could not stat trash item, skipping") - continue - } - - item := &provider.RecycleItem{ - Type: getResourceType(md.IsDir()), - Size: uint64(md.Size()), - Key: parts[0], - } - if deletionTime, err := time.Parse(time.RFC3339Nano, parts[1]); err == nil { - item.DeletionTime = &types.Timestamp{ - Seconds: uint64(deletionTime.Unix()), - // TODO nanos - } - } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Interface("parts", parts).Msg("could parse time format, ignoring") - } - - // lookup origin path in extended attributes - var attrBytes []byte - if attrBytes, err = xattr.Get(nodePath, xattrs.TrashOriginAttr); err == nil { - item.Ref = &provider.Reference{Path: string(attrBytes)} - } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Msg("could not read origin path, skipping") - continue - } - // TODO filter results by permission ... on the original parent? or the trashed node? - // if it were on the original parent it would be possible to see files that were trashed before the current user got access - // so -> check the trash node itself - // hmm listing trash currently lists the current users trash or the 'root' trash. from ocs only the home storage is queried for trash items. 
- // for now we can only really check if the current user is the owner - if attrBytes, err = xattr.Get(nodePath, xattrs.OwnerIDAttr); err == nil { - if fs.o.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - if u.Id.OpaqueId != string(attrBytes) { - log.Warn().Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Msg("trash item not owned by current user, skipping") - continue - } - } - } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Msg("could not read owner, skipping") - continue - } - - items = append(items, item) - } - return items, nil -} - -// RestoreRecycleItem restores the specified item. -func (fs *Decomposedfs) RestoreRecycleItem(ctx context.Context, basePath, key, relativePath string, restoreRef *provider.Reference) error { - if restoreRef == nil { - restoreRef = &provider.Reference{} - } - rn, p, restoreFunc, err := fs.tp.RestoreRecycleItemFunc(ctx, key, relativePath, restoreRef.Path) - if err != nil { - return err - } - - // check permissions of deleted node - ok, err := fs.p.HasPermission(ctx, rn, func(rp *provider.ResourcePermissions) bool { - return rp.RestoreRecycleItem - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(key) - } - - ps, err := fs.p.AssemblePermissions(ctx, p) - if err != nil { - return errtypes.InternalError(err.Error()) - } - - // share receiver cannot restore to a shared resource to which she does not have write permissions. - if !ps.InitiateFileUpload { - return errtypes.PermissionDenied(key) - } - - // Run the restore func - return restoreFunc() -} - -// PurgeRecycleItem purges the specified item. 
-func (fs *Decomposedfs) PurgeRecycleItem(ctx context.Context, basePath, key, relativePath string) error { - rn, purgeFunc, err := fs.tp.PurgeRecycleItemFunc(ctx, key, relativePath) - if err != nil { - return err - } - - // check permissions of deleted node - ok, err := fs.p.HasPermission(ctx, rn, func(rp *provider.ResourcePermissions) bool { - return rp.PurgeRecycle - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(key) - } - - // Run the purge func - return purgeFunc() -} - -// EmptyRecycle empties the trash. -func (fs *Decomposedfs) EmptyRecycle(ctx context.Context) error { - u, ok := ctxpkg.ContextGetUser(ctx) - // TODO what permission should we check? we could check the root node of the user? or the owner permissions on his home root node? - // The current impl will wipe your own trash. or when no user provided the trash of 'root' - if !ok { - return os.RemoveAll(fs.getRecycleRoot(ctx)) - } - - // TODO use layout, see Tree.Delete() for problem - return os.RemoveAll(filepath.Join(fs.o.Root, "trash", u.Id.OpaqueId)) -} - -func getResourceType(isDir bool) provider.ResourceType { - if isDir { - return provider.ResourceType_RESOURCE_TYPE_CONTAINER - } - return provider.ResourceType_RESOURCE_TYPE_FILE -} - -func (fs *Decomposedfs) getRecycleRoot(ctx context.Context) string { - if fs.o.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - // TODO use layout, see Tree.Delete() for problem - return filepath.Join(fs.o.Root, "trash", u.Id.OpaqueId) - } - return filepath.Join(fs.o.Root, "trash", "root") -} diff --git a/pkg/storage/utils/decomposedfs/revisions.go b/pkg/storage/utils/decomposedfs/revisions.go deleted file mode 100644 index 84c00bee58..0000000000 --- a/pkg/storage/utils/decomposedfs/revisions.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with 
the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package decomposedfs - -import ( - "context" - "io" - "os" - "path/filepath" - "strings" - "time" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - "github.com/pkg/errors" -) - -// Revision entries are stored inside the node folder and start with the same uuid as the current version. -// The `.REV.` indicates it is a revision and what follows is a timestamp, so multiple versions -// can be kept in the same location as the current file content. This prevents new fileuploads -// to trigger cross storage moves when revisions accidentally are stored on another partition, -// because the admin mounted a different partition there. -// We can add a background process to move old revisions to a slower storage -// and replace the revision file with a symbolic link in the future, if necessary. - -// ListRevisions lists the revisions of the given resource. 
-func (fs *Decomposedfs) ListRevisions(ctx context.Context, ref *provider.Reference) (revisions []*provider.FileVersion, err error) { - var n *node.Node - if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { - return - } - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return - } - - ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { - return rp.ListFileVersions - }) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !ok: - return nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - revisions = []*provider.FileVersion{} - np := n.InternalPath() - if items, err := filepath.Glob(np + ".REV.*"); err == nil { - for i := range items { - if fi, err := os.Stat(items[i]); err == nil { - mtime := fi.ModTime() - rev := &provider.FileVersion{ - Key: filepath.Base(items[i]), - Mtime: uint64(mtime.Unix()), - } - blobSize, err := node.ReadBlobSizeAttr(items[i]) - if err != nil { - return nil, errors.Wrapf(err, "error reading blobsize xattr") - } - rev.Size = uint64(blobSize) - etag, err := node.CalculateEtag(np, mtime) - if err != nil { - return nil, errors.Wrapf(err, "error calculating etag") - } - rev.Etag = etag - revisions = append(revisions, rev) - } - } - } - return -} - -// DownloadRevision returns a reader for the specified revision. 
-func (fs *Decomposedfs) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (io.ReadCloser, error) { - log := appctx.GetLogger(ctx) - - // verify revision key format - kp := strings.SplitN(revisionKey, ".REV.", 2) - if len(kp) != 2 { - log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") - return nil, errtypes.NotFound(revisionKey) - } - log.Debug().Str("revisionKey", revisionKey).Msg("DownloadRevision") - - // check if the node is available and has not been deleted - n, err := node.ReadNode(ctx, fs.lu, kp[0]) - if err != nil { - return nil, err - } - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return nil, err - } - - ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { - // TODO add explicit permission in the CS3 api? - return rp.ListFileVersions && rp.RestoreFileVersion && rp.InitiateFileDownload - }) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case !ok: - return nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - contentPath := fs.lu.InternalPath(revisionKey) - - r, err := os.Open(contentPath) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(contentPath) - } - return nil, errors.Wrap(err, "decomposedfs: error opening revision "+revisionKey) - } - return r, nil -} - -// RestoreRevision restores the specified revision of the resource. 
-func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (err error) { - log := appctx.GetLogger(ctx) - - // verify revision key format - kp := strings.SplitN(revisionKey, ".REV.", 2) - if len(kp) != 2 { - log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") - return errtypes.NotFound(revisionKey) - } - - // check if the node is available and has not been deleted - n, err := node.ReadNode(ctx, fs.lu, kp[0]) - if err != nil { - return err - } - if !n.Exists { - err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) - return err - } - - ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { - return rp.RestoreFileVersion - }) - switch { - case err != nil: - return errtypes.InternalError(err.Error()) - case !ok: - return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - // move current version to new revision - nodePath := fs.lu.InternalPath(kp[0]) - var fi os.FileInfo - if fi, err = os.Stat(nodePath); err == nil { - // versions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries - versionsPath := fs.lu.InternalPath(kp[0] + ".REV." 
+ fi.ModTime().UTC().Format(time.RFC3339Nano)) - - err = os.Rename(nodePath, versionsPath) - if err != nil { - return - } - - // copy old revision to current location - - revisionPath := fs.lu.InternalPath(revisionKey) - - if err = os.Rename(revisionPath, nodePath); err != nil { - return - } - - return fs.tp.Propagate(ctx, n) - } - - log.Error().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("original node does not exist") - return -} diff --git a/pkg/storage/utils/decomposedfs/spaces.go b/pkg/storage/utils/decomposedfs/spaces.go deleted file mode 100644 index d3295781af..0000000000 --- a/pkg/storage/utils/decomposedfs/spaces.go +++ /dev/null @@ -1,433 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package decomposedfs - -import ( - "context" - "fmt" - "math" - "os" - "path/filepath" - "strconv" - "strings" - - userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - permissionsv1beta1 "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" - v1beta11 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" - ocsconv "github.com/cs3org/reva/internal/http/services/owncloud/ocs/conversions" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/rgrpc/todo/pool" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/cs3org/reva/pkg/utils" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -const ( - spaceTypeAny = "*" - spaceIDAny = "*" -) - -// CreateStorageSpace creates a storage space. -func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.CreateStorageSpaceRequest) (*provider.CreateStorageSpaceResponse, error) { - // spaces will be located by default in the root of the storage. - r, err := fs.lu.RootNode(ctx) - if err != nil { - return nil, err - } - - // "everything is a resource" this is the unique ID for the Space resource. 
- spaceID := uuid.New().String() - - n, err := r.Child(ctx, spaceID) - if err != nil { - return nil, err - } - - if n.Exists { - return nil, fmt.Errorf("decomposedfs: spaces: invalid duplicated node with id `%s`", n.ID) - } - - if err := fs.tp.CreateDir(ctx, n); err != nil { - return nil, err - } - - // always enable propagation on the storage space root - nodePath := n.InternalPath() - // mark the space root node as the end of propagation - if err = xattr.Set(nodePath, xattrs.PropagationAttr, []byte("1")); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not mark node to propagate") - return nil, err - } - - if err := fs.createHiddenSpaceFolder(ctx, n); err != nil { - return nil, err - } - - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - return nil, fmt.Errorf("decomposedfs: spaces: contextual user not found") - } - - if err := n.ChangeOwner(u.Id); err != nil { - return nil, err - } - - err = fs.createStorageSpace(ctx, req.Type, n.ID) - if err != nil { - return nil, err - } - - if q := req.GetQuota(); q != nil { - // set default space quota - if err := n.SetMetadata(xattrs.QuotaAttr, strconv.FormatUint(q.QuotaMaxBytes, 10)); err != nil { - return nil, err - } - } - - if err := n.SetMetadata(xattrs.SpaceNameAttr, req.Name); err != nil { - return nil, err - } - - resp := &provider.CreateStorageSpaceResponse{ - Status: &v1beta11.Status{ - Code: v1beta11.Code_CODE_OK, - }, - StorageSpace: &provider.StorageSpace{ - Owner: u, - Id: &provider.StorageSpaceId{ - OpaqueId: spaceID, - }, - // TODO we have to omit Root information because the storage driver does not know its mount point. - // Root: &provider.ResourceId{ - // StorageId: "", - // OpaqueId: "", - // }, - Name: req.GetName(), - Quota: req.GetQuota(), - SpaceType: req.GetType(), - }, - } - - nPath, err := fs.lu.Path(ctx, n) - if err != nil { - return nil, errors.Wrap(err, "decomposedfs: spaces: could not create space. 
invalid node path") - } - - ctx = context.WithValue(ctx, SpaceGrant, struct{}{}) - - if err := fs.AddGrant(ctx, &provider.Reference{ - Path: nPath, - }, &provider.Grant{ - Grantee: &provider.Grantee{ - Type: provider.GranteeType_GRANTEE_TYPE_USER, - Id: &provider.Grantee_UserId{ - UserId: u.Id, - }, - }, - Permissions: ocsconv.NewManagerRole().CS3ResourcePermissions(), - }); err != nil { - return nil, err - } - - return resp, nil -} - -// ListStorageSpaces returns a list of StorageSpaces. -// The list can be filtered by space type or space id. -// Spaces are persisted with symlinks in /spaces// pointing to ../../nodes/, the root node of the space -// The spaceid is a concatenation of storageid + "!" + nodeid. -func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter) ([]*provider.StorageSpace, error) { - // TODO check filters - - // TODO when a space symlink is broken delete the space for cleanup - // read permissions are deduced from the node? - - // TODO for absolute references this actually requires us to move all user homes into a subfolder of /nodes/root, - // e.g. /nodes/root/ otherwise storage space names might collide even though they are of different types - // /nodes/root/personal/foo and /nodes/root/shares/foo might be two very different spaces, a /nodes/root/foo is not expressive enough - // we would not need /nodes/root if access always happened via spaceid+relative path - - var ( - spaceType = spaceTypeAny - spaceID = spaceIDAny - ) - - for i := range filter { - switch filter[i].Type { - case provider.ListStorageSpacesRequest_Filter_TYPE_SPACE_TYPE: - spaceType = filter[i].GetSpaceType() - case provider.ListStorageSpacesRequest_Filter_TYPE_ID: - parts := strings.SplitN(filter[i].GetId().OpaqueId, "!", 2) - if len(parts) == 2 { - spaceID = parts[1] - } - } - } - - // build the glob path, eg. 
- // /path/to/root/spaces/personal/nodeid - // /path/to/root/spaces/shared/nodeid - matches, err := filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceType, spaceID)) - if err != nil { - return nil, err - } - - spaces := make([]*provider.StorageSpace, 0, len(matches)) - - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - appctx.GetLogger(ctx).Debug().Msg("expected user in context") - return spaces, nil - } - - client, err := pool.GetGatewayServiceClient(pool.Endpoint(fs.o.GatewayAddr)) - if err != nil { - return nil, err - } - - checkRes, err := client.CheckPermission(ctx, &permissionsv1beta1.CheckPermissionRequest{ - Permission: "list-all-spaces", - SubjectRef: &permissionsv1beta1.SubjectReference{ - Spec: &permissionsv1beta1.SubjectReference_UserId{ - UserId: u.Id, - }, - }, - }) - if err != nil { - return nil, err - } - - canListAllSpaces := false - if checkRes.Status.Code == v1beta11.Code_CODE_OK { - canListAllSpaces = true - } - - for i := range matches { - // always read link in case storage space id != node id - if target, err := os.Readlink(matches[i]); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[i]).Msg("could not read link, skipping") - continue - } else { - n, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("id", filepath.Base(target)).Msg("could not read node, skipping") - continue - } - - spaceType := filepath.Base(filepath.Dir(matches[i])) - - owner, err := n.Owner() - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not read owner, skipping") - continue - } - - if spaceType == "share" && utils.UserEqual(u.Id, owner) { - // do not list shares as spaces for the owner - continue - } - - // TODO apply more filters - space, err := fs.storageSpaceFromNode(ctx, n, matches[i], spaceType, canListAllSpaces) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not convert to storage 
space") - continue - } - spaces = append(spaces, space) - } - } - - return spaces, nil -} - -// UpdateStorageSpace updates a storage space. -func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.UpdateStorageSpaceRequest) (*provider.UpdateStorageSpaceResponse, error) { - space := req.StorageSpace - - _, spaceID, err := utils.SplitStorageSpaceID(space.Id.OpaqueId) - if err != nil { - return nil, err - } - - matches, err := filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceTypeAny, spaceID)) - if err != nil { - return nil, err - } - - if len(matches) != 1 { - return &provider.UpdateStorageSpaceResponse{ - Status: &v1beta11.Status{ - Code: v1beta11.Code_CODE_NOT_FOUND, - Message: fmt.Sprintf("update space failed: found %d matching spaces", len(matches)), - }, - }, nil - } - - target, err := os.Readlink(matches[0]) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[0]).Msg("could not read link, skipping") - } - - node, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) - if err != nil { - return nil, err - } - - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - return nil, fmt.Errorf("decomposedfs: spaces: contextual user not found") - } - space.Owner = u - - if space.Name != "" { - if err := node.SetMetadata(xattrs.SpaceNameAttr, space.Name); err != nil { - return nil, err - } - } - - if space.Quota != nil { - if err := node.SetMetadata(xattrs.QuotaAttr, strconv.FormatUint(space.Quota.QuotaMaxBytes, 10)); err != nil { - return nil, err - } - } - - return &provider.UpdateStorageSpaceResponse{ - Status: &v1beta11.Status{Code: v1beta11.Code_CODE_OK}, - StorageSpace: space, - }, nil -} - -// createHiddenSpaceFolder bootstraps a storage space root with a hidden ".space" folder used to store space related -// metadata such as a description or an image. -// Internally createHiddenSpaceFolder leverages the use of node.Child() to create a new node under the space root. 
-// createHiddenSpaceFolder is just a contextual alias for node.Child() for ".spaces". -func (fs *Decomposedfs) createHiddenSpaceFolder(ctx context.Context, r *node.Node) error { - hiddenSpace, err := r.Child(ctx, ".space") - if err != nil { - return err - } - - return fs.tp.CreateDir(ctx, hiddenSpace) -} - -func (fs *Decomposedfs) createStorageSpace(ctx context.Context, spaceType, nodeID string) error { - // create space type dir - if err := os.MkdirAll(filepath.Join(fs.o.Root, "spaces", spaceType), 0700); err != nil { - return err - } - - // we can reuse the node id as the space id - err := os.Symlink("../../nodes/"+nodeID, filepath.Join(fs.o.Root, "spaces", spaceType, nodeID)) - if err != nil { - if isAlreadyExists(err) { - appctx.GetLogger(ctx).Debug().Err(err).Str("node", nodeID).Str("spacetype", spaceType).Msg("symlink already exists") - } else { - // TODO how should we handle error cases here? - appctx.GetLogger(ctx).Error().Err(err).Str("node", nodeID).Str("spacetype", spaceType).Msg("could not create symlink") - } - } - - return nil -} - -func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, node *node.Node, nodePath, spaceType string, canListAllSpaces bool) (*provider.StorageSpace, error) { - owner, err := node.Owner() - if err != nil { - return nil, err - } - - // TODO apply more filters - - sname, err := xattr.Get(node.InternalPath(), xattrs.SpaceNameAttr) - if err != nil { - return nil, err - } - space := &provider.StorageSpace{ - // FIXME the driver should know its id move setting the spaceid from the storage provider to the drivers - //Id: &provider.StorageSpaceId{OpaqueId: "1284d238-aa92-42ce-bdc4-0b0000009157!" 
+ n.ID}, - Root: &provider.ResourceId{ - // FIXME the driver should know its id move setting the spaceid from the storage provider to the drivers - //StorageId: "1284d238-aa92-42ce-bdc4-0b0000009157", - OpaqueId: node.ID, - }, - Name: string(sname), - SpaceType: spaceType, - // Mtime is set either as node.tmtime or as fi.mtime below - } - - user := ctxpkg.ContextMustGetUser(ctx) - - // filter out spaces user cannot access (currently based on stat permission) - if !canListAllSpaces { - p, err := node.ReadUserPermissions(ctx, user) - if err != nil { - return nil, err - } - if !p.Stat { - return nil, errors.New("user is not allowed to Stat the space") - } - } - - space.Owner = &userv1beta1.User{ // FIXME only return a UserID, not a full blown user object - Id: owner, - } - - // we set the space mtime to the root item mtime - // override the stat mtime with a tmtime if it is present - if tmt, err := node.GetTMTime(); err == nil { - un := tmt.UnixNano() - space.Mtime = &types.Timestamp{ - Seconds: uint64(un / 1000000000), - Nanos: uint32(un % 1000000000), - } - } else if fi, err := os.Stat(nodePath); err == nil { - // fall back to stat mtime - un := fi.ModTime().UnixNano() - space.Mtime = &types.Timestamp{ - Seconds: uint64(un / 1000000000), - Nanos: uint32(un % 1000000000), - } - } - - // quota - v, err := xattr.Get(nodePath, xattrs.QuotaAttr) - if err == nil { - // make sure we have a proper signed int - // we use the same magic numbers to indicate: - // -1 = uncalculated - // -2 = unknown - // -3 = unlimited - if quota, err := strconv.ParseUint(string(v), 10, 64); err == nil { - space.Quota = &provider.Quota{ - QuotaMaxBytes: quota, - QuotaMaxFiles: math.MaxUint64, // TODO MaxUInt64? = unlimited? why even max files? 0 = unlimited? 
- } - } else { - return nil, err - } - } - - return space, nil -} diff --git a/pkg/storage/utils/decomposedfs/testhelpers/helpers.go b/pkg/storage/utils/decomposedfs/testhelpers/helpers.go deleted file mode 100644 index a80f3e3755..0000000000 --- a/pkg/storage/utils/decomposedfs/testhelpers/helpers.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package helpers - -import ( - "context" - "os" - "path/filepath" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - providerv1beta1 "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - ruser "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/storage" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/mocks" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree" - treemocks "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree/mocks" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/cs3org/reva/tests/helpers" - "github.com/google/uuid" - "github.com/pkg/xattr" - "github.com/stretchr/testify/mock" -) - -// TestEnv represents a test environment for unit tests. -type TestEnv struct { - Root string - Fs storage.FS - Tree *tree.Tree - Permissions *mocks.PermissionsChecker - Blobstore *treemocks.Blobstore - Owner *userpb.User - Lookup *decomposedfs.Lookup - Ctx context.Context -} - -// NewTestEnv prepares a test environment on disk -// The storage contains some directories and a file: -// -// /dir1/ -// /dir1/file1 -// /dir1/subdir1/ -func NewTestEnv() (*TestEnv, error) { - tmpRoot, err := helpers.TempDir("reva-unit-tests-*-root") - if err != nil { - return nil, err - } - - config := map[string]interface{}{ - "root": tmpRoot, - "enable_home": true, - "treetime_accounting": true, - "treesize_accounting": true, - "share_folder": "/Shares", - "user_layout": "{{.Id.OpaqueId}}", - } - o, err := options.New(config) - if err != nil { - return nil, err - } - - owner := &userpb.User{ - Id: &userpb.UserId{ - Idp: "idp", - OpaqueId: "userid", - Type: userpb.UserType_USER_TYPE_PRIMARY, - }, - Username: "username", - } - lookup := &decomposedfs.Lookup{Options: o} - permissions := &mocks.PermissionsChecker{} - 
permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Times(3) // Permissions required for setup below - bs := &treemocks.Blobstore{} - tree := tree.New(o.Root, true, true, lookup, bs) - fs, err := decomposedfs.New(o, lookup, permissions, tree) - if err != nil { - return nil, err - } - ctx := ruser.ContextSetUser(context.Background(), owner) - - env := &TestEnv{ - Root: tmpRoot, - Fs: fs, - Tree: tree, - Lookup: lookup, - Permissions: permissions, - Blobstore: bs, - Owner: owner, - Ctx: ctx, - } - - // Create home - err = fs.CreateHome(ctx) - if err != nil { - return nil, err - } - - // the space name attribute is the stop condition in the lookup - h, err := lookup.HomeNode(ctx) - if err != nil { - return nil, err - } - if err = xattr.Set(h.InternalPath(), xattrs.SpaceNameAttr, []byte("username")); err != nil { - return nil, err - } - - // Create dir1 - dir1, err := env.CreateTestDir("/dir1") - if err != nil { - return nil, err - } - - // Create file1 in dir1 - _, err = env.CreateTestFile("file1", "file1-blobid", 1234, dir1.ID) - if err != nil { - return nil, err - } - - // Create subdir1 in dir1 - err = fs.CreateDir(ctx, &providerv1beta1.Reference{Path: "/dir1/subdir1"}) - if err != nil { - return nil, err - } - - dir2, err := dir1.Child(ctx, "subdir1") - if err != nil { - return nil, err - } - // Create file1 in dir1 - _, err = env.CreateTestFile("file2", "file2-blobid", 12345, dir2.ID) - if err != nil { - return nil, err - } - - // Create emptydir - err = fs.CreateDir(ctx, &providerv1beta1.Reference{Path: "/emptydir"}) - if err != nil { - return nil, err - } - - return env, nil -} - -// Cleanup removes all files from disk. -func (t *TestEnv) Cleanup() { - os.RemoveAll(t.Root) -} - -// CreateTestDir create a directory and returns a corresponding Node. 
-func (t *TestEnv) CreateTestDir(name string) (*node.Node, error) { - err := t.Fs.CreateDir(t.Ctx, &providerv1beta1.Reference{Path: name}) - if err != nil { - return nil, err - } - n, err := t.Lookup.NodeFromPath(t.Ctx, name, false) - if err != nil { - return nil, err - } - - return n, nil -} - -// CreateTestFile creates a new file and its metadata and returns a corresponding Node. -func (t *TestEnv) CreateTestFile(name, blobID string, blobSize int64, parentID string) (*node.Node, error) { - // Create file in dir1 - file := node.New( - uuid.New().String(), - parentID, - name, - blobSize, - blobID, - nil, - t.Lookup, - ) - _, err := os.OpenFile(file.InternalPath(), os.O_CREATE, 0700) - if err != nil { - return nil, err - } - err = file.WriteMetadata(t.Owner.Id) - if err != nil { - return nil, err - } - // Link in parent - childNameLink := filepath.Join(t.Lookup.InternalPath(file.ParentID), file.Name) - err = os.Symlink("../"+file.ID, childNameLink) - if err != nil { - return nil, err - } - - return file, err -} diff --git a/pkg/storage/utils/decomposedfs/tree/mocks/Blobstore.go b/pkg/storage/utils/decomposedfs/tree/mocks/Blobstore.go deleted file mode 100644 index 46c4aaa37b..0000000000 --- a/pkg/storage/utils/decomposedfs/tree/mocks/Blobstore.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package mocks - -import ( - io "io" - - mock "github.com/stretchr/testify/mock" -) - -// Blobstore is an autogenerated mock type for the Blobstore type -type Blobstore struct { - mock.Mock -} - -// Delete provides a mock function with given fields: key -func (_m *Blobstore) Delete(key string) error { - ret := _m.Called(key) - - var r0 error - if rf, ok := ret.Get(0).(func(string) error); ok { - r0 = rf(key) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Download provides a mock function with given fields: key -func (_m *Blobstore) Download(key string) (io.ReadCloser, error) { - ret := _m.Called(key) - - var r0 io.ReadCloser - if rf, ok := ret.Get(0).(func(string) io.ReadCloser); ok { - r0 = rf(key) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(io.ReadCloser) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(key) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Upload provides a mock function with given fields: key, reader -func (_m *Blobstore) Upload(key string, reader io.Reader) error { - ret := _m.Called(key, reader) - - var r0 error - if rf, ok := ret.Get(0).(func(string, io.Reader) error); ok { - r0 = rf(key, reader) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/pkg/storage/utils/decomposedfs/tree/tree.go b/pkg/storage/utils/decomposedfs/tree/tree.go deleted file mode 100644 index 74f8edd62a..0000000000 --- a/pkg/storage/utils/decomposedfs/tree/tree.go +++ /dev/null @@ -1,867 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package tree - -import ( - "context" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/logger" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/cs3org/reva/pkg/utils" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/pkg/xattr" - "github.com/rs/zerolog/log" -) - -// go:generate mockery -name Blobstore - -const ( - spaceTypePersonal = "personal" - spaceTypeShare = "share" -) - -// Blobstore defines an interface for storing blobs in a blobstore. -type Blobstore interface { - Upload(key string, reader io.Reader) error - Download(key string) (io.ReadCloser, error) - Delete(key string) error -} - -// PathLookup defines the interface for the lookup component. 
-type PathLookup interface { - NodeFromPath(ctx context.Context, fn string, followReferences bool) (*node.Node, error) - NodeFromID(ctx context.Context, id *provider.ResourceId) (n *node.Node, err error) - RootNode(ctx context.Context) (node *node.Node, err error) - HomeOrRootNode(ctx context.Context) (node *node.Node, err error) - - InternalRoot() string - InternalPath(ID string) string - Path(ctx context.Context, n *node.Node) (path string, err error) - ShareFolder() string -} - -// Tree manages a hierarchical tree. -type Tree struct { - lookup PathLookup - blobstore Blobstore - - root string - treeSizeAccounting bool - treeTimeAccounting bool -} - -// PermissionCheckFunc defined a function used to check resource permissions. -type PermissionCheckFunc func(rp *provider.ResourcePermissions) bool - -// New returns a new instance of Tree. -func New(root string, tta bool, tsa bool, lu PathLookup, bs Blobstore) *Tree { - return &Tree{ - lookup: lu, - blobstore: bs, - root: root, - treeTimeAccounting: tta, - treeSizeAccounting: tsa, - } -} - -// Setup prepares the tree structure. 
-func (t *Tree) Setup(owner *userpb.UserId, propagateToRoot bool) error { - // create data paths for internal layout - dataPaths := []string{ - filepath.Join(t.root, "nodes"), - // notes contain symlinks from nodes//uploads/ to ../../uploads/ - // better to keep uploads on a fast / volatile storage before a workflow finally moves them to the nodes dir - filepath.Join(t.root, "uploads"), - filepath.Join(t.root, "trash"), - } - for _, v := range dataPaths { - err := os.MkdirAll(v, 0700) - if err != nil { - return err - } - } - - // the root node has an empty name - // the root node has no parent - n := node.New("root", "", "", 0, "", nil, t.lookup) - err := t.createNode(n, owner) - if err != nil { - return err - } - - // set propagation flag - v := []byte("0") - if propagateToRoot { - v = []byte("1") - } - if err = xattr.Set(n.InternalPath(), xattrs.PropagationAttr, v); err != nil { - return err - } - - // create spaces folder and iterate over existing nodes to populate it - spacesPath := filepath.Join(t.root, "spaces") - fi, err := os.Stat(spacesPath) - if os.IsNotExist(err) { - // create personal spaces dir - if err := os.MkdirAll(filepath.Join(spacesPath, spaceTypePersonal), 0700); err != nil { - return err - } - // create share spaces dir - if err := os.MkdirAll(filepath.Join(spacesPath, spaceTypeShare), 0700); err != nil { - return err - } - - f, err := os.Open(filepath.Join(t.root, "nodes")) - if err != nil { - return err - } - nodes, err := f.Readdir(0) - if err != nil { - return err - } - - for i := range nodes { - nodePath := filepath.Join(t.root, "nodes", nodes[i].Name()) - - // is it a user root? -> create personal space - if isRootNode(nodePath) { - // we can reuse the node id as the space id - t.linkSpace(spaceTypePersonal, nodes[i].Name(), nodes[i].Name()) - } - - // is it a shared node? 
-> create share space - if isSharedNode(nodePath) { - // we can reuse the node id as the space id - t.linkSpace(spaceTypeShare, nodes[i].Name(), nodes[i].Name()) - } - } - } else if !fi.IsDir() { - // check if it is a directory - return fmt.Errorf("%s is not a directory", spacesPath) - } - - return nil -} - -// linkSpace creates a new symbolic link for a space with the given type st, and node id. -func (t *Tree) linkSpace(spaceType, spaceID, nodeID string) { - spacesPath := filepath.Join(t.root, "spaces", spaceType, spaceID) - expectedTarget := "../../nodes/" + nodeID - linkTarget, err := os.Readlink(spacesPath) - if errors.Is(err, os.ErrNotExist) { - err = os.Symlink(expectedTarget, spacesPath) - if err != nil { - logger.New().Error().Err(err). - Str("space_type", spaceType). - Str("space", spaceID). - Str("node", nodeID). - Msg("could not create symlink") - } - } else { - if err != nil { - logger.New().Error().Err(err). - Str("space_type", spaceType). - Str("space", spaceID). - Str("node", nodeID). - Msg("could not read symlink") - } - if linkTarget != expectedTarget { - logger.New().Warn(). - Str("space_type", spaceType). - Str("space", spaceID). - Str("node", nodeID). - Str("expected", expectedTarget). - Str("actual", linkTarget). - Msg("expected a different link target") - } - } -} - -func isRootNode(nodePath string) bool { - attrBytes, err := xattr.Get(nodePath, xattrs.ParentidAttr) - return err == nil && string(attrBytes) == "root" -} -func isSharedNode(nodePath string) bool { - if attrs, err := xattr.List(nodePath); err == nil { - for i := range attrs { - if strings.HasPrefix(attrs[i], xattrs.GrantPrefix) { - return true - } - } - } - return false -} - -// GetMD returns the metadata of a node in the tree. 
-func (t *Tree) GetMD(ctx context.Context, n *node.Node) (os.FileInfo, error) { - md, err := os.Stat(n.InternalPath()) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(n.ID) - } - return nil, errors.Wrap(err, "tree: error stating "+n.ID) - } - - return md, nil -} - -// CreateDir creates a new directory entry in the tree. -func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) { - if n.Exists || n.ID != "" { - return errtypes.AlreadyExists(n.ID) // path? - } - - // create a directory node - n.ID = uuid.New().String() - - // who will become the owner? the owner of the parent node, not the current user - var p *node.Node - p, err = n.Parent() - if err != nil { - return - } - var owner *userpb.UserId - owner, err = p.Owner() - if err != nil { - return - } - - err = t.createNode(n, owner) - if err != nil { - return nil - } - - // make child appear in listings - err = os.Symlink("../"+n.ID, filepath.Join(t.lookup.InternalPath(n.ParentID), n.Name)) - if err != nil { - return - } - return t.Propagate(ctx, n) -} - -// Move replaces the target with the source. -func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) (err error) { - // if target exists delete it without trashing it - if newNode.Exists { - // TODO make sure all children are deleted - if err := os.RemoveAll(newNode.InternalPath()); err != nil { - return errors.Wrap(err, "decomposedfs: Move: error deleting target node "+newNode.ID) - } - } - - // Always target the old node ID for xattr updates. - // The new node id is empty if the target does not exist - // and we need to overwrite the new one when overwriting an existing path. - tgtPath := oldNode.InternalPath() - - // are we just renaming (parent stays the same)? 
- if oldNode.ParentID == newNode.ParentID { - parentPath := t.lookup.InternalPath(oldNode.ParentID) - - // rename child - err = os.Rename( - filepath.Join(parentPath, oldNode.Name), - filepath.Join(parentPath, newNode.Name), - ) - if err != nil { - return errors.Wrap(err, "decomposedfs: could not rename child") - } - - // update name attribute - if err := xattr.Set(tgtPath, xattrs.NameAttr, []byte(newNode.Name)); err != nil { - return errors.Wrap(err, "decomposedfs: could not set name attribute") - } - - return t.Propagate(ctx, newNode) - } - - // we are moving the node to a new parent, any target has been removed - // bring old node to the new parent - - // rename child - err = os.Rename( - filepath.Join(t.lookup.InternalPath(oldNode.ParentID), oldNode.Name), - filepath.Join(t.lookup.InternalPath(newNode.ParentID), newNode.Name), - ) - if err != nil { - return errors.Wrap(err, "decomposedfs: could not move child") - } - - // update target parentid and name - if err := xattr.Set(tgtPath, xattrs.ParentidAttr, []byte(newNode.ParentID)); err != nil { - return errors.Wrap(err, "decomposedfs: could not set parentid attribute") - } - if err := xattr.Set(tgtPath, xattrs.NameAttr, []byte(newNode.Name)); err != nil { - return errors.Wrap(err, "decomposedfs: could not set name attribute") - } - - // TODO inefficient because we might update several nodes twice, only propagate unchanged nodes? - // collect in a list, then only stat each node once - // also do this in a go routine ... webdav should check the etag async - - err = t.Propagate(ctx, oldNode) - if err != nil { - return errors.Wrap(err, "decomposedfs: Move: could not propagate old node") - } - err = t.Propagate(ctx, newNode) - if err != nil { - return errors.Wrap(err, "decomposedfs: Move: could not propagate new node") - } - return nil -} - -// ListFolder lists the content of a folder node. 
-func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, error) { - dir := n.InternalPath() - f, err := os.Open(dir) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(dir) - } - return nil, errors.Wrap(err, "tree: error listing "+dir) - } - defer f.Close() - - names, err := f.Readdirnames(0) - if err != nil { - return nil, err - } - nodes := []*node.Node{} - for i := range names { - link, err := os.Readlink(filepath.Join(dir, names[i])) - if err != nil { - // TODO log - continue - } - - n, err := node.ReadNode(ctx, t.lookup, filepath.Base(link)) - if err != nil { - // TODO log - continue - } - nodes = append(nodes, n) - } - return nodes, nil -} - -// Delete deletes a node in the tree by moving it to the trash. -func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { - deletingSharedResource := ctx.Value(appctx.DeletingSharedResource) - - if deletingSharedResource != nil && deletingSharedResource.(bool) { - src := filepath.Join(t.lookup.InternalPath(n.ParentID), n.Name) - return os.Remove(src) - } - // Prepare the trash - // TODO use layout?, but it requires resolving the owners user if the username is used instead of the id. 
- // the node knows the owner id so we use that for now - o, err := n.Owner() - if err != nil { - return - } - if o.OpaqueId == "" { - // fall back to root trash - o.OpaqueId = "root" - } - err = os.MkdirAll(filepath.Join(t.root, "trash", o.OpaqueId), 0700) - if err != nil { - return - } - - // get the original path - origin, err := t.lookup.Path(ctx, n) - if err != nil { - return - } - - // set origin location in metadata - nodePath := n.InternalPath() - if err := xattr.Set(nodePath, xattrs.TrashOriginAttr, []byte(origin)); err != nil { - return err - } - - deletionTime := time.Now().UTC().Format(time.RFC3339Nano) - - // first make node appear in the owners (or root) trash - // parent id and name are stored as extended attributes in the node itself - trashLink := filepath.Join(t.root, "trash", o.OpaqueId, n.ID) - err = os.Symlink("../../nodes/"+n.ID+".T."+deletionTime, trashLink) - if err != nil { - // To roll back changes - // TODO unset trashOriginAttr - return - } - - // at this point we have a symlink pointing to a non existing destination, which is fine - - // rename the trashed node so it is not picked up when traversing up the tree and matches the symlink - trashPath := nodePath + ".T." + deletionTime - err = os.Rename(nodePath, trashPath) - if err != nil { - // To roll back changes - // TODO remove symlink - // TODO unset trashOriginAttr - return - } - - // finally remove the entry from the parent dir - src := filepath.Join(t.lookup.InternalPath(n.ParentID), n.Name) - err = os.Remove(src) - if err != nil { - // To roll back changes - // TODO revert the rename - // TODO remove symlink - // TODO unset trashOriginAttr - return - } - - return t.Propagate(ctx, n) -} - -// RestoreRecycleItemFunc returns a node and a function to restore it from the trash. 
-func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, key, trashPath, restorePath string) (*node.Node, *node.Node, func() error, error) { - rn, trashItem, deletedNodePath, origin, err := t.readRecycleItem(ctx, key, trashPath) - if err != nil { - return nil, nil, nil, err - } - - if restorePath == "" { - restorePath = origin - } - - var target *node.Node - target, err = t.lookup.NodeFromPath(ctx, restorePath, true) - if err != nil { - return nil, nil, nil, err - } - - p, err := target.Parent() - if err != nil { - return nil, nil, nil, err - } - - fn := func() error { - // link to origin - var n *node.Node - n, err = t.lookup.NodeFromPath(ctx, restorePath, true) - if err != nil { - return err - } - if n.Exists { - return errtypes.AlreadyExists("origin already exists") - } - - // add the entry for the parent dir - err = os.Symlink("../"+rn.ID, filepath.Join(t.lookup.InternalPath(n.ParentID), n.Name)) - if err != nil { - return err - } - - // rename to node only name, so it is picked up by id - nodePath := rn.InternalPath() - - // attempt to rename only if we're not in a subfolder - if deletedNodePath != nodePath { - err = os.Rename(deletedNodePath, nodePath) - if err != nil { - return err - } - } - - // the new node will inherit the permissions of its parent - p, err := n.Parent() - if err != nil { - return err - } - - po, err := p.Owner() - if err != nil { - return err - } - - if err := rn.ChangeOwner(po); err != nil { - return err - } - - n.Exists = true - // update name attribute - if err := xattr.Set(nodePath, xattrs.NameAttr, []byte(n.Name)); err != nil { - return errors.Wrap(err, "decomposedfs: could not set name attribute") - } - - // set ParentidAttr to restorePath's node parent id - if trashPath != "" { - if err := xattr.Set(nodePath, xattrs.ParentidAttr, []byte(n.ParentID)); err != nil { - return errors.Wrap(err, "decomposedfs: could not set name attribute") - } - } - - // delete item link in trash - if err = os.Remove(trashItem); err != nil { - 
log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trashitem") - } - return t.Propagate(ctx, n) - } - return rn, p, fn, nil -} - -// PurgeRecycleItemFunc returns a node and a function to purge it from the trash. -func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, key string, path string) (*node.Node, func() error, error) { - rn, trashItem, deletedNodePath, _, err := t.readRecycleItem(ctx, key, path) - if err != nil { - return nil, nil, err - } - - fn := func() error { - if err := os.RemoveAll(deletedNodePath); err != nil { - log.Error().Err(err).Str("deletedNodePath", deletedNodePath).Msg("error deleting trash node") - return err - } - - // delete blob from blobstore - if rn.BlobID != "" { - if err = t.DeleteBlob(rn.BlobID); err != nil { - log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item blob") - return err - } - } - - // delete item link in trash - if err = os.Remove(trashItem); err != nil { - log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item") - return err - } - - return nil - } - - return rn, fn, nil -} - -// Propagate propagates changes to the root of the tree. -func (t *Tree) Propagate(ctx context.Context, n *node.Node) (err error) { - sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() - if !t.treeTimeAccounting && !t.treeSizeAccounting { - // no propagation enabled - sublog.Debug().Msg("propagation disabled") - return - } - - // is propagation enabled for the parent node? 
- - var root *node.Node - if n.SpaceRoot == nil { - if root, err = t.lookup.HomeOrRootNode(ctx); err != nil { - return - } - } else { - root = n.SpaceRoot - } - - // use a sync time and don't rely on the mtime of the current node, as the stat might not change when a rename happened too quickly - sTime := time.Now().UTC() - - // we loop until we reach the root - for err == nil && n.ID != root.ID { - sublog.Debug().Msg("propagating") - - if n, err = n.Parent(); err != nil { - break - } - - sublog = sublog.With().Interface("node", n).Logger() - - // TODO none, sync and async? - if !n.HasPropagation() { - sublog.Debug().Str("attr", xattrs.PropagationAttr).Msg("propagation attribute not set or unreadable, not propagating") - // if the attribute is not set treat it as false / none / no propagation - return nil - } - - if t.treeTimeAccounting { - // update the parent tree time if it is older than the nodes mtime - updateSyncTime := false - - var tmTime time.Time - tmTime, err = n.GetTMTime() - switch { - case err != nil: - // missing attribute, or invalid format, overwrite - sublog.Debug().Err(err). - Msg("could not read tmtime attribute, overwriting") - updateSyncTime = true - case tmTime.Before(sTime): - sublog.Debug(). - Time("tmtime", tmTime). - Time("stime", sTime). - Msg("parent tmtime is older than node mtime, updating") - updateSyncTime = true - default: - sublog.Debug(). - Time("tmtime", tmTime). - Time("stime", sTime). - Dur("delta", sTime.Sub(tmTime)). 
- Msg("parent tmtime is younger than node mtime, not updating") - } - - if updateSyncTime { - // update the tree time of the parent node - if err = n.SetTMTime(sTime); err != nil { - sublog.Error().Err(err).Time("tmtime", sTime).Msg("could not update tmtime of parent node") - } else { - sublog.Debug().Time("tmtime", sTime).Msg("updated tmtime of parent node") - } - } - - if err := n.UnsetTempEtag(); err != nil { - sublog.Error().Err(err).Msg("could not remove temporary etag attribute") - } - } - - // size accounting - if t.treeSizeAccounting { - // update the treesize if it differs from the current size - updateTreeSize := false - - var treeSize, calculatedTreeSize uint64 - calculatedTreeSize, err = calculateTreeSize(ctx, n.InternalPath()) - if err != nil { - continue - } - - treeSize, err = n.GetTreeSize() - switch { - case err != nil: - // missing attribute, or invalid format, overwrite - sublog.Debug().Err(err).Msg("could not read treesize attribute, overwriting") - updateTreeSize = true - case treeSize != calculatedTreeSize: - sublog.Debug(). - Uint64("treesize", treeSize). - Uint64("calculatedTreeSize", calculatedTreeSize). - Msg("parent treesize is different then calculated treesize, updating") - updateTreeSize = true - default: - sublog.Debug(). - Uint64("treesize", treeSize). - Uint64("calculatedTreeSize", calculatedTreeSize). 
- Msg("parent size matches calculated size, not updating") - } - - if updateTreeSize { - // update the tree time of the parent node - if err = n.SetTreeSize(calculatedTreeSize); err != nil { - sublog.Error().Err(err).Uint64("calculatedTreeSize", calculatedTreeSize).Msg("could not update treesize of parent node") - } else { - sublog.Debug().Uint64("calculatedTreeSize", calculatedTreeSize).Msg("updated treesize of parent node") - } - } - } - } - if err != nil { - sublog.Error().Err(err).Msg("error propagating") - return - } - return -} - -func calculateTreeSize(ctx context.Context, nodePath string) (uint64, error) { - var size uint64 - - f, err := os.Open(nodePath) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not open dir") - return 0, err - } - defer f.Close() - - names, err := f.Readdirnames(0) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not read dirnames") - return 0, err - } - for i := range names { - cPath := filepath.Join(nodePath, names[i]) - info, err := os.Stat(cPath) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not stat child entry") - continue // continue after an error - } - if !info.IsDir() { - blobSize, err := node.ReadBlobSizeAttr(cPath) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not read blobSize xattr") - continue // continue after an error - } - size += uint64(blobSize) - } else { - // read from attr - var b []byte - // xattr.Get will follow the symlink - if b, err = xattr.Get(cPath, xattrs.TreesizeAttr); err != nil { - // TODO recursively descend and recalculate treesize - continue // continue after an error - } - csize, err := strconv.ParseUint(string(b), 10, 64) - if err != nil { - // TODO recursively descend and recalculate treesize - continue // continue after an error - } - size += csize - } - } - return size, err -} - -// WriteBlob writes a 
blob to the blobstore. -func (t *Tree) WriteBlob(key string, reader io.Reader) error { - return t.blobstore.Upload(key, reader) -} - -// ReadBlob reads a blob from the blobstore. -func (t *Tree) ReadBlob(key string) (io.ReadCloser, error) { - return t.blobstore.Download(key) -} - -// DeleteBlob deletes a blob from the blobstore. -func (t *Tree) DeleteBlob(key string) error { - if key == "" { - return fmt.Errorf("could not delete blob, empty key was given") - } - - return t.blobstore.Delete(key) -} - -// TODO check if node exists? -func (t *Tree) createNode(n *node.Node, owner *userpb.UserId) (err error) { - // create a directory node - nodePath := n.InternalPath() - if err = os.MkdirAll(nodePath, 0700); err != nil { - return errors.Wrap(err, "decomposedfs: error creating node") - } - - return n.WriteMetadata(owner) -} - -// TODO refactor the returned params into Node properties? would make all the path transformations go away... -func (t *Tree) readRecycleItem(ctx context.Context, key, path string) (n *node.Node, trashItem string, deletedNodePath string, origin string, err error) { - if key == "" { - return nil, "", "", "", errtypes.InternalError("key is empty") - } - - u := ctxpkg.ContextMustGetUser(ctx) - trashItem = filepath.Join(t.lookup.InternalRoot(), "trash", u.Id.OpaqueId, key, path) - - var link string - link, err = os.Readlink(trashItem) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash link") - return - } - - nodeID := filepath.Base(link) - if path == "" || path == "/" { - parts := strings.SplitN(filepath.Base(link), ".T.", 2) - if len(parts) != 2 { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Interface("parts", parts).Msg("malformed trash link") - return - } - nodeID = parts[0] - } - - var attrBytes []byte - deletedNodePath = t.lookup.InternalPath(filepath.Base(link)) - - owner := &userpb.UserId{} - // lookup ownerId in extended attributes - if attrBytes, err = 
xattr.Get(deletedNodePath, xattrs.OwnerIDAttr); err == nil { - owner.OpaqueId = string(attrBytes) - } else { - return - } - // lookup ownerIdp in extended attributes - if attrBytes, err = xattr.Get(deletedNodePath, xattrs.OwnerIDPAttr); err == nil { - owner.Idp = string(attrBytes) - } else { - return - } - // lookup ownerType in extended attributes - if attrBytes, err = xattr.Get(deletedNodePath, xattrs.OwnerTypeAttr); err == nil { - owner.Type = utils.UserTypeMap(string(attrBytes)) - } else { - return - } - - n = node.New(nodeID, "", "", 0, "", owner, t.lookup) - // lookup blobID in extended attributes - if attrBytes, err = xattr.Get(deletedNodePath, xattrs.BlobIDAttr); err == nil { - n.BlobID = string(attrBytes) - } else { - return - } - - // lookup parent id in extended attributes - if attrBytes, err = xattr.Get(deletedNodePath, xattrs.ParentidAttr); err == nil { - n.ParentID = string(attrBytes) - } else { - return - } - // lookup name in extended attributes - if attrBytes, err = xattr.Get(deletedNodePath, xattrs.NameAttr); err == nil { - n.Name = string(attrBytes) - } else { - return - } - - // get origin node - origin = "/" - - deletedNodeRootPath := deletedNodePath - if path != "" && path != "/" { - trashItemRoot := filepath.Join(t.lookup.InternalRoot(), "trash", u.Id.OpaqueId, key) - var rootLink string - rootLink, err = os.Readlink(trashItemRoot) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash link") - return - } - deletedNodeRootPath = t.lookup.InternalPath(filepath.Base(rootLink)) - } - // lookup origin path in extended attributes - if attrBytes, err = xattr.Get(deletedNodeRootPath, xattrs.TrashOriginAttr); err == nil { - origin = filepath.Join(string(attrBytes), path) - } else { - log.Error().Err(err).Str("trashItem", trashItem).Str("link", link).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /") - } - return -} diff --git 
a/pkg/storage/utils/decomposedfs/tree/tree_suite_test.go b/pkg/storage/utils/decomposedfs/tree/tree_suite_test.go deleted file mode 100644 index a811bc03fa..0000000000 --- a/pkg/storage/utils/decomposedfs/tree/tree_suite_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package tree_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestTree(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Tree Suite") -} diff --git a/pkg/storage/utils/decomposedfs/tree/tree_test.go b/pkg/storage/utils/decomposedfs/tree/tree_test.go deleted file mode 100644 index 9558608357..0000000000 --- a/pkg/storage/utils/decomposedfs/tree/tree_test.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package tree_test - -import ( - "os" - "path" - - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - helpers "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/testhelpers" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/pkg/xattr" - "github.com/stretchr/testify/mock" -) - -var _ = Describe("Tree", func() { - var ( - env *helpers.TestEnv - - t *tree.Tree - ) - - JustBeforeEach(func() { - var err error - env, err = helpers.NewTestEnv() - Expect(err).ToNot(HaveOccurred()) - t = env.Tree - }) - - AfterEach(func() { - if env != nil { - env.Cleanup() - } - }) - - Context("with an existingfile", func() { - var ( - n *node.Node - originalPath = "dir1/file1" - ) - - JustBeforeEach(func() { - var err error - n, err = env.Lookup.NodeFromPath(env.Ctx, originalPath, false) - Expect(err).ToNot(HaveOccurred()) - }) - - Describe("Delete", func() { - JustBeforeEach(func() { - _, err := os.Stat(n.InternalPath()) - Expect(err).ToNot(HaveOccurred()) - - Expect(t.Delete(env.Ctx, n)).To(Succeed()) - - _, err = os.Stat(n.InternalPath()) - Expect(err).To(HaveOccurred()) - }) - - It("moves the file to the trash", func() { - trashPath := path.Join(env.Root, "trash", env.Owner.Id.OpaqueId, n.ID) - _, err 
:= os.Stat(trashPath) - Expect(err).ToNot(HaveOccurred()) - }) - - It("removes the file from its original location", func() { - _, err := os.Stat(n.InternalPath()) - Expect(err).To(HaveOccurred()) - }) - - It("sets the trash origin xattr", func() { - trashPath := path.Join(env.Root, "trash", env.Owner.Id.OpaqueId, n.ID) - attr, err := xattr.Get(trashPath, xattrs.TrashOriginAttr) - Expect(err).ToNot(HaveOccurred()) - Expect(string(attr)).To(Equal("/dir1/file1")) - }) - - It("does not delete the blob from the blobstore", func() { - env.Blobstore.AssertNotCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) - }) - }) - - Context("that was deleted", func() { - var ( - trashPath string - ) - - JustBeforeEach(func() { - env.Blobstore.On("Delete", n.BlobID).Return(nil) - trashPath = path.Join(env.Root, "trash", env.Owner.Id.OpaqueId, n.ID) - Expect(t.Delete(env.Ctx, n)).To(Succeed()) - }) - - Describe("PurgeRecycleItemFunc", func() { - JustBeforeEach(func() { - _, err := os.Stat(trashPath) - Expect(err).ToNot(HaveOccurred()) - - _, purgeFunc, err := t.PurgeRecycleItemFunc(env.Ctx, n.ID, "") - Expect(err).ToNot(HaveOccurred()) - Expect(purgeFunc()).To(Succeed()) - }) - - It("removes the file from the trash", func() { - _, err := os.Stat(trashPath) - Expect(err).To(HaveOccurred()) - }) - - It("deletes the blob from the blobstore", func() { - env.Blobstore.AssertCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) - }) - }) - - Describe("RestoreRecycleItemFunc", func() { - JustBeforeEach(func() { - _, err := os.Stat(trashPath) - Expect(err).ToNot(HaveOccurred()) - _, err = os.Stat(n.InternalPath()) - Expect(err).To(HaveOccurred()) - }) - - It("restores the file to its original location if the targetPath is empty", func() { - _, _, restoreFunc, err := t.RestoreRecycleItemFunc(env.Ctx, n.ID, "", "") - Expect(err).ToNot(HaveOccurred()) - - Expect(restoreFunc()).To(Succeed()) - - originalNode, err := env.Lookup.NodeFromPath(env.Ctx, originalPath, false) - 
Expect(err).ToNot(HaveOccurred()) - Expect(originalNode.Exists).To(BeTrue()) - }) - - It("restores files to different locations", func() { - _, _, restoreFunc, err := t.RestoreRecycleItemFunc(env.Ctx, n.ID, "", "dir1/newLocation") - Expect(err).ToNot(HaveOccurred()) - - Expect(restoreFunc()).To(Succeed()) - - newNode, err := env.Lookup.NodeFromPath(env.Ctx, "dir1/newLocation", false) - Expect(err).ToNot(HaveOccurred()) - Expect(newNode.Exists).To(BeTrue()) - - originalNode, err := env.Lookup.NodeFromPath(env.Ctx, originalPath, false) - Expect(err).ToNot(HaveOccurred()) - Expect(originalNode.Exists).To(BeFalse()) - }) - - It("removes the file from the trash", func() { - _, _, restoreFunc, err := t.RestoreRecycleItemFunc(env.Ctx, n.ID, "", "") - Expect(err).ToNot(HaveOccurred()) - - Expect(restoreFunc()).To(Succeed()) - - _, err = os.Stat(trashPath) - Expect(err).To(HaveOccurred()) - }) - }) - }) - }) - - Context("with an empty directory", func() { - var ( - n *node.Node - ) - - JustBeforeEach(func() { - var err error - n, err = env.Lookup.NodeFromPath(env.Ctx, "emptydir", false) - Expect(err).ToNot(HaveOccurred()) - }) - - Context("that was deleted", func() { - var ( - trashPath string - ) - - JustBeforeEach(func() { - trashPath = path.Join(env.Root, "trash", env.Owner.Id.OpaqueId, n.ID) - Expect(t.Delete(env.Ctx, n)).To(Succeed()) - }) - - Describe("PurgeRecycleItemFunc", func() { - JustBeforeEach(func() { - _, err := os.Stat(trashPath) - Expect(err).ToNot(HaveOccurred()) - - _, purgeFunc, err := t.PurgeRecycleItemFunc(env.Ctx, n.ID, "") - Expect(err).ToNot(HaveOccurred()) - Expect(purgeFunc()).To(Succeed()) - }) - - It("removes the file from the trash", func() { - _, err := os.Stat(trashPath) - Expect(err).To(HaveOccurred()) - }) - - It("does not try to delete a blob from the blobstore", func() { - env.Blobstore.AssertNotCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) - }) - }) - }) - }) - - Describe("Propagate", func() { - var dir *node.Node - - 
JustBeforeEach(func() { - env.Permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) - - // Create test dir - var err error - dir, err = env.CreateTestDir("testdir") - Expect(err).ToNot(HaveOccurred()) - }) - - Describe("with TreeTimeAccounting enabled", func() { - It("sets the tmtime of the parent", func() { - file, err := env.CreateTestFile("file1", "", 1, dir.ID) - Expect(err).ToNot(HaveOccurred()) - - perms := node.OwnerPermissions() - riBefore, err := dir.AsResourceInfo(env.Ctx, &perms, []string{}, false) - Expect(err).ToNot(HaveOccurred()) - - err = env.Tree.Propagate(env.Ctx, file) - Expect(err).ToNot(HaveOccurred()) - - riAfter, err := dir.AsResourceInfo(env.Ctx, &perms, []string{}, false) - Expect(err).ToNot(HaveOccurred()) - Expect(riAfter.Etag).ToNot(Equal(riBefore.Etag)) - }) - }) - - Describe("with TreeSizeAccounting enabled", func() { - It("calculates the size", func() { - file, err := env.CreateTestFile("file1", "", 1, dir.ID) - Expect(err).ToNot(HaveOccurred()) - - err = env.Tree.Propagate(env.Ctx, file) - Expect(err).ToNot(HaveOccurred()) - size, err := dir.GetTreeSize() - Expect(err).ToNot(HaveOccurred()) - Expect(size).To(Equal(uint64(1))) - }) - - It("considers all files", func() { - _, err := env.CreateTestFile("file1", "", 1, dir.ID) - Expect(err).ToNot(HaveOccurred()) - file2, err := env.CreateTestFile("file2", "", 100, dir.ID) - Expect(err).ToNot(HaveOccurred()) - - err = env.Tree.Propagate(env.Ctx, file2) - Expect(err).ToNot(HaveOccurred()) - size, err := dir.GetTreeSize() - Expect(err).ToNot(HaveOccurred()) - Expect(size).To(Equal(uint64(101))) - }) - - It("adds the size of child directories", func() { - subdir, err := env.CreateTestDir("testdir/200bytes") - Expect(err).ToNot(HaveOccurred()) - err = subdir.SetTreeSize(uint64(200)) - Expect(err).ToNot(HaveOccurred()) - - file, err := env.CreateTestFile("file1", "", 1, dir.ID) - Expect(err).ToNot(HaveOccurred()) - - err = env.Tree.Propagate(env.Ctx, 
file) - Expect(err).ToNot(HaveOccurred()) - size, err := dir.GetTreeSize() - Expect(err).ToNot(HaveOccurred()) - Expect(size).To(Equal(uint64(201))) - }) - - It("stops at nodes with no propagation flag", func() { - subdir, err := env.CreateTestDir("testdir/200bytes") - Expect(err).ToNot(HaveOccurred()) - err = subdir.SetTreeSize(uint64(200)) - Expect(err).ToNot(HaveOccurred()) - - err = env.Tree.Propagate(env.Ctx, subdir) - Expect(err).ToNot(HaveOccurred()) - size, err := dir.GetTreeSize() - Expect(size).To(Equal(uint64(200))) - Expect(err).ToNot(HaveOccurred()) - - stopdir, err := env.CreateTestDir("testdir/stophere") - Expect(err).ToNot(HaveOccurred()) - err = xattr.Set(stopdir.InternalPath(), xattrs.PropagationAttr, []byte("0")) - Expect(err).ToNot(HaveOccurred()) - otherdir, err := env.CreateTestDir("testdir/stophere/lotsofbytes") - Expect(err).ToNot(HaveOccurred()) - err = otherdir.SetTreeSize(uint64(100000)) - Expect(err).ToNot(HaveOccurred()) - err = env.Tree.Propagate(env.Ctx, otherdir) - Expect(err).ToNot(HaveOccurred()) - - size, err = dir.GetTreeSize() - Expect(err).ToNot(HaveOccurred()) - Expect(size).To(Equal(uint64(200))) - }) - }) - }) -}) diff --git a/pkg/storage/utils/decomposedfs/upload.go b/pkg/storage/utils/decomposedfs/upload.go deleted file mode 100644 index 50d5c4cb38..0000000000 --- a/pkg/storage/utils/decomposedfs/upload.go +++ /dev/null @@ -1,727 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package decomposedfs - -import ( - "context" - "crypto/md5" - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "hash/adler32" - "io" - "os" - "path/filepath" - "strings" - "time" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/logger" - "github.com/cs3org/reva/pkg/storage/utils/chunking" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/pkg/utils" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/rs/zerolog" - tusd "github.com/tus/tusd/pkg/handler" -) - -var defaultFilePerm = os.FileMode(0664) - -// Upload uploads data to the given resource -// TODO Upload (and InitiateUpload) needs a way to receive the expected checksum. -// Maybe in metadata as 'checksum' => 'sha1 aeosvp45w5xaeoe' = lowercase, space separated? 
-func (fs *Decomposedfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) (err error) { - upload, err := fs.GetUpload(ctx, ref.GetPath()) - if err != nil { - return errors.Wrap(err, "decomposedfs: error retrieving upload") - } - - uploadInfo := upload.(*fileUpload) - - p := uploadInfo.info.Storage["NodeName"] - ok, err := chunking.IsChunked(p) // check chunking v1 - if err != nil { - return errors.Wrap(err, "decomposedfs: error checking path") - } - if ok { - var assembledFile string - p, assembledFile, err = fs.chunkHandler.WriteChunk(p, r) - if err != nil { - return err - } - if p == "" { - if err = uploadInfo.Terminate(ctx); err != nil { - return errors.Wrap(err, "ocfs: error removing auxiliary files") - } - return errtypes.PartialContent(ref.String()) - } - uploadInfo.info.Storage["NodeName"] = p - fd, err := os.Open(assembledFile) - if err != nil { - return errors.Wrap(err, "decomposedfs: error opening assembled file") - } - defer fd.Close() - defer os.RemoveAll(assembledFile) - r = fd - } - - if _, err := uploadInfo.WriteChunk(ctx, 0, r); err != nil { - return errors.Wrap(err, "decomposedfs: error writing to binary file") - } - - return uploadInfo.FinishUpload(ctx) -} - -// InitiateUpload returns upload ids corresponding to different protocols it supports -// TODO read optional content for small files in this request -// TODO InitiateUpload (and Upload) needs a way to receive the expected checksum. Maybe in metadata as 'checksum' => 'sha1 aeosvp45w5xaeoe' = lowercase, space separated? 
-func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) { - log := appctx.GetLogger(ctx) - - n, err := fs.lu.NodeFromResource(ctx, ref) - if err != nil { - return nil, err - } - - // permissions are checked in NewUpload below - - relative, err := fs.lu.Path(ctx, n) - if err != nil { - return nil, err - } - - info := tusd.FileInfo{ - MetaData: tusd.MetaData{ - "filename": filepath.Base(relative), - "dir": filepath.Dir(relative), - }, - Size: uploadLength, - Storage: map[string]string{ - "SpaceRoot": n.SpaceRoot.ID, - }, - } - - if metadata != nil { - if metadata["mtime"] != "" { - info.MetaData["mtime"] = metadata["mtime"] - } - if _, ok := metadata["sizedeferred"]; ok { - info.SizeIsDeferred = true - } - if metadata["checksum"] != "" { - parts := strings.SplitN(metadata["checksum"], " ", 2) - if len(parts) != 2 { - return nil, errtypes.BadRequest("invalid checksum format. must be '[algorithm] [checksum]'") - } - switch parts[0] { - case "sha1", "md5", "adler32": - info.MetaData["checksum"] = metadata["checksum"] - default: - return nil, errtypes.BadRequest("unsupported checksum algorithm: " + parts[0]) - } - } - } - - log.Debug().Interface("info", info).Interface("node", n).Interface("metadata", metadata).Msg("Decomposedfs: resolved filename") - - _, err = node.CheckQuota(n.SpaceRoot, uint64(info.Size)) - if err != nil { - return nil, err - } - - upload, err := fs.NewUpload(ctx, info) - if err != nil { - return nil, err - } - - info, _ = upload.GetInfo(ctx) - - return map[string]string{ - "simple": info.ID, - "tus": info.ID, - }, nil -} - -// UseIn tells the tus upload middleware which extensions it supports. 
-func (fs *Decomposedfs) UseIn(composer *tusd.StoreComposer) { - composer.UseCore(fs) - composer.UseTerminater(fs) - composer.UseConcater(fs) - composer.UseLengthDeferrer(fs) -} - -// To implement the core tus.io protocol as specified in https://tus.io/protocols/resumable-upload.html#core-protocol -// - the storage needs to implement NewUpload and GetUpload -// - the upload needs to implement the tusd.Upload interface: WriteChunk, GetInfo, GetReader and FinishUpload - -// NewUpload returns a new tus Upload instance. -func (fs *Decomposedfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { - log := appctx.GetLogger(ctx) - log.Debug().Interface("info", info).Msg("Decomposedfs: NewUpload") - - fn := info.MetaData["filename"] - if fn == "" { - return nil, errors.New("Decomposedfs: missing filename in metadata") - } - info.MetaData["filename"] = filepath.Clean(info.MetaData["filename"]) - - dir := info.MetaData["dir"] - if dir == "" { - return nil, errors.New("Decomposedfs: missing dir in metadata") - } - info.MetaData["dir"] = filepath.Clean(info.MetaData["dir"]) - - n, err := fs.lookupNode(ctx, filepath.Join(info.MetaData["dir"], info.MetaData["filename"])) - if err != nil { - return nil, errors.Wrap(err, "decomposedfs: error wrapping filename") - } - - log.Debug().Interface("info", info).Interface("node", n).Msg("Decomposedfs: resolved filename") - - // the parent owner will become the new owner - p, perr := n.Parent() - if perr != nil { - return nil, errors.Wrap(perr, "Decomposedfs: error getting parent "+n.ParentID) - } - - // check permissions - var ok bool - if n.Exists { - // check permissions of file to be overwritten - ok, err = fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { - return rp.InitiateFileUpload - }) - } else { - // check permissions of parent - ok, err = fs.p.HasPermission(ctx, p, func(rp *provider.ResourcePermissions) bool { - return rp.InitiateFileUpload - }) - } - switch { - case err 
!= nil: - return nil, errtypes.InternalError(err.Error()) - case !ok: - return nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) - } - - info.ID = uuid.New().String() - - binPath, err := fs.getUploadPath(ctx, info.ID) - if err != nil { - return nil, errors.Wrap(err, "decomposedfs: error resolving upload path") - } - usr := ctxpkg.ContextMustGetUser(ctx) - - owner, err := p.Owner() - if err != nil { - return nil, errors.Wrap(err, "decomposedfs: error determining owner") - } - var spaceRoot string - if info.Storage != nil { - if spaceRoot, ok = info.Storage["SpaceRoot"]; !ok { - spaceRoot = n.SpaceRoot.ID - } - } else { - spaceRoot = n.SpaceRoot.ID - } - - info.Storage = map[string]string{ - "Type": "OCISStore", - "BinPath": binPath, - - "NodeId": n.ID, - "NodeParentId": n.ParentID, - "NodeName": n.Name, - "SpaceRoot": spaceRoot, - - "Idp": usr.Id.Idp, - "UserId": usr.Id.OpaqueId, - "UserType": utils.UserTypeToString(usr.Id.Type), - "UserName": usr.Username, - - "OwnerIdp": owner.Idp, - "OwnerId": owner.OpaqueId, - - "LogLevel": log.GetLevel().String(), - } - // Create binary file in the upload folder with no content - log.Debug().Interface("info", info).Msg("Decomposedfs: built storage info") - file, err := os.OpenFile(binPath, os.O_CREATE|os.O_WRONLY, defaultFilePerm) - if err != nil { - return nil, err - } - defer file.Close() - - u := &fileUpload{ - info: info, - binPath: binPath, - infoPath: filepath.Join(fs.o.Root, "uploads", info.ID+".info"), - fs: fs, - ctx: ctx, - } - - // writeInfo creates the file by itself if necessary - err = u.writeInfo() - if err != nil { - return nil, err - } - - return u, nil -} - -func (fs *Decomposedfs) getUploadPath(ctx context.Context, uploadID string) (string, error) { - return filepath.Join(fs.o.Root, "uploads", uploadID), nil -} - -// GetUpload returns the Upload for the given upload id. 
-func (fs *Decomposedfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) { - infoPath := filepath.Join(fs.o.Root, "uploads", id+".info") - - info := tusd.FileInfo{} - data, err := os.ReadFile(infoPath) - if err != nil { - if os.IsNotExist(err) { - // Interpret os.ErrNotExist as 404 Not Found - err = tusd.ErrNotFound - } - return nil, err - } - if err := json.Unmarshal(data, &info); err != nil { - return nil, err - } - - stat, err := os.Stat(info.Storage["BinPath"]) - if err != nil { - return nil, err - } - - info.Offset = stat.Size() - - u := &userpb.User{ - Id: &userpb.UserId{ - Idp: info.Storage["Idp"], - OpaqueId: info.Storage["UserId"], - Type: utils.UserTypeMap(info.Storage["UserType"]), - }, - Username: info.Storage["UserName"], - } - - ctx = ctxpkg.ContextSetUser(ctx, u) - // TODO configure the logger the same way ... store and add traceid in file info - - var opts []logger.Option - opts = append(opts, logger.WithLevel(info.Storage["LogLevel"])) - opts = append(opts, logger.WithWriter(os.Stderr, logger.ConsoleMode)) - l := logger.New(opts...) - - sub := l.With().Int("pid", os.Getpid()).Logger() - - ctx = appctx.WithLogger(ctx, &sub) - - return &fileUpload{ - info: info, - binPath: info.Storage["BinPath"], - infoPath: infoPath, - fs: fs, - ctx: ctx, - }, nil -} - -// lookupNode looks up nodes by path. -// This method can also handle lookups for paths which contain chunking information. 
-func (fs *Decomposedfs) lookupNode(ctx context.Context, path string) (*node.Node, error) { - p := path - isChunked, err := chunking.IsChunked(path) - if err != nil { - return nil, err - } - if isChunked { - chunkInfo, err := chunking.GetChunkBLOBInfo(path) - if err != nil { - return nil, err - } - p = chunkInfo.Path - } - - n, err := fs.lu.NodeFromPath(ctx, p, false) - if err != nil { - return nil, err - } - - if isChunked { - n.Name = filepath.Base(path) - } - return n, nil -} - -type fileUpload struct { - // info stores the current information about the upload - info tusd.FileInfo - // infoPath is the path to the .info file - infoPath string - // binPath is the path to the binary file (which has no extension) - binPath string - // only fs knows how to handle metadata and versions - fs *Decomposedfs - // a context with a user - // TODO add logger as well? - ctx context.Context -} - -// GetInfo returns the FileInfo. -func (upload *fileUpload) GetInfo(ctx context.Context) (tusd.FileInfo, error) { - return upload.info, nil -} - -// WriteChunk writes the stream from the reader to the given offset of the upload. -func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) { - file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) - if err != nil { - return 0, err - } - defer file.Close() - - // calculate cheksum here? needed for the TUS checksum extension. https://tus.io/protocols/resumable-upload.html#checksum - // TODO but how do we get the `Upload-Checksum`? WriteChunk() only has a context, offset and the reader ... - // It is sent with the PATCH request, well or in the POST when the creation-with-upload extension is used - // but the tus handler uses a context.Background() so we cannot really check the header and put it in the context ... - n, err := io.Copy(file, src) - - // If the HTTP PATCH request gets interrupted in the middle (e.g. 
because - // the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF. - // However, for the ocis driver it's not important whether the stream has ended - // on purpose or accidentally. - if err != nil { - if err != io.ErrUnexpectedEOF { - return n, err - } - } - - upload.info.Offset += n - err = upload.writeInfo() // TODO info is written here ... we need to truncate in DiscardChunk - - return n, err -} - -// GetReader returns an io.Reader for the upload. -func (upload *fileUpload) GetReader(ctx context.Context) (io.Reader, error) { - return os.Open(upload.binPath) -} - -// writeInfo updates the entire information. Everything will be overwritten. -func (upload *fileUpload) writeInfo() error { - data, err := json.Marshal(upload.info) - if err != nil { - return err - } - return os.WriteFile(upload.infoPath, data, defaultFilePerm) -} - -// FinishUpload finishes an upload and moves the file to the internal destination. -func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { - // ensure cleanup - defer upload.discardChunk() - - fi, err := os.Stat(upload.binPath) - if err != nil { - appctx.GetLogger(upload.ctx).Err(err).Msg("Decomposedfs: could not stat uploaded file") - return - } - - n := node.New( - upload.info.Storage["NodeId"], - upload.info.Storage["NodeParentId"], - upload.info.Storage["NodeName"], - fi.Size(), - "", - nil, - upload.fs.lu, - ) - n.SpaceRoot = node.New(upload.info.Storage["SpaceRoot"], "", "", 0, "", nil, upload.fs.lu) - - _, err = node.CheckQuota(n.SpaceRoot, uint64(fi.Size())) - if err != nil { - return err - } - - if n.ID == "" { - n.ID = uuid.New().String() - } - targetPath := n.InternalPath() - sublog := appctx.GetLogger(upload.ctx). - With(). - Interface("info", upload.info). - Str("binPath", upload.binPath). - Str("targetPath", targetPath). 
- Logger() - - // calculate the checksum of the written bytes - // they will all be written to the metadata later, so we cannot omit any of them - // TODO only calculate the checksum in sync that was requested to match, the rest could be async ... but the tests currently expect all to be present - // TODO the hashes all implement BinaryMarshaler so we could try to persist the state for resumable upload. we would neet do keep track of the copied bytes ... - sha1h := sha1.New() - md5h := md5.New() - adler32h := adler32.New() - { - f, err := os.Open(upload.binPath) - if err != nil { - sublog.Err(err).Msg("Decomposedfs: could not open file for checksumming") - // we can continue if no oc checksum header is set - } - defer f.Close() - - r1 := io.TeeReader(f, sha1h) - r2 := io.TeeReader(r1, md5h) - - if _, err := io.Copy(adler32h, r2); err != nil { - sublog.Err(err).Msg("Decomposedfs: could not copy bytes for checksumming") - } - } - // compare if they match the sent checksum - // TODO the tus checksum extension would do this on every chunk, but I currently don't see an easy way to pass in the requested checksum. for now we do it in FinishUpload which is also called for chunked uploads - if upload.info.MetaData["checksum"] != "" { - parts := strings.SplitN(upload.info.MetaData["checksum"], " ", 2) - if len(parts) != 2 { - return errtypes.BadRequest("invalid checksum format. 
must be '[algorithm] [checksum]'") - } - switch parts[0] { - case "sha1": - err = upload.checkHash(parts[1], sha1h) - case "md5": - err = upload.checkHash(parts[1], md5h) - case "adler32": - err = upload.checkHash(parts[1], adler32h) - default: - err = errtypes.BadRequest("unsupported checksum algorithm: " + parts[0]) - } - if err != nil { - return err - } - } - n.BlobID = upload.info.ID // This can be changed to a content hash in the future when reference counting for the blobs was added - - // defer writing the checksums until the node is in place - - // if target exists create new version - if fi, err = os.Stat(targetPath); err == nil { - // versions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries - versionsPath := upload.fs.lu.InternalPath(n.ID + ".REV." + fi.ModTime().UTC().Format(time.RFC3339Nano)) - - if err = os.Rename(targetPath, versionsPath); err != nil { - sublog.Err(err). - Str("binPath", upload.binPath). - Str("versionsPath", versionsPath). - Msg("Decomposedfs: could not create version") - return - } - } - - // upload the data to the blobstore - file, err := os.Open(upload.binPath) - if err != nil { - return err - } - defer file.Close() - err = upload.fs.tp.WriteBlob(n.BlobID, file) - if err != nil { - return errors.Wrap(err, "failed to upload file to blostore") - } - - // now truncate the upload (the payload stays in the blobstore) and move it to the target path - // TODO put uploads on the same underlying storage as the destination dir? - // TODO trigger a workflow as the final rename might eg involve antivirus scanning - if err = os.Truncate(upload.binPath, 0); err != nil { - sublog.Err(err). - Msg("Decomposedfs: could not truncate") - return - } - if err = os.Rename(upload.binPath, targetPath); err != nil { - sublog.Err(err). 
- Msg("Decomposedfs: could not rename") - return - } - - // now try write all checksums - tryWritingChecksum(&sublog, n, "sha1", sha1h) - tryWritingChecksum(&sublog, n, "md5", md5h) - tryWritingChecksum(&sublog, n, "adler32", adler32h) - - // who will become the owner? the owner of the parent actually ... not the currently logged in user - err = n.WriteMetadata(&userpb.UserId{ - Idp: upload.info.Storage["OwnerIdp"], - OpaqueId: upload.info.Storage["OwnerId"], - }) - if err != nil { - return errors.Wrap(err, "decomposedfs: could not write metadata") - } - - // link child name to parent if it is new - childNameLink := filepath.Join(upload.fs.lu.InternalPath(n.ParentID), n.Name) - var link string - link, err = os.Readlink(childNameLink) - if err == nil && link != "../"+n.ID { - sublog.Err(err). - Interface("node", n). - Str("childNameLink", childNameLink). - Str("link", link). - Msg("Decomposedfs: child name link has wrong target id, repairing") - - if err = os.Remove(childNameLink); err != nil { - return errors.Wrap(err, "decomposedfs: could not remove symlink child entry") - } - } - if os.IsNotExist(err) || link != "../"+n.ID { - if err = os.Symlink("../"+n.ID, childNameLink); err != nil { - return errors.Wrap(err, "decomposedfs: could not symlink child entry") - } - } - - // only delete the upload if it was successfully written to the storage - if err = os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - sublog.Err(err).Msg("Decomposedfs: could not delete upload info") - return - } - } - // use set arbitrary metadata? 
- if upload.info.MetaData["mtime"] != "" { - err := n.SetMtime(ctx, upload.info.MetaData["mtime"]) - if err != nil { - sublog.Err(err).Interface("info", upload.info).Msg("Decomposedfs: could not set mtime metadata") - return err - } - } - - n.Exists = true - - return upload.fs.tp.Propagate(upload.ctx, n) -} - -func (upload *fileUpload) checkHash(expected string, h hash.Hash) error { - if expected != hex.EncodeToString(h.Sum(nil)) { - upload.discardChunk() - return errtypes.ChecksumMismatch(fmt.Sprintf("invalid checksum: expected %s got %x", upload.info.MetaData["checksum"], h.Sum(nil))) - } - return nil -} -func tryWritingChecksum(log *zerolog.Logger, n *node.Node, algo string, h hash.Hash) { - if err := n.SetChecksum(algo, h); err != nil { - log.Err(err). - Str("csType", algo). - Bytes("hash", h.Sum(nil)). - Msg("Decomposedfs: could not write checksum") - // this is not critical, the bytes are there so we will continue - } -} - -func (upload *fileUpload) discardChunk() { - if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { - appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("binPath", upload.binPath).Interface("info", upload.info).Msg("Decomposedfs: could not discard chunk") - return - } - } - if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("infoPath", upload.infoPath).Interface("info", upload.info).Msg("Decomposedfs: could not discard chunk info") - return - } - } -} - -// To implement the termination extension as specified in https://tus.io/protocols/resumable-upload.html#termination -// - the storage needs to implement AsTerminatableUpload -// - the upload needs to implement Terminate - -// AsTerminatableUpload returns a TerminatableUpload. -func (fs *Decomposedfs) AsTerminatableUpload(upload tusd.Upload) tusd.TerminatableUpload { - return upload.(*fileUpload) -} - -// Terminate terminates the upload. 
-func (upload *fileUpload) Terminate(ctx context.Context) error { - if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - return err - } - } - if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { - return err - } - } - return nil -} - -// To implement the creation-defer-length extension as specified in https://tus.io/protocols/resumable-upload.html#creation -// - the storage needs to implement AsLengthDeclarableUpload -// - the upload needs to implement DeclareLength - -// AsLengthDeclarableUpload returns a LengthDeclarableUpload. -func (fs *Decomposedfs) AsLengthDeclarableUpload(upload tusd.Upload) tusd.LengthDeclarableUpload { - return upload.(*fileUpload) -} - -// DeclareLength updates the upload length information. -func (upload *fileUpload) DeclareLength(ctx context.Context, length int64) error { - upload.info.Size = length - upload.info.SizeIsDeferred = false - return upload.writeInfo() -} - -// To implement the concatenation extension as specified in https://tus.io/protocols/resumable-upload.html#concatenation -// - the storage needs to implement AsConcatableUpload -// - the upload needs to implement ConcatUploads - -// AsConcatableUpload returns a ConcatableUpload. -func (fs *Decomposedfs) AsConcatableUpload(upload tusd.Upload) tusd.ConcatableUpload { - return upload.(*fileUpload) -} - -// ConcatUploads concatenates multiple uploads. 
-func (upload *fileUpload) ConcatUploads(ctx context.Context, uploads []tusd.Upload) (err error) { - file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) - if err != nil { - return err - } - defer file.Close() - - for _, partialUpload := range uploads { - fileUpload := partialUpload.(*fileUpload) - - src, err := os.Open(fileUpload.binPath) - if err != nil { - return err - } - defer src.Close() - - if _, err := io.Copy(file, src); err != nil { - return err - } - } - - return -} diff --git a/pkg/storage/utils/decomposedfs/upload_test.go b/pkg/storage/utils/decomposedfs/upload_test.go deleted file mode 100644 index ae155c47ca..0000000000 --- a/pkg/storage/utils/decomposedfs/upload_test.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package decomposedfs_test - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - ruser "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/storage" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/mocks" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree" - treemocks "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree/mocks" - "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs" - "github.com/cs3org/reva/tests/helpers" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/pkg/xattr" - "github.com/stretchr/testify/mock" -) - -var _ = Describe("File uploads", func() { - var ( - ref *provider.Reference - fs storage.FS - user *userpb.User - ctx context.Context - - o *options.Options - lookup *decomposedfs.Lookup - permissions *mocks.PermissionsChecker - bs *treemocks.Blobstore - ) - - BeforeEach(func() { - ref = &provider.Reference{Path: "/foo"} - user = &userpb.User{ - Id: &userpb.UserId{ - Idp: "idp", - OpaqueId: "userid", - Type: userpb.UserType_USER_TYPE_PRIMARY, - }, - Username: "username", - } - ctx = ruser.ContextSetUser(context.Background(), user) - - tmpRoot, err := helpers.TempDir("reva-unit-tests-*-root") - Expect(err).ToNot(HaveOccurred()) - - o, err = options.New(map[string]interface{}{ - "root": tmpRoot, - }) - Expect(err).ToNot(HaveOccurred()) - lookup = &decomposedfs.Lookup{Options: o} - permissions = &mocks.PermissionsChecker{} - bs = &treemocks.Blobstore{} - }) - - AfterEach(func() { - root := o.Root - if root != "" { - os.RemoveAll(root) - } - }) - - JustBeforeEach(func() { - var err error - tree := tree.New(o.Root, 
true, true, lookup, bs) - fs, err = decomposedfs.New(o, lookup, permissions, tree) - Expect(err).ToNot(HaveOccurred()) - }) - - Context("the user's quota is exceeded", func() { - When("the user wants to initiate a file upload", func() { - It("fails", func() { - var originalFunc = node.CheckQuota - node.CheckQuota = func(spaceRoot *node.Node, fileSize uint64) (quotaSufficient bool, err error) { - return false, errtypes.InsufficientStorage("quota exceeded") - } - _, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{}) - Expect(err).To(MatchError(errtypes.InsufficientStorage("quota exceeded"))) - node.CheckQuota = originalFunc - }) - }) - }) - - Context("the user has insufficient permissions", func() { - BeforeEach(func() { - permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - }) - - When("the user wants to initiate a file upload", func() { - It("fails", func() { - _, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{}) - Expect(err).To(MatchError("error: permission denied: root/foo")) - }) - }) - }) - - Context("with insufficient permissions, home node", func() { - BeforeEach(func() { - var err error - // recreate the fs with home enabled - o.EnableHome = true - tree := tree.New(o.Root, true, true, lookup, bs) - fs, err = decomposedfs.New(o, lookup, permissions, tree) - Expect(err).ToNot(HaveOccurred()) - err = fs.CreateHome(ctx) - Expect(err).ToNot(HaveOccurred()) - // the space name attribute is the stop condition in the lookup - h, err := lookup.HomeNode(ctx) - Expect(err).ToNot(HaveOccurred()) - err = xattr.Set(h.InternalPath(), xattrs.SpaceNameAttr, []byte("username")) - Expect(err).ToNot(HaveOccurred()) - permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(false, nil) - }) - - When("the user wants to initiate a file upload", func() { - It("fails", func() { - h, err := lookup.HomeNode(ctx) - Expect(err).ToNot(HaveOccurred()) - msg := fmt.Sprintf("error: permission denied: 
%s/foo", h.ID) - _, err = fs.InitiateUpload(ctx, ref, 10, map[string]string{}) - Expect(err).To(MatchError(msg)) - }) - }) - }) - - Context("with sufficient permissions", func() { - BeforeEach(func() { - permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) - permissions.On("AssemblePermissions", mock.Anything, mock.Anything). - Return(provider.ResourcePermissions{ - ListContainer: true, - }, nil) - }) - - When("the user initiates a non zero byte file upload", func() { - It("succeeds", func() { - uploadIds, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{}) - - Expect(err).ToNot(HaveOccurred()) - Expect(len(uploadIds)).To(Equal(2)) - Expect(uploadIds["simple"]).ToNot(BeEmpty()) - Expect(uploadIds["tus"]).ToNot(BeEmpty()) - - rootRef := &provider.Reference{Path: "/"} - resources, err := fs.ListFolder(ctx, rootRef, []string{}) - - Expect(err).ToNot(HaveOccurred()) - Expect(len(resources)).To(Equal(0)) - }) - }) - - When("the user initiates a zero byte file upload", func() { - It("succeeds", func() { - uploadIds, err := fs.InitiateUpload(ctx, ref, 0, map[string]string{}) - - Expect(err).ToNot(HaveOccurred()) - Expect(len(uploadIds)).To(Equal(2)) - Expect(uploadIds["simple"]).ToNot(BeEmpty()) - Expect(uploadIds["tus"]).ToNot(BeEmpty()) - - rootRef := &provider.Reference{Path: "/"} - resources, err := fs.ListFolder(ctx, rootRef, []string{}) - - Expect(err).ToNot(HaveOccurred()) - Expect(len(resources)).To(Equal(0)) - }) - }) - - When("the user uploads a non zero byte file", func() { - It("succeeds", func() { - var ( - fileContent = []byte("0123456789") - ) - - uploadIds, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{}) - - Expect(err).ToNot(HaveOccurred()) - Expect(len(uploadIds)).To(Equal(2)) - Expect(uploadIds["simple"]).ToNot(BeEmpty()) - Expect(uploadIds["tus"]).ToNot(BeEmpty()) - - uploadRef := &provider.Reference{Path: "/" + uploadIds["simple"]} - - bs.On("Upload", mock.AnythingOfType("string"), 
mock.AnythingOfType("*os.File")). - Return(nil). - Run(func(args mock.Arguments) { - reader := args.Get(1).(io.Reader) - data, err := io.ReadAll(reader) - - Expect(err).ToNot(HaveOccurred()) - Expect(data).To(Equal([]byte("0123456789"))) - }) - - err = fs.Upload(ctx, uploadRef, io.NopCloser(bytes.NewReader(fileContent))) - - Expect(err).ToNot(HaveOccurred()) - bs.AssertCalled(GinkgoT(), "Upload", mock.Anything, mock.Anything) - - rootRef := &provider.Reference{Path: "/"} - resources, err := fs.ListFolder(ctx, rootRef, []string{}) - - Expect(err).ToNot(HaveOccurred()) - Expect(len(resources)).To(Equal(1)) - Expect(resources[0].Path).To(Equal(ref.Path)) - }) - }) - - When("the user uploads a zero byte file", func() { - It("succeeds", func() { - var ( - fileContent = []byte("") - ) - - uploadIds, err := fs.InitiateUpload(ctx, ref, 0, map[string]string{}) - - Expect(err).ToNot(HaveOccurred()) - Expect(len(uploadIds)).To(Equal(2)) - Expect(uploadIds["simple"]).ToNot(BeEmpty()) - Expect(uploadIds["tus"]).ToNot(BeEmpty()) - - uploadRef := &provider.Reference{Path: "/" + uploadIds["simple"]} - - bs.On("Upload", mock.AnythingOfType("string"), mock.AnythingOfType("*os.File")). - Return(nil). 
- Run(func(args mock.Arguments) { - reader := args.Get(1).(io.Reader) - data, err := io.ReadAll(reader) - - Expect(err).ToNot(HaveOccurred()) - Expect(data).To(Equal([]byte(""))) - }) - - err = fs.Upload(ctx, uploadRef, io.NopCloser(bytes.NewReader(fileContent))) - - Expect(err).ToNot(HaveOccurred()) - bs.AssertCalled(GinkgoT(), "Upload", mock.Anything, mock.Anything) - - rootRef := &provider.Reference{Path: "/"} - resources, err := fs.ListFolder(ctx, rootRef, []string{}) - - Expect(err).ToNot(HaveOccurred()) - Expect(len(resources)).To(Equal(1)) - Expect(resources[0].Path).To(Equal(ref.Path)) - }) - }) - - When("the user tries to upload a file without intialising the upload", func() { - It("fails", func() { - var ( - fileContent = []byte("0123456789") - ) - - uploadRef := &provider.Reference{Path: "/some-non-existent-upload-reference"} - err := fs.Upload(ctx, uploadRef, io.NopCloser(bytes.NewReader(fileContent))) - - Expect(err).To(HaveOccurred()) - - rootRef := &provider.Reference{Path: "/"} - resources, err := fs.ListFolder(ctx, rootRef, []string{}) - - Expect(err).ToNot(HaveOccurred()) - Expect(len(resources)).To(Equal(0)) - }) - }) - - }) -}) diff --git a/pkg/storage/utils/decomposedfs/xattrs/xattrs.go b/pkg/storage/utils/decomposedfs/xattrs/xattrs.go deleted file mode 100644 index 388c2c7371..0000000000 --- a/pkg/storage/utils/decomposedfs/xattrs/xattrs.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package xattrs - -import ( - "strings" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" -) - -// Declare a list of xattr keys -// TODO the below comment is currently copied from the owncloud driver, revisit -// Currently,extended file attributes have four separated -// namespaces (user, trusted, security and system) followed by a dot. -// A non root user can only manipulate the user. namespace, which is what -// we will use to store ownCloud specific metadata. To prevent name -// collisions with other apps We are going to introduce a sub namespace -// "user.ocis.". -const ( - OcisPrefix string = "user.ocis." - ParentidAttr string = OcisPrefix + "parentid" - OwnerIDAttr string = OcisPrefix + "owner.id" - OwnerIDPAttr string = OcisPrefix + "owner.idp" - OwnerTypeAttr string = OcisPrefix + "owner.type" - // the base name of the node - // updated when the file is renamed or moved. - NameAttr string = OcisPrefix + "name" - - BlobIDAttr string = OcisPrefix + "blobid" - BlobsizeAttr string = OcisPrefix + "blobsize" - - // grantPrefix is the prefix for sharing related extended attributes. - GrantPrefix string = OcisPrefix + "grant." - MetadataPrefix string = OcisPrefix + "md." - - // favorite flag, per user. - FavPrefix string = OcisPrefix + "fav." - - // a temporary etag for a folder that is removed when the mtime propagation happens. - TmpEtagAttr string = OcisPrefix + "tmp.etag" - ReferenceAttr string = OcisPrefix + "cs3.ref" // arbitrary metadata - ChecksumPrefix string = OcisPrefix + "cs." // followed by the algorithm, eg. 
ocis.cs.sha1 - TrashOriginAttr string = OcisPrefix + "trash.origin" // trash origin - - // we use a single attribute to enable or disable propagation of both: synctime and treesize - // The propagation attribute is set to '1' at the top of the (sub)tree. Propagation will stop at - // that node. - PropagationAttr string = OcisPrefix + "propagation" - - // the tree modification time of the tree below this node, - // propagated when synctime_accounting is true and - // user.ocis.propagation=1 is set - // stored as a readable time.RFC3339Nano. - TreeMTimeAttr string = OcisPrefix + "tmtime" - - // the size of the tree below this node, - // propagated when treesize_accounting is true and - // user.ocis.propagation=1 is set - // stored as uint64, little endian. - TreesizeAttr string = OcisPrefix + "treesize" - - // the quota for the storage space / tree, regardless who accesses it. - QuotaAttr string = OcisPrefix + "quota" - - // the name given to a storage space. It should not contain any semantics as its only purpose is to be read. - SpaceNameAttr string = OcisPrefix + "space.name" - - UserAcePrefix string = "u:" - GroupAcePrefix string = "g:" -) - -// ReferenceFromAttr returns a CS3 reference from xattr of a node. -// Supported formats are: "cs3:storageid/nodeid". -func ReferenceFromAttr(b []byte) (*provider.Reference, error) { - return refFromCS3(b) -} - -// refFromCS3 creates a CS3 reference from a set of bytes. This method should remain private -// and only be called after validation because it can potentially panic. 
-func refFromCS3(b []byte) (*provider.Reference, error) { - parts := string(b[4:]) - return &provider.Reference{ - ResourceId: &provider.ResourceId{ - StorageId: strings.Split(parts, "/")[0], - OpaqueId: strings.Split(parts, "/")[1], - }, - }, nil -} diff --git a/pkg/storage/utils/eosfs/eosfs.go b/pkg/storage/utils/eosfs/eosfs.go index 7e9201e655..bfe39c4807 100644 --- a/pkg/storage/utils/eosfs/eosfs.go +++ b/pkg/storage/utils/eosfs/eosfs.go @@ -568,6 +568,9 @@ func (fs *eosfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Refer err := fs.c.UnsetAttr(ctx, auth, attr, false, fn) if err != nil { + if errors.Is(err, eosclient.AttrNotExistsError) { + continue + } return errors.Wrap(err, "eosfs: error unsetting xattr in eos driver") } } @@ -1112,7 +1115,10 @@ func (fs *eosfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *pr } if eosACL.Type == acl.TypeLightweight { - attr := &eosclient.Attribute{} + attr := &eosclient.Attribute{ + Type: SystemAttr, + Key: fmt.Sprintf("%s.%s", lwShareAttrKey, eosACL.Qualifier), + } if err := fs.c.UnsetAttr(ctx, rootAuth, attr, true, fn); err != nil { return errors.Wrap(err, "eosfs: error removing acl for lightweight account") } @@ -1253,6 +1259,13 @@ func (fs *eosfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []st if err != nil { return nil, err } + + if ref.Path != "" { + eosFileInfo, err = fs.c.GetFileInfoByPath(ctx, auth, filepath.Join(eosFileInfo.File, ref.Path)) + if err != nil { + return nil, err + } + } return fs.convertToResourceInfo(ctx, eosFileInfo) } diff --git a/pkg/test/vars.go b/pkg/test/vars.go index cc8f99c097..26979dceac 100644 --- a/pkg/test/vars.go +++ b/pkg/test/vars.go @@ -74,7 +74,7 @@ func NewFile(path, content string) error { if err != nil { return err } - _, err = file.Write([]byte(content)) + _, err = file.WriteString(content) if err != nil { return err } diff --git a/pkg/user/manager/loader/loader.go b/pkg/user/manager/loader/loader.go index 9be6711762..bd4fdbbb13 
100644 --- a/pkg/user/manager/loader/loader.go +++ b/pkg/user/manager/loader/loader.go @@ -24,6 +24,5 @@ import ( _ "github.com/cs3org/reva/pkg/user/manager/json" _ "github.com/cs3org/reva/pkg/user/manager/ldap" _ "github.com/cs3org/reva/pkg/user/manager/nextcloud" - _ "github.com/cs3org/reva/pkg/user/manager/owncloudsql" // Add your own here. ) diff --git a/pkg/user/manager/nextcloud/nextcloud.go b/pkg/user/manager/nextcloud/nextcloud.go index cd0422b22f..acf385d9ff 100644 --- a/pkg/user/manager/nextcloud/nextcloud.go +++ b/pkg/user/manager/nextcloud/nextcloud.go @@ -51,7 +51,7 @@ type Manager struct { // UserManagerConfig contains config for a Nextcloud-based UserManager. type UserManagerConfig struct { - EndPoint string `mapstructure:"endpoint" docs:";The Nextcloud backend endpoint for user management"` + EndPoint string `docs:";The Nextcloud backend endpoint for user management" mapstructure:"endpoint"` SharedSecret string `mapstructure:"shared_secret"` MockHTTP bool `mapstructure:"mock_http"` } diff --git a/pkg/user/manager/owncloudsql/accounts/accounts.go b/pkg/user/manager/owncloudsql/accounts/accounts.go deleted file mode 100644 index aac42eab41..0000000000 --- a/pkg/user/manager/owncloudsql/accounts/accounts.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package accounts - -import ( - "context" - "database/sql" - "strings" - "time" - - "github.com/cs3org/reva/pkg/appctx" - "github.com/pkg/errors" -) - -// Accounts represents oc10-style Accounts. -type Accounts struct { - driver string - db *sql.DB - joinUsername, joinUUID, enableMedialSearch bool - selectSQL string -} - -// NewMysql returns a new Cache instance connecting to a MySQL database. -func NewMysql(dsn string, joinUsername, joinUUID, enableMedialSearch bool) (*Accounts, error) { - sqldb, err := sql.Open("mysql", dsn) - if err != nil { - return nil, errors.Wrap(err, "error connecting to the database") - } - sqldb.SetConnMaxLifetime(time.Minute * 3) - sqldb.SetMaxOpenConns(10) - sqldb.SetMaxIdleConns(10) - - err = sqldb.Ping() - if err != nil { - return nil, errors.Wrap(err, "error connecting to the database") - } - - return New("mysql", sqldb, joinUsername, joinUUID, enableMedialSearch) -} - -// New returns a new Cache instance connecting to the given sql.DB. 
-func New(driver string, sqldb *sql.DB, joinUsername, joinUUID, enableMedialSearch bool) (*Accounts, error) { - sel := "SELECT id, email, user_id, display_name, quota, last_login, backend, home, state" - from := ` - FROM oc_accounts a - ` - if joinUsername { - sel += ", p.configvalue AS username" - from += `LEFT JOIN oc_preferences p - ON a.user_id=p.userid - AND p.appid='core' - AND p.configkey='username'` - } else { - // fallback to user_id as username - sel += ", user_id AS username" - } - if joinUUID { - sel += ", p2.configvalue AS ownclouduuid" - from += `LEFT JOIN oc_preferences p2 - ON a.user_id=p2.userid - AND p2.appid='core' - AND p2.configkey='ownclouduuid'` - } else { - // fallback to user_id as ownclouduuid - sel += ", user_id AS ownclouduuid" - } - - return &Accounts{ - driver: driver, - db: sqldb, - joinUsername: joinUsername, - joinUUID: joinUUID, - enableMedialSearch: enableMedialSearch, - selectSQL: sel + from, - }, nil -} - -// Account stores information about accounts. -type Account struct { - ID uint64 - Email sql.NullString - UserID string - DisplayName sql.NullString - Quota sql.NullString - LastLogin int - Backend string - Home string - State int8 - Username sql.NullString // optional comes from the oc_preferences - OwnCloudUUID sql.NullString // optional comes from the oc_preferences -} - -func (as *Accounts) rowToAccount(ctx context.Context, row Scannable) (*Account, error) { - a := Account{} - if err := row.Scan(&a.ID, &a.Email, &a.UserID, &a.DisplayName, &a.Quota, &a.LastLogin, &a.Backend, &a.Home, &a.State, &a.Username, &a.OwnCloudUUID); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Msg("could not scan row, skipping") - return nil, err - } - - return &a, nil -} - -// Scannable describes the interface providing a Scan method. -type Scannable interface { - Scan(...interface{}) error -} - -// GetAccountByClaim fetches an account by mail, username or userid. 
-func (as *Accounts) GetAccountByClaim(ctx context.Context, claim, value string) (*Account, error) { - // TODO align supported claims with rest driver and the others, maybe refactor into common mapping - var row *sql.Row - var where string - switch claim { - case "mail": - where = "WHERE a.email=?" - // case "uid": - // claim = m.c.Schema.UIDNumber - // case "gid": - // claim = m.c.Schema.GIDNumber - case "username": - if as.joinUsername { - where = "WHERE p.configvalue=?" - } else { - // use user_id as username - where = "WHERE a.user_id=?" - } - case "userid": - if as.joinUUID { - where = "WHERE p2.configvalue=?" - } else { - // use user_id as uuid - where = "WHERE a.user_id=?" - } - default: - return nil, errors.New("owncloudsql: invalid field " + claim) - } - - row = as.db.QueryRowContext(ctx, as.selectSQL+where, value) - - return as.rowToAccount(ctx, row) -} - -func sanitizeWildcards(q string) string { - return strings.ReplaceAll(strings.ReplaceAll(q, "%", `\%`), "_", `\_`) -} - -// FindAccounts searches userid, displayname and email using the given query. The Wildcard caracters % and _ are escaped. -func (as *Accounts) FindAccounts(ctx context.Context, query string) ([]Account, error) { - if as.enableMedialSearch { - query = "%" + sanitizeWildcards(query) + "%" - } - // TODO join oc_account_terms - where := "WHERE a.user_id LIKE ? OR a.display_name LIKE ? OR a.email LIKE ?" - args := []interface{}{query, query, query} - - if as.joinUsername { - where += " OR p.configvalue LIKE ?" - args = append(args, query) - } - if as.joinUUID { - where += " OR p2.configvalue LIKE ?" - args = append(args, query) - } - - rows, err := as.db.QueryContext(ctx, as.selectSQL+where, args...) 
- if err != nil { - return nil, err - } - defer rows.Close() - - accounts := []Account{} - for rows.Next() { - a := Account{} - if err := rows.Scan(&a.ID, &a.Email, &a.UserID, &a.DisplayName, &a.Quota, &a.LastLogin, &a.Backend, &a.Home, &a.State, &a.Username, &a.OwnCloudUUID); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Msg("could not scan row, skipping") - continue - } - accounts = append(accounts, a) - } - if err = rows.Err(); err != nil { - return nil, err - } - - return accounts, nil -} - -// GetAccountGroups lasts the groups for an account. -func (as *Accounts) GetAccountGroups(ctx context.Context, uid string) ([]string, error) { - rows, err := as.db.QueryContext(ctx, "SELECT gid FROM oc_group_user WHERE uid=?", uid) - if err != nil { - return nil, err - } - defer rows.Close() - - groups := []string{} - for rows.Next() { - var group string - if err := rows.Scan(&group); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Msg("could not scan row, skipping") - continue - } - groups = append(groups, group) - } - if err = rows.Err(); err != nil { - return nil, err - } - return groups, nil -} diff --git a/pkg/user/manager/owncloudsql/accounts/accounts_suite_test.go b/pkg/user/manager/owncloudsql/accounts/accounts_suite_test.go deleted file mode 100644 index 4f15e148d5..0000000000 --- a/pkg/user/manager/owncloudsql/accounts/accounts_suite_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package accounts_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestAccounts(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Accounts Suite") -} diff --git a/pkg/user/manager/owncloudsql/accounts/accounts_test.go b/pkg/user/manager/owncloudsql/accounts/accounts_test.go deleted file mode 100644 index d4e984e86c..0000000000 --- a/pkg/user/manager/owncloudsql/accounts/accounts_test.go +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2018-2023 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package accounts_test - -import ( - "context" - "database/sql" - "os" - - "github.com/cs3org/reva/pkg/user/manager/owncloudsql/accounts" - _ "github.com/mattn/go-sqlite3" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Accounts", func() { - var ( - conn *accounts.Accounts - testDBFile *os.File - sqldb *sql.DB - ) - - BeforeEach(func() { - var err error - testDBFile, err = os.CreateTemp("", "example") - Expect(err).ToNot(HaveOccurred()) - - dbData, err := os.ReadFile("test.sqlite") - Expect(err).ToNot(HaveOccurred()) - - _, err = testDBFile.Write(dbData) - Expect(err).ToNot(HaveOccurred()) - err = testDBFile.Close() - Expect(err).ToNot(HaveOccurred()) - - sqldb, err = sql.Open("sqlite3", testDBFile.Name()) - Expect(err).ToNot(HaveOccurred()) - - }) - - AfterEach(func() { - os.Remove(testDBFile.Name()) - }) - - Describe("GetAccountByClaim", func() { - - Context("without any joins", func() { - - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, false, false, false) - Expect(err).ToNot(HaveOccurred()) - }) - - It("gets existing account by userid", func() { - userID := "admin" - account, err := conn.GetAccountByClaim(context.Background(), "userid", userID) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("admin")) - Expect(account.OwnCloudUUID.String).To(Equal("admin")) - }) - - It("gets existing account by mail", func() { - value := "admin@example.org" - account, err := conn.GetAccountByClaim(context.Background(), "mail", value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - 
Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("admin")) - Expect(account.OwnCloudUUID.String).To(Equal("admin")) - }) - - It("falls back to user_id colum when getting by username", func() { - value := "admin" - account, err := conn.GetAccountByClaim(context.Background(), "username", value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("admin")) - Expect(account.OwnCloudUUID.String).To(Equal("admin")) - }) - - It("errors on unsupported claim", func() { - _, err := conn.GetAccountByClaim(context.Background(), "invalid", "invalid") - Expect(err).To(HaveOccurred()) - }) - }) - - Context("with username joins", func() { - - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, true, false, false) - Expect(err).ToNot(HaveOccurred()) - }) - - It("gets existing account by userid", func() { - userID := "admin" - account, err := conn.GetAccountByClaim(context.Background(), "userid", userID) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - 
Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("Administrator")) - Expect(account.OwnCloudUUID.String).To(Equal("admin")) - }) - - It("gets existing account by mail", func() { - value := "admin@example.org" - account, err := conn.GetAccountByClaim(context.Background(), "mail", value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("Administrator")) - Expect(account.OwnCloudUUID.String).To(Equal("admin")) - }) - - It("gets existing account by username", func() { - value := "Administrator" - account, err := conn.GetAccountByClaim(context.Background(), "username", value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - 
Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("Administrator")) - Expect(account.OwnCloudUUID.String).To(Equal("admin")) - }) - - It("errors on unsupported claim", func() { - _, err := conn.GetAccountByClaim(context.Background(), "invalid", "invalid") - Expect(err).To(HaveOccurred()) - }) - }) - - Context("with uuid joins", func() { - - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, false, true, false) - Expect(err).ToNot(HaveOccurred()) - }) - - It("gets existing account by uuid", func() { - userID := "7015b5ec-7723-4560-bb96-85e18a947314" - account, err := conn.GetAccountByClaim(context.Background(), "userid", userID) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("admin")) - Expect(account.OwnCloudUUID.String).To(Equal("7015b5ec-7723-4560-bb96-85e18a947314")) - }) - - It("gets existing account by mail", func() { - value := "admin@example.org" - account, err := conn.GetAccountByClaim(context.Background(), "mail", value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - 
Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("admin")) - Expect(account.OwnCloudUUID.String).To(Equal("7015b5ec-7723-4560-bb96-85e18a947314")) - }) - - It("gets existing account by username", func() { - value := "admin" - account, err := conn.GetAccountByClaim(context.Background(), "username", value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("admin")) - Expect(account.OwnCloudUUID.String).To(Equal("7015b5ec-7723-4560-bb96-85e18a947314")) - }) - - It("errors on unsupported claim", func() { - _, err := conn.GetAccountByClaim(context.Background(), "invalid", "invalid") - Expect(err).To(HaveOccurred()) - }) - }) - - Context("with username and uuid joins", func() { - - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, true, true, false) - Expect(err).ToNot(HaveOccurred()) - }) - - It("gets existing account by uuid", func() { - userID := "7015b5ec-7723-4560-bb96-85e18a947314" - account, err := conn.GetAccountByClaim(context.Background(), "userid", userID) - Expect(err).ToNot(HaveOccurred()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - 
Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("Administrator")) - Expect(account.OwnCloudUUID.String).To(Equal("7015b5ec-7723-4560-bb96-85e18a947314")) - }) - - It("gets existing account by mail", func() { - value := "admin@example.org" - account, err := conn.GetAccountByClaim(context.Background(), "mail", value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("Administrator")) - Expect(account.OwnCloudUUID.String).To(Equal("7015b5ec-7723-4560-bb96-85e18a947314")) - }) - - It("gets existing account by username", func() { - value := "Administrator" - account, err := conn.GetAccountByClaim(context.Background(), "username", value) - Expect(err).ToNot(HaveOccurred()) - Expect(account).ToNot(BeNil()) - Expect(account.ID).To(Equal(uint64(1))) - Expect(account.Email.String).To(Equal("admin@example.org")) - Expect(account.UserID).To(Equal("admin")) - Expect(account.DisplayName.String).To(Equal("admin")) - Expect(account.Quota.String).To(Equal("100 GB")) - Expect(account.LastLogin).To(Equal(1619082575)) - Expect(account.Backend).To(Equal(`OC\User\Database`)) - Expect(account.Home).To(Equal("/mnt/data/files/admin")) - Expect(account.State).To(Equal(int8(1))) - Expect(account.Username.String).To(Equal("Administrator")) - 
Expect(account.OwnCloudUUID.String).To(Equal("7015b5ec-7723-4560-bb96-85e18a947314")) - }) - - It("errors on unsupported claim", func() { - _, err := conn.GetAccountByClaim(context.Background(), "invalid", "invalid") - Expect(err).To(HaveOccurred()) - }) - }) - - }) - - Describe("FindAccounts", func() { - - Context("with username and uuid joins", func() { - - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, true, true, false) - Expect(err).ToNot(HaveOccurred()) - }) - - It("finds the existing admin account", func() { - accounts, err := conn.FindAccounts(context.Background(), "admin") - Expect(err).ToNot(HaveOccurred()) - Expect(len(accounts)).To(Equal(1)) - Expect(accounts[0]).ToNot(BeNil()) - Expect(accounts[0].ID).To(Equal(uint64(1))) - Expect(accounts[0].Email.String).To(Equal("admin@example.org")) - Expect(accounts[0].UserID).To(Equal("admin")) - Expect(accounts[0].DisplayName.String).To(Equal("admin")) - Expect(accounts[0].Quota.String).To(Equal("100 GB")) - Expect(accounts[0].LastLogin).To(Equal(1619082575)) - Expect(accounts[0].Backend).To(Equal(`OC\User\Database`)) - Expect(accounts[0].Home).To(Equal("/mnt/data/files/admin")) - Expect(accounts[0].State).To(Equal(int8(1))) - Expect(accounts[0].Username.String).To(Equal("Administrator")) - Expect(accounts[0].OwnCloudUUID.String).To(Equal("7015b5ec-7723-4560-bb96-85e18a947314")) - }) - - It("handles query without results", func() { - accounts, err := conn.FindAccounts(context.Background(), "__notexisting__") - Expect(err).ToNot(HaveOccurred()) - Expect(len(accounts)).To(Equal(0)) - }) - }) - - Context("with username joins", func() { - - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, true, false, false) - Expect(err).ToNot(HaveOccurred()) - }) - - It("finds the existing admin account", func() { - accounts, err := conn.FindAccounts(context.Background(), "admin") - Expect(err).ToNot(HaveOccurred()) - Expect(len(accounts)).To(Equal(1)) - 
Expect(accounts[0]).ToNot(BeNil()) - Expect(accounts[0].ID).To(Equal(uint64(1))) - Expect(accounts[0].Email.String).To(Equal("admin@example.org")) - Expect(accounts[0].UserID).To(Equal("admin")) - Expect(accounts[0].DisplayName.String).To(Equal("admin")) - Expect(accounts[0].Quota.String).To(Equal("100 GB")) - Expect(accounts[0].LastLogin).To(Equal(1619082575)) - Expect(accounts[0].Backend).To(Equal(`OC\User\Database`)) - Expect(accounts[0].Home).To(Equal("/mnt/data/files/admin")) - Expect(accounts[0].State).To(Equal(int8(1))) - Expect(accounts[0].Username.String).To(Equal("Administrator")) - Expect(accounts[0].OwnCloudUUID.String).To(Equal("admin")) - }) - - It("handles query without results", func() { - accounts, err := conn.FindAccounts(context.Background(), "__notexisting__") - Expect(err).ToNot(HaveOccurred()) - Expect(len(accounts)).To(Equal(0)) - }) - }) - - Context("without any joins", func() { - - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, false, false, false) - Expect(err).ToNot(HaveOccurred()) - }) - - It("finds the existing admin account", func() { - accounts, err := conn.FindAccounts(context.Background(), "admin") - Expect(err).ToNot(HaveOccurred()) - Expect(len(accounts)).To(Equal(1)) - Expect(accounts[0]).ToNot(BeNil()) - Expect(accounts[0].ID).To(Equal(uint64(1))) - Expect(accounts[0].Email.String).To(Equal("admin@example.org")) - Expect(accounts[0].UserID).To(Equal("admin")) - Expect(accounts[0].DisplayName.String).To(Equal("admin")) - Expect(accounts[0].Quota.String).To(Equal("100 GB")) - Expect(accounts[0].LastLogin).To(Equal(1619082575)) - Expect(accounts[0].Backend).To(Equal(`OC\User\Database`)) - Expect(accounts[0].Home).To(Equal("/mnt/data/files/admin")) - Expect(accounts[0].State).To(Equal(int8(1))) - Expect(accounts[0].Username.String).To(Equal("admin")) - Expect(accounts[0].OwnCloudUUID.String).To(Equal("admin")) - }) - - It("handles query without results", func() { - accounts, err := 
conn.FindAccounts(context.Background(), "__notexisting__") - Expect(err).ToNot(HaveOccurred()) - Expect(len(accounts)).To(Equal(0)) - }) - }) - }) - - Describe("GetAccountGroups", func() { - BeforeEach(func() { - var err error - conn, err = accounts.New("sqlite3", sqldb, true, true, false) - Expect(err).ToNot(HaveOccurred()) - }) - It("get admin group for admin account", func() { - accounts, err := conn.GetAccountGroups(context.Background(), "admin") - Expect(err).ToNot(HaveOccurred()) - Expect(len(accounts)).To(Equal(1)) - Expect(accounts[0]).To(Equal("admin")) - }) - It("handles not existing account", func() { - accounts, err := conn.GetAccountGroups(context.Background(), "__notexisting__") - Expect(err).ToNot(HaveOccurred()) - Expect(len(accounts)).To(Equal(0)) - }) - }) -}) diff --git a/pkg/user/manager/owncloudsql/accounts/test.sqlite b/pkg/user/manager/owncloudsql/accounts/test.sqlite deleted file mode 100644 index c68bb753774fc28eda95f71768f5d41d29830363..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 90112 zcmeI*T~8a?8Nl%wFc_S`u)86nYK1ZisfZN_HUtP+Z8t!i4O+rW78;0FGJ`!hPHc}e zV=pi54QZwQ2z$|s_F~n3fJ&8C>X&G*Dpl%brD|2FdzIUsIm6gvjDc!ZwOIcVG4pcH z%yWKo=FFTiGx>aNDPOUSUE3*|6=PBvP*hcUXc&s3OpAZl#lK+qjTngp6XLfT8gDf^ zt=vsK=-jK5vNqxD8 zBGxQIh`ldJ;_A^za|mX(m@lRMO#ZmYB4k5zJ~$(eQ?s9lMW(3-)B9dj|7`3;FvnFR zU9p^^D?e)>4U67CHI{}Xq1?4V8WMP+c-s*$9}YzI=OUI*!Y~bqgG!qY?V^SR?_D6G zAVaW@)^XKSis)ARqx#U0`qr(OI|WPZ6tBLx!Efz&!|QYNODSVJn7wV>jv0pB@y3^? 
zlX+=6x2NyLjpdbfV|im~$#|4{Jh!p5Zj6k?Mn%Ho+Ugw-+YK3lbY3QzvTVBUp6z7o z$tI?zTCRBY+2Yf=XIsW+sjZ+M#^U2zx4tl>Dw5lMQxMk^X|r0f<+RsC$P%WL!Pvmt zTUVocJg)w-GcYJS)~@APrHtiCgr@qMJVFZt6#*!Yh=y{uf-%eGaMH<%mPOsp*rnZk z?zMGru{Hb2zcdR~L0+&+xr()4sqL4*`vjmL`RKME)@W#(@mTz$zHWUbeuis^Lh6R5 z`wwGL{l*RTXdxh$$G)fQk6dYj_2(a(=E6#6)U?L(c)3L2$t&G@>c$y?`>Izh~GqWAQ;<+A@U_6r|Q&4nEG+u&Gywzpe_yuH&{WZAOT$}Fd%Eeb6c{trt1QS_XeDfenxh8HTfobLI>&pM;}@UZ&T$3eM| z+efc*2BVRt5(i6;i|f_Te5qpPEXSxfgW)|jx3RvmxGdK5G_|}g+dBVZqSj8xhOVhW zl&9}-?Qdug<$4PCo;bx{AYsEr!#Uf1Q?)B*y{Yk9rl!~mrdvrD>|DN7FMLx=myj{; zn3>mBsYRKWB6L2H3B(zyBHFS*`ScBKEHAEYq`Z@XciAJ4e{lk;&26L- zZNpSox4t-h#sL%T=k!FVI&@D(E}G3d%>?V{5*N@t-bM6kN4I|Gj=Cjl-0>;Owcb?a z8OJ|k1(Tf(HSa{)+uj*J zFd%>c0tg_000IagfB*srAb>!d2=uF+!@c!CKXCp2|2K;EO`8;uVjzG30tg_000Iag zfB*srAaF?qUZ_`;ab4Gc6d9Z8l@HN8wD!$nxnSM3ot&Ie|41eh#;5b!M<1`OEPSyc z9?JOQktrT~FkNfBSgMR?#l-k-zF@iIGEYpHsMi(Ss`v2{c|9_kAT#AJN0tg_000IagfB*srAb`NJfdBr#?-lKPNDx2(0R#|0 z009ILKmY**5I~@91p3rYU9Z0`(0~4){(sx-UkZW%0tg_000IagfB*srAb@};&>zu{ zU;QU<0{Gwm^ZtJ-2LcEnfB*srAb#A$@}K)y_w16J+Z-|@J1WPhdR3S_@E|NzMFT%Caa~r z{GsV&-Iw-WVpwequ#zuYhjz(IRUNx*jn7+7Az$*o|Bq;2E85rEKeRt-zi-$869^!H z00IagfB*srAbGt{eM+^C!YUrorWa{Ab Line ${LINE_NUMBER}: Not in the correct format." 
+ log_error " + Actual Line : '${INPUT_LINE}'" + log_error " - Expected Format : '- [suite/scenario.feature:line_number](scenario_line_url)'" + FINAL_EXIT_STATUS=1 + continue + else + # otherwise, ignore the line + continue + fi + # Find the link in round-brackets that should be after the SUITE_SCENARIO_LINE + if [[ "${INPUT_LINE}" =~ \(([a-zA-Z0-9:/.#_-]+)\) ]]; then + ACTUAL_LINK="${BASH_REMATCH[1]}" + else + log_error "Line ${LINE_NUMBER}: ${INPUT_LINE} : Link is empty" + FINAL_EXIT_STATUS=1 + continue + fi + if [[ -n "${scenarioLines[${SUITE_SCENARIO_LINE}]:-}" ]]; + then + log_error "> Line ${LINE_NUMBER}: Scenario line ${SUITE_SCENARIO_LINE} is duplicated" + FINAL_EXIT_STATUS=1 + fi + scenarioLines[${SUITE_SCENARIO_LINE}]="exists" + OLD_IFS=${IFS} + IFS=':' + read -ra FEATURE_PARTS <<< "${SUITE_SCENARIO_LINE}" + IFS=${OLD_IFS} + SUITE_FEATURE="${FEATURE_PARTS[0]}" + FEATURE_LINE="${FEATURE_PARTS[1]}" + EXPECTED_LINK="https://github.com/${FEATURE_FILE_REPO}/blob/master/${FEATURE_FILE_PATH}/${SUITE_FEATURE}#L${FEATURE_LINE}" + if [[ "${ACTUAL_LINK}" != "${EXPECTED_LINK}" ]]; then + log_error "> Line ${LINE_NUMBER}: Link is not correct for ${SUITE_SCENARIO_LINE}" + log_error " + Actual link : ${ACTUAL_LINK}" + log_error " - Expected link : ${EXPECTED_LINK}" + FINAL_EXIT_STATUS=1 + fi + + done < "${EXPECTED_FAILURES_FILE}" +else + log_error "Environment variable EXPECTED_FAILURES_FILE must be defined to be the file to check" + exit 1 +fi + +if [ ${FINAL_EXIT_STATUS} == 1 ] +then + log_error "\nErrors were found in the expected failures file - see the messages above!" +else + log_success "\nNo problems were found in the expected failures file." 
+fi +exit ${FINAL_EXIT_STATUS} diff --git a/tests/acceptance/run.sh b/tests/acceptance/run.sh new file mode 100755 index 0000000000..3435e89e5e --- /dev/null +++ b/tests/acceptance/run.sh @@ -0,0 +1,712 @@ +#!/bin/bash +[[ "${DEBUG}" == "true" ]] && set -x + +# from http://stackoverflow.com/a/630387 +SCRIPT_PATH="`dirname \"$0\"`" # relative +SCRIPT_PATH="`( cd \"${SCRIPT_PATH}\" && pwd )`" # absolutized and normalized + +echo 'Script path: '${SCRIPT_PATH} + +# Allow optionally passing in the path to the behat program. +# This gives flexibility for callers that have installed their own behat +if [ -z "${BEHAT_BIN}" ] +then + BEHAT=${SCRIPT_PATH}/../../vendor-bin/behat/vendor/bin/behat +else + BEHAT=${BEHAT_BIN} +fi +BEHAT_TAGS_OPTION_FOUND=false + +if [ -n "${STEP_THROUGH}" ] +then + STEP_THROUGH_OPTION="--step-through" +fi + +if [ -n "${STOP_ON_FAILURE}" ] +then + STOP_OPTION="--stop-on-failure" +fi + +if [ -n "${PLAIN_OUTPUT}" ] +then + # explicitly tell Behat to not do colored output + COLORS_OPTION="--no-colors" + # Use the Bash "null" command to do nothing, rather than use tput to set a color + RED_COLOR=":" + GREEN_COLOR=":" + YELLOW_COLOR=":" +else + COLORS_OPTION="--colors" + RED_COLOR="tput setaf 1" + GREEN_COLOR="tput setaf 2" + YELLOW_COLOR="tput setaf 3" +fi + +# The following environment variables can be specified: +# +# ACCEPTANCE_TEST_TYPE - see "--type" description +# BEHAT_FEATURE - see "--feature" description +# BEHAT_FILTER_TAGS - see "--tags" description +# BEHAT_SUITE - see "--suite" description +# BEHAT_YML - see "--config" description +# RUN_PART and DIVIDE_INTO_NUM_PARTS - see "--part" description +# SHOW_OC_LOGS - see "--show-oc-logs" description +# TESTING_REMOTE_SYSTEM - see "--remote" description +# EXPECTED_FAILURES_FILE - a file that contains a list of the scenarios that are expected to fail + +if [ -n "${EXPECTED_FAILURES_FILE}" ] +then + # Check the expected-failures file + ${SCRIPT_PATH}/lint-expected-failures.sh + LINT_STATUS=$? 
+ if [ ${LINT_STATUS} -ne 0 ] + then + echo "Error: expected failures file ${EXPECTED_FAILURES_FILE} is invalid" + exit ${LINT_STATUS} + fi +fi + +# Default to API tests +# Note: if a specific feature or suite is also specified, then the acceptance +# test type is deduced from the suite name, and this environment variable +# ACCEPTANCE_TEST_TYPE is overridden. +if [ -z "${ACCEPTANCE_TEST_TYPE}" ] +then + ACCEPTANCE_TEST_TYPE="api" +fi + +# Look for command line options for: +# -c or --config - specify a behat.yml to use +# --feature - specify a single feature to run +# --suite - specify a single suite to run +# --type - api or core-api - if no individual feature or suite is specified, then +# specify the type of acceptance tests to run. Default api. +# --tags - specify tags for scenarios to run (or not) +# --remote - the server under test is remote, so we cannot locally enable the +# testing app. We have to assume it is already enabled. +# --show-oc-logs - tail the ownCloud log after the test run +# --loop - loop tests for given number of times. Only use it for debugging purposes +# --part - run a subset of scenarios, need two numbers. +# first number: which part to run +# second number: in how many parts to divide the set of scenarios +# --step-through - pause after each test step + +# Command line options processed here will override environment variables that +# might have been set by the caller, or in the code above. 
+while [[ $# -gt 0 ]] +do + key="$1" + case ${key} in + -c|--config) + BEHAT_YML="$2" + shift + ;; + --feature) + BEHAT_FEATURE="$2" + shift + ;; + --suite) + BEHAT_SUITE="$2" + shift + ;; + --loop) + BEHAT_RERUN_TIMES="$2" + shift + ;; + --type) + # Lowercase the parameter value, so the user can provide "API", "CORE-API", etc + ACCEPTANCE_TEST_TYPE="${2,,}" + shift + ;; + --tags) + BEHAT_FILTER_TAGS="$2" + BEHAT_TAGS_OPTION_FOUND=true + shift + ;; + --part) + RUN_PART="$2" + DIVIDE_INTO_NUM_PARTS="$3" + if [ ${RUN_PART} -gt ${DIVIDE_INTO_NUM_PARTS} ] + then + echo "cannot run part ${RUN_PART} of ${DIVIDE_INTO_NUM_PARTS}" + exit 1 + fi + shift 2 + ;; + --step-through) + STEP_THROUGH_OPTION="--step-through" + ;; + *) + # A "random" parameter is presumed to be a feature file to run. + # Typically that will be specified at the end, or as the only + # parameter. + BEHAT_FEATURE="$1" + ;; + esac + shift +done + +# Set the language to "C" +# We want to have it all in english to be able to parse outputs +export LANG=C + +# Provide a default admin username and password. 
+# But let the caller pass them if they wish +if [ -z "${ADMIN_USERNAME}" ] +then + ADMIN_USERNAME="admin" +fi + +if [ -z "${ADMIN_PASSWORD}" ] +then + ADMIN_PASSWORD="admin" +fi + +export ADMIN_USERNAME +export ADMIN_PASSWORD + +if [ -z "${BEHAT_RERUN_TIMES}" ] +then + BEHAT_RERUN_TIMES=1 +fi + +# expected variables +# -------------------- +# $SUITE_FEATURE_TEXT - human readable which test to run +# $BEHAT_SUITE_OPTION - suite setting with "--suite" or empty if all suites have to be run +# $BEHAT_FEATURE - feature file, or empty +# $BEHAT_FILTER_TAGS - list of tags +# $BEHAT_TAGS_OPTION_FOUND +# $TEST_LOG_FILE +# $BEHAT - behat executable +# $BEHAT_YML +# +# set arrays +# --------------- +# $UNEXPECTED_FAILED_SCENARIOS array of scenarios that failed unexpectedly +# $UNEXPECTED_PASSED_SCENARIOS array of scenarios that passed unexpectedly (while running with expected-failures.txt) +# $STOP_ON_FAILURE - aborts the test run after the first failure + +declare -a UNEXPECTED_FAILED_SCENARIOS +declare -a UNEXPECTED_PASSED_SCENARIOS +declare -a UNEXPECTED_BEHAT_EXIT_STATUSES + +function run_behat_tests() { + echo "Running ${SUITE_FEATURE_TEXT} tests tagged ${BEHAT_FILTER_TAGS}" | tee ${TEST_LOG_FILE} + + if [ "${REPLACE_USERNAMES}" == "true" ] + then + echo "Usernames and attributes in tests are being replaced:" + cat ${SCRIPT_PATH}/usernames.json + fi + + echo "Using behat config '${BEHAT_YML}'" + ${BEHAT} ${COLORS_OPTION} ${STOP_OPTION} --strict ${STEP_THROUGH_OPTION} -c ${BEHAT_YML} -f pretty ${BEHAT_SUITE_OPTION} --tags ${BEHAT_FILTER_TAGS} ${BEHAT_FEATURE} -v 2>&1 | tee -a ${TEST_LOG_FILE} + + BEHAT_EXIT_STATUS=${PIPESTATUS[0]} + + # remove nullbytes from the test log + TEMP_CONTENT=$(tr < ${TEST_LOG_FILE} -d '\000') + OLD_IFS="${IFS}" + IFS="" + echo ${TEMP_CONTENT} > ${TEST_LOG_FILE} + IFS="${OLD_IFS}" + + # Find the count of scenarios that passed + SCENARIO_RESULTS_COLORED=`grep -Ea '^[0-9]+[[:space:]]scenario(|s)[[:space:]]\(' ${TEST_LOG_FILE}` + 
SCENARIO_RESULTS=$(echo "${SCENARIO_RESULTS_COLORED}" | sed "s/\x1b[^m]*m//g") + if [ ${BEHAT_EXIT_STATUS} -eq 0 ] + then + # They (SCENARIO_RESULTS) all passed, so just get the first number. + # The text looks like "1 scenario (1 passed)" or "123 scenarios (123 passed)" + [[ ${SCENARIO_RESULTS} =~ ([0-9]+) ]] + SCENARIOS_THAT_PASSED=$((SCENARIOS_THAT_PASSED + BASH_REMATCH[1])) + else + # "Something went wrong" with the Behat run (non-zero exit status). + # If there were "ordinary" test fails, then we process that later. Maybe they are all "expected failures". + # But if there were steps in a feature file that are undefined, we want to fail immediately. + # So exit the tests and do not lint expected failures when undefined steps exist. + if [[ ${SCENARIO_RESULTS} == *"undefined"* ]] + then + ${RED_COLOR}; echo -e "Undefined steps: There were some undefined steps found." + exit 1 + fi + # If there were no scenarios in the requested suite or feature that match + # the requested combination of tags, then Behat exits with an error status + # and reports "No scenarios" in its output. + # This can happen, for example, when running core suites from an app and + # requesting some tag combination that does not happen frequently. Then + # sometimes there may not be any matching scenarios in one of the suites. + # In this case, consider the test has passed. + MATCHING_COUNT=`grep -ca '^No scenarios$' ${TEST_LOG_FILE}` + if [ ${MATCHING_COUNT} -eq 1 ] + then + echo "Information: no matching scenarios were found." + BEHAT_EXIT_STATUS=0 + else + # Find the count of scenarios that passed and failed + SCENARIO_RESULTS_COLORED=`grep -Ea '^[0-9]+[[:space:]]scenario(|s)[[:space:]]\(' ${TEST_LOG_FILE}` + SCENARIO_RESULTS=$(echo "${SCENARIO_RESULTS_COLORED}" | sed "s/\x1b[^m]*m//g") + if [[ ${SCENARIO_RESULTS} =~ [0-9]+[^0-9]+([0-9]+)[^0-9]+([0-9]+)[^0-9]+ ]] + then + # Some passed and some failed, we got the second and third numbers. 
+ # The text looked like "15 scenarios (6 passed, 9 failed)" + SCENARIOS_THAT_PASSED=$((SCENARIOS_THAT_PASSED + BASH_REMATCH[1])) + SCENARIOS_THAT_FAILED=$((SCENARIOS_THAT_FAILED + BASH_REMATCH[2])) + elif [[ ${SCENARIO_RESULTS} =~ [0-9]+[^0-9]+([0-9]+)[^0-9]+ ]] + then + # All failed, we got the second number. + # The text looked like "4 scenarios (4 failed)" + SCENARIOS_THAT_FAILED=$((SCENARIOS_THAT_FAILED + BASH_REMATCH[1])) + fi + fi + fi + + FAILED_SCENARIO_PATHS_COLORED=`awk '/Failed scenarios:/',0 ${TEST_LOG_FILE} | grep -a feature` + # There will be some ANSI escape codes for color in the FEATURE_COLORED var. + # Strip them out so we can pass just the ordinary feature details to Behat. + # Thanks to https://en.wikipedia.org/wiki/Tee_(command) and + # https://stackoverflow.com/questions/23416278/how-to-strip-ansi-escape-sequences-from-a-variable + # for ideas. + FAILED_SCENARIO_PATHS=$(echo "${FAILED_SCENARIO_PATHS_COLORED}" | sed "s/\x1b[^m]*m//g") + + # If something else went wrong, and there were no failed scenarios, + # then the awk, grep, sed command sequence above ends up with an empty string. + # Unset FAILED_SCENARIO_PATHS to avoid later code thinking that there might be + # one failed scenario. + if [ -z "${FAILED_SCENARIO_PATHS}" ] + then + unset FAILED_SCENARIO_PATHS + fi + + if [ -n "${EXPECTED_FAILURES_FILE}" ] + then + if [ -n "${BEHAT_SUITE_TO_RUN}" ] + then + echo "Checking expected failures for suite ${BEHAT_SUITE_TO_RUN}" + else + echo "Checking expected failures" + fi + + # Check that every failed scenario is in the list of expected failures + for FAILED_SCENARIO_PATH in ${FAILED_SCENARIO_PATHS} + do + SUITE_PATH=`dirname ${FAILED_SCENARIO_PATH}` + SUITE=`basename ${SUITE_PATH}` + SCENARIO=`basename ${FAILED_SCENARIO_PATH}` + SUITE_SCENARIO="${SUITE}/${SCENARIO}" + grep "\[${SUITE_SCENARIO}\]" "${EXPECTED_FAILURES_FILE}" > /dev/null + if [ $? -ne 0 ] + then + echo "Error: Scenario ${SUITE_SCENARIO} failed but was not expected to fail." 
+ UNEXPECTED_FAILED_SCENARIOS+=("${SUITE_SCENARIO}") + fi + done + + # Check that every scenario in the list of expected failures did fail + while read SUITE_SCENARIO + do + # Ignore comment lines (starting with hash) + if [[ "${SUITE_SCENARIO}" =~ ^# ]] + then + continue + fi + # Match lines that have [someSuite/someName.feature:n] - the part inside the + # brackets is the suite, feature and line number of the expected failure. + # Else ignore the line. + if [[ "${SUITE_SCENARIO}" =~ \[([a-zA-Z0-9-]+/[a-zA-Z0-9-]+\.feature:[0-9]+)] ]]; then + SUITE_SCENARIO="${BASH_REMATCH[1]}" + else + continue + fi + if [ -n "${BEHAT_SUITE_TO_RUN}" ] + then + # If the expected failure is not in the suite that is currently being run, + # then do not try and check that it failed. + REGEX_TO_MATCH="^${BEHAT_SUITE_TO_RUN}/" + if ! [[ "${SUITE_SCENARIO}" =~ ${REGEX_TO_MATCH} ]] + then + continue + fi + fi + + # look for the expected suite-scenario at the end of a line in the + # FAILED_SCENARIO_PATHS - for example looking for apiComments/comments.feature:9 + # we want to match lines like: + # tests/acceptance/features/apiComments/comments.feature:9 + # but not lines like:: + # tests/acceptance/features/apiComments/comments.feature:902 + echo "${FAILED_SCENARIO_PATHS}" | grep ${SUITE_SCENARIO}$ > /dev/null + if [ $? -ne 0 ] + then + echo "Info: Scenario ${SUITE_SCENARIO} was expected to fail but did not fail." 
+ UNEXPECTED_PASSED_SCENARIOS+=("${SUITE_SCENARIO}") + fi + done < ${EXPECTED_FAILURES_FILE} + else + for FAILED_SCENARIO_PATH in ${FAILED_SCENARIO_PATHS} + do + SUITE_PATH=$(dirname "${FAILED_SCENARIO_PATH}") + SUITE=$(basename "${SUITE_PATH}") + SCENARIO=$(basename "${FAILED_SCENARIO_PATH}") + SUITE_SCENARIO="${SUITE}/${SCENARIO}" + UNEXPECTED_FAILED_SCENARIOS+=("${SUITE_SCENARIO}") + done + fi + + if [ ${BEHAT_EXIT_STATUS} -ne 0 ] && [ ${#FAILED_SCENARIO_PATHS[@]} -eq 0 ] + then + # Behat had some problem and there were no failed scenarios reported + # So the problem is something else. + # Possibly there were missing step definitions. Or Behat crashed badly, or... + UNEXPECTED_BEHAT_EXIT_STATUSES+=("${SUITE_FEATURE_TEXT} had behat exit status ${BEHAT_EXIT_STATUS}") + fi + + if [ "${BEHAT_TAGS_OPTION_FOUND}" != true ] + then + # The behat run specified to skip scenarios tagged @skip + # Report them in a dry-run so they can be seen + # Big red error output is displayed if there are no matching scenarios - send it to null + DRY_RUN_FILE=$(mktemp) + SKIP_TAGS="@skip" + ${BEHAT} --dry-run {$COLORS_OPTION} -c ${BEHAT_YML} -f pretty ${BEHAT_SUITE_OPTION} --tags "${SKIP_TAGS}" ${BEHAT_FEATURE} 1>${DRY_RUN_FILE} 2>/dev/null + if grep -q -m 1 'No scenarios' "${DRY_RUN_FILE}" + then + # If there are no skip scenarios, then no need to report that + : + else + echo "" + echo "The following tests were skipped because they are tagged @skip:" + cat "${DRY_RUN_FILE}" | tee -a ${TEST_LOG_FILE} + fi + rm -f "${DRY_RUN_FILE}" + fi +} + +declare -x TEST_SERVER_URL + +if [ -z "${IPV4_URL}" ] +then + IPV4_URL="${TEST_SERVER_URL}" +fi + +if [ -z "${IPV6_URL}" ] +then + IPV6_URL="${TEST_SERVER_URL}" +fi + +# If a feature file has been specified but no suite, then deduce the suite +if [ -n "${BEHAT_FEATURE}" ] && [ -z "${BEHAT_SUITE}" ] +then + SUITE_PATH=`dirname ${BEHAT_FEATURE}` + BEHAT_SUITE=`basename ${SUITE_PATH}` +fi + +if [ -z "${BEHAT_YML}" ] +then + # Look for a behat.yml 
somewhere below the current working directory + # This saves app acceptance tests being forced to specify BEHAT_YML + BEHAT_YML="config/behat.yml" + if [ ! -f "${BEHAT_YML}" ] + then + BEHAT_YML="acceptance/config/behat.yml" + fi + if [ ! -f "${BEHAT_YML}" ] + then + BEHAT_YML="tests/acceptance/config/behat.yml" + fi + # If no luck above, then use the core behat.yml that should live below this script + if [ ! -f "${BEHAT_YML}" ] + then + BEHAT_YML="${SCRIPT_PATH}/config/behat.yml" + fi +fi + +BEHAT_CONFIG_DIR=$(dirname "${BEHAT_YML}") +ACCEPTANCE_DIR=$(dirname "${BEHAT_CONFIG_DIR}") + +if [[ -z "$BEHAT_FEATURES_DIR" ]] +then + BEHAT_FEATURES_DIR="${ACCEPTANCE_DIR}/features" +fi + +declare -a BEHAT_SUITES + +function get_behat_suites() { + # $1 type of suites to get "api" or "core-api" + # defaults to "api" + TYPE="$1" + if [[ -z "$TYPE" ]] + then + TYPE="api" + fi + + ALL_SUITES="" + for suite in `find ${BEHAT_FEATURES_DIR}/ -type d -iname ${TYPE}* | sort | rev | cut -d"/" -f1 | rev` + do + if [[ -f "${BEHAT_FILTER_SUITE_FILE}" ]] + then + if [[ ! 
`grep $suite "${BEHAT_FILTER_SUITE_FILE}"` ]] + then + ALL_SUITES+="$suite"$'\n' + fi + else + ALL_SUITES+="$suite"$'\n' + fi + done + + COUNT_ALL_SUITES=`echo "${ALL_SUITES}" | tr " " "\n" | wc -l` + + #divide the suites letting it round down (could be zero) + MIN_SUITES_PER_RUN=$((${COUNT_ALL_SUITES} / ${DIVIDE_INTO_NUM_PARTS})) + #some jobs might need an extra suite + MAX_SUITES_PER_RUN=$((${MIN_SUITES_PER_RUN} + 1)) + # the remaining number of suites that need to be distributed (could be zero) + REMAINING_SUITES=$((${COUNT_ALL_SUITES} - (${DIVIDE_INTO_NUM_PARTS} * ${MIN_SUITES_PER_RUN}))) + + if [[ ${RUN_PART} -le ${REMAINING_SUITES} ]] + then + SUITES_THIS_RUN=${MAX_SUITES_PER_RUN} + SUITES_IN_PREVIOUS_RUNS=$((${MAX_SUITES_PER_RUN} * (${RUN_PART} - 1))) + else + SUITES_THIS_RUN=${MIN_SUITES_PER_RUN} + SUITES_IN_PREVIOUS_RUNS=$((((${MAX_SUITES_PER_RUN} * ${REMAINING_SUITES}) + (${MIN_SUITES_PER_RUN} * (${RUN_PART} - ${REMAINING_SUITES} - 1))))) + fi + + if [ ${SUITES_THIS_RUN} -eq 0 ] + then + echo "there are only ${COUNT_ALL_SUITES} suites, nothing to do in part ${RUN_PART}" + exit 0 + fi + + COUNT_FINISH_AND_TODO_SUITES=$((${SUITES_IN_PREVIOUS_RUNS} + ${SUITES_THIS_RUN})) + BEHAT_SUITES+=(`echo "${ALL_SUITES}" | tr " " "\n" | head -n ${COUNT_FINISH_AND_TODO_SUITES} | tail -n ${SUITES_THIS_RUN}`) +} + +if [[ -n "${BEHAT_SUITE}" ]] +then + BEHAT_SUITES+=("${BEHAT_SUITE}") +else + if [[ -n "${RUN_PART}" ]]; then + if [[ "${ACCEPTANCE_TEST_TYPE}" == "core-api" ]]; then + get_behat_suites "core" + else + get_behat_suites "${ACCEPTANCE_TEST_TYPE}" + fi + fi +fi + + +TEST_TYPE_TEXT="API" + +# Always have "@api" +if [ ! 
-z "${BEHAT_FILTER_TAGS}" ] +then + # Be nice to the caller + # Remove any extra "&&" at the end of their tags list + BEHAT_FILTER_TAGS="${BEHAT_FILTER_TAGS%&&}" + # Remove any extra "&&" at the beginning of their tags list + BEHAT_FILTER_TAGS="${BEHAT_FILTER_TAGS#&&}" +fi + +# EMAIL_HOST defines where the system-under-test can find the email server (inbucket) +# for sending email. +if [ -z "${EMAIL_HOST}" ] +then + EMAIL_HOST="127.0.0.1" +fi + +# LOCAL_INBUCKET_HOST defines where this test script can find the Inbucket server +# for sending email. When testing a remote system, the Inbucket server somewhere +# "in the middle" might have a different host name from the point of view of +# the test script. +if [ -z "${LOCAL_EMAIL_HOST}" ] +then + LOCAL_EMAIL_HOST="${EMAIL_HOST}" +fi + +if [ -z "${EMAIL_SMTP_PORT}" ] +then + EMAIL_SMTP_PORT="2500" +fi + +# If the caller did not mention specific tags, skip the skipped tests by default +if [ "${BEHAT_TAGS_OPTION_FOUND}" = false ] +then + if [[ -z $BEHAT_FILTER_TAGS ]] + then + BEHAT_FILTER_TAGS="~@skip" + # If the caller has already specified specifically to run "@skip" scenarios + # then do not append "not @skip" + elif [[ ! ${BEHAT_FILTER_TAGS} =~ (^|&)@skip(&|$) ]] + then + BEHAT_FILTER_TAGS="${BEHAT_FILTER_TAGS}&&~@skip" + fi +fi + +export IPV4_URL +export IPV6_URL +export FILES_FOR_UPLOAD="${SCRIPT_PATH}/filesForUpload/" + +if [ "${TEST_OCIS}" != "true" ] && [ "${TEST_REVA}" != "true" ] +then + # We are testing on an ownCloud core server. + # Tell the tests to wait 1 second between each upload/delete action + # to avoid problems with actions that depend on timestamps in seconds. 
+ export UPLOAD_DELETE_WAIT_TIME=1 +fi + +TEST_LOG_FILE=$(mktemp) +SCENARIOS_THAT_PASSED=0 +SCENARIOS_THAT_FAILED=0 + +if [ ${#BEHAT_SUITES[@]} -eq 0 ] && [ -z "${BEHAT_FEATURE}" ] +then + SUITE_FEATURE_TEXT="all ${TEST_TYPE_TEXT}" + run_behat_tests +else + if [ -n "${BEHAT_SUITE}" ] + then + SUITE_FEATURE_TEXT="${BEHAT_SUITE}" + fi + + if [ -n "${BEHAT_FEATURE}" ] + then + # If running a whole feature, it will be something like login.feature + # If running just a single scenario, it will also have the line number + # like login.feature:36 - which will be parsed correctly like a "file" + # by basename. + BEHAT_FEATURE_FILE=`basename ${BEHAT_FEATURE}` + SUITE_FEATURE_TEXT="${SUITE_FEATURE_TEXT} ${BEHAT_FEATURE_FILE}" + fi +fi + +for i in "${!BEHAT_SUITES[@]}" + do + BEHAT_SUITE_TO_RUN="${BEHAT_SUITES[$i]}" + BEHAT_SUITE_OPTION="--suite=${BEHAT_SUITE_TO_RUN}" + SUITE_FEATURE_TEXT="${BEHAT_SUITES[$i]}" + for rerun_number in $(seq 1 ${BEHAT_RERUN_TIMES}) + do + if ((${BEHAT_RERUN_TIMES} > 1)) + then + echo -e "\nTest repeat $rerun_number of ${BEHAT_RERUN_TIMES}" + fi + run_behat_tests + done +done + +TOTAL_SCENARIOS=$((SCENARIOS_THAT_PASSED + SCENARIOS_THAT_FAILED)) + +echo "runsh: Total ${TOTAL_SCENARIOS} scenarios (${SCENARIOS_THAT_PASSED} passed, ${SCENARIOS_THAT_FAILED} failed)" + +# 3 types of things can have gone wrong: +# - some scenario failed (and it was not expected to fail) +# - some scenario passed (but it was expected to fail) +# - Behat exited with non-zero status because of some other error +# If any of these happened then report about it and exit with status 1 (error) + +if [ ${#UNEXPECTED_FAILED_SCENARIOS[@]} -gt 0 ] +then + UNEXPECTED_FAILURE=true +else + UNEXPECTED_FAILURE=false +fi + +if [ ${#UNEXPECTED_PASSED_SCENARIOS[@]} -gt 0 ] +then + UNEXPECTED_SUCCESS=true +else + UNEXPECTED_SUCCESS=false +fi + +if [ ${#UNEXPECTED_BEHAT_EXIT_STATUSES[@]} -gt 0 ] +then + UNEXPECTED_BEHAT_EXIT_STATUS=true +else + UNEXPECTED_BEHAT_EXIT_STATUS=false +fi + +# If 
we got some unexpected success, and we only ran a single feature or scenario +# then the fact that some expected failures did not happen might be because those +# scenarios were never even run. +# Filter the UNEXPECTED_PASSED_SCENARIOS to remove scenarios that were not run. +if [ "${UNEXPECTED_SUCCESS}" = true ] +then + ACTUAL_UNEXPECTED_PASS=() + # if running a single feature or a single scenario + if [[ -n "${BEHAT_FEATURE}" ]] + then + for unexpected_passed_value in "${UNEXPECTED_PASSED_SCENARIOS[@]}" + do + # check only for the running feature + if [[ $BEHAT_FEATURE == *":"* ]] + then + BEHAT_FEATURE_WITH_LINE_NUM=$BEHAT_FEATURE + else + LINE_NUM=$(echo ${unexpected_passed_value} | cut -d":" -f2) + BEHAT_FEATURE_WITH_LINE_NUM=$BEHAT_FEATURE:$LINE_NUM + fi + if [[ $BEHAT_FEATURE_WITH_LINE_NUM == *"${unexpected_passed_value}" ]] + then + ACTUAL_UNEXPECTED_PASS+=("${unexpected_passed_value}") + fi + done + else + ACTUAL_UNEXPECTED_PASS=("${UNEXPECTED_PASSED_SCENARIOS[@]}") + fi + + if [ ${#ACTUAL_UNEXPECTED_PASS[@]} -eq 0 ] + then + UNEXPECTED_SUCCESS=false + fi +fi + +if [ "${UNEXPECTED_FAILURE}" = false ] && [ "${UNEXPECTED_SUCCESS}" = false ] && [ "${UNEXPECTED_BEHAT_EXIT_STATUS}" = false ] +then + FINAL_EXIT_STATUS=0 +else + FINAL_EXIT_STATUS=1 +fi + +if [ -n "${EXPECTED_FAILURES_FILE}" ] +then + echo "runsh: Exit code after checking expected failures: ${FINAL_EXIT_STATUS}" +fi + +if [ "${UNEXPECTED_FAILURE}" = true ] +then + ${YELLOW_COLOR}; echo "runsh: Total unexpected failed scenarios throughout the test run:" + ${RED_COLOR}; printf "%s\n" "${UNEXPECTED_FAILED_SCENARIOS[@]}" +else + ${GREEN_COLOR}; echo "runsh: There were no unexpected failures." +fi + +if [ "${UNEXPECTED_SUCCESS}" = true ] +then + ${YELLOW_COLOR}; echo "runsh: Total unexpected passed scenarios throughout the test run:" + ${RED_COLOR}; printf "%s\n" "${ACTUAL_UNEXPECTED_PASS[@]}" +else + ${GREEN_COLOR}; echo "runsh: There were no unexpected success." 
+fi + +if [ "${UNEXPECTED_BEHAT_EXIT_STATUS}" = true ] +then + ${YELLOW_COLOR}; echo "runsh: The following Behat test runs exited with non-zero status:" + ${RED_COLOR}; printf "%s\n" "${UNEXPECTED_BEHAT_EXIT_STATUSES[@]}" +fi + +# sync the file-system so all output will be flushed to storage. +# In drone we sometimes see that the last lines of output are missing from the +# drone log. +sync + +# If we are running in drone CI, then sleep for a bit to (hopefully) let the +# drone agent send all the output to the drone server. +if [ -n "${CI_REPO}" ] +then + echo "sleeping for 30 seconds at end of test run" + sleep 30 +fi + +exit ${FINAL_EXIT_STATUS} diff --git a/tests/docker/docker-compose.yml b/tests/docker/docker-compose.yml index 580cbe065f..bb54bc3ba7 100644 --- a/tests/docker/docker-compose.yml +++ b/tests/docker/docker-compose.yml @@ -1,12 +1,36 @@ version: "3.4" services: revad: - image: ${REVAD_IMAGE} + # image: ${REVAD_IMAGE} + # we build the reva image with eos because it's faster + # instead of uploading and share through the github runners + # TODO (gdelmont): in future this should be parameterized + # to support other reva images as before + build: + context: ../../ + dockerfile: docker/Dockerfile.revad-eos volumes: - ../revad:/etc/revad working_dir: /etc/revad/ healthcheck: test: sleep 5 + eos-storage: + image: ${EOS_FULL_IMAGE} + security_opt: + - seccomp:unconfined + ulimits: + nproc: 57875 + nofile: 1024 + core: -1 + privileged: true + sysctls: + - net.ipv6.conf.all.disable_ipv6=0 + hostname: eosuser.example.org + healthcheck: + test: eos file info /eos/user + interval: 10s + timeout: 5s + retries: 5 litmus: image: registry.cern.ch/docker.io/owncloud/litmus:latest environment: @@ -15,7 +39,7 @@ services: TESTS: basic http copymove props acceptance: image: cs3org/behat:latest - entrypoint: /mnt/ocis/tests/acceptance/run.sh + entrypoint: /mnt/acceptance/run.sh environment: OCIS_REVA_DATA_ROOT: /var/tmp/reva/data/ DELETE_USER_DATA_CMD: rm -rf 
/var/tmp/reva/data/nodes/root/* /var/tmp/reva/data/nodes/*-*-*-* /var/tmp/reva/data/blobs/* @@ -35,10 +59,10 @@ services: command: -c /etc/revad/frontend.toml volumes: - shared-volume:/var/tmp - storage-home-ocis: + storage-home: extends: revad hostname: storage-home - command: -c /etc/revad/storage-home-ocis.toml + command: -c /etc/revad/storage-home.toml volumes: - shared-volume:/var/tmp users: @@ -56,11 +80,13 @@ services: environment: LITMUS_URL: http://frontend:20080/remote.php/webdav depends_on: + eos-storage: + condition: service_healthy gateway: condition: service_healthy frontend: condition: service_healthy - storage-home-ocis: + storage-home: condition: service_healthy users: condition: service_healthy @@ -71,50 +97,26 @@ services: environment: LITMUS_URL: http://frontend:20080/remote.php/dav/files/4c510ada-c86b-4815-8820-42cdf82c3d51 depends_on: + eos-storage: + condition: service_healthy gateway: condition: service_healthy frontend: condition: service_healthy - storage-home-ocis: + storage-home: condition: service_healthy users: condition: service_healthy shares: condition: service_healthy - permissions-ocis-ci: - extends: revad - command: -c /etc/revad/permissions-ocis-ci.toml - storage-users-ocis: + storage-users: extends: revad hostname: storage-users - command: -c /etc/revad/storage-users-ocis.toml - volumes: - - shared-volume:/var/tmp - litmus-3: - extends: litmus - entrypoint: /bin/sh - command: - - -c - - | - curl -s -k -u einstein:relativity -I http://frontend:20080/remote.php/dav/files/einstein - export LITMUS_URL=http://frontend:20080/remote.php/dav/spaces/123e4567-e89b-12d3-a456-426655440000!$$(ls /var/tmp/reva/data/spaces/personal/) - exec /usr/local/bin/litmus-wrapper + command: -c /etc/revad/storage-users.toml volumes: - shared-volume:/var/tmp depends_on: - gateway: - condition: service_healthy - frontend: - condition: service_healthy - storage-home-ocis: - condition: service_healthy - users: - condition: service_healthy - shares: - 
condition: service_healthy - permissions-ocis-ci: - condition: service_healthy - storage-users-ocis: + eos-storage: condition: service_healthy frontend-global: extends: revad @@ -127,18 +129,24 @@ services: command: -c /etc/revad/storage-local-1.toml volumes: - shared-volume:/var/tmp + depends_on: + eos-storage: + condition: service_healthy storage-local-2: extends: revad command: -c /etc/revad/storage-local-2.toml volumes: - shared-volume:/var/tmp + depends_on: + eos-storage: + condition: service_healthy acceptance-1: extends: acceptance environment: PATH_TO_APITESTS: /mnt/ocis TEST_SERVER_URL: http://frontend:20180 - STORAGE_DRIVER: OCIS TEST_REVA: 'true' + EXPECTED_FAILURES_FILE: /mnt/acceptance/expected-failures-on-EOS-storage.md REGULAR_USER_PASSWORD: relativity SEND_SCENARIO_LINE_REFERENCES: 'true' BEHAT_SUITE: apiVirtualViews @@ -151,7 +159,7 @@ services: condition: service_healthy frontend-global: condition: service_healthy - storage-home-ocis: + storage-home: condition: service_healthy users: condition: service_healthy @@ -179,6 +187,9 @@ services: command: -c /etc/revad/storage-publiclink.toml volumes: - shared-volume:/var/tmp + depends_on: + eos-storage: + condition: service_healthy ldap-users: extends: revad hostname: users @@ -192,7 +203,6 @@ services: extends: acceptance environment: TEST_SERVER_URL: http://frontend:20080 - STORAGE_DRIVER: OCIS TEST_WITH_LDAP: 'true' REVA_LDAP_HOSTNAME: ldap TEST_REVA: 'true' @@ -200,9 +210,11 @@ services: BEHAT_FILTER_TAGS: ~@provisioning_api-app-required&&~@skipOnOcis-OCIS-Storage&&~@personalSpace&&~@skipOnGraph&&~@carddav&&~@skipOnReva&&~@skipOnRevaMaster DIVIDE_INTO_NUM_PARTS: ${PARTS:-1} RUN_PART: ${PART:-1} - EXPECTED_FAILURES_FILE: /mnt/acceptance/expected-failures-on-OCIS-storage.md - BEHAT_YML: tests/acceptance/config/behat-core.yml + EXPECTED_FAILURES_FILE: /mnt/acceptance/expected-failures-on-EOS-storage.md + BEHAT_FEATURES_DIR: /mnt/ocis/tests/acceptance/features + BEHAT_YML: 
/mnt/acceptance/config/behat-core.yml ACCEPTANCE_TEST_TYPE: core-api + BEHAT_FILTER_SUITE_FILE: /mnt/acceptance/filtered-suites-acceptance-2-EOS volumes: - shared-volume:/var/tmp working_dir: /mnt/ocis @@ -211,85 +223,34 @@ services: condition: service_healthy frontend: condition: service_healthy - storage-home-ocis: + storage-home: condition: service_healthy shares: condition: service_healthy - storage-users-ocis: - condition: service_healthy - storage-publiclink: - condition: service_healthy - ldap-users: - condition: service_healthy - ceph: - image: ceph/daemon - environment: - CEPH_DAEMON: demo - NETWORK_AUTO_DETECT: 4 - MON_IP: 0.0.0.0 - CEPH_PUBLIC_NETWORK: 0.0.0.0/0 - RGW_CIVETWEB_PORT: 4000 - RGW_NAME: ceph - CEPH_DEMO_UID: test-user - CEPH_DEMO_ACCESS_KEY: test - CEPH_DEMO_SECRET_KEY: test - CEPH_DEMO_BUCKET: test - healthcheck: - test: ceph health - interval: 5s - timeout: 5s - retries: 5 - storage-home-s3ng: - extends: revad - hostname: storage-home - command: -c /etc/revad/storage-home-s3ng.toml - volumes: - - shared-volume:/var/tmp - depends_on: - ceph: - condition: service_healthy - storage-users-s3ng: - extends: revad - hostname: storage-users - command: -c /etc/revad/storage-users-s3ng.toml - volumes: - - shared-volume:/var/tmp - depends_on: - ceph: - condition: service_healthy - acceptance-3: - extends: acceptance - environment: - TEST_SERVER_URL: http://frontend:20080 - STORAGE_DRIVER: S3NG - TEST_WITH_LDAP: 'true' - REVA_LDAP_HOSTNAME: ldap - TEST_REVA: 'true' - SEND_SCENARIO_LINE_REFERENCES: 'true' - BEHAT_FILTER_TAGS: ~@provisioning_api-app-required&&~@skipOnOcis-OCIS-Storage&&~@personalSpace&&~&&~@skipOnGraph&&~@carddav&&~@skipOnReva&&~@skipOnRevaMaster - DIVIDE_INTO_NUM_PARTS: ${PARTS:-1} - RUN_PART: ${PART:-1} - EXPECTED_FAILURES_FILE: /mnt/acceptance/expected-failures-on-S3NG-storage.md - BEHAT_YML: tests/acceptance/config/behat-core.yml - ACCEPTANCE_TEST_TYPE: core-api - volumes: - - shared-volume:/var/tmp - working_dir: /mnt/ocis - 
depends_on: - gateway: - condition: service_healthy - frontend: - condition: service_healthy - shares: + storage-users: condition: service_healthy storage-publiclink: condition: service_healthy ldap-users: condition: service_healthy - storage-home-s3ng: - condition: service_healthy - storage-users-s3ng: - condition: service_healthy - + # ceph: + # image: ceph/daemon + # environment: + # CEPH_DAEMON: demo + # NETWORK_AUTO_DETECT: 4 + # MON_IP: 0.0.0.0 + # CEPH_PUBLIC_NETWORK: 0.0.0.0/0 + # RGW_CIVETWEB_PORT: 4000 + # RGW_NAME: ceph + # CEPH_DEMO_UID: test-user + # CEPH_DEMO_ACCESS_KEY: test + # CEPH_DEMO_SECRET_KEY: test + # CEPH_DEMO_BUCKET: test + # healthcheck: + # test: ceph health + # interval: 5s + # timeout: 5s + # retries: 5 + volumes: shared-volume: diff --git a/tests/docker/eos-storage/Dockerfile b/tests/docker/eos-storage/Dockerfile new file mode 100644 index 0000000000..8afb99af03 --- /dev/null +++ b/tests/docker/eos-storage/Dockerfile @@ -0,0 +1,10 @@ +FROM gitlab-registry.cern.ch/dss/eos/eos-ci:5.1.25 + +COPY scripts/eos-run.sh /mnt/scripts/eos-run.sh +COPY sssd/sssd.conf /etc/sssd/sssd.conf + +RUN ulimit -n 1024000 && yum install -y sssd sssd-client + +RUN chmod 0600 /etc/sssd/sssd.conf && chown root:root /etc/sssd/sssd.conf + +ENTRYPOINT /mnt/scripts/eos-run.sh \ No newline at end of file diff --git a/tests/docker/eos-storage/scripts/eos-run.sh b/tests/docker/eos-storage/scripts/eos-run.sh new file mode 100755 index 0000000000..1237c96216 --- /dev/null +++ b/tests/docker/eos-storage/scripts/eos-run.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +eos daemon sss recreate +eos daemon run mq & +eos daemon run qdb & +eos daemon run mgm & +eos daemon run fst & +sssd +sleep 5 + +for name in 01; do + mkdir -p /data/fst/$name; + chown daemon:daemon /data/fst/$name +done +eos space define default + +eosfstregister -r localhost /data/fst/ default:1 + +eos space set default on +eos mkdir /eos/dev/rep-2/ +eos mkdir /eos/dev/ec-42/ +eos attr set default=replica /eos/dev/rep-2 
/ +eos attr set default=raid6 /eos/dev/ec-42/ +eos chmod 777 /eos/dev/rep-2/ +eos chmod 777 /eos/dev/ec-42/ +mkdir -p /eos/ +eosxd -ofsname=$(hostname -f):/eos/ /eos/ + +eos mkdir -p /eos/user + +for letter in {a..z}; do + eos mkdir -p "/eos/user/$letter" +done + +eos vid set membership 0 +sudo +eos vid set membership 99 +sudo +eos vid set map -tident "*@storage-home" vuid:0 vgid:0 +eos vid set map -tident "*@storage-users" vuid:0 vgid:0 +eos vid set map -tident "*@storage-local-1" vuid:0 vgid:0 +eos vid set map -tident "*@storage-local-2" vuid:0 vgid:0 + +tail -f /dev/null diff --git a/tests/docker/eos-storage/sssd/sssd.conf b/tests/docker/eos-storage/sssd/sssd.conf new file mode 100644 index 0000000000..09f099db27 --- /dev/null +++ b/tests/docker/eos-storage/sssd/sssd.conf @@ -0,0 +1,25 @@ +[sssd] +config_file_version = 2 +services = nss, pam +domains = ldap + +[domain/ldap] +cache_credentials = true +enumerate = true + +id_provider = ldap +auth_provider = ldap + +ldap_uri = ldap://ldap +ldap_search_base = dc=owncloud,dc=com +ldap_id_use_start_tls = true +ldap_tls_reqcert = never +chpass_provider = ldap +ldap_chpass_uri = ldap://ldap +entry_cache_timeout = 600 +ldap_network_timeout = 2 +ldap_default_bind_dn = cn=admin,dc=owncloud,dc=com +ldap_default_authtok = admin + +ldap_schema = rfc2307 +ldap_group_member = memberUid \ No newline at end of file diff --git a/tests/integration/grpc/storageprovider_test.go b/tests/integration/grpc/storageprovider_test.go index 30f2460244..392b8528d4 100644 --- a/tests/integration/grpc/storageprovider_test.go +++ b/tests/integration/grpc/storageprovider_test.go @@ -20,7 +20,6 @@ package grpc_test import ( "context" - "os" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" @@ -28,10 +27,7 @@ import ( "github.com/cs3org/reva/pkg/auth/scope" ctxpkg "github.com/cs3org/reva/pkg/ctx" "github.com/cs3org/reva/pkg/rgrpc/todo/pool" - 
"github.com/cs3org/reva/pkg/storage/fs/ocis" - "github.com/cs3org/reva/pkg/storage/fs/owncloud" jwt "github.com/cs3org/reva/pkg/token/manager/jwt" - "github.com/cs3org/reva/tests/helpers" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "google.golang.org/grpc/metadata" @@ -61,15 +57,15 @@ var _ = Describe("storage providers", func() { Username: "einstein", } - homeRef = &storagep.Reference{Path: "/"} - filePath = "/file" - fileRef = &storagep.Reference{Path: filePath} - versionedFilePath = "/versionedFile" - versionedFileRef = &storagep.Reference{Path: versionedFilePath} - subdirPath = "/subdir" - subdirRef = &storagep.Reference{Path: subdirPath} - sharesPath = "/Shares" - sharesRef = &storagep.Reference{Path: sharesPath} + homeRef = &storagep.Reference{Path: "/"} + filePath = "/file" + fileRef = &storagep.Reference{Path: filePath} + // versionedFilePath = "/versionedFile" + // versionedFileRef = &storagep.Reference{Path: versionedFilePath} + subdirPath = "/subdir" + subdirRef = &storagep.Reference{Path: subdirPath} + sharesPath = "/Shares" + sharesRef = &storagep.Reference{Path: sharesPath} ) JustBeforeEach(func() { @@ -151,37 +147,37 @@ var _ = Describe("storage providers", func() { }) } - assertFileVersions := func() { - It("lists file versions", func() { - listRes, err := serviceClient.ListFileVersions(ctx, &storagep.ListFileVersionsRequest{Ref: versionedFileRef}) - Expect(err).ToNot(HaveOccurred()) - Expect(listRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - Expect(len(listRes.Versions)).To(Equal(1)) - Expect(listRes.Versions[0].Size).To(Equal(uint64(1))) - }) - - It("restores a file version", func() { - statRes, err := serviceClient.Stat(ctx, &storagep.StatRequest{Ref: versionedFileRef}) - Expect(err).ToNot(HaveOccurred()) - Expect(statRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - Expect(statRes.Info.Size).To(Equal(uint64(2))) // second version contains 2 bytes - - listRes, err := serviceClient.ListFileVersions(ctx, 
&storagep.ListFileVersionsRequest{Ref: versionedFileRef}) - Expect(err).ToNot(HaveOccurred()) - restoreRes, err := serviceClient.RestoreFileVersion(ctx, - &storagep.RestoreFileVersionRequest{ - Ref: versionedFileRef, - Key: listRes.Versions[0].Key, - }) - Expect(err).ToNot(HaveOccurred()) - Expect(restoreRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - - statRes, err = serviceClient.Stat(ctx, &storagep.StatRequest{Ref: versionedFileRef}) - Expect(err).ToNot(HaveOccurred()) - Expect(statRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - Expect(statRes.Info.Size).To(Equal(uint64(1))) // initial version contains 1 byte - }) - } + // assertFileVersions := func() { + // It("lists file versions", func() { + // listRes, err := serviceClient.ListFileVersions(ctx, &storagep.ListFileVersionsRequest{Ref: versionedFileRef}) + // Expect(err).ToNot(HaveOccurred()) + // Expect(listRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) + // Expect(len(listRes.Versions)).To(Equal(1)) + // Expect(listRes.Versions[0].Size).To(Equal(uint64(1))) + // }) + + // It("restores a file version", func() { + // statRes, err := serviceClient.Stat(ctx, &storagep.StatRequest{Ref: versionedFileRef}) + // Expect(err).ToNot(HaveOccurred()) + // Expect(statRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) + // Expect(statRes.Info.Size).To(Equal(uint64(2))) // second version contains 2 bytes + + // listRes, err := serviceClient.ListFileVersions(ctx, &storagep.ListFileVersionsRequest{Ref: versionedFileRef}) + // Expect(err).ToNot(HaveOccurred()) + // restoreRes, err := serviceClient.RestoreFileVersion(ctx, + // &storagep.RestoreFileVersionRequest{ + // Ref: versionedFileRef, + // Key: listRes.Versions[0].Key, + // }) + // Expect(err).ToNot(HaveOccurred()) + // Expect(restoreRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) + + // statRes, err = serviceClient.Stat(ctx, &storagep.StatRequest{Ref: versionedFileRef}) + // Expect(err).ToNot(HaveOccurred()) + // 
Expect(statRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) + // Expect(statRes.Info.Size).To(Equal(uint64(1))) // initial version contains 1 byte + // }) + // } assertDelete := func() { It("deletes a directory", func() { @@ -498,153 +494,6 @@ var _ = Describe("storage providers", func() { // assertMetadata() }) - Context("with an existing file /versioned_file", func() { - JustBeforeEach(func() { - fs, err := ocis.New(ctx, map[string]interface{}{ - "root": revads["storage"].TmpRoot, - "enable_home": true, - }) - Expect(err).ToNot(HaveOccurred()) - - content1 := []byte("1") - content2 := []byte("22") - - ctx := ctxpkg.ContextSetUser(context.Background(), user) - - err = fs.CreateHome(ctx) - Expect(err).ToNot(HaveOccurred()) - err = helpers.Upload(ctx, fs, versionedFileRef, content1) - Expect(err).ToNot(HaveOccurred()) - err = helpers.Upload(ctx, fs, versionedFileRef, content2) - Expect(err).ToNot(HaveOccurred()) - }) - - assertFileVersions() - }) }) - Describe("ocis", func() { - BeforeEach(func() { - dependencies = map[string]string{ - "storage": "storageprovider-ocis.toml", - } - }) - - assertCreateHome() - - Context("with a home and a subdirectory", func() { - JustBeforeEach(func() { - res, err := serviceClient.CreateHome(ctx, &storagep.CreateHomeRequest{}) - Expect(err).ToNot(HaveOccurred()) - Expect(res.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - - subdirRes, err := serviceClient.CreateContainer(ctx, &storagep.CreateContainerRequest{Ref: subdirRef}) - Expect(err).ToNot(HaveOccurred()) - Expect(subdirRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - }) - - assertCreateContainer() - assertListContainer() - assertGetPath() - assertDelete() - assertMove() - assertGrants() - assertUploads() - assertDownloads() - assertRecycle() - assertReferences() - // assertMetadata() - }) - - Context("with an existing file /versioned_file", func() { - JustBeforeEach(func() { - fs, err := ocis.New(ctx, map[string]interface{}{ - "root": revads["storage"].TmpRoot, - 
"enable_home": true, - }) - Expect(err).ToNot(HaveOccurred()) - - content1 := []byte("1") - content2 := []byte("22") - - ctx := ctxpkg.ContextSetUser(context.Background(), user) - - err = fs.CreateHome(ctx) - Expect(err).ToNot(HaveOccurred()) - err = helpers.Upload(ctx, fs, versionedFileRef, content1) - Expect(err).ToNot(HaveOccurred()) - err = helpers.Upload(ctx, fs, versionedFileRef, content2) - Expect(err).ToNot(HaveOccurred()) - }) - - assertFileVersions() - }) - }) - - Describe("owncloud", func() { - BeforeEach(func() { - dependencies = map[string]string{ - "users": "userprovider-json.toml", - "storage": "storageprovider-owncloud.toml", - } - - redisAddress := os.Getenv("REDIS_ADDRESS") - if redisAddress == "" { - Fail("REDIS_ADDRESS not set") - } - variables = map[string]string{ - "redis_address": redisAddress, - } - }) - - assertCreateHome() - - Context("with a home and a subdirectory", func() { - JustBeforeEach(func() { - res, err := serviceClient.CreateHome(ctx, &storagep.CreateHomeRequest{}) - Expect(err).ToNot(HaveOccurred()) - Expect(res.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - - subdirRes, err := serviceClient.CreateContainer(ctx, &storagep.CreateContainerRequest{Ref: subdirRef}) - Expect(err).ToNot(HaveOccurred()) - Expect(subdirRes.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - }) - - assertCreateContainer() - assertListContainer() - assertDelete() - assertMove() - assertGrants() - assertUploads() - assertDownloads() - assertRecycle() - assertReferences() - // assertMetadata() - }) - - Context("with an existing file /versioned_file", func() { - JustBeforeEach(func() { - fs, err := owncloud.New(ctx, map[string]interface{}{ - "datadirectory": revads["storage"].TmpRoot, - "userprovidersvc": revads["users"].GrpcAddress, - "enable_home": true, - }) - Expect(err).ToNot(HaveOccurred()) - - content1 := []byte("1") - content2 := []byte("22") - - ctx := ctxpkg.ContextSetUser(context.Background(), user) - - err = fs.CreateHome(ctx) - 
Expect(err).ToNot(HaveOccurred()) - err = helpers.Upload(ctx, fs, versionedFileRef, content1) - Expect(err).ToNot(HaveOccurred()) - err = helpers.Upload(ctx, fs, versionedFileRef, content2) - Expect(err).ToNot(HaveOccurred()) - }) - - assertFileVersions() - }) - }) }) diff --git a/tests/oc-integration-tests/drone/frontend-global.toml b/tests/oc-integration-tests/drone/frontend-global.toml deleted file mode 100644 index 12d5a2bc92..0000000000 --- a/tests/oc-integration-tests/drone/frontend-global.toml +++ /dev/null @@ -1,114 +0,0 @@ -# This config file will start a reva service that: -# - serves as the entrypoint for owncloud APIs but with a globally accessible namespace. -# - serves http endpoints on port 20180 -# - / --------------- ocdav -# - /ocs ------------ ocs -# - TODO(diocas): ocm - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "localhost:19000" - -[http] -address = "0.0.0.0:20180" - -[http.middlewares.cors] -allow_credentials = true - -[http.services.ocdav] -# serve ocdav on the root path -prefix = "" -chunk_folder = "/drone/src/tmp/reva/chunks" -# for user lookups -# prefix the path of requests to /dav/files with this namespace -# While owncloud has only listed usernames at this endpoint CERN has -# been exposing more than just usernames. For owncloud deployments we -# can prefix the path to jail the requests to the correct CS3 namespace. -# In this deployment we mounted the owncloud storage provider at /users. It -# expects a username as the first path segment. -# currently, only the desktop client will use this endpoint, but only if -# the dav.chunking capability is available -# TODO implement a path wrapper that rewrites `` into the path -# layout for the users home? -# no, use GetHome? -# for eos we need to rewrite the path -# TODO strip the username from the path so the CS3 namespace can be mounted -# at the files/ endpoint? what about migration? 
separate reva instance -files_namespace = "/" - -# similar to the dav/files endpoint we can configure a prefix for the old webdav endpoint -# we use the old webdav endpoint to present the cs3 namespace -# note: this changes the tree that is rendered at remote.php/webdav from the users home to the cs3 namespace -# use webdav_namespace = "/home" to use the old namespace that only exposes the users files -# this endpoint should not affect the desktop client sync but will present different folders for the other clients: -# - the desktop clients use a hardcoded remote.php/dav/files/ if the dav.chunkung capability is present -# - the ios ios uses the core.webdav-root capability which points to remote.php/webdav in oc10 -# - the oc js sdk is hardcoded to the remote.php/webdav so it will see the new tree -# - TODO android? no sync ... but will see different tree -webdav_namespace = "/" - -[http.services.ocs] - -[http.services.ocs.capabilities.capabilities.core.status] -version = "10.0.11.5" -versionstring = "10.0.11" - -[http.services.ocs.capabilities.capabilities.files_sharing] -api_enabled = true -resharing = true -group_sharing = true -auto_accept_share = true -share_with_group_members_only = true -share_with_membership_groups_only = true -default_permissions = 22 -search_min_length = 3 - -[http.services.ocs.capabilities.capabilities.files_sharing.public] -enabled = true -send_mail = true -social_share = true -upload = true -multiple = true -supports_upload_only = true - -[http.services.ocs.capabilities.capabilities.files_sharing.public.password] -enforced = true - -[http.services.ocs.capabilities.capabilities.files_sharing.public.password.enforced_for] -read_only = true -read_write = true -upload_only = true - -[http.services.ocs.capabilities.capabilities.files_sharing.public.expire_date] -enabled = true - -[http.services.ocs.capabilities.capabilities.files_sharing.user] -send_mail = true - -[http.services.ocs.capabilities.capabilities.files_sharing.user_enumeration] 
-enabled = true -group_members_only = true - -[http.services.ocs.capabilities.capabilities.files_sharing.federation] -outgoing = true -incoming = true - -[http.services.ocs.capabilities.capabilities.notifications] -endpoints = [] - -[http.services.ocs.capabilities.capabilities.files.tus_support] -version = "1.0.0" -resumable = "1.0.0" -extension = "creation,creation-with-upload" -http_method_override = "" -max_chunk_size = 0 - -# serve /ocm -[http.services.ocmd] -prefix = "ocm" - -[http.middlewares.providerauthorizer] -driver = "json" - -[http.middlewares.providerauthorizer.drivers.json] -providers = "providers.demo.json" \ No newline at end of file diff --git a/tests/oc-integration-tests/drone/frontend.toml b/tests/oc-integration-tests/drone/frontend.toml deleted file mode 100644 index 84ed3b43f7..0000000000 --- a/tests/oc-integration-tests/drone/frontend.toml +++ /dev/null @@ -1,125 +0,0 @@ -# This config file will start a reva service that: -# - serves as the entrypoint for owncloud APIs. -# - jails users into their home folder as in owncloud classic -# - serves http endpoints on port 20080 -# - / --------------- ocdav -# - /ocs ------------ ocs -# - TODO(diocas): ocm - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "localhost:19000" - -[grpc] -address = "0.0.0.0:20099" - -[grpc.services.authprovider] -auth_manager = "oidc" - -# If you want to use your own openid provider change this config -[grpc.services.authprovider.auth_managers.oidc] -issuer = "http://localhost:20080" - -[http] -address = "0.0.0.0:20080" - -[http.middlewares.cors] -allow_credentials = true - -[http.services.ocdav] -# serve ocdav on the root path -prefix = "" -chunk_folder = "/drone/src/tmp/reva/chunks" -# for user lookups -# prefix the path of requests to /dav/files with this namespace -# While owncloud has only listed usernames at this endpoint CERN has -# been exposing more than just usernames. 
For owncloud deployments we -# can prefix the path to jail the requests to the correct CS3 namespace. -# In this deployment we mounted the owncloud storage provider at /users. It -# expects a username as the first path segment. -# currently, only the desktop client will use this endpoint, but only if -# the dav.chunking capability is available -# TODO implement a path wrapper that rewrites `` into the path -# layout for the users home? -# no, use GetHome? -# for eos we need to rewrite the path -# TODO strip the username from the path so the CS3 namespace can be mounted -# at the files/ endpoint? what about migration? separate reva instance -files_namespace = "/users" - -# similar to the dav/files endpoint we can configure a prefix for the old webdav endpoint -# we use the old webdav endpoint to present the cs3 namespace -# note: this changes the tree that is rendered at remote.php/webdav from the users home to the cs3 namespace -# use webdav_namespace = "/home" to use the old namespace that only exposes the users files -# this endpoint should not affect the desktop client sync but will present different folders for the other clients: -# - the desktop clients use a hardcoded remote.php/dav/files/ if the dav.chunkung capability is present -# - the ios ios uses the core.webdav-root capability which points to remote.php/webdav in oc10 -# - the oc js sdk is hardcoded to the remote.php/webdav so it will see the new tree -# - TODO android? no sync ... 
but will see different tree -webdav_namespace = "/home" - -[http.services.ocs] - -[http.services.ocs.capabilities.capabilities.core.status] -version = "10.0.11.5" -versionstring = "10.0.11" - -[http.services.ocs.capabilities.capabilities.files_sharing] -api_enabled = true -resharing = true -group_sharing = true -auto_accept_share = true -share_with_group_members_only = true -share_with_membership_groups_only = true -default_permissions = 22 -search_min_length = 3 - -[http.services.ocs.capabilities.capabilities.files_sharing.public] -enabled = true -send_mail = true -social_share = true -upload = true -multiple = true -supports_upload_only = true - -[http.services.ocs.capabilities.capabilities.files_sharing.public.password] -enforced = true - -[http.services.ocs.capabilities.capabilities.files_sharing.public.password.enforced_for] -read_only = true -read_write = true -upload_only = true - -[http.services.ocs.capabilities.capabilities.files_sharing.public.expire_date] -enabled = true - -[http.services.ocs.capabilities.capabilities.files_sharing.user] -send_mail = true - -[http.services.ocs.capabilities.capabilities.files_sharing.user_enumeration] -enabled = true -group_members_only = true - -[http.services.ocs.capabilities.capabilities.files_sharing.federation] -outgoing = true -incoming = true - -[http.services.ocs.capabilities.capabilities.notifications] -endpoints = [] - -[http.services.ocs.capabilities.capabilities.files.tus_support] -version = "1.0.0" -resumable = "1.0.0" -extension = "creation,creation-with-upload" -http_method_override = "" -max_chunk_size = 0 - -# serve /ocm -[http.services.ocmd] -prefix = "ocm" - -[http.middlewares.providerauthorizer] -driver = "json" - -[http.middlewares.providerauthorizer.drivers.json] -providers = "providers.demo.json" \ No newline at end of file diff --git a/tests/oc-integration-tests/drone/gateway.toml b/tests/oc-integration-tests/drone/gateway.toml deleted file mode 100644 index 85c17b20c2..0000000000 --- 
a/tests/oc-integration-tests/drone/gateway.toml +++ /dev/null @@ -1,83 +0,0 @@ -# This config file will start a reva service that: -# - serves as a gateway for all CS3 requests -# - looks up the storageprovider using a storageregistry -# - looks up the authprovider using an authregistry -# - serves the gateway on grpc port 19000 -# - serves http datagateway on port 19001 -# - /data - datagateway: file up and download - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "localhost:19000" - -[grpc] -address = "0.0.0.0:19000" - -[grpc.services.gateway] -# registries -authregistrysvc = "localhost:19000" -storageregistrysvc = "localhost:19000" -# user metadata -preferencessvc = "localhost:18000" -userprovidersvc = "localhost:18000" -groupprovidersvc = "localhost:18000" -# an approvider lives on "localhost:18000" as well, see users.toml -# sharing -usershareprovidersvc = "localhost:17000" -publicshareprovidersvc = "localhost:17000" -# ocm -ocmcoresvc = "localhost:14000" -ocmshareprovidersvc = "localhost:14000" -ocminvitemanagersvc = "localhost:14000" -ocmproviderauthorizersvc = "localhost:14000" -# permissions -permissionssvc = "localhost:10000" -# other -commit_share_to_storage_grant = true -commit_share_to_storage_ref = true -share_folder = "Shares" -datagateway = "http://localhost:19001/data" -transfer_shared_secret = "replace-me-with-a-transfer-secret" # for direct uploads -transfer_expires = 6 # give it a moment -#disable_home_creation_on_login = true -link_grants_file = "/drone/src/tmp/reva/link_grants_file.json" - -[grpc.services.authregistry] -driver = "static" - -[grpc.services.authregistry.drivers.static.rules] -publicshares = "localhost:17000" # started with the shares.toml -basic = "localhost:18000" # started with the users.toml - -[grpc.services.storageregistry] -driver = "static" - -[grpc.services.storageregistry.drivers.static] -home_provider = "/home" - -[grpc.services.storageregistry.drivers.static.rules] - -# mount a home storage provider that uses a 
context based path wrapper -# to jail users into their home dir -"/home" = {"address" = "localhost:12000"} - -# mount a storage provider without a path wrapper for direct access to users. -"/users" = {"address" = "localhost:11000"} -"123e4567-e89b-12d3-a456-426655440000" = {"address" = "localhost:11000"} - -# the /virtual namespace is only accessible via the frontend-global service -"/virtual/[a-k]" = {"address" = "localhost:11100"} -"virtual-a-k" = {"address" = "localhost:11100"} -"/virtual/[l-z]" = {"address" = "localhost:11110"} -"virtual-l-z" = {"address" = "localhost:11110"} - -# another mount point might be "/projects/" - -"/public" = {"address" = "localhost:13000"} -"e1a73ede-549b-4226-abdf-40e69ca8230d" = {"address" = "localhost:13000"} - -[http] -address = "0.0.0.0:19001" - -[http.services.datagateway] -transfer_shared_secret = "replace-me-with-a-transfer-secret" diff --git a/tests/oc-integration-tests/drone/ldap-users.toml b/tests/oc-integration-tests/drone/ldap-users.toml deleted file mode 100644 index 0cd4f47dab..0000000000 --- a/tests/oc-integration-tests/drone/ldap-users.toml +++ /dev/null @@ -1,71 +0,0 @@ -# This config file will start a reva service that: -# - handles user metadata and user preferences -# - serves the grpc services on port 18000 - -[shared] -jwt_secret = "Pive-Fumkiu4" - -[grpc] -address = "0.0.0.0:18000" - -[grpc.services.authprovider] -auth_manager = "ldap" - -[grpc.services.authprovider.auth_managers.ldap] -hostname="ldap" -port=636 -insecure=true -base_dn="dc=owncloud,dc=com" -loginfilter="(&(objectclass=posixAccount)(|(cn={{login}}))(uid={{login}}))" -bind_username="cn=admin,dc=owncloud,dc=com" -bind_password="admin" -idp="http://localhost:18000" -[grpc.services.authprovider.auth_managers.ldap.schema] -uid="uid" -displayName="displayName" -dn="dn" -cn="cn" - -[grpc.services.userprovider] -driver = "ldap" - -[grpc.services.userprovider.drivers.ldap] -hostname="ldap" -port=636 -insecure=true -base_dn="dc=owncloud,dc=com" 
-userfilter="(&(objectclass=posixAccount)(|(uid={{.OpaqueId}})(cn={{.OpaqueId}})))" -findfilter="(&(objectclass=posixAccount)(|(cn={{query}}*)(displayname={{query}}*)(mail={{query}}*)))" -attributefilter="(&(objectclass=posixAccount)({{attr}}={{value}}))" -groupfilter="(&(objectclass=posixGroup)(cn=*)(memberuid={{.OpaqueId}}))" -bind_username="cn=admin,dc=owncloud,dc=com" -bind_password="admin" -idp="http://localhost:18000" - -[grpc.services.userprovider.drivers.ldap.schema] -uid="uid" -displayName="displayName" -dn="dn" -cn="cn" - -[grpc.services.groupprovider] -driver = "ldap" - -[grpc.services.groupprovider.drivers.ldap] -hostname="ldap" -port=636 -insecure=true -base_dn="dc=owncloud,dc=com" -groupfilter="(&(objectclass=posixGroup)(|(gid={{.OpaqueId}})(cn={{.OpaqueId}})))" -findfilter="(&(objectclass=posixGroup)(|(cn={{query}}*)(displayname={{query}}*)(mail={{query}}*)))" -attributefilter="(&(objectclass=posixGroup)({{attr}}={{value}}))" -memberfilter="(&(objectclass=posixAccount)(cn={{.OpaqueId}}))" -bind_username="cn=admin,dc=owncloud,dc=com" -bind_password="admin" -idp="http://localhost:18000" - -[grpc.services.groupprovider.drivers.ldap.schema] -gid="cn" -displayName="cn" -dn="dn" -cn="cn" diff --git a/tests/oc-integration-tests/drone/ocmd.toml b/tests/oc-integration-tests/drone/ocmd.toml deleted file mode 100644 index 0439d83856..0000000000 --- a/tests/oc-integration-tests/drone/ocmd.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This config file will start a reva service that: -# - serves user and public link shares - -[shared] -jwt_secret = "Pive-Fumkiu4" - -[grpc] -address = "0.0.0.0:14000" - -[grpc.services.ocmcore] -driver = "json" - -# Note that ocmcore and ocmshareprovider should use the same file for storing the shares. 
-[grpc.services.ocmcore.drivers.json] -file = "/drone/src/var/tmp/reva/shares_server_1.json" - -[grpc.services.ocminvitemanager] -driver = "json" - -[grpc.services.ocmshareprovider] -driver = "json" - -[grpc.services.ocmshareprovider.drivers.json] -file = "/drone/src/var/tmp/reva/shares_server_1.json" - -[grpc.services.ocmproviderauthorizer] -driver = "json" - -[grpc.services.ocmproviderauthorizer.drivers.json] -providers = "providers.demo.json" diff --git a/tests/oc-integration-tests/drone/providers.demo.json b/tests/oc-integration-tests/drone/providers.demo.json deleted file mode 100644 index f2111b71b2..0000000000 --- a/tests/oc-integration-tests/drone/providers.demo.json +++ /dev/null @@ -1,100 +0,0 @@ -[ - { - "name": "local", - "full_name": "ownCloud@Example", - "organization": "Example", - "domain": "example.org", - "homepage": "http://example.org", - "description": "Example cloud storage.", - "services": [ - { - "endpoint": { - "type": { - "name": "OCM", - "description": "Example Open Cloud Mesh API" - }, - "name": "Example - OCM API", - "path": "http://127.0.0.1:20080/ocm/", - "is_monitored": true - }, - "api_version": "0.0.1", - "host": "127.0.0.1:20080" - }, - { - "endpoint": { - "type": { - "name": "Webdav", - "description": "Example Webdav API" - }, - "name": "Example - Webdav API", - "path": "http://127.0.0.1:20080/remote.php/webdav/", - "is_monitored": true - }, - "api_version": "0.0.1", - "host": "127.0.0.1:20080" - }, - { - "endpoint": { - "type": { - "name": "Gateway", - "description": "Example GRPC Gateway" - }, - "name": "Example - GRPC Gateway", - "path": "127.0.0.1:19000", - "is_monitored": true - }, - "api_version": "0.0.1", - "host": "127.0.0.1:19000" - } - ] - }, - { - "name": "local-mesh", - "full_name": "ownCloud@Example2", - "organization": "Example 2", - "domain": "example.org", - "homepage": "http://example.org", - "description": "Example cloud storage 2.", - "services": [ - { - "endpoint": { - "type": { - "name": "OCM", - 
"description": "Example 2 Open Cloud Mesh API" - }, - "name": "Example 2 - OCM API", - "path": "http://127.0.0.1:40080/ocm/", - "is_monitored": true - }, - "api_version": "0.0.1", - "host": "127.0.0.1:40080" - }, - { - "endpoint": { - "type": { - "name": "Webdav", - "description": "Example 2 Webdav API" - }, - "name": "Example 2 - Webdav API", - "path": "http://127.0.0.1:40080/remote.php/webdav/", - "is_monitored": true - }, - "api_version": "0.0.1", - "host": "127.0.0.1:40080" - }, - { - "endpoint": { - "type": { - "name": "Gateway", - "description": "Example 2 GRPC Gateway" - }, - "name": "Example 2 - GRPC Gateway", - "path": "127.0.0.1:39000", - "is_monitored": true - }, - "api_version": "0.0.1", - "host": "127.0.0.1:39000" - } - ] - } -] diff --git a/tests/oc-integration-tests/drone/shares.toml b/tests/oc-integration-tests/drone/shares.toml deleted file mode 100644 index 34f5cf2c16..0000000000 --- a/tests/oc-integration-tests/drone/shares.toml +++ /dev/null @@ -1,20 +0,0 @@ -# This config file will start a reva service that: -# - serves user and public link shares - -[shared] -jwt_secret = "Pive-Fumkiu4" - -[grpc] -address = "0.0.0.0:17000" - -[grpc.services.usershareprovider] -driver = "memory" - -[grpc.services.authprovider] -auth_manager = "publicshares" - -[grpc.services.authprovider.auth_managers.publicshares] -gateway_addr = "0.0.0.0:19000" - -[grpc.services.publicshareprovider.drivers.json] -file = "/drone/src/tmp/reva/publicshares.json" diff --git a/tests/oc-integration-tests/drone/storage-home-ocis.toml b/tests/oc-integration-tests/drone/storage-home-ocis.toml deleted file mode 100644 index 098aaf7fca..0000000000 --- a/tests/oc-integration-tests/drone/storage-home-ocis.toml +++ /dev/null @@ -1,47 +0,0 @@ -# This config file will start a reva service that: -# - uses the ocis driver to serve users, jailed into their home (/home) -# - serves the home storage provider on grpc port 12000 -# - serves http dataprovider for this storage on port 12001 -# - 
/data - dataprovider: file up and download -# -# The home storage will inject the username into the path and jail users into -# their home directory - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "localhost:19000" - -[grpc] -address = "0.0.0.0:12000" - -# This is a storage provider that grants direct access to the wrapped storage -# the context path wrapper reads tho username from the context and prefixes the relative storage path with it -[grpc.services.storageprovider] -driver = "ocis" -mount_path = "/home" -mount_id = "123e4567-e89b-12d3-a456-426655440000" -expose_data_server = true -data_server_url = "http://revad-services:12001/data" -enable_home_creation = true -gateway_addr = "0.0.0.0:19000" - -[grpc.services.storageprovider.drivers.ocis] -root = "/drone/src/tmp/reva/data" -enable_home = true -treetime_accounting = true -treesize_accounting = true -gateway_addr = "0.0.0.0:19000" - -# we have a locally running dataprovider -[http] -address = "0.0.0.0:12001" - -[http.services.dataprovider] -driver = "ocis" -temp_folder = "/drone/src/tmp/reva/tmp" - -[http.services.dataprovider.drivers.ocis] -root = "/drone/src/tmp/reva/data" -enable_home = true -treetime_accounting = true -treesize_accounting = true diff --git a/tests/oc-integration-tests/drone/storage-home-s3ng.toml b/tests/oc-integration-tests/drone/storage-home-s3ng.toml deleted file mode 100644 index dfd784b567..0000000000 --- a/tests/oc-integration-tests/drone/storage-home-s3ng.toml +++ /dev/null @@ -1,55 +0,0 @@ -# This config file will start a reva service that: -# - uses the s3ng driver to serve users, jailed into their home (/home) -# - serves the home storage provider on grpc port 12000 -# - serves http dataprovider for this storage on port 12001 -# - /data - dataprovider: file up and download -# -# The home storage will inject the username into the path and jail users into -# their home directory - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "localhost:19000" - -[grpc] -address = 
"0.0.0.0:12000" - -# This is a storage provider that grants direct access to the wrapped storage -# the context path wrapper reads tho username from the context and prefixes the relative storage path with it -[grpc.services.storageprovider] -driver = "s3ng" -mount_path = "/home" -mount_id = "123e4567-e89b-12d3-a456-426655440000" -expose_data_server = true -data_server_url = "http://revad-services:12001/data" -enable_home_creation = true - -[grpc.services.storageprovider.drivers.s3ng] -root = "/drone/src/tmp/reva/data" -enable_home = true -treetime_accounting = true -treesize_accounting = true -"s3.endpoint" = "http://ceph:8080" -"s3.region" = "default" -"s3.bucket" = "test" -"s3.access_key" = "test" -"s3.secret_key" = "test" - -# we have a locally running dataprovider -[http] -address = "0.0.0.0:12001" - -[http.services.dataprovider] -driver = "s3ng" -temp_folder = "/drone/src/tmp/reva/tmp" - -[http.services.dataprovider.drivers.s3ng] -root = "/drone/src/tmp/reva/data" -enable_home = true -treetime_accounting = true -treesize_accounting = true -"s3.endpoint" = "http://ceph:8080" -"s3.region" = "default" -"s3.bucket" = "test" -"s3.access_key" = "test" -"s3.secret_key" = "test" diff --git a/tests/oc-integration-tests/drone/storage-local-1.toml b/tests/oc-integration-tests/drone/storage-local-1.toml deleted file mode 100644 index 3b7d166d87..0000000000 --- a/tests/oc-integration-tests/drone/storage-local-1.toml +++ /dev/null @@ -1,46 +0,0 @@ -# This config file will start a reva service that: -# - uses the ocis driver to serve one half of a virtual view folder (/virtual/[a-k]) -# - serves the storage provider on grpc port 11100 -# - serves http dataprovider for this storage on port 11101 -# - /data - dataprovider: file up and download - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "localhost:19000" - -[grpc] -address = "0.0.0.0:11100" - -# This is a storage provider that grants direct access to the wrapped storage -# we have a locally running dataprovider 
-[grpc.services.storageprovider] -driver = "ocis" -mount_path = "/virtual" -mount_id = "virtual-a-k" -expose_data_server = true -data_server_url = "http://localhost:11101/data" - -[grpc.services.storageprovider.drivers.ocis] -root = "/drone/src/tmp/reva/data-local-1" -owner = "4c510ada-c86b-4815-8820-42cdf82c3d51" -owner_idp = "http://localhost:20080" -owner_type = "USER_TYPE_PRIMARY" -enable_home = false -treetime_accounting = true -treesize_accounting = true - -[http] -address = "0.0.0.0:11101" - -[http.services.dataprovider] -driver = "ocis" -temp_folder = "/drone/src/tmp/reva/tmp" - -[http.services.dataprovider.drivers.ocis] -root = "/drone/src/tmp/reva/data-local-1" -owner = "4c510ada-c86b-4815-8820-42cdf82c3d51" -owner_idp = "http://localhost:20080" -owner_type = "USER_TYPE_PRIMARY" -enable_home = false -treetime_accounting = true -treesize_accounting = true diff --git a/tests/oc-integration-tests/drone/storage-local-2.toml b/tests/oc-integration-tests/drone/storage-local-2.toml deleted file mode 100644 index db3e79fe5f..0000000000 --- a/tests/oc-integration-tests/drone/storage-local-2.toml +++ /dev/null @@ -1,46 +0,0 @@ -# This config file will start a reva service that: -# - uses the ocis driver to serve one half of a virtual view folder (/virtual/[l-z]) -# - serves the storage provider on grpc port 11110 -# - serves http dataprovider for this storage on port 11111 -# - /data - dataprovider: file up and download - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "localhost:19000" - -[grpc] -address = "0.0.0.0:11110" - -# This is a storage provider that grants direct access to the wrapped storage -# we have a locally running dataprovider -[grpc.services.storageprovider] -driver = "ocis" -mount_path = "/virtual" -mount_id = "virtual-l-z" -expose_data_server = true -data_server_url = "http://localhost:11111/data" - -[grpc.services.storageprovider.drivers.ocis] -root = "/drone/src/tmp/reva/data-local-2" -owner = "4c510ada-c86b-4815-8820-42cdf82c3d51" 
-owner_idp = "http://localhost:20080" -owner_type = "USER_TYPE_PRIMARY" -enable_home = false -treetime_accounting = true -treesize_accounting = true - -[http] -address = "0.0.0.0:11111" - -[http.services.dataprovider] -driver = "ocis" -temp_folder = "/drone/src/tmp/reva/tmp" - -[http.services.dataprovider.drivers.ocis] -root = "/drone/src/tmp/reva/data-local-2" -owner = "4c510ada-c86b-4815-8820-42cdf82c3d51" -owner_idp = "http://localhost:20080" -owner_type = "USER_TYPE_PRIMARY" -enable_home = false -treetime_accounting = true -treesize_accounting = true diff --git a/tests/oc-integration-tests/drone/storage-publiclink.toml b/tests/oc-integration-tests/drone/storage-publiclink.toml deleted file mode 100644 index f36aeb18fd..0000000000 --- a/tests/oc-integration-tests/drone/storage-publiclink.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This storage.toml config file will start a reva service that: -# - serves the public storage provider on grpc port 13000 - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "localhost:19000" - -[grpc] -address = "0.0.0.0:13000" - -# This is a storage provider that grants direct access to the wrapped storage -# we have a locally running dataprovider -[grpc.services.publicstorageprovider] -mount_path = "/public/" -mount_id = "e1a73ede-549b-4226-abdf-40e69ca8230d" -gateway_addr = "0.0.0.0:19000" - diff --git a/tests/oc-integration-tests/drone/storage-users-ocis.toml b/tests/oc-integration-tests/drone/storage-users-ocis.toml deleted file mode 100644 index 795ba41d54..0000000000 --- a/tests/oc-integration-tests/drone/storage-users-ocis.toml +++ /dev/null @@ -1,42 +0,0 @@ -# This config file will start a reva service that: -# - uses the ocis driver to serve users (/users) -# - serves the storage provider on grpc port 11000 -# - serves http dataprovider for this storage on port 11001 -# - /data - dataprovider: file up and download - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "localhost:19000" - -[grpc] -address = "0.0.0.0:11000" - -# 
This is a storage provider that grants direct access to the wrapped storage -# we have a locally running dataprovider -[grpc.services.storageprovider] -driver = "ocis" -mount_path = "/users" -mount_id = "123e4567-e89b-12d3-a456-426655440000" -expose_data_server = true -data_server_url = "http://revad-services:11001/data" -gateway_addr = "0.0.0.0:19000" - -[grpc.services.storageprovider.drivers.ocis] -root = "/drone/src/tmp/reva/data" -treetime_accounting = true -treesize_accounting = true -userprovidersvc = "localhost:18000" -gateway_addr = "0.0.0.0:19000" - -# we have a locally running dataprovider -[http] -address = "0.0.0.0:11001" - -[http.services.dataprovider] -driver = "ocis" -temp_folder = "/drone/src/tmp/reva/tmp" - -[http.services.dataprovider.drivers.ocis] -root = "/drone/src/tmp/reva/data" -treetime_accounting = true -treesize_accounting = true diff --git a/tests/oc-integration-tests/drone/storage-users-s3ng.toml b/tests/oc-integration-tests/drone/storage-users-s3ng.toml deleted file mode 100644 index 63073c9585..0000000000 --- a/tests/oc-integration-tests/drone/storage-users-s3ng.toml +++ /dev/null @@ -1,49 +0,0 @@ -# This config file will start a reva service that: -# - uses the s3ng driver to serve users (/users) -# - serves the storage provider on grpc port 11000 -# - serves http dataprovider for this storage on port 11001 -# - /data - dataprovider: file up and download - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "localhost:19000" - -[grpc] -address = "0.0.0.0:11000" - -# This is a storage provider that grants direct access to the wrapped storage -[grpc.services.storageprovider] -driver = "s3ng" -mount_path = "/users" -mount_id = "123e4567-e89b-12d3-a456-426655440000" -expose_data_server = true -data_server_url = "http://revad-services:11001/data" - -[grpc.services.storageprovider.drivers.s3ng] -root = "/drone/src/tmp/reva/data" -treetime_accounting = true -treesize_accounting = true -userprovidersvc = "localhost:18000" -"s3.endpoint" = 
"http://ceph:8080" -"s3.region" = "default" -"s3.bucket" = "test" -"s3.access_key" = "test" -"s3.secret_key" = "test" - -# we have a locally running dataprovider -[http] -address = "0.0.0.0:11001" - -[http.services.dataprovider] -driver = "s3ng" -temp_folder = "/drone/src/tmp/reva/tmp" - -[http.services.dataprovider.drivers.s3ng] -root = "/drone/src/tmp/reva/data" -treetime_accounting = true -treesize_accounting = true -"s3.endpoint" = "http://ceph:8080" -"s3.region" = "default" -"s3.bucket" = "test" -"s3.access_key" = "test" -"s3.secret_key" = "test" diff --git a/tests/oc-integration-tests/drone/users.demo.json b/tests/oc-integration-tests/drone/users.demo.json deleted file mode 100644 index 893d69b99f..0000000000 --- a/tests/oc-integration-tests/drone/users.demo.json +++ /dev/null @@ -1,38 +0,0 @@ -[ - { - "id": { - "opaque_id": "4c510ada-c86b-4815-8820-42cdf82c3d51", - "idp": "http://localhost:20080", - "type": 1 - }, - "username": "einstein", - "secret": "relativity", - "mail": "einstein@example.org", - "display_name": "Albert Einstein", - "groups": ["sailing-lovers", "violin-haters", "physics-lovers"] - }, - { - "id": { - "opaque_id": "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c", - "idp": "http://localhost:20080", - "type": 1 - }, - "username": "marie", - "secret": "radioactivity", - "mail": "marie@example.org", - "display_name": "Marie Curie", - "groups": ["radium-lovers", "polonium-lovers", "physics-lovers"] - }, - { - "id": { - "opaque_id": "932b4540-8d16-481e-8ef4-588e4b6b151c", - "idp": "http://localhost:20080", - "type": 1 - }, - "username": "richard", - "secret": "superfluidity", - "mail": "richard@example.org", - "display_name": "Richard Feynman", - "groups": ["quantum-lovers", "philosophy-haters", "physics-lovers"] - } -] diff --git a/tests/oc-integration-tests/drone/users.toml b/tests/oc-integration-tests/drone/users.toml deleted file mode 100644 index 6f849bf6a8..0000000000 --- a/tests/oc-integration-tests/drone/users.toml +++ /dev/null @@ -1,21 +0,0 @@ 
-# This config file will start a reva service that: -# - handles user metadata and user preferences -# - serves the grpc services on port 18000 - -[shared] -jwt_secret = "Pive-Fumkiu4" - -[grpc] -address = "0.0.0.0:18000" - -[grpc.services.authprovider] -auth_manager = "json" - -[grpc.services.authprovider.auth_managers.json] -users = "users.demo.json" - -[grpc.services.userprovider] -driver = "json" - -[grpc.services.userprovider.drivers.json] -users = "users.demo.json" diff --git a/tests/ocis b/tests/ocis index eb1aa4502e..8931ee1187 160000 --- a/tests/ocis +++ b/tests/ocis @@ -1 +1 @@ -Subproject commit eb1aa4502e084972a6fd7e57112ae81431961374 +Subproject commit 8931ee1187fa2c2a696a68a99a6c1c77919630d0 diff --git a/tests/revad/storage-home-s3ng.toml b/tests/revad/storage-home-s3ng.toml deleted file mode 100644 index d8d6b82d7a..0000000000 --- a/tests/revad/storage-home-s3ng.toml +++ /dev/null @@ -1,55 +0,0 @@ -# This config file will start a reva service that: -# - uses the s3ng driver to serve users, jailed into their home (/home) -# - serves the home storage provider on grpc port 12000 -# - serves http dataprovider for this storage on port 12001 -# - /data - dataprovider: file up and download -# -# The home storage will inject the username into the path and jail users into -# their home directory - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "gateway:19000" - -[grpc] -address = "0.0.0.0:12000" - -# This is a storage provider that grants direct access to the wrapped storage -# the context path wrapper reads tho username from the context and prefixes the relative storage path with it -[grpc.services.storageprovider] -driver = "s3ng" -mount_path = "/home" -mount_id = "123e4567-e89b-12d3-a456-426655440000" -expose_data_server = true -data_server_url = "http://storage-home:12001/data" -enable_home_creation = true - -[grpc.services.storageprovider.drivers.s3ng] -root = "/var/tmp/reva/data" -enable_home = true -treetime_accounting = true -treesize_accounting 
= true -"s3.endpoint" = "http://ceph:8080" -"s3.region" = "default" -"s3.bucket" = "test" -"s3.access_key" = "test" -"s3.secret_key" = "test" - -# we have a locally running dataprovider -[http] -address = "0.0.0.0:12001" - -[http.services.dataprovider] -driver = "s3ng" -temp_folder = "/var/tmp/reva/tmp" - -[http.services.dataprovider.drivers.s3ng] -root = "/var/tmp/reva/data" -enable_home = true -treetime_accounting = true -treesize_accounting = true -"s3.endpoint" = "http://ceph:8080" -"s3.region" = "default" -"s3.bucket" = "test" -"s3.access_key" = "test" -"s3.secret_key" = "test" diff --git a/tests/revad/storage-home-ocis.toml b/tests/revad/storage-home.toml similarity index 54% rename from tests/revad/storage-home-ocis.toml rename to tests/revad/storage-home.toml index 633cde4d4b..2205efe141 100644 --- a/tests/revad/storage-home-ocis.toml +++ b/tests/revad/storage-home.toml @@ -1,5 +1,5 @@ # This config file will start a reva service that: -# - uses the ocis driver to serve users, jailed into their home (/home) +# - uses the eoshome driver to serve users, jailed into their home (/home) # - serves the home storage provider on grpc port 12000 # - serves http dataprovider for this storage on port 12001 # - /data - dataprovider: file up and download @@ -17,31 +17,37 @@ address = "0.0.0.0:12000" # This is a storage provider that grants direct access to the wrapped storage # the context path wrapper reads tho username from the context and prefixes the relative storage path with it [grpc.services.storageprovider] -driver = "ocis" +driver = "eoshome" mount_path = "/home" mount_id = "123e4567-e89b-12d3-a456-426655440000" expose_data_server = true data_server_url = "http://storage-home:12001/data" enable_home_creation = true -gateway_addr = "gateway:19000" -[grpc.services.storageprovider.drivers.ocis] -root = "/var/tmp/reva/data" -enable_home = true -treetime_accounting = true -treesize_accounting = true -gateway_addr = "gateway:19000" 
+[grpc.services.storageprovider.drivers.eoshome] +namespace = "/eos/user/" +quota_node = "/eos/user/" +master_url = "root://eosuser.example.org" +slave_url = "root://eosuser.example.org" +user_layout = "{{substr 0 1 .Username}}/{{.Username}}" +share_folder = "/Shares" +xrdcopy_binary = "/opt/eos/xrootd/bin/xrdcopy" +cache_directory = "/var/tmp/reva" # we have a locally running dataprovider [http] address = "0.0.0.0:12001" [http.services.dataprovider] -driver = "ocis" +driver = "eoshome" temp_folder = "/var/tmp/reva/tmp" -[http.services.dataprovider.drivers.ocis] -root = "/var/tmp/reva/data" -enable_home = true -treetime_accounting = true -treesize_accounting = true +[http.services.dataprovider.drivers.eoshome] +namespace = "/eos/user/" +quota_node = "/eos/user/" +master_url = "root://eosuser.example.org" +slave_url = "root://eosuser.example.org" +user_layout = "{{substr 0 1 .Username}}/{{.Username}}" +share_folder = "/Shares" +xrdcopy_binary = "/opt/eos/xrootd/bin/xrdcopy" +cache_directory = "/var/tmp/reva" \ No newline at end of file diff --git a/tests/revad/storage-local-1.toml b/tests/revad/storage-local-1.toml index e3fa9b6abe..d0927dec8e 100644 --- a/tests/revad/storage-local-1.toml +++ b/tests/revad/storage-local-1.toml @@ -14,33 +14,35 @@ address = "0.0.0.0:11100" # This is a storage provider that grants direct access to the wrapped storage # we have a locally running dataprovider [grpc.services.storageprovider] -driver = "ocis" +driver = "eos" mount_path = "/virtual" mount_id = "virtual-a-k" expose_data_server = true data_server_url = "http://storage-local-1:11101/data" -[grpc.services.storageprovider.drivers.ocis] -root = "/var/tmp/reva/data-local-1" -owner = "4c510ada-c86b-4815-8820-42cdf82c3d51" -owner_idp = "http://frontend:20080" -owner_type = "USER_TYPE_PRIMARY" -enable_home = false -treetime_accounting = true -treesize_accounting = true +[grpc.services.storageprovider.drivers.eos] +namespace = "/eos/user/" +quota_node = "/eos/user/" +master_url = 
"root://eosuser.example.org" +slave_url = "root://eosuser.example.org" +user_layout = "{{substr 0 1 .Username}}/{{.Username}}" +share_folder = "/Shares" +xrdcopy_binary = "/opt/eos/xrootd/bin/xrdcopy" +cache_directory = "/var/tmp/reva" [http] address = "0.0.0.0:11101" [http.services.dataprovider] -driver = "ocis" +driver = "eos" temp_folder = "/var/tmp/reva/tmp" -[http.services.dataprovider.drivers.ocis] -root = "/var/tmp/reva/data-local-1" -owner = "4c510ada-c86b-4815-8820-42cdf82c3d51" -owner_idp = "http://frontend:20080" -owner_type = "USER_TYPE_PRIMARY" -enable_home = false -treetime_accounting = true -treesize_accounting = true +[http.services.dataprovider.drivers.eos] +namespace = "/eos/user/" +quota_node = "/eos/user/" +master_url = "root://eosuser.example.org" +slave_url = "root://eosuser.example.org" +user_layout = "{{substr 0 1 .Username}}/{{.Username}}" +share_folder = "/Shares" +xrdcopy_binary = "/opt/eos/xrootd/bin/xrdcopy" +cache_directory = "/var/tmp/reva" diff --git a/tests/revad/storage-local-2.toml b/tests/revad/storage-local-2.toml index f7a86df1b3..47488f2654 100644 --- a/tests/revad/storage-local-2.toml +++ b/tests/revad/storage-local-2.toml @@ -14,33 +14,35 @@ address = "0.0.0.0:11110" # This is a storage provider that grants direct access to the wrapped storage # we have a locally running dataprovider [grpc.services.storageprovider] -driver = "ocis" +driver = "eos" mount_path = "/virtual" mount_id = "virtual-l-z" expose_data_server = true data_server_url = "http://storage-local-2:11111/data" -[grpc.services.storageprovider.drivers.ocis] -root = "/var/tmp/reva/data-local-2" -owner = "4c510ada-c86b-4815-8820-42cdf82c3d51" -owner_idp = "http://frontend:20080" -owner_type = "USER_TYPE_PRIMARY" -enable_home = false -treetime_accounting = true -treesize_accounting = true +[grpc.services.storageprovider.drivers.eos] +namespace = "/eos/user/" +quota_node = "/eos/user/" +master_url = "root://eosuser.example.org" +slave_url = 
"root://eosuser.example.org" +user_layout = "{{substr 0 1 .Username}}/{{.Username}}" +share_folder = "/Shares" +xrdcopy_binary = "/opt/eos/xrootd/bin/xrdcopy" +cache_directory = "/var/tmp/reva" [http] address = "0.0.0.0:11111" [http.services.dataprovider] -driver = "ocis" +driver = "eos" temp_folder = "/var/tmp/reva/tmp" -[http.services.dataprovider.drivers.ocis] -root = "/var/tmp/reva/data-local-2" -owner = "4c510ada-c86b-4815-8820-42cdf82c3d51" -owner_idp = "http://frontend:20080" -owner_type = "USER_TYPE_PRIMARY" -enable_home = false -treetime_accounting = true -treesize_accounting = true +[http.services.dataprovider.drivers.eos] +namespace = "/eos/user/" +quota_node = "/eos/user/" +master_url = "root://eosuser.example.org" +slave_url = "root://eosuser.example.org" +user_layout = "{{substr 0 1 .Username}}/{{.Username}}" +share_folder = "/Shares" +xrdcopy_binary = "/opt/eos/xrootd/bin/xrdcopy" +cache_directory = "/var/tmp/reva" diff --git a/tests/revad/storage-publiclink.toml b/tests/revad/storage-publiclink.toml index 78db1777b6..f8807dbf62 100644 --- a/tests/revad/storage-publiclink.toml +++ b/tests/revad/storage-publiclink.toml @@ -13,5 +13,3 @@ address = "0.0.0.0:13000" [grpc.services.publicstorageprovider] mount_path = "/public/" mount_id = "e1a73ede-549b-4226-abdf-40e69ca8230d" -gateway_addr = "gateway:19000" - diff --git a/tests/revad/storage-users-ocis.toml b/tests/revad/storage-users-ocis.toml deleted file mode 100644 index 723e1686d4..0000000000 --- a/tests/revad/storage-users-ocis.toml +++ /dev/null @@ -1,42 +0,0 @@ -# This config file will start a reva service that: -# - uses the ocis driver to serve users (/users) -# - serves the storage provider on grpc port 11000 -# - serves http dataprovider for this storage on port 11001 -# - /data - dataprovider: file up and download - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "gateway:19000" - -[grpc] -address = "0.0.0.0:11000" - -# This is a storage provider that grants direct access to the wrapped 
storage -# we have a locally running dataprovider -[grpc.services.storageprovider] -driver = "ocis" -mount_path = "/users" -mount_id = "123e4567-e89b-12d3-a456-426655440000" -expose_data_server = true -data_server_url = "http://storage-users:11001/data" -gateway_addr = "gateway:19000" - -[grpc.services.storageprovider.drivers.ocis] -root = "/var/tmp/reva/data" -treetime_accounting = true -treesize_accounting = true -userprovidersvc = "users:18000" -gateway_addr = "gateway:19000" - -# we have a locally running dataprovider -[http] -address = "0.0.0.0:11001" - -[http.services.dataprovider] -driver = "ocis" -temp_folder = "/var/tmp/reva/tmp" - -[http.services.dataprovider.drivers.ocis] -root = "/var/tmp/reva/data" -treetime_accounting = true -treesize_accounting = true diff --git a/tests/revad/storage-users-s3ng.toml b/tests/revad/storage-users-s3ng.toml deleted file mode 100644 index 022edf1a05..0000000000 --- a/tests/revad/storage-users-s3ng.toml +++ /dev/null @@ -1,49 +0,0 @@ -# This config file will start a reva service that: -# - uses the s3ng driver to serve users (/users) -# - serves the storage provider on grpc port 11000 -# - serves http dataprovider for this storage on port 11001 -# - /data - dataprovider: file up and download - -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "gateway:19000" - -[grpc] -address = "0.0.0.0:11000" - -# This is a storage provider that grants direct access to the wrapped storage -[grpc.services.storageprovider] -driver = "s3ng" -mount_path = "/users" -mount_id = "123e4567-e89b-12d3-a456-426655440000" -expose_data_server = true -data_server_url = "http://storage-users:11001/data" - -[grpc.services.storageprovider.drivers.s3ng] -root = "/var/tmp/reva/data" -treetime_accounting = true -treesize_accounting = true -userprovidersvc = "users:18000" -"s3.endpoint" = "http://ceph:8080" -"s3.region" = "default" -"s3.bucket" = "test" -"s3.access_key" = "test" -"s3.secret_key" = "test" - -# we have a locally running dataprovider -[http] 
-address = "0.0.0.0:11001" - -[http.services.dataprovider] -driver = "s3ng" -temp_folder = "/var/tmp/reva/tmp" - -[http.services.dataprovider.drivers.s3ng] -root = "/var/tmp/reva/data" -treetime_accounting = true -treesize_accounting = true -"s3.endpoint" = "http://ceph:8080" -"s3.region" = "default" -"s3.bucket" = "test" -"s3.access_key" = "test" -"s3.secret_key" = "test" diff --git a/tests/revad/storage-users.toml b/tests/revad/storage-users.toml new file mode 100644 index 0000000000..4b28319d57 --- /dev/null +++ b/tests/revad/storage-users.toml @@ -0,0 +1,49 @@ +# This config file will start a reva service that: +# - uses the eos driver to serve users (/users) +# - serves the storage provider on grpc port 11000 +# - serves http dataprovider for this storage on port 11001 +# - /data - dataprovider: file up and download + +[shared] +jwt_secret = "Pive-Fumkiu4" +gatewaysvc = "gateway:19000" + +[grpc] +address = "0.0.0.0:11000" + -# This is a storage provider that grants direct access to the wrapped storage +# we have a locally running dataprovider +[grpc.services.storageprovider] +driver = "eos" +mount_path = "/users" +mount_id = "123e4567-e89b-12d3-a456-426655440000" +expose_data_server = true +data_server_url = "http://storage-users:11001/data" + +[grpc.services.storageprovider.drivers.eos] +namespace = "/eos/user/" +quota_node = "/eos/user/" +master_url = "root://eosuser.example.org" +slave_url = "root://eosuser.example.org" +user_layout = "{{substr 0 1 .Username}}/{{.Username}}" +share_folder = "/Shares" +xrdcopy_binary = "/opt/eos/xrootd/bin/xrdcopy" +cache_directory = "/var/tmp/reva" + +# we have a locally running dataprovider +[http] +address = "0.0.0.0:11001" + +[http.services.dataprovider] +driver = "eos" +temp_folder = "/var/tmp/reva/tmp" + +[http.services.dataprovider.drivers.eos] +namespace = "/eos/user/" +quota_node = "/eos/user/" +master_url = "root://eosuser.example.org" +slave_url = "root://eosuser.example.org" +user_layout = "{{substr 0 1 
.Username}}/{{.Username}}" +share_folder = "/Shares" +xrdcopy_binary = "/opt/eos/xrootd/bin/xrdcopy" +cache_directory = "/var/tmp/reva" diff --git a/tests/revad/users.demo.json b/tests/revad/users.demo.json index b3d289ef84..6404382055 100644 --- a/tests/revad/users.demo.json +++ b/tests/revad/users.demo.json @@ -6,10 +6,16 @@ "type": 1 }, "username": "einstein", + "uid_number": 10, + "gid_number": 1000, "secret": "relativity", "mail": "einstein@example.org", "display_name": "Albert Einstein", - "groups": ["sailing-lovers", "violin-haters", "physics-lovers"] + "groups": [ + "sailing-lovers", + "violin-haters", + "physics-lovers" + ] }, { "id": { @@ -18,10 +24,16 @@ "type": 1 }, "username": "marie", + "uid_number": 20, + "gid_number": 2000, "secret": "radioactivity", "mail": "marie@example.org", "display_name": "Marie Curie", - "groups": ["radium-lovers", "polonium-lovers", "physics-lovers"] + "groups": [ + "radium-lovers", + "polonium-lovers", + "physics-lovers" + ] }, { "id": { @@ -30,9 +42,15 @@ "type": 1 }, "username": "richard", + "uid_number": 30, + "gid_number": 3000, "secret": "superfluidity", "mail": "richard@example.org", "display_name": "Richard Feynman", - "groups": ["quantum-lovers", "philosophy-haters", "physics-lovers"] + "groups": [ + "quantum-lovers", + "philosophy-haters", + "physics-lovers" + ] } -] +] \ No newline at end of file