diff --git a/.editorconfig b/.editorconfig index 5431854f..a4e9c1eb 100644 --- a/.editorconfig +++ b/.editorconfig @@ -7,3 +7,10 @@ charset = utf-8 [*.md] insert_final_newline = true trim_trailing_whitespace = true +indent_style = space +indent_size = 2 +end_of_line = lf + +[*.md] +max_line_length = off +trim_trailing_whitespace = false diff --git a/.github/workflows/build-environment.yaml b/.github/workflows/build-environment.yaml new file mode 100644 index 00000000..3c7122ed --- /dev/null +++ b/.github/workflows/build-environment.yaml @@ -0,0 +1,55 @@ +--- +name: Build Environment +on: + push: + branches: + - "main" + + workflow_call: + inputs: + tag: + description: "A tag to use for the container image." + required: false + type: string + default: "latest" + ref: + description: "The git ref to checkout and build." + required: false + default: "main" + type: string + +jobs: + build: + runs-on: ubuntu-latest + + strategy: + matrix: + dockerfile: + - build-radosgw + - build-run-radosgw + - build-run-radosgw-tests + + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + ref: ${{ inputs.ref }} + + - name: Build image ${{ matrix.dockerfile }} + id: build-image + uses: redhat-actions/buildah-build@v2 + with: + image: 's3gw/${{ matrix.dockerfile }}' + tags: latest ${{ github.sha }} + containerfiles: 'tools/build/Dockerfile.${{ matrix.dockerfile }}' + context: 'tools/build' + + - name: Push build-radosgw to quay.io + id: push-to-quay + uses: redhat-actions/push-to-registry@v2 + with: + image: ${{ steps.build-image.outputs.image }} + tags: ${{ steps.build-image.outputs.tags }} + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_PASSWORD }} diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index e78e9f8a..acea1654 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -13,8 +13,8 @@ jobs: - name: Set up Git repository uses: actions/checkout@v3 - - name: Set up Python - uses: 
actions/setup-python@v3 + - name: Set up Python3 + uses: actions/setup-python@v4 - - name: Pre-Commit Checks + - name: Pre Commit uses: pre-commit/action@v3.0.0 diff --git a/.gitignore b/.gitignore index 3c3461b2..fbcc55ed 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,7 @@ venv .DS_Store .vscode +.vagrant + +*.swp +__pycache__/ diff --git a/.gitmodules b/.gitmodules index 996a19cb..c7b0776f 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,3 @@ -[submodule "tools"] - path = tools - url = https://github.com/aquarist-labs/s3gw-tools [submodule "ceph"] path = ceph url = https://github.com/aquarist-labs/ceph @@ -10,3 +7,6 @@ [submodule "ui"] path = ui url = https://github.com/aquarist-labs/s3gw-ui +[submodule "k3s-ansible"] + path = tools/env/playbooks/k3s-ansible + url = https://github.com/k3s-io/k3s-ansible.git diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9bbb47f3..6dd8d7b3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,34 +1,37 @@ +--- repos: -- repo: https://github.com/pre-commit/pre-commit-hooks + - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.0.1 hooks: - - id: trailing-whitespace - - id: check-yaml - - id: check-added-large-files - - id: end-of-file-fixer - - id: debug-statements - - id: mixed-line-ending - - id: detect-private-key - - id: destroyed-symlinks - - id: check-shebang-scripts-are-executable - - id: check-case-conflict + - id: trailing-whitespace + - id: check-yaml + args: + - --allow-multiple-documents + - id: check-added-large-files + - id: end-of-file-fixer + - id: debug-statements + - id: mixed-line-ending + - id: detect-private-key + - id: destroyed-symlinks + - id: check-shebang-scripts-are-executable + - id: check-case-conflict -- repo: https://github.com/jumanjihouse/pre-commit-hooks - rev: 2.1.5 - hooks: - - id: forbid-binary - exclude: >- - (?x)^( - assets/images/.* - )$ - - id: git-check + - repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: 2.1.5 + 
hooks: + - id: forbid-binary + exclude: >- + (?x)^( + assets/images/.* + )$ + - id: git-check -- repo: https://github.com/codespell-project/codespell - rev: v2.1.0 - hooks: - - id: codespell + - repo: https://github.com/codespell-project/codespell + rev: v2.1.0 + hooks: + - id: codespell -- repo: https://github.com/DavidAnson/markdownlint-cli2 - rev: v0.4.0 - hooks: - - id: markdownlint-cli2 + - repo: https://github.com/DavidAnson/markdownlint-cli2 + rev: v0.4.0 + hooks: + - id: markdownlint-cli2 diff --git a/env/playbooks/k3s-ansible b/env/playbooks/k3s-ansible new file mode 160000 index 00000000..8e708124 --- /dev/null +++ b/env/playbooks/k3s-ansible @@ -0,0 +1 @@ +Subproject commit 8e7081243b4ffa83beffe53b58458824a00d1a38 diff --git a/tools b/tools deleted file mode 160000 index 20379c18..00000000 --- a/tools +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 20379c188419c8455ecde2c196420f56f85aedca diff --git a/tools/.s3cfg b/tools/.s3cfg new file mode 100644 index 00000000..a2def3e8 --- /dev/null +++ b/tools/.s3cfg @@ -0,0 +1,87 @@ +[default] +access_key = 0555b35654ad1656d804 +access_token = +add_encoding_exts = +add_headers = +bucket_location = US +ca_certs_file = +cache_file = +check_ssl_certificate = True +check_ssl_hostname = True +cloudfront_host = cloudfront.amazonaws.com +connection_max_age = 5 +connection_pooling = True +content_disposition = +content_type = +default_mime_type = binary/octet-stream +delay_updates = False +delete_after = False +delete_after_fetch = False +delete_removed = False +dry_run = False +enable_multipart = True +encoding = UTF-8 +encrypt = False +expiry_date = +expiry_days = +expiry_prefix = +follow_symlinks = False +force = False +get_continue = False +gpg_command = /usr/bin/gpg +gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s +gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o 
%(output_file)s %(input_file)s +gpg_passphrase = s3gw +guess_mime_type = True +host_base = 127.0.0.1:7480 +host_bucket = 127.0.0.1:7480/%(bucket) +human_readable_sizes = False +invalidate_default_index_on_cf = False +invalidate_default_index_root_on_cf = True +invalidate_on_cf = False +kms_key = +limit = -1 +limitrate = 0 +list_md5 = False +log_target_prefix = +long_listing = False +max_delete = -1 +mime_type = +multipart_chunk_size_mb = 15 +multipart_copy_chunk_size_mb = 1024 +multipart_max_chunks = 10000 +preserve_attrs = True +progress_meter = True +proxy_host = +proxy_port = 0 +public_url_use_https = False +put_continue = False +recursive = False +recv_chunk = 65536 +reduced_redundancy = False +requester_pays = False +restore_days = 1 +restore_priority = Standard +secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== +send_chunk = 65536 +server_side_encryption = False +signature_v2 = False +signurl_use_https = False +simpledb_host = sdb.amazonaws.com +skip_existing = False +socket_timeout = 300 +ssl_client_cert_file = +ssl_client_key_file = +stats = False +stop_on_error = False +storage_class = +throttle_max = 100 +upload_id = +urlencoding_mode = normal +use_http_expect = False +use_https = False +use_mime_magic = True +verbosity = WARNING +website_endpoint = http://%(bucket)s.s3-website-%(location)s.amazonaws.com/ +website_error = +website_index = index.html diff --git a/tools/build-ui/Dockerfile.app b/tools/build-ui/Dockerfile.app new file mode 100644 index 00000000..c9784d59 --- /dev/null +++ b/tools/build-ui/Dockerfile.app @@ -0,0 +1,15 @@ +FROM node:lts-alpine + +RUN apk add gettext +RUN npm install -g http-server +RUN echo -e "#!/bin/sh\n" \ + "envsubst < /app/assets/rgw_service.config.json.sample > /app/assets/rgw_service.config.json\n" \ + "exec http-server /app/\n" > /usr/bin/entrypoint.sh +RUN chmod +x /usr/bin/entrypoint.sh + +WORKDIR /app +COPY ./ . 
+ +EXPOSE 8080 +STOPSIGNAL SIGINT +ENTRYPOINT [ "/usr/bin/entrypoint.sh" ] diff --git a/tools/build-ui/Dockerfile.app-builder b/tools/build-ui/Dockerfile.app-builder new file mode 100644 index 00000000..490b75fe --- /dev/null +++ b/tools/build-ui/Dockerfile.app-builder @@ -0,0 +1,6 @@ +FROM node:lts-alpine + +COPY build-app.sh /usr/bin/build-app.sh + +VOLUME ["/srv/app"] +ENTRYPOINT [ "/usr/bin/build-app.sh" ] diff --git a/tools/build-ui/build-app.sh b/tools/build-ui/build-app.sh new file mode 100755 index 00000000..9d6c1abc --- /dev/null +++ b/tools/build-ui/build-app.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +set -e + +cd /srv/app/ +npm ci +npm run build:prod + +exit 0 diff --git a/tools/build-ui/build.sh b/tools/build-ui/build.sh new file mode 100755 index 00000000..e7128e57 --- /dev/null +++ b/tools/build-ui/build.sh @@ -0,0 +1,118 @@ +#!/bin/sh + +set -e + +BUILDER_IMAGE_NAME=${BUILDER_IMAGE_NAME:-"s3gw-ui-builder"} +IMAGE_NAME=${IMAGE_NAME:-"s3gw-ui"} +S3GW_UI_DIR=$(realpath ${S3GW_UI_DIR:-"../../s3gw-ui/"}) +S3GW_UI_DIST_DIR=${S3GW_UI_DIST_DIR:-"${S3GW_UI_DIR}/dist/s3gw-ui/"} + +force=false +registry= +registry_args= + +usage() { + cat << EOF +usage: $0 CMD [args...] + +commands + builder-image Create the app builder image. + app Build the app. + app-image Create the app image. + help This message. + +options + --registry URL The URL of the registry. + --no-registry-tls Disable TLS when pushing to registry. + +EOF +} + +info() { + echo "[INFO] $*" >/dev/stdout +} + +error() { + echo "[ERROR] $*" >/dev/stderr +} + +build_builder_image() { + info "Building ${BUILDER_IMAGE_NAME} image ..." + podman build -t ${BUILDER_IMAGE_NAME} -f ./Dockerfile.app-builder . || exit 1 +} + +build_app() { + info "Building ${IMAGE_NAME} app ..." + if ! podman image exists "${BUILDER_IMAGE_NAME}"; then + error "Unable to find builder image '${BUILDER_IMAGE_NAME}'. Please run the 'builder-image' command first." 
&& exit 1 + fi + rm -rf "${S3GW_UI_DIST_DIR}/*" + podman run -it --replace --name "${BUILDER_IMAGE_NAME}" \ + -v "${S3GW_UI_DIR}":/srv/app \ + ${BUILDER_IMAGE_NAME} +} + +build_app_image() { + if [ ! -e "${S3GW_UI_DIST_DIR}" ]; then + error "Application dist folder '${S3GW_UI_DIST_DIR}' does not exist. Please run the 'app' command first." && exit 1 + fi + + info "Building ${IMAGE_NAME} image ..." + podman build -t ${IMAGE_NAME} -f ./Dockerfile.app ${S3GW_UI_DIST_DIR} + + if [ -n "${registry}" ]; then + info "Pushing ${IMAGE_NAME} image to registry ..." + podman push ${registry_args} localhost/${IMAGE_NAME} \ + ${registry}/${IMAGE_NAME} + fi +} + +cmd="${1}" + +if [ -z "${cmd}" ]; then + usage && exit 1 +fi + +if [ "${cmd}" = "help" ]; then + usage && exit 0 +fi + +shift 1 + +while [ $# -ge 1 ]; do + case ${1} in + --force) + force=true + ;; + --registry) + registry=$2 + shift + ;; + --no-registry-tls) + registry_args="--tls-verify=false" + ;; + *) + error "Unknown argument '${1}'" + exit 1 + ;; + esac + shift +done + +case ${cmd} in + builder-image) + build_builder_image || exit 1 + ;; + app) + build_app || exit 1 + ;; + app-image) + build_app_image || exit 1 + ;; + *) + error "Unknown command '${cmd}'" + exit 1 + ;; +esac + +exit 0 diff --git a/tools/build/Dockerfile.build-container b/tools/build/Dockerfile.build-container new file mode 100644 index 00000000..d5d9e48f --- /dev/null +++ b/tools/build/Dockerfile.build-container @@ -0,0 +1,60 @@ +FROM opensuse/leap:15.4 +LABEL Name=s3gw + +ARG ID=s3gw +ENV ID=${ID} + +# Add OBS repository for additional dependencies necessary on Leap 15.4 +RUN zypper ar \ + https://download.opensuse.org/repositories/filesystems:/ceph:/s3gw/15.4/ \ + s3gw-deps \ + && zypper --gpg-auto-import-keys ref +RUN zypper -n install \ + libblkid1 \ + libexpat1 \ + libtcmalloc4 \ + libfmt9 \ + liboath0 \ + libicu-suse65_1 \ + libthrift-0_16_0 \ + libboost_atomic1_80_0 \ + libboost_chrono1_80_0 \ + libboost_context1_80_0 \ + 
libboost_coroutine1_80_0 \ + libboost_date_time1_80_0 \ + libboost_filesystem1_80_0 \ + libboost_iostreams1_80_0 \ + libboost_program_options1_80_0 \ + libboost_random1_80_0 \ + libboost_regex1_80_0 \ + libboost_serialization1_80_0 \ + libboost_system1_80_0 \ + libboost_thread1_80_0 \ + && zypper clean --all + +RUN mkdir -p /radosgw +ENV PATH /radosgw:$PATH +ENV LD_LIBRARY_PATH /radosgw:$LD_LIBRARY_PATH + +RUN mkdir -p /data + +COPY ./bin/radosgw /radosgw/ +COPY [ \ + "./lib/librados.so", \ + "./lib/librados.so.2", \ + "./lib/librados.so.2.0.0", \ + "./lib/libceph-common.so", \ + "./lib/libceph-common.so.2", \ + "/radosgw/" ] + +EXPOSE 7480 + +VOLUME ["/data"] +ENTRYPOINT ["radosgw", "-d", \ + "--no-mon-config", \ + "--id", "${ID}", \ + "--rgw-data", "/data/", \ + "--run-dir", "/run/", \ + "--rgw-sfs-data-path", "/data"] +CMD ["--rgw-backend-store", "sfs", \ + "--debug-rgw", "1"] diff --git a/tools/build/Dockerfile.build-radosgw b/tools/build/Dockerfile.build-radosgw new file mode 100644 index 00000000..3c2125a6 --- /dev/null +++ b/tools/build/Dockerfile.build-radosgw @@ -0,0 +1,103 @@ +FROM opensuse/leap:15.4 + +# Add OBS repository for additional dependencies necessary on Leap 15.4 +RUN zypper ar \ + https://download.opensuse.org/repositories/filesystems:/ceph:/s3gw/15.4/ \ + s3gw-deps \ + && zypper --gpg-auto-import-keys ref +RUN zypper -n install --no-recommends \ + 'cmake>3.5' \ + 'fmt-devel>=6.2.1' \ + 'gperftools-devel>=2.4' \ + 'libblkid-devel>=2.17' \ + 'liblz4-devel>=1.7' \ + 'libthrift-devel>=0.13.0' \ + 'pkgconfig(libudev)' \ + 'pkgconfig(systemd)' \ + 'pkgconfig(udev)' \ + babeltrace-devel \ + binutils \ + ccache \ + cmake \ + cpp11 \ + cryptsetup-devel \ + cunit-devel \ + fdupes \ + fuse-devel \ + gcc-c++ \ + gcc11 \ + gcc11-c++ \ + git \ + gperf \ + jq \ + keyutils-devel \ + libaio-devel \ + libasan6 \ + libboost_atomic1_80_0-devel \ + libboost_context1_80_0-devel \ + libboost_coroutine1_80_0-devel \ + libboost_filesystem1_80_0-devel \ + 
libboost_iostreams1_80_0-devel \ + libboost_program_options1_80_0-devel \ + libboost_python-py3-1_80_0-devel \ + libboost_random1_80_0-devel \ + libboost_regex1_80_0-devel \ + libboost_system1_80_0-devel \ + libboost_thread1_80_0-devel \ + libbz2-devel \ + libcap-devel \ + libcap-ng-devel \ + libcurl-devel \ + libexpat-devel \ + libicu-devel \ + libnl3-devel \ + liboath-devel \ + libopenssl-devel \ + libpmem-devel \ + libpmemobj-devel \ + librabbitmq-devel \ + librdkafka-devel \ + libsqliteorm \ + libstdc++6-devel-gcc11 \ + libtool \ + libtsan0 \ + libxml2-devel \ + lttng-ust-devel \ + lua-devel \ + lua53-luarocks \ + make \ + memory-constraints \ + mozilla-nss-devel \ + nasm \ + ncurses-devel \ + net-tools \ + ninja \ + ninja \ + openldap2-devel \ + patch \ + perl \ + pkgconfig \ + procps \ + python3 \ + python3-Cython \ + python3-PrettyTable \ + python3-PyYAML \ + python3-Sphinx \ + python3-devel \ + python3-setuptools \ + rdma-core-devel \ + re2-devel \ + rpm-build \ + snappy-devel \ + sqlite-devel \ + systemd-rpm-macros \ + systemd-rpm-macros \ + valgrind-devel \ + xfsprogs-devel \ + xmlstarlet \ + && zypper clean --all + +COPY build-radosgw.sh /usr/bin/build-radosgw.sh + +VOLUME ["/srv/ceph"] +ENTRYPOINT ["/usr/bin/build-radosgw.sh"] diff --git a/tools/build/Dockerfile.build-radosgw-test-container b/tools/build/Dockerfile.build-radosgw-test-container new file mode 100644 index 00000000..ee26d60d --- /dev/null +++ b/tools/build/Dockerfile.build-radosgw-test-container @@ -0,0 +1,64 @@ +FROM opensuse/leap:15.4 +LABEL Name=s3gw-test + +# Add OBS repository for additional dependencies necessary on Leap 15.4 +RUN zypper ar \ + https://download.opensuse.org/repositories/filesystems:/ceph:/s3gw/15.4/ \ + s3gw-deps \ + && zypper --gpg-auto-import-keys ref +RUN zypper -n install \ + libblkid1 \ + libexpat1 \ + libtcmalloc4 \ + libfmt9 \ + liboath0 \ + libicu-suse65_1 \ + libthrift-0_16_0 \ + libboost_atomic1_80_0 \ + libboost_chrono1_80_0 \ + libboost_context1_80_0 \ + 
libboost_coroutine1_80_0 \ + libboost_date_time1_80_0 \ + libboost_filesystem1_80_0 \ + libboost_iostreams1_80_0 \ + libboost_program_options1_80_0 \ + libboost_random1_80_0 \ + libboost_regex1_80_0 \ + libboost_serialization1_80_0 \ + libboost_system1_80_0 \ + libboost_thread1_80_0 \ + && zypper clean --all + +RUN mkdir -p /radosgw +ENV PATH /radosgw:$PATH +ENV LD_LIBRARY_PATH /radosgw:$LD_LIBRARY_PATH + +COPY ./bin/unittest_rgw_sfs_sqlite_users /radosgw/unittest_rgw_sfs_sqlite_users +COPY ./bin/unittest_rgw_sfs_sqlite_buckets /radosgw/unittest_rgw_sfs_sqlite_buckets +COPY ./bin/unittest_rgw_sfs_sqlite_objects /radosgw/unittest_rgw_sfs_sqlite_objects +COPY ./bin/unittest_rgw_sfs_sqlite_versioned_objects /radosgw/unittest_rgw_sfs_sqlite_versioned_objects +COPY ./bin/unittest_rgw_sfs_sfs_bucket /radosgw/unittest_rgw_sfs_sfs_bucket +COPY ./bin/unittest_rgw_sfs_metadata_compatibility /radosgw/unittest_rgw_sfs_metadata_compatibility +COPY ./bin/unittest_rgw_sfs_gc /radosgw/unittest_rgw_sfs_gc +COPY [ "./lib/libradosgw.so", \ + "./lib/libradosgw.so.2", \ + "./lib/libradosgw.so.2.0.0", \ + "./lib/librados.so", \ + "./lib/librados.so.2", \ + "./lib/librados.so.2.0.0", \ + "./lib/libceph-common.so", \ + "./lib/libceph-common.so.2", \ + "/radosgw/" ] + +RUN touch /radosgw/run_tests.sh && chmod +x /radosgw/run_tests.sh +RUN echo -e "#!/bin/bash\n\ +unittest_rgw_sfs_sqlite_users || exit 1\n\ +unittest_rgw_sfs_sqlite_buckets || exit 1\n\ +unittest_rgw_sfs_sqlite_objects || exit 1\n\ +unittest_rgw_sfs_sqlite_versioned_objects || exit 1\n\ +unittest_rgw_sfs_sfs_bucket || exit 1\n \ +unittest_rgw_sfs_metadata_compatibility || exit 1\n \ +unittest_rgw_sfs_gc || exit 1\n" \ +> /radosgw/run_tests.sh + +ENTRYPOINT ["run_tests.sh"] diff --git a/tools/build/Dockerfile.build-run-radosgw b/tools/build/Dockerfile.build-run-radosgw new file mode 100644 index 00000000..97ce45c2 --- /dev/null +++ b/tools/build/Dockerfile.build-run-radosgw @@ -0,0 +1,51 @@ +FROM opensuse/leap:15.4 +LABEL 
Name=s3gw + +ARG ID=s3gw +ENV ID=${ID} + +# Add OBS repository for additional dependencies necessary on Leap 15.4 +RUN zypper ar \ + https://download.opensuse.org/repositories/filesystems:/ceph:/s3gw/15.4/ \ + s3gw-deps \ + && zypper --gpg-auto-import-keys ref +RUN zypper -n install \ + libblkid1 \ + libexpat1 \ + libtcmalloc4 \ + libfmt9 \ + liboath0 \ + libicu-suse65_1 \ + libthrift-0_16_0 \ + libboost_atomic1_80_0 \ + libboost_chrono1_80_0 \ + libboost_context1_80_0 \ + libboost_coroutine1_80_0 \ + libboost_date_time1_80_0 \ + libboost_filesystem1_80_0 \ + libboost_iostreams1_80_0 \ + libboost_program_options1_80_0 \ + libboost_random1_80_0 \ + libboost_regex1_80_0 \ + libboost_serialization1_80_0 \ + libboost_system1_80_0 \ + libboost_thread1_80_0 \ + && zypper clean --all + +RUN mkdir -p /radosgw +ENV PATH /radosgw/bin:$PATH +ENV LD_LIBRARY_PATH /radosgw/lib:$LD_LIBRARY_PATH + +RUN mkdir -p /data + +EXPOSE 7480 + +VOLUME ["/data"] +ENTRYPOINT ["radosgw", "-d", \ + "--no-mon-config", \ + "--id", "${ID}", \ + "--rgw-data", "/data/", \ + "--run-dir", "/run/", \ + "--rgw-sfs-data-path", "/data"] +CMD ["--rgw-backend-store", "sfs", \ + "--debug-rgw", "1"] diff --git a/tools/build/Dockerfile.build-run-radosgw-tests b/tools/build/Dockerfile.build-run-radosgw-tests new file mode 100644 index 00000000..25963f01 --- /dev/null +++ b/tools/build/Dockerfile.build-run-radosgw-tests @@ -0,0 +1,39 @@ +FROM opensuse/leap:15.4 +LABEL Name=s3gw-test + +# Add OBS repository for additional dependencies necessary on Leap 15.4 +RUN zypper ar \ + https://download.opensuse.org/repositories/filesystems:/ceph:/s3gw/15.4/ \ + s3gw-deps \ + && zypper --gpg-auto-import-keys ref +RUN zypper -n install \ + libblkid1 \ + libexpat1 \ + libtcmalloc4 \ + libfmt9 \ + liboath0 \ + libicu-suse65_1 \ + libthrift-0_16_0 \ + libboost_atomic1_80_0 \ + libboost_chrono1_80_0 \ + libboost_context1_80_0 \ + libboost_coroutine1_80_0 \ + libboost_date_time1_80_0 \ + libboost_filesystem1_80_0 \ + 
libboost_iostreams1_80_0 \ + libboost_program_options1_80_0 \ + libboost_random1_80_0 \ + libboost_regex1_80_0 \ + libboost_serialization1_80_0 \ + libboost_system1_80_0 \ + libboost_thread1_80_0 \ + && zypper clean --all + +RUN mkdir -p /radosgw +ENV PATH /radosgw:$PATH +ENV LD_LIBRARY_PATH /radosgw/lib:$LD_LIBRARY_PATH + +COPY run_tests.sh /radosgw/run_tests.sh +RUN chmod +x /radosgw/run_tests.sh + +ENTRYPOINT ["run_tests.sh"] diff --git a/tools/build/Dockerfile.s3gw b/tools/build/Dockerfile.s3gw new file mode 100644 index 00000000..4a4a076a --- /dev/null +++ b/tools/build/Dockerfile.s3gw @@ -0,0 +1,171 @@ +FROM opensuse/leap:15.4 as runtime + +# Add OBS repository for additional dependencies necessary on Leap 15.4 +RUN zypper ar \ + https://download.opensuse.org/repositories/filesystems:/ceph:/s3gw/15.4/ \ + s3gw-deps \ + && zypper --gpg-auto-import-keys ref +RUN zypper -n install \ + libblkid1 \ + libexpat1 \ + libtcmalloc4 \ + libfmt9 \ + liboath0 \ + libicu-suse65_1 \ + libthrift-0_16_0 \ + libboost_atomic1_80_0 \ + libboost_chrono1_80_0 \ + libboost_context1_80_0 \ + libboost_coroutine1_80_0 \ + libboost_date_time1_80_0 \ + libboost_filesystem1_80_0 \ + libboost_iostreams1_80_0 \ + libboost_program_options1_80_0 \ + libboost_random1_80_0 \ + libboost_regex1_80_0 \ + libboost_serialization1_80_0 \ + libboost_system1_80_0 \ + libboost_thread1_80_0 \ + && zypper clean --all \ + && mkdir -p /radosgw /data + +ENV PATH /radosgw:$PATH +ENV LD_LIBRARY_PATH /radosgw:$LD_LIBRARY_PATH + +FROM opensuse/leap:15.4 AS buildenv + +ARG CMAKE_BUILD_TYPE=Release +ENV CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + +# Add OBS repository for additional dependencies necessary on Leap 15.4 +RUN zypper ar \ + https://download.opensuse.org/repositories/filesystems:/ceph:/s3gw/15.4/ \ + s3gw-deps \ + && zypper --gpg-auto-import-keys ref +RUN zypper -n install --no-recommends \ + 'cmake>3.5' \ + 'fmt-devel>=6.2.1' \ + 'gperftools-devel>=2.4' \ + 'libblkid-devel>=2.17' \ + 'liblz4-devel>=1.7' \ 
+ 'libthrift-devel>=0.13.0' \ + 'pkgconfig(libudev)' \ + 'pkgconfig(systemd)' \ + 'pkgconfig(udev)' \ + babeltrace-devel \ + binutils \ + ccache \ + cmake \ + cpp11 \ + cryptsetup-devel \ + cunit-devel \ + fdupes \ + fuse-devel \ + gcc-c++ \ + gcc11 \ + gcc11-c++ \ + git \ + gperf \ + jq \ + keyutils-devel \ + libaio-devel \ + libasan6 \ + libboost_atomic1_80_0-devel \ + libboost_context1_80_0-devel \ + libboost_coroutine1_80_0-devel \ + libboost_filesystem1_80_0-devel \ + libboost_iostreams1_80_0-devel \ + libboost_program_options1_80_0-devel \ + libboost_python-py3-1_80_0-devel \ + libboost_random1_80_0-devel \ + libboost_regex1_80_0-devel \ + libboost_system1_80_0-devel \ + libboost_thread1_80_0-devel \ + libbz2-devel \ + libcap-ng-devel \ + libcurl-devel \ + libexpat-devel \ + libicu-devel \ + libnl3-devel \ + liboath-devel \ + libopenssl-devel \ + libpmem-devel \ + libpmemobj-devel \ + librabbitmq-devel \ + librdkafka-devel \ + libsqliteorm \ + libstdc++6-devel-gcc11 \ + libtool \ + libtsan0 \ + libxml2-devel \ + lttng-ust-devel \ + lua-devel \ + lua53-luarocks \ + make \ + memory-constraints \ + mozilla-nss-devel \ + nasm \ + ncurses-devel \ + net-tools \ + ninja \ + ninja \ + openldap2-devel \ + patch \ + perl \ + pkgconfig \ + procps \ + python3 \ + python3-Cython \ + python3-PrettyTable \ + python3-PyYAML \ + python3-Sphinx \ + python3-devel \ + python3-setuptools \ + rdma-core-devel \ + re2-devel \ + rpm-build \ + snappy-devel \ + sqlite-devel \ + systemd-rpm-macros \ + systemd-rpm-macros \ + valgrind-devel \ + xfsprogs-devel \ + xmlstarlet \ + && zypper clean --all + +COPY . 
/srv + +RUN /srv/ceph/qa/rgw/store/sfs/build-radosgw.sh + +FROM runtime AS s3gw +LABEL Name=s3gw + +ARG ID=s3gw +ENV ID=${ID} + +COPY --from=buildenv /srv/ceph/build/bin/radosgw /radosgw/ +COPY --from=buildenv \ + [ "/srv/ceph/build/lib/libradosgw.so", \ + "/srv/ceph/build/lib/libradosgw.so.2", \ + "/srv/ceph/build/lib/libradosgw.so.2.0.0", \ + "/srv/ceph/build/lib/librados.so", \ + "/srv/ceph/build/lib/librados.so.2", \ + "/srv/ceph/build/lib/librados.so.2.0.0", \ + "/srv/ceph/build/lib/libceph-common.so", \ + "/srv/ceph/build/lib/libceph-common.so.2", \ + "/radosgw/" ] + +# HTTP + HTTPS +EXPOSE 7480 +EXPOSE 7481 + +VOLUME ["/data"] +ENTRYPOINT ["radosgw", \ + "-d", \ + "--no-mon-config", \ + "--id", "${ID}", \ + "--rgw-data", "/data/", \ + "--run-dir", "/run/", \ + "--rgw-sfs-data-path", "/data"] +CMD ["--rgw-backend-store", "sfs", \ + "--debug-rgw", "1"] diff --git a/tools/build/build-container.sh b/tools/build/build-container.sh new file mode 100755 index 00000000..f4eeb323 --- /dev/null +++ b/tools/build/build-container.sh @@ -0,0 +1,48 @@ +#!/bin/sh + +set -e + +IMAGE_NAME=${IMAGE_NAME:-"s3gw"} +CEPH_DIR=$(realpath ${CEPH_DIR:-"../../ceph/"}) +CONTAINER_ENGINE=${CONTAINER_ENGINE:-"podman"} + +registry= +registry_args= + +build_container_image() { + echo "Building container image ..." + case ${CONTAINER_ENGINE} in + podman) + podman build -t ${IMAGE_NAME} -f ./Dockerfile.build-container ${CEPH_DIR}/build + ;; + docker) + docker build -t localhost/${IMAGE_NAME} -f ./Dockerfile.build-container ${CEPH_DIR}/build + ;; + esac +} + +push_container_image() { + if [ -n "${registry}" ]; then + echo "Pushing container image to registry ..." 
+ ${CONTAINER_ENGINE} push ${registry_args} localhost/${IMAGE_NAME} \ + ${registry}/${IMAGE_NAME} + fi +} + +while [ $# -ge 1 ]; do + case $1 in + --registry) + registry=$2 + shift + ;; + --no-registry-tls) + registry_args="--tls-verify=false" + ;; + esac + shift +done + +build_container_image +push_container_image + +exit 0 diff --git a/tools/build/build-radosgw-test-container.sh b/tools/build/build-radosgw-test-container.sh new file mode 100755 index 00000000..7eb7963f --- /dev/null +++ b/tools/build/build-radosgw-test-container.sh @@ -0,0 +1,49 @@ +#!/bin/sh + +set -e + +IMAGE_NAME=${IMAGE_NAME:-"s3gw-test"} +CEPH_DIR=$(realpath ${CEPH_DIR:-"../../ceph/"}) +CONTAINER_ENGINE=${CONTAINER_ENGINE:-"podman"} + +registry= +registry_args= + +build_test_container_image() { + echo "Building test container image ..." + + case ${CONTAINER_ENGINE} in + podman) + podman build -t ${IMAGE_NAME} -f ./Dockerfile.build-radosgw-test-container ${CEPH_DIR}/build + ;; + docker) + docker build -t localhost/${IMAGE_NAME} -f ./Dockerfile.build-radosgw-test-container ${CEPH_DIR}/build + ;; + esac +} + +push_test_container_image() { + if [ -n "${registry}" ]; then + echo "Pushing test container image to registry ..." 
+ ${CONTAINER_ENGINE} push ${registry_args} localhost/${IMAGE_NAME} \ + ${registry}/${IMAGE_NAME} + fi +} + +while [ $# -ge 1 ]; do + case $1 in + --registry) + registry=$2 + shift + ;; + --no-registry-tls) + registry_args="--tls-verify=false" + ;; + esac + shift +done + +build_test_container_image +push_test_container_image + +exit 0 diff --git a/tools/build/build-radosgw.sh b/tools/build/build-radosgw.sh new file mode 100755 index 00000000..a922983b --- /dev/null +++ b/tools/build/build-radosgw.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +set -e + +CEPH_DIR=$(realpath ${CEPH_DIR:-"/srv/ceph"}) +BUILD_SCRIPT=${BUILD_SCRIPT:-"${CEPH_DIR}/qa/rgw/store/sfs/build-radosgw.sh"} +${BUILD_SCRIPT} +exit 0 diff --git a/tools/build/build.sh b/tools/build/build.sh new file mode 100755 index 00000000..5cf213ea --- /dev/null +++ b/tools/build/build.sh @@ -0,0 +1,294 @@ +#!/bin/bash +# build.sh - helper to build container s3gw-related images +# Copyright 2022 SUSE, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +cephdir=${S3GW_CEPH_DIR:-"../../ceph.git"} +ccachedir=${S3GW_CCACHE_DIR:-""} +build_image_name=${S3GW_BUILD_IMAGE_NAME:-"s3gw-builder"} +build_image=${S3GW_BUILD_IMAGE:-"${build_image_name}:latest"} +s3gw_image=${S3GW_IMAGE:-"s3gw"} +with_tests=${WITH_TESTS:-"OFF"} +build_test_image_name=${S3GW_TEST_BUILD_IMAGE_NAME:-"s3gw-test-builder"} +build_test_image=${S3GW_TEST_BUILD_IMAGE:-"${build_test_image_name}:latest"} +s3gw_test_image=${S3GW_TEST_IMAGE:-"s3gw-test"} + +force=false + + +usage() { + cat << EOF +usage: $0 CMD [args...] + +commands + build-image Create the radosgw build image. + radosgw Build radosgw. + s3gw Create an s3gw container image. + s3gw-test Create an s3gw-test container image. + help This message. + +options + --ceph PATH Specifies the Ceph source directory. + (default: ${cephdir}) + --ccache PATH Specifies the ccache directory. + (default: ${ccachedir}) + --force Forces building even if image exists. + +env variables + S3GW_CEPH_DIR Specifies the Ceph source directory. + S3GW_CCACHE_DIR Specifies the ccache directory. + S3GW_BUILD_IMAGE_NAME Specifies the build image name. + S3GW_BUILD_IMAGE Specifies the build image (name:tag). + S3GW_IMAGE Specifies the s3gw container image name. + WITH_TESTS Specifies whether build the s3gw test images too. + S3GW_TEST_BUILD_IMAGE_NAME Specifies the test build image name. + S3GW_TEST_BUILD_IMAGE Specifies the test build image (name:tag). + S3GW_TEST_IMAGE Specifies the s3gw test container image name. + +EOF +} + +error() { + echo "error: $*" >/dev/stderr +} + +build_builder_image() { + + ver=$(git rev-parse --short HEAD) + img="${build_image_name}:${ver}" + + # check whether this builder image exists + # + found=false + ifs=$IFS + IFS=$'\n' + for l in $(podman image list --format '{{.Repository}}:{{.Tag}}'); do + IFS=" " a=(${l//\// }) + if [[ "${a[1]}" == "${img}" ]]; then + found=true + fi + done + IFS=$ifs + + if $found && ! 
$force ; then + echo "builder image already exists: ${img}" + return 0 + fi + + podman build -t ${img} -f Dockerfile.build-radosgw . || exit 1 + podman tag ${img} ${build_image_name}:latest +} + +build_radosgw() { + + # check build image exists + # + found=false + ifs=$IFS + IFS=$'\n' + for l in $(podman image list --format '{{.Repository}}:{{.Tag}}'); do + IFS=" " a=(${l//\// }) + if [[ "${a[1]}" == "${build_image}" ]]; then + found=true + fi + done + IFS=$ifs + + ! $found && \ + error "unable to find builder image '${build_image}'" && exit 1 + + # check ceph source directory + # + [[ -z "${cephdir}" ]] && \ + error "missing ceph directory" && exit 1 + [[ ! -d "${cephdir}" ]] && \ + error "path at '${cephdir}' is not a directory" && exit 1 + [[ ! -d "${cephdir}/.git" ]] && \ + error "path at '${cephdir}' is not a repository" && exit 1 + + volumes=("-v ${cephdir}:/srv/ceph") + + # check ccache directory + # + if [[ -n "${ccachedir}" ]]; then + if [[ ! -e "${ccachedir}" ]]; then + echo "ccache directory at '${ccachedir}' not found; creating." + mkdir -p ${ccachedir} + fi + [[ ! -d "${ccachedir}" ]] && \ + error "ccache path at '${ccachedir}' is not a directory." && exit 1 + + volumes=(${volumes[@]} "-v ${ccachedir}:/srv/ccache") + fi + + podman run -it --replace --name s3gw-builder \ + -e S3GW_CCACHE_DIR=/srv/ccache \ + -e WITH_TESTS=${with_tests} \ + ${volumes[@]} \ + ${build_image} +} + +build_s3gw() { + + [[ -z "${cephdir}" ]] && \ + error "missing ceph directory" && exit 1 + [[ ! -d "${cephdir}" ]] && \ + error "path at '${cephdir}' is not a directory" && exit 1 + [[ ! -d "${cephdir}/.git" ]] && \ + error "path at '${cephdir}' is not a repository" && exit 1 + [[ ! -d "${cephdir}/build" ]] && \ + error "unable to find build directory at '${cephdir}'" && exit 1 + [[ ! 
-e "${cephdir}/build/bin/radosgw" ]] && \ + error "unable to find radosgw binary at '${cephdir}' build directory" && \ + exit 1 + + ver=$(git --git-dir ${cephdir}/.git rev-parse --short HEAD) + imgname="${s3gw_image}:${ver}" + + echo "ceph dir: ${cephdir}" + echo " image: ${imgname}" + + is_done=false + + ifs=$IFS + IFS=$'\n' + for l in $(podman image list --format '{{.Repository}}:{{.Tag}}'); do + IFS=" " a=(${l//\// }) + if [[ "${a[1]}" == "${imgname}" ]] && ! $force ; then + echo "found built image '${l}', mark it latest" + podman tag ${l} s3gw:latest || exit 1 + is_done=true + break + fi + done + IFS=${ifs} + + if $is_done ; then + return 0 + fi + + podman build -t ${imgname} \ + -f $(pwd)/Dockerfile.build-container \ + ${cephdir}/build || exit 1 + podman tag ${imgname} s3gw:latest || exit 1 +} + +build_s3gw_test() { + + [[ -z "${cephdir}" ]] && \ + error "missing ceph directory" && exit 1 + [[ ! -d "${cephdir}" ]] && \ + error "path at '${cephdir}' is not a directory" && exit 1 + [[ ! -d "${cephdir}/.git" ]] && \ + error "path at '${cephdir}' is not a repository" && exit 1 + [[ ! -d "${cephdir}/build" ]] && \ + error "unable to find build directory at '${cephdir}'" && exit 1 + [[ ! -e "${cephdir}/build/bin/unittest_rgw_sfs_sqlite_users" ]] && \ + error "unable to find unittest_rgw_sfs_sqlite_users binary at '${cephdir}' build directory" && \ + [[ ! -e "${cephdir}/build/bin/unittest_rgw_sfs_sqlite_buckets" ]] && \ + error "unable to find unittest_rgw_sfs_sqlite_buckets binary at '${cephdir}' build directory" && \ + [[ ! -e "${cephdir}/build/bin/unittest_rgw_sfs_sqlite_objects" ]] && \ + error "unable to find unittest_rgw_sfs_sqlite_objects binary at '${cephdir}' build directory" && \ + [[ ! -e "${cephdir}/build/bin/unittest_rgw_sfs_sqlite_versioned_objects" ]] && \ + error "unable to find unittest_rgw_sfs_sqlite_versioned_objects binary at '${cephdir}' build directory" && \ + [[ ! 
-e "${cephdir}/build/bin/unittest_rgw_sfs_sfs_bucket" ]] && \ + error "unable to find unittest_rgw_sfs_sfs_bucket binary at '${cephdir}' build directory" && \ + [[ ! -e "${cephdir}/build/bin/unittest_rgw_sfs_metadata_compatibility" ]] && \ + error "unable to find unittest_rgw_sfs_metadata_compatibility binary at '${cephdir}' build directory" && \ + [[ ! -e "${cephdir}/build/bin/unittest_rgw_sfs_gc" ]] && \ + error "unable to find unittest_rgw_sfs_gc binary at '${cephdir}' build directory" && \ + exit 1 + + ver=$(git --git-dir ${cephdir}/.git rev-parse --short HEAD) + imgname="${s3gw_test_image}:${ver}" + + echo "ceph dir: ${cephdir}" + echo " image: ${imgname}" + + is_done=false + + ifs=$IFS + IFS=$'\n' + for l in $(podman image list --format '{{.Repository}}:{{.Tag}}'); do + IFS=" " a=(${l//\// }) + if [[ "${a[1]}" == "${imgname}" ]] && ! $force ; then + echo "found built image '${l}', mark it latest" + podman tag ${l} s3gw-test:latest || exit 1 + is_done=true + break + fi + done + IFS=${ifs} + + if $is_done ; then + return 0 + fi + + podman build -t ${imgname} \ + -f $(pwd)/Dockerfile.build-radosgw-test-container \ + ${cephdir}/build || exit 1 + podman tag ${imgname} s3gw-test:latest || exit 1 +} + +cmd="${1}" +shift 1 + +[[ -z "${cmd}" ]] && \ + usage && exit 1 + +if [[ "${cmd}" == "help" ]]; then + usage + exit 0 +fi + +while [[ $# -gt 0 ]]; do + case $1 in + --ceph) + cephdir="${2}" + shift 1 + ;; + --ccache) + ccachedir="${2}" + shift 1 + ;; + --force) + force=true + ;; + *) + error "unknown argument '${1}'" + exit 1 + ;; + esac + shift 1 +done + +case ${cmd} in + build-image) + build_builder_image || exit 1 + ;; + radosgw) + build_radosgw || exit 1 + ;; + s3gw) + build_s3gw || exit 1 + ;; + s3gw-test) + build_s3gw_test || exit 1 + ;; + *) + error "unknown command '${cmd}'" + exit 1 + ;; +esac diff --git a/tools/build/run_tests.sh b/tools/build/run_tests.sh new file mode 100755 index 00000000..52660026 --- /dev/null +++ b/tools/build/run_tests.sh @@ -0,0 
+1,16 @@ +#!/bin/bash +RES=0 +echo "Unit tests found:" +find /radosgw/bin -name "unittest_rgw_sfs_*" + +echo "Running tests..." +UNIT_TESTS=(`find /radosgw/bin -name "unittest_rgw_sfs_*"`) +for unit_test in "${UNIT_TESTS[@]}" +do + ${unit_test} + if [ $? -ne 0 ] + then + RES=1 + fi +done +exit ${RES} diff --git a/tools/env/.gitignore b/tools/env/.gitignore new file mode 100644 index 00000000..944b91dc --- /dev/null +++ b/tools/env/.gitignore @@ -0,0 +1,8 @@ +/*.ctr.tar +s3gw/*.tmp.yaml +playbooks/join-command +playbooks/admin.conf +test.yaml +s3gw.tar +cors.xml +longhorn-setting.yaml diff --git a/tools/env/Vagrantfile b/tools/env/Vagrantfile new file mode 100644 index 00000000..0d9e161a --- /dev/null +++ b/tools/env/Vagrantfile @@ -0,0 +1,207 @@ +BOX_NAME = ENV["BOX_NAME"] || "opensuse/Leap-15.3.x86_64" +VM_PROVIDER = ENV["VM_PROVIDER"] || "libvirt" +VM_NET = (ENV["VM_NET"] || "10.46.201.0").split(".0")[0] +VM_NET_LAST_OCTET_START = Integer(ENV["VM_NET_LAST_OCTET_START"] || "101") +VM_BRIDGE_INET = ENV["VM_BRIDGE_INET"] || "eth0" + +#k3s-ansible seems to work with only 1 admin; this should be investigated. +#For the time being, we assume this value hardcoded to 1. 
+ADMIN_COUNT = Integer(ENV["ADMIN_COUNT"] || "1") + +WORKER_COUNT = Integer(ENV["WORKER_COUNT"] || "1") +ADMIN_MEM = Integer(ENV["ADMIN_MEM"] || "4096") +ADMIN_CPU = Integer(ENV["ADMIN_CPU"] || "2") +ADMIN_DISK = ((ENV["ADMIN_DISK"] || "no") == "yes") +ADMIN_DISK_SIZE = ENV["ADMIN_DISK_SIZE"] || "8G" +WORKER_MEM = Integer(ENV["WORKER_MEM"] || "4096") +WORKER_CPU = Integer(ENV["WORKER_CPU"] || "2") +WORKER_DISK = ((ENV["WORKER_DISK"] || "no") == "yes") +WORKER_DISK_SIZE = ENV["WORKER_DISK_SIZE"] || "8G" +STOP_AFTER_BOOTSTRAP = ((ENV["STOP_AFTER_BOOTSTRAP"] || "no") == "yes") +STOP_AFTER_K3S_INSTALL = ((ENV["STOP_AFTER_K3S_INSTALL"] || "no") == "yes") +S3GW_IMAGE = ENV["S3GW_IMAGE"] || "ghcr.io/aquarist-labs/s3gw:latest" +S3GW_IMAGE_PULL_POLICY = ENV["S3GW_IMAGE_PULL_POLICY"] || "Always" +PROV_USER = ENV["PROV_USER"] || "vagrant" +S3GW_UI_IMAGE = "admin-1.local/s3gw-ui:latest" +S3GW_UI_IMAGE_PULL_POLICY = "Always" +S3GW_UI_REPO = ENV["S3GW_UI_REPO"] || "" +S3GW_UI_VERSION = ENV["S3GW_UI_VERSION"] || "" +SCENARIO = ENV["SCENARIO"] || "" +K3S_VERSION = ENV["K3S_VERSION"] || "v1.23.6+k3s1" + +ansible_groups = { + "apt" => [], + "zypper" => [], + "master" => [ + "admin-[1:#{ADMIN_COUNT}]" + ], + "node" => [ + "worker-[1:#{WORKER_COUNT}]" + ], + "k3s_cluster" => [ + "admin-[1:#{ADMIN_COUNT}]", + "worker-[1:#{WORKER_COUNT}]" + ], + "kubectl" => [ + "admin-1" + ] +} + +extra_vars = { + user: PROV_USER, + worker_count: WORKER_COUNT, + s3gw_image: S3GW_IMAGE, + s3gw_image_pull_policy: S3GW_IMAGE_PULL_POLICY, + s3gw_ui_image: S3GW_UI_IMAGE, + s3gw_ui_image_pull_policy: S3GW_UI_IMAGE_PULL_POLICY, + s3gw_ui_repo: S3GW_UI_REPO, + s3gw_ui_version: S3GW_UI_VERSION, + scenario: SCENARIO, + k3s_version: K3S_VERSION, + systemd_dir: "/etc/systemd/system", + master_ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START}", + + # --node-ip is needed when using virtualbox, otherwise it will start k3s on the NAT interface. 
+ # This is not sufficient when WORKER_COUNT > 0 because workers need this directive too. + # Currently seems that this problem cannot be overcome, so with virtualbox you can only have a + # working cluster with WORKER_COUNT == 0 + extra_server_args: "--node-ip #{VM_NET}.#{VM_NET_LAST_OCTET_START}" +} + +def ansible_provision (context, ansible_groups, extra_vars) + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/bootstrap.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + if(!STOP_AFTER_BOOTSTRAP) + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/k3s-ansible/site.yml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/k3s-post-install.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + if(!STOP_AFTER_K3S_INSTALL) + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/longhorn-deploy.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/s3gw-deploy.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/s3gw-ui-deploy.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/ingress-traefik-deploy.yaml" + ansible.groups = ansible_groups + ansible.extra_vars = extra_vars + end + if SCENARIO != "" + context.vm.provision "ansible" do |ansible| + ansible.limit = "all" + ansible.playbook = "playbooks/load-scen.yaml" + ansible.groups = ansible_groups + ansible.extra_vars 
= extra_vars + end + end + end + end +end + +Vagrant.configure("2") do |config| + + if BOX_NAME.include? "generic/ubuntu" + ansible_groups["apt"] << "admin-[1:#{ADMIN_COUNT}]" + ansible_groups["apt"] << "worker-[1:#{WORKER_COUNT}]" + elsif BOX_NAME.include? "opensuse/" + ansible_groups["zypper"] << "admin-[1:#{ADMIN_COUNT}]" + ansible_groups["zypper"] << "worker-[1:#{WORKER_COUNT}]" + end + + if VM_PROVIDER == "libvirt" + config.vm.provider "libvirt" do |lv| + lv.connect_via_ssh = false + lv.qemu_use_session = false + lv.nic_model_type = "e1000" + lv.cpu_mode = 'host-passthrough' + end + + # This allows to have a working cluster with WORKER_COUNT > 0 + # It removes --node-ip directive. + extra_vars[:extra_server_args] = "" + + elsif VM_PROVIDER == "virtualbox" + config.vm.synced_folder "~", "/shared" + end + + (1..ADMIN_COUNT).each do |i| + config.vm.define "admin-#{i}" do |admin| + admin.vm.provider VM_PROVIDER do |lv| + lv.memory = ADMIN_MEM + lv.cpus = ADMIN_CPU + if WORKER_COUNT == 0 && ADMIN_DISK + lv.storage :file, size: ADMIN_DISK_SIZE, type: 'qcow2', serial: "664620#{i}" + end + end + + admin.vm.box = BOX_NAME + admin.vm.hostname = "admin-#{i}" + + if VM_PROVIDER == "libvirt" + admin.vm.network "private_network", autostart: true, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i-1}" + elsif VM_PROVIDER == "virtualbox" + admin.vm.network "public_network", bridge: VM_BRIDGE_INET, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i-1}" + end + end + + if WORKER_COUNT == 0 + ansible_provision config, ansible_groups, extra_vars + end + end + + if WORKER_COUNT > 0 + (1..WORKER_COUNT).each do |i| + config.vm.define "worker-#{i}" do |worker| + worker.vm.provider VM_PROVIDER do |lv| + lv.memory = WORKER_MEM + lv.cpus = WORKER_CPU + if WORKER_DISK + lv.storage :file, size: WORKER_DISK_SIZE, type: 'qcow2', serial: "674620#{i}" + end + end + + worker.vm.box = BOX_NAME + worker.vm.hostname = "worker-#{i}" + if VM_PROVIDER == "libvirt" + worker.vm.network "private_network", autostart: 
true, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i+(ADMIN_COUNT-1)}" + elsif VM_PROVIDER == "virtualbox" + worker.vm.network "public_network", bridge: VM_BRIDGE_INET, ip: "#{VM_NET}.#{VM_NET_LAST_OCTET_START+i+(ADMIN_COUNT-1)}" + end + + # Only execute once the Ansible provisioner, + # when all nodes are up and ready. + if i == WORKER_COUNT + ansible_provision worker, ansible_groups, extra_vars + end + end + end + end + +end diff --git a/tools/env/ansible.cfg b/tools/env/ansible.cfg new file mode 100644 index 00000000..bcc3533c --- /dev/null +++ b/tools/env/ansible.cfg @@ -0,0 +1,12 @@ +# config file for ansible -- https://ansible.com/ +# =============================================== + +# nearly all parameters can be overridden in ansible-playbook +# or with command line flags. ansible will read ANSIBLE_CONFIG, +# ansible.cfg in the current working directory, .ansible.cfg in +# the home directory or /etc/ansible/ansible.cfg, whichever it +# finds first + +[defaults] + +interpreter_python = /usr/bin/python3 diff --git a/tools/env/generate-spec.sh b/tools/env/generate-spec.sh new file mode 100755 index 00000000..af79fc63 --- /dev/null +++ b/tools/env/generate-spec.sh @@ -0,0 +1,132 @@ +#!/bin/bash +# generate-spec.sh - create spec files for a k3s cluster with longhorn and s3gw +# Copyright 2022 SUSE, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +tgtfile="s3gw.yaml" +is_dev_env=false + +s3gw_image="ghcr.io/aquarist-labs/s3gw:latest" +s3gw_image_pull_policy="Always" +s3gw_ui_image="localhost/s3gw-ui:latest" +s3gw_ui_image_pull_policy="Never" + +info() { + echo "INFO: $*" >/dev/stdout +} + +error() { + echo "ERROR: $*" >/dev/stderr +} + +while [[ $# -gt 0 ]]; do + case $1 in + --output|-o) + tgtfile="${2}" + shift 1 + ;; + --dev) + s3gw_image="localhost/s3gw:latest" + s3gw_image_pull_policy="Never" + ;; + esac + shift 1 +done + +info "Output file: ${tgtfile}" + +s3gw_image=$(printf '%s\n' "$s3gw_image" | sed -e 's/[]\/$*.^[]/\\&/g') + +sed "s/##S3GW_IMAGE##/"${s3gw_image}"/" s3gw/s3gw-deployment.yaml > s3gw/s3gw-deployment.tmp.yaml +sed -i "s/##S3GW_IMAGE_PULL_POLICY##/"${s3gw_image_pull_policy}"/" s3gw/s3gw-deployment.tmp.yaml + +s3gw_ui_image=$(printf '%s\n' "$s3gw_ui_image" | sed -e 's/[]\/$*.^[]/\\&/g') + +sed "s/##S3GW_UI_IMAGE##/"${s3gw_ui_image}"/" s3gw-ui/s3gw-ui-deployment.yaml > s3gw-ui/s3gw-ui-deployment.tmp.yaml +sed -i "s/##S3GW_UI_IMAGE_PULL_POLICY##/"${s3gw_ui_image_pull_policy}"/" s3gw-ui/s3gw-ui-deployment.tmp.yaml + +rgw_default_user_access_key_base64=$(cat s3gw/s3gw-secret.yaml | grep RGW_DEFAULT_USER_ACCESS_KEY | cut -d':' -f 2 | sed -e 's/[[:space:],"]//g') +rgw_default_user_access_key_base64=$(echo -n $rgw_default_user_access_key_base64 | base64) +rgw_default_user_access_key_base64=$(printf '%s\n' "$rgw_default_user_access_key_base64" | sed -e 's/[]\/$*.^[]/\\&/g') +rgw_default_user_secret_key_base64=$(cat s3gw/s3gw-secret.yaml | grep RGW_DEFAULT_USER_SECRET_KEY | cut -d':' -f 2 | sed -e 's/[[:space:],"]//g') +rgw_default_user_secret_key_base64=$(echo -n $rgw_default_user_secret_key_base64 | base64) +rgw_default_user_secret_key_base64=$(printf '%s\n' "$rgw_default_user_secret_key_base64" | sed -e 's/[]\/$*.^[]/\\&/g') + +sed "s/##RGW_DEFAULT_USER_ACCESS_KEY_BASE64##/"\"${rgw_default_user_access_key_base64}\""/" longhorn/longhorn-s3gw-secret.yaml > 
longhorn/longhorn-s3gw-secret.tmp.yaml +sed -i "s/##RGW_DEFAULT_USER_SECRET_KEY_BASE64##/\""${rgw_default_user_secret_key_base64}\""/" longhorn/longhorn-s3gw-secret.tmp.yaml + +[[ -z "${tgtfile}" ]] && \ + error "Missing output file" && \ + exit 1 + +specs=( + "longhorn/longhorn-s3gw-secret.tmp" + "longhorn/longhorn-ingress-secret" + "longhorn/longhorn-storageclass" + "s3gw/s3gw-namespace" + "s3gw/s3gw-pvc" + "s3gw/s3gw-config" + "s3gw/s3gw-deployment.tmp" + "s3gw/s3gw-secret" + "s3gw/s3gw-ingress-secret" + "s3gw/s3gw-service" + "s3gw-ui/s3gw-ui-deployment.tmp" + "s3gw-ui/s3gw-ui-service" +) + +traefik_specs=( + "ingress-traefik/traefik-nodeport" + "ingress-traefik/longhorn-ingress" + "ingress-traefik/s3gw-ingress" + "ingress-traefik/s3gw-ui-ingress" +) + +d="$(date +'%Y/%M/%d %H:%m:%S %Z')" + +cat > ${tgtfile} << EOF +# ${tgtfile} - setup a k3s cluster with longhorn and s3gw +# Copyright 2022 SUSE, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This file was auto-generated by generate-spec.sh on ${d} +# + +EOF + +has_prior=false +for spec in ${specs[@]}; do + echo Inflating s3gw-spec ${spec}.yaml + ${has_prior} && echo "---" >> ${tgtfile} + has_prior=true + cat ${spec}.yaml >> ${tgtfile} +done + +for spec in ${traefik_specs[@]}; do + echo Inflating traefik-spec ${spec}.yaml + echo "---" >> ${tgtfile} + cat ${spec}.yaml >> ${tgtfile} +done + +find . 
-name "*.tmp.yaml" -type f -delete diff --git a/tools/env/ingress-traefik/longhorn-ingress.yaml b/tools/env/ingress-traefik/longhorn-ingress.yaml new file mode 100644 index 00000000..964090f5 --- /dev/null +++ b/tools/env/ingress-traefik/longhorn-ingress.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: longhorn-ingress + namespace: longhorn-system +spec: + tls: + - hosts: + - longhorn.local + secretName: longhorn-ingress-secret + rules: + - host: longhorn.local + http: + paths: + - pathType: Prefix + path: / + backend: + service: + name: longhorn-frontend + port: + number: 80 diff --git a/tools/env/ingress-traefik/s3gw-ingress.yaml b/tools/env/ingress-traefik/s3gw-ingress.yaml new file mode 100644 index 00000000..34f3aa4d --- /dev/null +++ b/tools/env/ingress-traefik/s3gw-ingress.yaml @@ -0,0 +1,64 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: cors-header + namespace: s3gw-system +spec: + headers: + accessControlAllowMethods: + - "GET" + - "HEAD" + - "PUT" + - "POST" + - "DELETE" + - "OPTIONS" + accessControlAllowOriginList: + - "*" + accessControlAllowHeaders: + - "*" + accessControlExposeHeaders: + - "ETag" +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + tls: + - hosts: + - s3gw.local + secretName: s3gw-ingress-secret + rules: + - host: s3gw.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-service + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-no-tls-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + rules: + - host: s3gw.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-service + port: + 
number: 80 diff --git a/tools/env/ingress-traefik/s3gw-ui-ingress.yaml b/tools/env/ingress-traefik/s3gw-ui-ingress.yaml new file mode 100644 index 00000000..c3490288 --- /dev/null +++ b/tools/env/ingress-traefik/s3gw-ui-ingress.yaml @@ -0,0 +1,43 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-ui-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + tls: + - hosts: + - s3gw-ui.local + secretName: s3gw-ingress-secret + rules: + - host: s3gw-ui.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-ui-service + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-ui-no-tls-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + rules: + - host: s3gw-ui.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-ui-service + port: + number: 80 diff --git a/tools/env/ingress-traefik/traefik-nodeport.yaml b/tools/env/ingress-traefik/traefik-nodeport.yaml new file mode 100644 index 00000000..0353df07 --- /dev/null +++ b/tools/env/ingress-traefik/traefik-nodeport.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: ingress-traefik-nodeport + namespace: kube-system +spec: + type: NodePort + ports: + - port: 80 + nodePort: 30080 + targetPort: 8000 + protocol: TCP + name: http + - port: 443 + nodePort: 30443 + targetPort: 8443 + protocol: TCP + name: https + selector: + app.kubernetes.io/instance: traefik + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: traefik diff --git a/tools/env/longhorn-setting.yaml.sample b/tools/env/longhorn-setting.yaml.sample new file mode 100644 index 00000000..83c61558 --- /dev/null +++ b/tools/env/longhorn-setting.yaml.sample @@ -0,0 +1,37 @@ +default-setting.yaml: |- + backup-target: + 
backup-target-credential-secret: + allow-recurring-job-while-volume-detached: + create-default-disk-labeled-nodes: + default-data-path: + replica-soft-anti-affinity: + replica-auto-balance: + storage-over-provisioning-percentage: + storage-minimal-available-percentage: 25 + upgrade-checker: + default-replica-count: + default-data-locality: + default-longhorn-static-storage-class: + backupstore-poll-interval: + taint-toleration: + system-managed-components-node-selector: + priority-class: + auto-salvage: + auto-delete-pod-when-volume-detached-unexpectedly: + disable-scheduling-on-cordoned-node: + replica-zone-soft-anti-affinity: + node-down-pod-deletion-policy: + allow-node-drain-with-last-healthy-replica: + mkfs-ext4-parameters: + disable-replica-rebuild: + replica-replenishment-wait-interval: + concurrent-replica-rebuild-per-node-limit: + disable-revision-counter: + system-managed-pods-image-pull-policy: + allow-volume-creation-with-degraded-availability: + auto-cleanup-system-generated-snapshot: + concurrent-automatic-engine-upgrade-per-node-limit: + backing-image-cleanup-wait-interval: + backing-image-recovery-wait-interval: + guaranteed-engine-manager-cpu: + guaranteed-replica-manager-cpu: diff --git a/tools/env/longhorn/longhorn-ingress-secret.yaml b/tools/env/longhorn/longhorn-ingress-secret.yaml new file mode 100644 index 00000000..baa22071 --- /dev/null +++ b/tools/env/longhorn/longhorn-ingress-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: longhorn-ingress-secret + namespace: longhorn-system +type: kubernetes.io/tls +data: + tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0NuT0Q5cm4yaQpaQmVQQ0k1ckh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JYVmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidUI4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WXJ6cHpjZnZwaU9VCmZmZTNSeUZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJWEV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8y
VWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/tools/env/longhorn/longhorn-s3gw-secret.yaml b/tools/env/longhorn/longhorn-s3gw-secret.yaml new file mode 100644 index 00000000..9de848bb --- /dev/null +++ b/tools/env/longhorn/longhorn-s3gw-secret.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: s3gw-secret + namespace: longhorn-system +type: Opaque +data: + # The Base64 encoded s3gw default user access key. + AWS_ACCESS_KEY_ID: ##RGW_DEFAULT_USER_ACCESS_KEY_BASE64## + # The Base64 encoded s3gw default user secret key. + AWS_SECRET_ACCESS_KEY: ##RGW_DEFAULT_USER_SECRET_KEY_BASE64## + # The Base64 encoded URL http://s3gw-service.s3gw-system:80/ + AWS_ENDPOINTS: aHR0cDovL3MzZ3ctc2VydmljZS5zM2d3LXN5c3RlbTo4MC8= diff --git a/tools/env/longhorn/longhorn-storageclass.yaml b/tools/env/longhorn/longhorn-storageclass.yaml new file mode 100644 index 00000000..b0708102 --- /dev/null +++ b/tools/env/longhorn/longhorn-storageclass.yaml @@ -0,0 +1,14 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: longhorn-single + namespace: s3gw-system +provisioner: driver.longhorn.io +allowVolumeExpansion: true +reclaimPolicy: Delete +volumeBindingMode: Immediate +parameters: + fsType: "ext4" + numberOfReplicas: "1" + staleReplicaTimeout: "2880" + fromBackup: "" diff --git a/tools/env/playbooks/bootstrap.yaml b/tools/env/playbooks/bootstrap.yaml new file mode 100644 index 00000000..dd61ff60 --- /dev/null +++ b/tools/env/playbooks/bootstrap.yaml @@ -0,0 +1,185 @@ +- name: Install Packages - [APT] + hosts: apt + become: true + tasks: + + - name: Install packages + apt: + name: "{{ packages }}" + state: present + vars: + packages: + - apt-transport-https + - ca-certificates + - curl + - wget + - gnupg-agent + - software-properties-common + + - name: Add 
signing key for Docker's repository + apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + state: present + + - name: Add repository for Docker stable version + apt_repository: + repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable + state: present + + - name: Install Docker + apt: + name: "{{ packages }}" + state: present + update_cache: yes + vars: + packages: + - docker-ce + - docker-ce-cli + +- name: Install Packages - [ZYPPER] + hosts: zypper + become: true + tasks: + + - name: Install packages + zypper: + name: "{{ packages }}" + state: present + vars: + packages: + - curl + - wget + - git + - docker + - python3-docker + +- name: Install pip + hosts: master + become: true + tasks: + + - name: Install pip + command: "{{ item }}" + with_items: + - wget "https://bootstrap.pypa.io/pip/3.5/get-pip.py" + - python3 get-pip.py + - rm -rf get-pip.py + +- name: Install pip s3cmd [ALL] + hosts: master + become: true + tasks: + - name: Install pip s3cmd + command: pip install s3cmd + +- name: Install pip kubernetes [ALL] + hosts: master + tasks: + - name: Install pip kubernetes + command: pip install kubernetes + +- name: Install pip docker [APT] + hosts: apt:!node + become: true + tasks: + - name: Install pip docker + command: pip install docker + +- name: Patch Runtime + hosts: all + become: true + tasks: + + - name: Patching docker/daemon.json + copy: + dest: "/etc/docker/daemon.json" + content: | + { + "exec-opts": ["native.cgroupdriver=systemd"], + "insecure-registries" : ["admin-1:5000"] + } + + - name: Add user to Docker group + user: + name: "{{ user }}" + group: docker + + - name: Restart Docker + service: + name: docker + state: restarted + daemon_reload: yes + + - name: Remove swapfile from /etc/fstab + mount: + name: "{{ item }}" + fstype: swap + state: absent + with_items: + - swap + - none + + - name: Disable swap + command: swapoff -a + +- name: Start local registry + hosts: master + become: true + tasks: + + - name: Start 
a local registry + command: docker run -d -p 5000:5000 --restart=always --name registry registry:2 + +- name: Set up nodes to use local registry + hosts: all + become: true + tasks: + + - name: Ensuring /etc/rancher/k3s + ansible.builtin.file: + path: /etc/rancher/k3s + state: directory + mode: '0755' + + - name: Creating /etc/rancher/k3s/registries.yaml + copy: + content: "" + dest: /etc/rancher/k3s/registries.yaml + force: no + + - name: Updating /etc/rancher/k3s/registries.yaml + blockinfile: + path: /etc/rancher/k3s/registries.yaml + block: | + mirrors: + admin-1.local: + endpoint: + - "http://admin-1:5000" + +- name: Local DNS + hosts: all + become: true + gather_facts: yes + tasks: + + - name: Update /etc/hosts file with node name + tags: etchostsupdate + lineinfile: + path: "/etc/hosts" + regexp: ".*\t{{ hostvars[item]['ansible_fqdn']}}\t{{ hostvars[item]['ansible_hostname']}}" + line: "{{ hostvars[item]['ansible_eth1'].ipv4.address }}\t{{ hostvars[item]['ansible_fqdn']}}\t{{ hostvars[item]['ansible_hostname']}}" + state: present + backup: yes + register: etchostsupdate + with_items: "{{groups['all']}}" + + - name: Patch /etc/hosts + lineinfile: + path: /etc/hosts + regexp: '^127\.0\.2\.1.*' + state: absent + + - name: Add local names to /etc/hosts + lineinfile: + path: /etc/hosts + line: "127.0.0.1 longhorn.local s3gw.local s3gw-no-tls.local s3gw-ui.local" diff --git a/tools/env/playbooks/ingress-traefik-deploy.yaml b/tools/env/playbooks/ingress-traefik-deploy.yaml new file mode 100644 index 00000000..6338ffd4 --- /dev/null +++ b/tools/env/playbooks/ingress-traefik-deploy.yaml @@ -0,0 +1,33 @@ +- name: Longhorn/s3gw Traefik ingress deploy + hosts: kubectl + tasks: + + - name: Wait Traefik controller to become ready, this could take a while ... 
+ command: kubectl wait --namespace kube-system --for=condition=ready pod --selector=app.kubernetes.io/name=traefik --timeout=30s + register: result + until: result.rc == 0 + retries: 20 + delay: 5 + + - name: Copy Traefik ingresses cfg to local dir + copy: src=../ingress-traefik dest=/home/{{ user }} mode=0777 + + - name: Apply traefik-nodeport.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/ingress-traefik/traefik-nodeport.yaml + + - name: Apply longhorn-ingress.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/ingress-traefik/longhorn-ingress.yaml + + - name: Apply s3gw-ingress.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/ingress-traefik/s3gw-ingress.yaml + + - name: Apply s3gw-ui-ingress.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/ingress-traefik/s3gw-ui-ingress.yaml diff --git a/tools/env/playbooks/k3s-post-install.yaml b/tools/env/playbooks/k3s-post-install.yaml new file mode 100644 index 00000000..d8aaab80 --- /dev/null +++ b/tools/env/playbooks/k3s-post-install.yaml @@ -0,0 +1,43 @@ +- name: K3s extras + hosts: master + become: true + tasks: + + - name: add .kube/config + copy: + src: /etc/rancher/k3s/k3s.yaml + dest: /home/{{ user }}/.kube/config + owner: "{{ user }}" + mode: '0600' + remote_src: yes + + - name: export KUBECONFIG + lineinfile: + path: /home/{{ user }}/.bashrc + line: "export KUBECONFIG=~/.kube/config" + + - name: Setting kubectl alias and enabling kubectl bash completion + command: "{{ item }}" + with_items: + - /bin/bash -c "sudo echo 'source <(kubectl completion bash)' >> /home/{{ user }}/.bashrc" + - /bin/bash -c "sudo touch /etc/bash_completion.d/kubectl" + - /bin/bash -c "sudo chmod 777 /etc/bash_completion.d/kubectl" + - /bin/bash -c "kubectl completion bash > /etc/bash_completion.d/kubectl" + - /bin/bash -c "sudo echo 'alias k=kubectl' >> /home/{{ user }}/.bashrc" + - /bin/bash -c "sudo echo 'complete -F __start_kubectl k' >> /home/{{ user 
}}/.bashrc" + + - name: Patch k3s.yaml for this session + file: + path: /etc/rancher/k3s/k3s.yaml + mode: '0644' + +- name: Probe cluster + hosts: kubectl + tasks: + + - name: Wait for admin-1 to become ready + command: kubectl get nodes admin-1 + register: result + until: result.stdout.find("NotReady") == -1 + retries: 25 + delay: 5 diff --git a/tools/env/playbooks/load-scen.yaml b/tools/env/playbooks/load-scen.yaml new file mode 100644 index 00000000..9f328ba8 --- /dev/null +++ b/tools/env/playbooks/load-scen.yaml @@ -0,0 +1,22 @@ +- name: Load scenario + hosts: kubectl + tasks: + + - name: Create scenarios dir + file: + path: /home/{{ user }}/scenarios + state: directory + mode: '0777' + + - name: Copy s3cmd.cfg + copy: src=../s3cmd.cfg dest=/home/{{ user }}/scenarios + + - name: Copy scenario's resources + copy: src=../scenarios/{{ scenario }} dest=/home/{{ user }}/scenarios mode=0777 + + - name: Run load-scen.sh + shell: /home/{{ user }}/scenarios/{{ scenario }}/load-scen.sh + register: out + args: + chdir: /home/{{ user }}/scenarios/{{ scenario }}/ + - debug: var=out.stdout_lines diff --git a/tools/env/playbooks/longhorn-deploy.yaml b/tools/env/playbooks/longhorn-deploy.yaml new file mode 100644 index 00000000..5bd1da99 --- /dev/null +++ b/tools/env/playbooks/longhorn-deploy.yaml @@ -0,0 +1,9 @@ +- name: Deploy Longhorn + hosts: kubectl + tasks: + + - name: Installing iscsi... 
+ command: kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.2.4/deploy/prerequisite/longhorn-iscsi-installation.yaml + + - name: Deploy Longhorn + command: kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.2.4/deploy/longhorn.yaml diff --git a/tools/env/playbooks/s3gw-deploy.yaml b/tools/env/playbooks/s3gw-deploy.yaml new file mode 100644 index 00000000..8ee7c4ae --- /dev/null +++ b/tools/env/playbooks/s3gw-deploy.yaml @@ -0,0 +1,63 @@ +- name: s3gw deploy + hosts: kubectl + tasks: + + - name: Copy Longhorn cfg to local dir + copy: src=../longhorn dest=/home/{{ user }} mode=0777 + + - name: Copy s3gw cfg to local dir + copy: src=../s3gw dest=/home/{{ user }} mode=0777 + + - name: Set S3GW_IMAGE in s3gw-deployment.yaml + replace: + path: /home/{{ user }}/s3gw/s3gw-deployment.yaml + regexp: '##S3GW_IMAGE##' + replace: "{{ s3gw_image }}" + + - name: Set S3GW_IMAGE_PULL_POLICY in s3gw-deployment.yaml + replace: + path: /home/{{ user }}/s3gw/s3gw-deployment.yaml + regexp: '##S3GW_IMAGE_PULL_POLICY##' + replace: "{{ s3gw_image_pull_policy }}" + + - name: Apply s3gw-namespace.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw/s3gw-namespace.yaml + + - name: Apply longhorn-storageclass.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/longhorn/longhorn-storageclass.yaml + + - name: Apply s3gw-pvc.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw/s3gw-pvc.yaml + + - name: Apply s3gw-config.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw/s3gw-config.yaml + + - name: Apply s3gw-secret.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw/s3gw-secret.yaml + + - name: Apply s3gw-deployment.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw/s3gw-deployment.yaml + + - name: Apply s3gw-service.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw/s3gw-service.yaml + + - name: Wait 
s3gw application to become ready, this could take a while ... + command: kubectl wait --namespace s3gw-system --for=condition=ready pod --selector=app.aquarist-labs.io/name=s3gw --timeout=30s + register: result + until: result.rc == 0 + retries: 20 + delay: 2 diff --git a/tools/env/playbooks/s3gw-ui-deploy.yaml b/tools/env/playbooks/s3gw-ui-deploy.yaml new file mode 100644 index 00000000..e1fe6679 --- /dev/null +++ b/tools/env/playbooks/s3gw-ui-deploy.yaml @@ -0,0 +1,59 @@ +- name: s3gw-ui build image + hosts: kubectl + become: true + tasks: + + - name: Copy s3gw-ui directory to local directory + copy: src=../s3gw-ui dest=/home/{{ user }} mode=0777 + + - name: Set S3GW_UI_IMAGE in s3gw-ui-deployment.yaml + replace: + path: /home/{{ user }}/s3gw-ui/s3gw-ui-deployment.yaml + regexp: '##S3GW_UI_IMAGE##' + replace: "{{ s3gw_ui_image }}" + + - name: Set S3GW_UI_IMAGE_PULL_POLICY in s3gw-ui-deployment.yaml + replace: + path: /home/{{ user }}/s3gw-ui/s3gw-ui-deployment.yaml + regexp: '##S3GW_UI_IMAGE_PULL_POLICY##' + replace: "{{ s3gw_ui_image_pull_policy }}" + + - name: Copy Dockerfile.s3gw-ui to local directory + copy: src=../../build-ui/Dockerfile.s3gw-ui dest=/home/{{ user }}/s3gw-ui mode=0777 + + - name: Git s3gw-ui checkout + git: + repo: "{{ s3gw_ui_repo }}" + dest: /home/{{ user }}/s3gw-ui/s3gw-ui + version: "{{ s3gw_ui_version }}" + + - name: Build s3gw-ui image + community.docker.docker_image: + build: + path: /home/{{ user }}/s3gw-ui/s3gw-ui + dockerfile: /home/{{ user }}/s3gw-ui/Dockerfile.s3gw-ui + name: s3gw-ui + tag: latest + source: build + + - name: Tag and push s3gw-ui to local registry + community.docker.docker_image: + name: s3gw-ui + repository: admin-1:5000/s3gw-ui + tag: latest + push: yes + source: local + +- name: s3gw UI deploy + hosts: kubectl + tasks: + + - name: Apply s3gw-ui-deployment.yaml + kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw-ui/s3gw-ui-deployment.yaml + + - name: Apply s3gw-ui-service.yaml + 
kubernetes.core.k8s: + state: present + src: /home/{{ user }}/s3gw-ui/s3gw-ui-service.yaml diff --git a/tools/env/s3cmd.cfg b/tools/env/s3cmd.cfg new file mode 100644 index 00000000..173c40ae --- /dev/null +++ b/tools/env/s3cmd.cfg @@ -0,0 +1,87 @@ +[default] +access_key = 0555b35654ad1656d804 +access_token = +add_encoding_exts = +add_headers = +bucket_location = US +ca_certs_file = +cache_file = +check_ssl_certificate = False +check_ssl_hostname = True +cloudfront_host = cloudfront.amazonaws.com +connection_max_age = 5 +connection_pooling = True +content_disposition = +content_type = +default_mime_type = binary/octet-stream +delay_updates = False +delete_after = False +delete_after_fetch = False +delete_removed = False +dry_run = False +enable_multipart = True +encoding = UTF-8 +encrypt = False +expiry_date = +expiry_days = +expiry_prefix = +follow_symlinks = False +force = False +get_continue = False +gpg_command = /usr/bin/gpg +gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s +gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s +gpg_passphrase = +guess_mime_type = True +host_base = https://s3gw.local +host_bucket = https://s3gw.local/%(bucket) +human_readable_sizes = False +invalidate_default_index_on_cf = False +invalidate_default_index_root_on_cf = True +invalidate_on_cf = False +kms_key = +limit = -1 +limitrate = 0 +list_md5 = False +log_target_prefix = +long_listing = False +max_delete = -1 +mime_type = +multipart_chunk_size_mb = 15 +multipart_copy_chunk_size_mb = 1024 +multipart_max_chunks = 10000 +preserve_attrs = True +progress_meter = True +proxy_host = +proxy_port = 0 +public_url_use_https = False +put_continue = False +recursive = False +recv_chunk = 65536 +reduced_redundancy = False +requester_pays = False +restore_days = 1 +restore_priority = Standard +secret_key = 
h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== +send_chunk = 65536 +server_side_encryption = False +signature_v2 = True +signurl_use_https = False +simpledb_host = sdb.amazonaws.com +skip_existing = False +socket_timeout = 300 +ssl_client_cert_file = +ssl_client_key_file = +stats = False +stop_on_error = False +storage_class = +throttle_max = 100 +upload_id = +urlencoding_mode = normal +use_http_expect = False +use_https = True +use_mime_magic = True +verbosity = INFO +website_endpoint = http://%(bucket)s.s3-website-%(location)s.amazonaws.com/ +website_error = +website_index = index.html diff --git a/tools/env/s3gw-dev.yaml b/tools/env/s3gw-dev.yaml new file mode 100644 index 00000000..a1a90c49 --- /dev/null +++ b/tools/env/s3gw-dev.yaml @@ -0,0 +1,355 @@ +# s3gw-dev.yaml - setup a k3s cluster with longhorn and s3gw +# Copyright 2022 SUSE, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This file was auto-generated by generate-spec.sh on 2022/53/03 16:11:42 CET +# + +apiVersion: v1 +kind: Secret +metadata: + name: s3gw-secret + namespace: longhorn-system +type: Opaque +data: + # The Base64 encoded s3gw default user access key. + AWS_ACCESS_KEY_ID: "MDU1NWIzNTY1NGFkMTY1NmQ4MDQ=" + # The Base64 encoded s3gw default user secret key. 
+ AWS_SECRET_ACCESS_KEY: "aDdHaHh1QkxUcmxoVlV5eFNQVUtVVjhyLzJFSTRuZ3FKeEQ3aUJkQllMaHdsdU4zMEphVDNRPT0=" + # The Base64 encoded URL http://s3gw-service.s3gw-system:80/ + AWS_ENDPOINTS: aHR0cDovL3MzZ3ctc2VydmljZS5zM2d3LXN5c3RlbTo4MC8= +--- +apiVersion: v1 +kind: Secret +metadata: + name: longhorn-ingress-secret + namespace: longhorn-system +type: kubernetes.io/tls +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0NuT0Q5cm4yaQpaQmVQQ0k1ckh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JYVmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidUI4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WXJ6cHpjZnZwaU9VCmZmZTNSeUZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJWEV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8y
VWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: longhorn-single + namespace: s3gw-system +provisioner: driver.longhorn.io +allowVolumeExpansion: true +reclaimPolicy: Delete +volumeBindingMode: Immediate +parameters: + fsType: "ext4" + numberOfReplicas: "1" + staleReplicaTimeout: "2880" + fromBackup: "" +--- +apiVersion: v1 +kind: Namespace +metadata: + name: s3gw-system +spec: {} +status: {} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: s3gw-pvc + namespace: s3gw-system +spec: + accessModes: + - ReadWriteOnce + storageClassName: longhorn-single + resources: + requests: + storage: 2Gi +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: s3gw-config + namespace: s3gw-system +data: + RGW_BACKEND_STORE: "sfs" + DEBUG_RGW: "1" + RGW_SERVICE_URL: "https://s3gw.local" + RGW_DEFAULT_USER_SYSTEM: "1" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.aquarist-labs.io/name: s3gw + name: s3gw + namespace: s3gw-system +spec: + replicas: 1 + selector: + matchLabels: + app.aquarist-labs.io/name: s3gw + strategy: {} + template: + metadata: + labels: + app.aquarist-labs.io/name: s3gw + spec: + containers: + - name: s3gw + image: localhost/s3gw:latest + imagePullPolicy: Never + args: ["--rgw-backend-store", $(RGW_BACKEND_STORE), "--debug-rgw", $(DEBUG_RGW)] + envFrom: + - configMapRef: + name: s3gw-config + - secretRef: + name: s3gw-secret + volumeMounts: + - name: s3gw-lh-store + mountPath: /data + ports: + - containerPort: 7480 + volumes: + - name: s3gw-lh-store + persistentVolumeClaim: + claimName: s3gw-pvc +--- +apiVersion: v1 +kind: Secret +metadata: + name: s3gw-secret + namespace: s3gw-system +type: Opaque +stringData: + RGW_DEFAULT_USER_ACCESS_KEY: 
"0555b35654ad1656d804" + RGW_DEFAULT_USER_SECRET_KEY: "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==" +--- +apiVersion: v1 +kind: Secret +metadata: + name: s3gw-ingress-secret + namespace: s3gw-system +type: kubernetes.io/tls +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0NuT0Q5cm4yaQpaQmVQQ0k1ckh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JYVmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidUI4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WXJ6cHpjZnZwaU9VCmZmZTNSeUZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJWEV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8y
VWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== +--- +apiVersion: v1 +kind: Service +metadata: + name: s3gw-service + namespace: s3gw-system +spec: + selector: + app.aquarist-labs.io/name: s3gw + ports: + - protocol: TCP + port: 80 + targetPort: 7480 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.aquarist-labs.io/name: s3gw-ui + name: s3gw-ui + namespace: s3gw-system +spec: + replicas: 1 + selector: + matchLabels: + app.aquarist-labs.io/name: s3gw-ui + strategy: {} + template: + metadata: + labels: + app.aquarist-labs.io/name: s3gw-ui + spec: + containers: + - name: s3gw-ui + image: localhost/s3gw-ui:latest + imagePullPolicy: Never + envFrom: + - configMapRef: + name: s3gw-config + - secretRef: + name: s3gw-secret + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: s3gw-ui-service + namespace: s3gw-system +spec: + selector: + app.aquarist-labs.io/name: s3gw-ui + ports: + - protocol: TCP + port: 80 + targetPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: ingress-traefik-nodeport + namespace: kube-system +spec: + type: NodePort + ports: + - port: 80 + nodePort: 30080 + targetPort: 8000 + protocol: TCP + name: http + - port: 443 + nodePort: 30443 + targetPort: 8443 + protocol: TCP + name: https + selector: + app.kubernetes.io/instance: traefik + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: traefik +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: longhorn-ingress + namespace: longhorn-system +spec: + tls: + - hosts: + - longhorn.local + secretName: longhorn-ingress-secret + rules: + - host: longhorn.local + http: + paths: + - pathType: Prefix + path: / + backend: + service: + name: longhorn-frontend + port: + number: 80 +--- +apiVersion: traefik.containo.us/v1alpha1 
+kind: Middleware +metadata: + name: cors-header + namespace: s3gw-system +spec: + headers: + accessControlAllowMethods: + - "GET" + - "HEAD" + - "PUT" + - "POST" + - "DELETE" + - "OPTIONS" + accessControlAllowOriginList: + - "*" + accessControlAllowHeaders: + - "*" + accessControlExposeHeaders: + - "ETag" +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + tls: + - hosts: + - s3gw.local + secretName: s3gw-ingress-secret + rules: + - host: s3gw.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-service + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-no-tls-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + rules: + - host: s3gw.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-service + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-ui-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + tls: + - hosts: + - s3gw-ui.local + secretName: s3gw-ingress-secret + rules: + - host: s3gw-ui.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-ui-service + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-ui-no-tls-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + rules: + - host: s3gw-ui.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-ui-service + port: + number: 80 diff --git a/tools/env/s3gw-ui/s3gw-ui-deployment.yaml 
b/tools/env/s3gw-ui/s3gw-ui-deployment.yaml new file mode 100644 index 00000000..9ed2305b --- /dev/null +++ b/tools/env/s3gw-ui/s3gw-ui-deployment.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.aquarist-labs.io/name: s3gw-ui + name: s3gw-ui + namespace: s3gw-system +spec: + replicas: 1 + selector: + matchLabels: + app.aquarist-labs.io/name: s3gw-ui + strategy: {} + template: + metadata: + labels: + app.aquarist-labs.io/name: s3gw-ui + spec: + containers: + - name: s3gw-ui + image: ##S3GW_UI_IMAGE## + imagePullPolicy: ##S3GW_UI_IMAGE_PULL_POLICY## + envFrom: + - configMapRef: + name: s3gw-config + - secretRef: + name: s3gw-secret + ports: + - containerPort: 8080 diff --git a/tools/env/s3gw-ui/s3gw-ui-service.yaml b/tools/env/s3gw-ui/s3gw-ui-service.yaml new file mode 100644 index 00000000..55d56364 --- /dev/null +++ b/tools/env/s3gw-ui/s3gw-ui-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: s3gw-ui-service + namespace: s3gw-system +spec: + selector: + app.aquarist-labs.io/name: s3gw-ui + ports: + - protocol: TCP + port: 80 + targetPort: 8080 diff --git a/tools/env/s3gw.yaml b/tools/env/s3gw.yaml new file mode 100644 index 00000000..5107294b --- /dev/null +++ b/tools/env/s3gw.yaml @@ -0,0 +1,355 @@ +# s3gw.yaml - setup a k3s cluster with longhorn and s3gw +# Copyright 2022 SUSE, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# This file was auto-generated by generate-spec.sh on 2022/53/03 16:11:37 CET +# + +apiVersion: v1 +kind: Secret +metadata: + name: s3gw-secret + namespace: longhorn-system +type: Opaque +data: + # The Base64 encoded s3gw default user access key. + AWS_ACCESS_KEY_ID: "MDU1NWIzNTY1NGFkMTY1NmQ4MDQ=" + # The Base64 encoded s3gw default user secret key. + AWS_SECRET_ACCESS_KEY: "aDdHaHh1QkxUcmxoVlV5eFNQVUtVVjhyLzJFSTRuZ3FKeEQ3aUJkQllMaHdsdU4zMEphVDNRPT0=" + # The Base64 encoded URL http://s3gw-service.s3gw-system:80/ + AWS_ENDPOINTS: aHR0cDovL3MzZ3ctc2VydmljZS5zM2d3LXN5c3RlbTo4MC8= +--- +apiVersion: v1 +kind: Secret +metadata: + name: longhorn-ingress-secret + namespace: longhorn-system +type: kubernetes.io/tls +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0NuT0Q5cm4yaQpaQmVQQ0k1c
kh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JYVmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidUI4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WXJ6cHpjZnZwaU9VCmZmZTNSe
UZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJWEV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8yVWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: longhorn-single + namespace: s3gw-system +provisioner: driver.longhorn.io +allowVolumeExpansion: true +reclaimPolicy: Delete +volumeBindingMode: Immediate +parameters: + fsType: "ext4" + numberOfReplicas: "1" + staleReplicaTimeout: "2880" + fromBackup: "" +--- +apiVersion: v1 +kind: Namespace +metadata: + name: s3gw-system +spec: {} +status: {} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: s3gw-pvc + namespace: s3gw-system +spec: + accessModes: + - ReadWriteOnce + storageClassName: longhorn-single + resources: + requests: + storage: 2Gi +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: s3gw-config + namespace: s3gw-system +data: + RGW_BACKEND_STORE: "sfs" + DEBUG_RGW: "1" + RGW_SERVICE_URL: "https://s3gw.local" + RGW_DEFAULT_USER_SYSTEM: "1" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.aquarist-labs.io/name: s3gw + name: s3gw + namespace: s3gw-system +spec: + replicas: 1 + selector: + matchLabels: + app.aquarist-labs.io/name: s3gw + strategy: {} + template: + metadata: + labels: + app.aquarist-labs.io/name: s3gw + spec: + containers: + - name: s3gw + image: ghcr.io/aquarist-labs/s3gw:latest + imagePullPolicy: Always + args: ["--rgw-backend-store", $(RGW_BACKEND_STORE), "--debug-rgw", $(DEBUG_RGW)] + envFrom: + - configMapRef: + name: s3gw-config + - secretRef: + name: s3gw-secret + volumeMounts: + - name: 
s3gw-lh-store + mountPath: /data + ports: + - containerPort: 7480 + volumes: + - name: s3gw-lh-store + persistentVolumeClaim: + claimName: s3gw-pvc +--- +apiVersion: v1 +kind: Secret +metadata: + name: s3gw-secret + namespace: s3gw-system +type: Opaque +stringData: + RGW_DEFAULT_USER_ACCESS_KEY: "0555b35654ad1656d804" + RGW_DEFAULT_USER_SECRET_KEY: "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==" +--- +apiVersion: v1 +kind: Secret +metadata: + name: s3gw-ingress-secret + namespace: s3gw-system +type: kubernetes.io/tls +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0NuT0Q5cm4yaQpaQmVQQ0k1ckh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JY
VmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidUI4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WXJ6cHpjZnZwaU9VCmZmZTNSeUZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJWEV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4
dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8yVWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== +--- +apiVersion: v1 +kind: Service +metadata: + name: s3gw-service + namespace: s3gw-system +spec: + selector: + app.aquarist-labs.io/name: s3gw + ports: + - protocol: TCP + port: 80 + targetPort: 7480 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.aquarist-labs.io/name: s3gw-ui + name: s3gw-ui + namespace: s3gw-system +spec: + replicas: 1 + selector: + matchLabels: + app.aquarist-labs.io/name: s3gw-ui + strategy: {} + template: + metadata: + labels: + app.aquarist-labs.io/name: s3gw-ui + spec: + containers: + - name: s3gw-ui + image: ghcr.io/aquarist-labs/s3gw-ui:latest + imagePullPolicy: Never + envFrom: + - configMapRef: + name: s3gw-config + - secretRef: + name: s3gw-secret + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: s3gw-ui-service + namespace: s3gw-system +spec: + selector: + app.aquarist-labs.io/name: s3gw-ui + ports: + - protocol: TCP + port: 80 + targetPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: ingress-traefik-nodeport + namespace: kube-system +spec: + type: NodePort + ports: + - port: 80 + nodePort: 30080 + targetPort: 8000 + protocol: TCP + name: http + - port: 443 + nodePort: 30443 + targetPort: 8443 + protocol: TCP + name: https + selector: + app.kubernetes.io/instance: traefik + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: traefik +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: longhorn-ingress + namespace: longhorn-system +spec: + tls: + - hosts: + - longhorn.local + secretName: longhorn-ingress-secret + rules: + - host: longhorn.local + http: + paths: + - pathType: Prefix 
+ path: / + backend: + service: + name: longhorn-frontend + port: + number: 80 +--- +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: cors-header + namespace: s3gw-system +spec: + headers: + accessControlAllowMethods: + - "GET" + - "HEAD" + - "PUT" + - "POST" + - "DELETE" + - "OPTIONS" + accessControlAllowOriginList: + - "*" + accessControlAllowHeaders: + - "*" + accessControlExposeHeaders: + - "ETag" +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + tls: + - hosts: + - s3gw.local + secretName: s3gw-ingress-secret + rules: + - host: s3gw.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-service + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-no-tls-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + rules: + - host: s3gw.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-service + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-ui-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + tls: + - hosts: + - s3gw-ui.local + secretName: s3gw-ingress-secret + rules: + - host: s3gw-ui.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: s3gw-ui-service + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: s3gw-ui-no-tls-ingress + namespace: s3gw-system + annotations: + traefik.ingress.kubernetes.io/router.middlewares: s3gw-system-cors-header@kubernetescrd +spec: + rules: + - host: s3gw-ui.local + http: + paths: + - path: / + pathType: Prefix + 
backend: + service: + name: s3gw-ui-service + port: + number: 80 diff --git a/tools/env/s3gw/s3gw-config.yaml b/tools/env/s3gw/s3gw-config.yaml new file mode 100644 index 00000000..003b3b72 --- /dev/null +++ b/tools/env/s3gw/s3gw-config.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: s3gw-config + namespace: s3gw-system +data: + RGW_BACKEND_STORE: "sfs" + DEBUG_RGW: "1" + RGW_SERVICE_URL: "https://s3gw.local" + RGW_DEFAULT_USER_ID: "testid" + RGW_DEFAULT_USER_DISPLAY_NAME: "M. Tester" + RGW_DEFAULT_USER_EMAIL: "tester@ceph.com" + RGW_DEFAULT_USER_CAPS: "usage=read,write;users=read,write" + RGW_DEFAULT_USER_SYSTEM: "1" + RGW_DEFAULT_USER_ASSUMED_ROLE_ARN: "" diff --git a/tools/env/s3gw/s3gw-deployment.yaml b/tools/env/s3gw/s3gw-deployment.yaml new file mode 100644 index 00000000..8540b114 --- /dev/null +++ b/tools/env/s3gw/s3gw-deployment.yaml @@ -0,0 +1,37 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.aquarist-labs.io/name: s3gw + name: s3gw + namespace: s3gw-system +spec: + replicas: 1 + selector: + matchLabels: + app.aquarist-labs.io/name: s3gw + strategy: {} + template: + metadata: + labels: + app.aquarist-labs.io/name: s3gw + spec: + containers: + - name: s3gw + image: ##S3GW_IMAGE## + imagePullPolicy: ##S3GW_IMAGE_PULL_POLICY## + args: ["--rgw-backend-store", $(RGW_BACKEND_STORE), "--debug-rgw", $(DEBUG_RGW)] + envFrom: + - configMapRef: + name: s3gw-config + - secretRef: + name: s3gw-secret + volumeMounts: + - name: s3gw-lh-store + mountPath: /data + ports: + - containerPort: 7480 + volumes: + - name: s3gw-lh-store + persistentVolumeClaim: + claimName: s3gw-pvc diff --git a/tools/env/s3gw/s3gw-ingress-secret.yaml b/tools/env/s3gw/s3gw-ingress-secret.yaml new file mode 100644 index 00000000..7d91737a --- /dev/null +++ b/tools/env/s3gw/s3gw-ingress-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: s3gw-ingress-secret + namespace: s3gw-system +type: kubernetes.io/tls +data: + tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0NuT0Q5cm4yaQpaQmVQQ0k1ckh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JYVmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidUI4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WXJ6cHpjZnZwaU9VCmZmZTNSeUZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJWEV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8y
VWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/tools/env/s3gw/s3gw-namespace.yaml b/tools/env/s3gw/s3gw-namespace.yaml new file mode 100644 index 00000000..993ad8c5 --- /dev/null +++ b/tools/env/s3gw/s3gw-namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: s3gw-system +spec: {} +status: {} diff --git a/tools/env/s3gw/s3gw-pvc.yaml b/tools/env/s3gw/s3gw-pvc.yaml new file mode 100644 index 00000000..1fc1b54a --- /dev/null +++ b/tools/env/s3gw/s3gw-pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: s3gw-pvc + namespace: s3gw-system +spec: + accessModes: + - ReadWriteOnce + storageClassName: longhorn-single + resources: + requests: + storage: 2Gi diff --git a/tools/env/s3gw/s3gw-secret.yaml b/tools/env/s3gw/s3gw-secret.yaml new file mode 100644 index 00000000..f3cad209 --- /dev/null +++ b/tools/env/s3gw/s3gw-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: s3gw-secret + namespace: s3gw-system +type: Opaque +stringData: + RGW_DEFAULT_USER_ACCESS_KEY: "0555b35654ad1656d804" + RGW_DEFAULT_USER_SECRET_KEY: "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==" diff --git a/tools/env/s3gw/s3gw-service.yaml b/tools/env/s3gw/s3gw-service.yaml new file mode 100644 index 00000000..8c39769c --- /dev/null +++ b/tools/env/s3gw/s3gw-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: s3gw-service + namespace: s3gw-system +spec: + selector: + app.aquarist-labs.io/name: s3gw + ports: + - protocol: TCP + port: 80 + targetPort: 7480 diff --git a/tools/env/scenarios/default/load-scen.sh b/tools/env/scenarios/default/load-scen.sh new file mode 100755 index 00000000..d726b50a --- /dev/null +++ b/tools/env/scenarios/default/load-scen.sh @@ -0,0 +1,6 @@ +#!/bin/sh 
+set -e + +s3cmd -c ../s3cmd.cfg mb s3://test + +exit 0 diff --git a/tools/env/setup-vm.sh b/tools/env/setup-vm.sh new file mode 100755 index 00000000..a592ce9a --- /dev/null +++ b/tools/env/setup-vm.sh @@ -0,0 +1,114 @@ +#!/bin/sh + +set -e + +export BOX_NAME=${BOX_NAME:-"opensuse/Leap-15.3.x86_64"} +export VM_PROVIDER=${VM_PROVIDER:-"libvirt"} +export VM_NET=${VM_NET:-"10.46.201.0"} +export VM_NET_LAST_OCTET_START=${CLUSTER_NET_LAST_OCTET_START:-"101"} +export VM_BRIDGE_INET=${VM_BRIDGE_INET:-"eth0"} +export ADMIN_COUNT=${ADMIN_COUNT:-"1"} +export WORKER_COUNT=${WORKER_COUNT:-"1"} +export ADMIN_MEM=${ADMIN_MEM:-"4096"} +export ADMIN_CPU=${ADMIN_CPU:-"2"} +export ADMIN_DISK=${ADMIN_DISK:-"no"} +export ADMIN_DISK_SIZE=${ADMIN_DISK_SIZE:-"8G"} +export WORKER_MEM=${WORKER_MEM:-"4096"} +export WORKER_CPU=${WORKER_CPU:-"2"} +export WORKER_DISK=${WORKER_DISK:-"no"} +export WORKER_DISK_SIZE=${WORKER_DISK_SIZE:-"8G"} +export CONTAINER_ENGINE=${CONTAINER_ENGINE:-"podman"} +export STOP_AFTER_BOOTSTRAP=${STOP_AFTER_BOOTSTRAP:-"no"} +export STOP_AFTER_K3S_INSTALL=${STOP_AFTER_K3S_INSTALL:-"no"} +export S3GW_IMAGE=${S3GW_IMAGE:-"ghcr.io/aquarist-labs/s3gw:latest"} +export S3GW_IMAGE_PULL_POLICY=${S3GW_IMAGE_PULL_POLICY:-"Always"} +export PROV_USER=${PROV_USER:-"vagrant"} + +#these defaults will change +export S3GW_UI_REPO=${S3GW_UI_REPO:-"https://github.com/aquarist-labs/aws-s3-explorer.git"} +export S3GW_UI_VERSION=${S3GW_UI_VERSION:-"s3gw-ui-testing"} + +export SCENARIO=${SCENARIO:-"default"} +export K3S_VERSION=${K3S_VERSION:-"v1.23.6+k3s1"} + +start_env() { + echo "Starting environment ..." 
+ echo "WORKER_COUNT=${WORKER_COUNT}" + vagrant up +} + +build_env() { + echo "BOX_NAME=${BOX_NAME}" + echo "VM_PROVIDER=${VM_PROVIDER}" + echo "VM_NET=${VM_NET}" + echo "VM_NET_LAST_OCTET_START=${VM_NET_LAST_OCTET_START}" + echo "VM_BRIDGE_INET=${VM_BRIDGE_INET}" + echo "ADMIN_COUNT=${ADMIN_COUNT}" + echo "WORKER_COUNT=${WORKER_COUNT}" + echo "ADMIN_MEM=${ADMIN_MEM}" + echo "ADMIN_CPU=${ADMIN_CPU}" + echo "ADMIN_DISK=${ADMIN_DISK}" + echo "ADMIN_DISK_SIZE=${ADMIN_DISK_SIZE}" + echo "WORKER_MEM=${WORKER_MEM}" + echo "WORKER_CPU=${WORKER_CPU}" + echo "WORKER_DISK=${WORKER_DISK}" + echo "WORKER_DISK_SIZE=${WORKER_DISK_SIZE}" + echo "CONTAINER_ENGINE=${CONTAINER_ENGINE}" + echo "STOP_AFTER_BOOTSTRAP=${STOP_AFTER_BOOTSTRAP}" + echo "STOP_AFTER_K3S_INSTALL=${STOP_AFTER_K3S_INSTALL}" + echo "S3GW_IMAGE=${S3GW_IMAGE}" + echo "S3GW_IMAGE_PULL_POLICY=${S3GW_IMAGE_PULL_POLICY}" + echo "PROV_USER=${PROV_USER}" + echo "S3GW_UI_REPO=${S3GW_UI_REPO}" + echo "S3GW_UI_VERSION=${S3GW_UI_VERSION}" + echo "SCENARIO=${SCENARIO}" + echo "K3S_VERSION=${K3S_VERSION}" + + echo "Building environment ..." + vagrant up --provision + echo "Built" + + echo "Cleaning ..." + rm -rf ./*.tar + echo "Cleaned" + echo + echo "Connect to admin node with:" + echo "vagrant ssh admin-1" +} + +destroy_env() { + echo "Destroying environment ..." + echo "WORKER_COUNT=${WORKER_COUNT}" + vagrant destroy -f +} + +ssh_vm() { + echo "Connecting to $1 ..." + echo "WORKER_COUNT=${WORKER_COUNT}" + + vagrant ssh $1 +} + +if [ $# -eq 0 ]; then + build_env +elif [ $# -eq 1 ]; then + case $1 in + start) + start_env + ;; + build) + build_env + ;; + destroy) + destroy_env + ;; + esac +else + case $1 in + ssh) + ssh_vm $2 + ;; + esac +fi + +exit 0 diff --git a/tools/env/setup.sh b/tools/env/setup.sh new file mode 100755 index 00000000..cd47184d --- /dev/null +++ b/tools/env/setup.sh @@ -0,0 +1,345 @@ +#!/bin/bash +# setup.sh - setup a k3s cluster with longhorn and s3gw +# Copyright 2022 SUSE, LLC. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +INSTALL_K3S_VERSION=v1.24.7+k3s1 +INSTALL_LONGHORN_VERSION=v1.3.2 + +ghraw="https://raw.githubusercontent.com" +install_s3gw=true +dev_env=false +use_local_image=0 +has_image=false +s3gw_image="ghcr.io/aquarist-labs/s3gw:latest" + +#this will likely change to have defaults as s3gw_image +use_local_image_s3exp=0 +has_image_s3exp=false +s3gw_image_s3exp="ghcr.io/aquarist-labs/s3gw-ui:latest" +longhorn_custom_settings=false + +function info() { + echo "[INFO] $*" >/dev/stdout +} + +function error() { + echo "[ERROR] ${@}" >&2 +} + +function apply() { + desc=${1} + yaml=${2} + + [[ -z "${desc}" || -z "${yaml}" ]] && \ + error "Missing parameters to function apply." && \ + exit 1 + + echo "${desc}" + k3s kubectl apply -f ./${yaml} || ( + error "Failed to create ${desc}." + exit 1 + ) +} + +function wait_ingresses() { + echo -n "Waiting for cluster to become ready..." + ip="" + until [ -n "${ip}" ] + do + echo -n "." 
&& sleep 1; + ip=$(kubectl get -n s3gw-system ingress s3gw-ingress -o 'jsonpath={.status.loadBalancer.ingress[].ip}'); + done +} + +function show_ingresses() { + ip=$(kubectl get -n s3gw-system ingress s3gw-ingress -o 'jsonpath={.status.loadBalancer.ingress[].ip}'); + echo "" + echo "Please add the following line to /etc/hosts to be able to access" + echo "the Longhorn UI and s3gw:" + echo "" + echo "${ip} longhorn.local s3gw.local s3gw-ui.local" + echo "" + echo "Longhorn UI available at: https://longhorn.local" + echo " https://longhorn.local:30443" + echo "s3gw available at: http://s3gw.local" + echo " http://s3gw.local:30080" + echo " https://s3gw.local" + echo " https://s3gw.local:30443" + echo " http://s3gw.local" + echo " http://s3gw.local:30080" + echo "s3gw-ui available at: http://s3gw-ui.local" + echo " http://s3gw-ui.local:30080" + echo " https://s3gw-ui.local" + echo " https://s3gw-ui.local:30443" + echo "" + echo "Note, the UI will not be able to access the RGW AdminOps API when" + # Check https://github.com/aquarist-labs/s3gw/issues/31 to get more + # information about the CORS issues. + echo "using HTTPS and self-signed certificates because of CORS issues." + echo "To workaround that, please open the URL https://s3gw.local in the" + echo "browser and accept the SSL certificate before accessing the UI." + echo "" +} + +function install_on_vm() { + echo "Proceeding to install on a virtual machine..." + WORKER_COUNT=0 + S3GW_IMAGE=$s3gw_image + source ./setup-vm.sh build +} + +function export_local_image() { + info "Checking for local s3gw image..." + img=$(podman images --noheading --sort created s3gw:latest --format '{{.Repository}}:{{.Tag}}' | \ + head -n 1) + + if [[ -z "${img}" ]]; then + error "Unable to find local s3gw image." + exit 1 + fi + + rm -rf ./s3gw.ctr.tar + info "Exporting ${img}..." + podman image save ${img} -o ./s3gw.ctr.tar || ( + error "Failed to export s3gw image." 
+ exit 1 + ) +} + +function import_local_image() { + info "Importing local s3gw container image..." + sudo k3s ctr images import ./s3gw.ctr.tar || ( + error "Failed to import local s3gw image." + exit 1 + ) +} + +function export_local_ui_image() { + info "Checking for local s3gw-ui image..." + img=$(podman images --noheading --sort created s3gw-ui:latest --format '{{.Repository}}:{{.Tag}}' | \ + head -n 1) + + if [[ -z "${img}" ]]; then + error "Unable to find local s3gw-ui image." + exit 1 + fi + + rm -rf ./s3gw-ui.ctr.tar + info "Exporting ${img}..." + podman image save ${img} -o ./s3gw-ui.ctr.tar || ( + error "Failed to export s3gw-ui image." + exit 1 + ) +} + +function import_local_ui_image() { + info "Importing local s3gw-ui container image..." + sudo k3s ctr images import ./s3gw-ui.ctr.tar || ( + error "Failed to import local s3gw-ui image." + exit 1 + ) +} + +function check_longhorn_custom_settings() { + if [[ ! -e "./longhorn-setting.yaml" ]]; then + error "Unable to find longhorn-setting.yaml file." 
+ exit 1 + fi +} + +# https://github.com/mikefarah/yq +function yq() { + podman run --rm -i \ + -e LONGHORN_SETTING="$(cat longhorn-setting.yaml)" \ + -v "${PWD}":/workdir \ + mikefarah/yq "$@" +} + +while [[ $# -gt 0 ]]; do + case $1 in + --dev) + dev_env=true + echo "======================================" + echo " INSTALLING DEVELOPMENT ENVIRONMENT " + echo "======================================" + echo + ;; + --s3gw-image) + s3gw_image=$2 + has_image=true + shift 1 + ;; + --s3gw-image-s3exp) + s3gw_image_s3exp=$2 + has_image_s3exp=true + shift 1 + ;; + --show-ingresses) + show_ingresses + exit 0 + ;; + --vm) + install_on_vm + exit 0 + ;; + --import-local-image) + export_local_image + import_local_image + exit 0 + ;; + --import-local-ui-image) + export_local_ui_image + import_local_ui_image + exit 0 + ;; + --no-s3gw) + install_s3gw=false + ;; + --longhorn-custom-settings) + check_longhorn_custom_settings + longhorn_custom_settings=true + ;; + *) + error "Unknown argument '${1}'" + exit 1 + ;; + esac + shift +done + +if $install_s3gw ; then + if [[ -z "${s3gw_image}" ]]; then + error "s3gw image not provided" + exit 1 + fi + + if $dev_env ; then + if [[ ! -e "./s3gw.ctr.tar" ]]; then + export_local_image + fi + use_local_image=1 + ! $has_image && s3gw_image="localhost/s3gw:latest" + echo "Using local s3gw image '${s3gw_image}'." + fi + + if [[ -z "${s3gw_image_s3exp}" ]]; then + error "s3gw-ui image not provided" + exit 1 + fi + + if $dev_env ; then + if [[ ! -e "./s3gw-ui.ctr.tar" ]]; then + export_local_ui_image + fi + use_local_image_s3exp=1 + ! $has_image_s3exp && s3gw_image_s3exp="localhost/s3gw-ui:latest" + echo "Using local s3gw-ui image '${s3gw_image_s3exp}'." + fi +fi + +if k3s --version >&/dev/null ; then + error "K3s already installed, we won't proceed." + exit 0 +fi + +echo "Installing K3s..." +curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${INSTALL_K3S_VERSION} sh -s - --write-kubeconfig-mode 644 || ( + error "Failed to install K3s." 
+ exit 1 +) + +# https://longhorn.io/docs/1.3.2/deploy/install/#installing-open-iscsi +echo "Installing iscsi..." +k3s kubectl apply \ + -f ${ghraw}/longhorn/longhorn/${INSTALL_LONGHORN_VERSION}/deploy/prerequisite/longhorn-iscsi-installation.yaml || ( + error "Failed to install iscsi." + exit 1 +) + +echo "Installing Longhorn..." +if $longhorn_custom_settings ; then + curl -s ${ghraw}/longhorn/longhorn/${INSTALL_LONGHORN_VERSION}/deploy/longhorn.yaml | \ + yq 'select(.kind == "ConfigMap" and .metadata.name == "longhorn-default-setting").data = env(LONGHORN_SETTING)' | \ + yq 'select(.kind != null)' | \ + k3s kubectl apply -f - || ( + error "Failed to install Longhorn." + exit 1 + ) +else + k3s kubectl apply \ + -f ${ghraw}/longhorn/longhorn/${INSTALL_LONGHORN_VERSION}/deploy/longhorn.yaml || ( + error "Failed to install Longhorn." + exit 1 + ) +fi + +if $install_s3gw ; then + if [ ${use_local_image} -eq 1 ]; then + import_local_image + else + echo "Pulling s3gw container image..." + sudo k3s ctr images pull ${s3gw_image} || ( + error "Failed to pull s3gw image ${s3gw_image}." + exit 1 + ) + fi + + if [ ${use_local_image_s3exp} -eq 1 ]; then + import_local_ui_image + else + echo "Pulling s3gw-ui container image..." + sudo k3s ctr images pull ${s3gw_image_s3exp} || ( + error "Failed to pull s3gw-ui image ${s3gw_image_s3exp}." + exit 1 + ) + fi +fi + +# Workaround a K8s behaviour that CustomResourceDefinition must be +# established before they can be used by a resource. +# https://github.com/kubernetes/kubectl/issues/1117 +# k3s kubectl wait --for=condition=established --timeout=60s crd middlewares.traefik.containo.us +echo -n "Waiting for CRD to be established..." +while [[ $(kubectl get crd middlewares.traefik.containo.us -o 'jsonpath={..status.conditions[?(@.type=="Established")].status}' 2>/dev/null) != "True" ]]; do + echo -n "." 
&& sleep 1; +done +echo + +if $install_s3gw ; then + s3gw_yaml="s3gw.yaml" + $dev_env && s3gw_yaml="s3gw-dev.yaml" + + if [[ -e ${s3gw_yaml} ]]; then + apply "Installing s3gw from spec file at '${s3gw_yaml}'..." ${s3gw_yaml} + elif [[ -e "generate-spec.sh" ]]; then + extra="" + $dev_env && extra="--dev" + echo "Generating s3gw spec file at '${s3gw_yaml}'..." + ./generate-spec.sh --output ${s3gw_yaml} ${extra} --ingress ${ingress} + apply "Installing s3gw from spec file at '${s3gw_yaml}'..." ${s3gw_yaml} + else + echo "Installing s3gw..." + k3s kubectl apply \ + -f ${ghraw}/aquarist-labs/s3gw-tools/main/env/s3gw.yaml || ( + error "Failed to install s3gw." + exit 1 + ) + fi + + wait_ingresses + show_ingresses +fi diff --git a/tools/scripts/patch-s3gw-deployment.sh b/tools/scripts/patch-s3gw-deployment.sh new file mode 100755 index 00000000..0e43d51b --- /dev/null +++ b/tools/scripts/patch-s3gw-deployment.sh @@ -0,0 +1,238 @@ +#!/bin/bash + +# This script should be used while doing development on s3gw's radosgw binaries. +# By calling this script, we patch the s3gw deployment inside k8s as this: +# +# 1) A base image is used, ghcr.io/aquarist-labs/s3gw:latest (default). +# 2) We create a PVC and a helper copier pod that mounts that PVC. +# 3) We copy the built binaries on the PVC by calling an equivalent `kubectl cp` command +# on the copier pod. +# +# 4) We mount the same PVC on the s3gw pod at the location where +# the binaries are expected (/radosgw). +# +# Patching the deployment forces the s3gw pod to restart with the new binaries in place. + +set -e +timeout=120s +clean_data=false + +RADOSGW_BUILD_PATH="${RADOSGW_BUILD_PATH:-"./build"}" +S3GW_DEPLOYMENT_NS="${S3GW_DEPLOYMENT_NS:-"default"}" +S3GW_DEPLOYMENT_BI="${S3GW_DEPLOYMENT_BI:-"ghcr.io/aquarist-labs/s3gw:latest"}" +PVC_STORAGE_CLASS="${PVC_STORAGE_CLASS:-"longhorn"}" + +error() { + echo "error: $*" >/dev/stderr +} + +usage() { + cat << EOF +usage: $0 CMD [args...] 
+ +options + --clean-data Specifies whether delete the /data content in the s3gw pod. + +env variables + RADOSGW_BUILD_PATH Specifies the Ceph output build directory. + S3GW_DEPLOYMENT_NS Specifies the s3gw namespace in Kubernetes. + S3GW_DEPLOYMENT_BI Specifies the s3gw image to be used when patching the s3gw deployment. + PVC_STORAGE_CLASS Specifies the storage class to be used for the radosgw-binary PVC. + +EOF +} + +while [[ $# -gt 0 ]]; do + case $1 in + --clean-data) + clean_data=true + shift 1 + ;; + *) + usage + exit 1 + ;; + esac +done + +echo +echo Deployment patch configuration +echo " - binaries location: ${RADOSGW_BUILD_PATH}" +echo " - s3gw namespace: ${S3GW_DEPLOYMENT_NS}" +echo " - s3gw base image: ${S3GW_DEPLOYMENT_BI}" +echo " - pvc storage class: ${PVC_STORAGE_CLASS}" +echo " - Cleaning s3gw data: ${clean_data}" +echo + +items=( + ${RADOSGW_BUILD_PATH}/bin/radosgw + ${RADOSGW_BUILD_PATH}/lib/libceph-common.so + ${RADOSGW_BUILD_PATH}/lib/libceph-common.so.2 + ${RADOSGW_BUILD_PATH}/lib/libradosgw.so + ${RADOSGW_BUILD_PATH}/lib/libradosgw.so.2 + ${RADOSGW_BUILD_PATH}/lib/libradosgw.so.2.0.0 + ${RADOSGW_BUILD_PATH}/lib/librados.so + ${RADOSGW_BUILD_PATH}/lib/librados.so.2 + ${RADOSGW_BUILD_PATH}/lib/librados.so.2.0.0 +) + +TAR_ITEMS="" +for item in ${items[@]}; do + TAR_ITEMS=$TAR_ITEMS$item" " +done + +echo "Creating the radosgw-binary PVC..." +cat </dev/stderr +} + +usage() { + cat << EOF +usage: $0 LOGFILE [options] + +mandatory: + --branch NAME Name of aquarist-labs/ceph.git branch. + --pr ID Pull Request ID from aquarist-labs/ceph.git. + --sha STRING SHA256 of tested commit from tested branch. + + One of --branch or --pr must be specified, but never the two at the + same time. + +options: + --help|-h This message + --user NAME User generating this report (default: ${USER}). + --publish Publish the result to the 's3gw-status' repository. 
+ --output|-o Output file + +example: + $ $0 s3gw-s3test-2022-06-03-205212-1ldv/s3gw-s3tests.log \\ + --branch wip-foo \\ + --sha 123aadsfsdf3244 \\ + --user joao + +NOTE: + + When using the '--publish' option, a clone of 's3gw-status.git' will be + required in the same directory from which this script is run. We recommend + configuring this repository prior to using this option to ensure properly + signed commits. + +EOF +} + +res_errors=() +res_fails=() +res_okay=() +resfile= +branch= +prid= +sha= +user=${USER} +publish=false + +posargs=() +while [[ $# -gt 0 ]]; do + + case ${1} in + --help|-h) + usage + exit 0 + ;; + --branch) + branch="${2}" + shift 1 + ;; + --pr) + prid="${2}" + shift 1 + ;; + --sha) + sha="${2}" + shift 1 + ;; + --user) + user="${2}" + shift 1 + ;; + --publish) + publish=true + ;; + --output|-o) + outfn="${2}" + shift 1 + ;; + *) + posargs=(${posargs[@]} ${1}) + ;; + esac + shift 1 + +done + +resfile=${posargs[0]} + +if [[ -z "${resfile}" ]]; then + error "error: results file not provided." + usage + exit 1 + +elif [[ -z "${branch}" && -z "${prid}" ]]; then + error "error: neither --branch nor --pr specified." + exit 1 + +elif [[ -n "${branch}" && -n "${prid}" ]]; then + error "error: both --branch and --pr specified." + exit 1 + +elif [[ -z "${sha}" ]]; then + error "error: --sha not specified." + exit 1 +fi + +if $publish ; then + + if [[ ! -d "s3gw-status.git" ]]; then + cat <>${outfn} <>${outfn} <>${outfn} <>${outfn} <>${outfn} <>${outfn} <>${outfn} < ${s3cfg} << EOF +[default] +access_key = test +secret_key = test +host_base = ${url}/ +host_bucket = ${url}/%(bucket) +signurl_use_https = False +use_https = False +signature_v2 = True +signurl_use_https = False +EOF + +pushd ${testpath} + +# Please note: rgw will refuse bucket names with upper case letters. +# This is due to amazon s3's bucket naming restrictions. 
+# See: +# https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html +# +bucket="s3gw-test-$(tr -dc a-z0-9 < /dev/urandom | head -c 4)" + +s3 ls s3:// || exit 1 +s3 mb s3://${bucket} || exit 1 +s3 ls s3://${bucket} || exit 1 +s3 ls s3://${bucket}-dne && exit 1 + +dd if=/dev/random bs=1k count=1k of=obj1.bin || exit 1 +dd if=/dev/random bs=1k count=2k of=obj2.bin || exit 1 + +s3 put obj1.bin s3://${bucket}/ || exit 1 +s3 put obj1.bin s3://${bucket}/obj1.bin || exit 1 +s3 put obj1.bin s3://${bucket}/obj1.bin.2 || exit 1 +s3 put obj1.bin s3://${bucket}/my/obj1.bin || exit 1 +s3 get s3://${bucket}/obj1.bin obj1.bin.local || exit 1 +orig_md5=$(md5sum -b obj1.bin | cut -f1 -d' ') +down_md5=$(md5sum -b obj1.bin.local | cut -f1 -d' ') + +[[ "${orig_md5}" == "${down_md5}" ]] || exit 1 + +s3 get s3://${bucket}/does-not-exist.bin && exit 1 + +must_have=("obj1.bin" "obj1.bin.2" "my/obj1.bin") +ifs_old=$IFS +IFS=$'\n' +lst=($(s3 ls s3://${bucket})) + +[[ ${#lst[@]} -eq 3 ]] || exit 1 +for what in ${must_have[@]} ; do + found=false + for e in ${lst[@]}; do + r=$(echo $e | grep "s3://${bucket}/${what}$") + [[ -n "${r}" ]] && found=true && break + done + $found || exit 1 +done + +s3 rm s3://${bucket}/obj1.bin.2 || exit 1 + +s3 put obj2.bin s3://${bucket}/obj2.bin || exit 1 +s3 put obj1.bin s3://${bucket}/obj2.bin || exit 1 +s3 get s3://${bucket}/obj2.bin obj2.bin.local || exit 1 +md5_before=$(md5sum -b obj2.bin | cut -f1 -d' ') +md5_after=$(md5sum -b obj2.bin.local | cut -f1 -d' ') +md5_expected=$(md5sum -b obj1.bin | cut -f1 -d' ') + +[[ "${md5_before}" != "${md5_after}" ]] || exit 1 +[[ "${md5_after}" == "${md5_expected}" ]] || exit 1 + +md5_obj1=$(md5sum -b obj1.bin | cut -f1 -d' ') + +do_copy() { + dst_bucket=$1 + + # For now this operation fails. While the copy actually succeeds, s3cmd then + # tries to perform an ACL operation on the bucket/object, and that fails. + # We need to ensure the object is there instead, and check it matches in + # contents. 
+ s3 cp s3://${bucket}/obj1.bin s3://${dst_bucket}/obj1.bin.copy || true + s3 get s3://${dst_bucket}/obj1.bin.copy obj1.bin.copy.${dst_bucket} || exit 1 + + md5_copy=$(md5sum -b obj1.bin.copy.${dst_bucket} | cut -f1 -d' ') + [[ "${md5_copy}" == "${md5_obj1}" ]] || exit 1 + + if ! s3 ls s3://${dst_bucket} | grep -q obj1.bin.copy ; then + exit 1 + fi +} + +# copy from $bucket/obj to $bucket/obj.copy +do_copy ${bucket} + +# copy from $bucket/obj to $newbucket/obj.copy +newbucket="${bucket}-2" +s3 mb s3://${newbucket} || exit 1 +do_copy ${newbucket} + +# delete the bucket contents +s3 del --recursive --force s3://${bucket} + +# list the bucket, it should be empty +lst=($(s3 ls s3://${bucket})) +[[ ${#lst[@]} -eq 0 ]] || exit 1 + +# remove the bucket +s3 rb s3://${bucket} + +# should no longer be available +s3 ls s3://${bucket} && exit 1 + +exit 0 diff --git a/tools/tests/s3gw-test.sh b/tools/tests/s3gw-test.sh new file mode 100755 index 00000000..6b6cd32e --- /dev/null +++ b/tools/tests/s3gw-test.sh @@ -0,0 +1,126 @@ +#!/bin/bash +# Copyright 2022 SUSE, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +img="s3gw:latest" + + +error() { + echo "error: $*" >/dev/stderr +} + + +usage() { + + cat < "${OUTPUT_DIR}/logs/${test}/radosgw.log" 2>&1 & + JOB="$!" + + # sleep until s3gw has spun up + while ! 
curl -s localhost:7480 > /dev/null ; do sleep .1 ; done
+  fi
+
+  pushd "${S3TEST_REPO}" > /dev/null || exit 1
+}
+
+
+# Run a single s3test case, record its result in the YAML report.
+_run() {
+  local test="$1"
+  local result=
+  local name ; name="$(echo "$test" | cut -d ':' -f 2)"
+
+  _setup "$test"
+
+  # this is needed for nosetests
+  export S3TEST_CONF
+  if nosetests \
+    -c "${S3TEST_CONF}" \
+    -s \
+    -a '!fails_on_rgw,!lifecycle_expiration,!fails_strict_rfc2616' \
+    "$test" > "${OUTPUT_DIR}/logs/${test}/test.output" 2>&1 ; then
+    result="success"
+  else
+    result="failure"
+  fi
+
+  echo "$test : $result"
+
+  yq -i \
+    ".tests += [{\"name\": \"${name}\", \"result\": \"${result}\"}]" \
+    "${TMPFILE}"
+  _teardown
+}
+
+
+# Stop the gateway started by _setup (container or local job) and clean up.
+_teardown() {
+  if [ -n "$CONTAINER" ] ; then
+    podman kill "$CONTAINER"
+  else
+    kill "$JOB"
+    rm -rf "${TMPDIR}"
+  fi
+
+  popd > /dev/null || exit 1
+}
+
+
+# Convert the intermediate YAML report into the final JSON report.
+_convert() {
+  yq -o=json '.' "${TMPFILE}" > "${OUTPUT_FILE}"
+  rm "${TMPFILE}"
+}
+
+
+_main() {
+  [ -d "${OUTPUT_DIR}" ] || mkdir -p "${OUTPUT_DIR}"
+  [ -d "${OUTPUT_DIR}/logs" ] || mkdir -p "${OUTPUT_DIR}/logs"
+
+  # fixed template typo: .ymal -> .yaml
+  TMPFILE="$(mktemp -q -p "${OUTPUT_DIR}" report.XXXXXX.yaml)"
+  # mktemp *creates* the file, so an existence test (-f) would always be
+  # true and the YAML root would never be seeded; test for non-empty (-s)
+  # instead so "tests:" is written into the fresh report.
+  [ -s "${TMPFILE}" ] || echo "tests:" > "${TMPFILE}"
+
+  if [ -n "$1" ] ; then
+    _run "$1"
+  else
+    while read -r test ; do
+      _run "$test"
+    done < <( grep -v '#' "$S3TEST_LIST" )
+  fi
+
+  _convert
+}
+
+
+_main "$@"
diff --git a/tools/tests/test-s3gw-buckets-rest-api.py b/tools/tests/test-s3gw-buckets-rest-api.py
new file mode 100755
index 00000000..1e52748d
--- /dev/null
+++ b/tools/tests/test-s3gw-buckets-rest-api.py
@@ -0,0 +1,38 @@
+import json
+import sys
+import unittest
+from datetime import datetime
+
+import requests
+from awsauth import S3Auth
+
+
+class UserRestAPITests(unittest.TestCase):
+    """Smoke test for the bucket REST API exposed by s3gw."""
+
+    ACCESS_KEY = 'test'
+    SECRET_KEY = 'test'
+    URL = 'http://127.0.0.1:7480'
+
+    def setUp(self):
+        self.auth = S3Auth(UserRestAPITests.ACCESS_KEY,
+                           UserRestAPITests.SECRET_KEY, self.URL)
+
+    def test_smoke_test(self):
+        # timestamp suffix keeps repeated runs from colliding on the name
+        ts = datetime.timestamp(datetime.now())
+        bucket_name = "foo." + str(ts)
+
+        # add a bucket and check the json response for a system user
+        response = requests.put(self.URL + "/" + bucket_name, auth=self.auth)
+        self.assertEqual(200, response.status_code)
+        json_response = json.loads(response.content)
+        self.assertIsInstance(json_response, dict)
+        self.assertIsInstance(json_response["bucket_info"], dict)
+        self.assertIsInstance(json_response["bucket_info"]["bucket"], dict)
+        bucket = json_response["bucket_info"]["bucket"]
+        self.assertNotEqual("", bucket["bucket_id"])
+        self.assertEqual(bucket["bucket_id"], bucket["marker"])
+        self.assertEqual(bucket_name, bucket["name"])
+        self.assertNotEqual("", json_response["bucket_info"]["creation_time"])
+
+
+if __name__ == "__main__":
+    if len(sys.argv) == 2:
+        address_port = sys.argv.pop()
+        UserRestAPITests.URL = 'http://{0}'.format(address_port)
+        unittest.main()
+    else:
+        print("usage: {0} ADDRESS:PORT".format(sys.argv[0]))
diff --git a/tools/tests/test-s3gw-multipart.py b/tools/tests/test-s3gw-multipart.py
new file mode 100755
index 00000000..c41558c2
--- /dev/null
+++ b/tools/tests/test-s3gw-multipart.py
@@ -0,0 +1,445 @@
+#!/usr/bin/env python3
+#
+# Copyright 2022 SUSE, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from pathlib import Path
+import string
+from typing import Any, Dict, List, Tuple
+import unittest
+import boto3, boto3.s3.transfer
+import random
+import tempfile
+from pydantic import BaseModel
+import hashlib
+
+ACCESS_KEY = "test"
+SECRET_KEY = "test"
+URL = "http://127.0.0.1:7480"
+
+BUCKET_NAME_LEN = 8
+OBJECT_NAME_LEN = 10
+UPLOAD_ID_LEN = 12
+
+
+class MultipartPart(BaseModel):
+    # one on-disk chunk of a multipart object plus its expected checksum
+    path: Path
+    size: int
+    md5: str
+
+
+class MultipartFile(BaseModel):
+    # a full object split into parts; 'md5' covers the concatenated whole
+    parts: List[MultipartPart]
+    object: Path
+    md5: str
+
+
+class MultipartUploadSmokeTests(unittest.TestCase):
+    """Smoke tests for s3gw multipart uploads (create/list/abort/complete)."""
+
+    buckets: List[str]
+
+    def setUp(self) -> None:
+        self.s3 = boto3.resource(  # type: ignore
+            "s3",
+            endpoint_url=URL,
+            aws_access_key_id=ACCESS_KEY,
+            aws_secret_access_key=SECRET_KEY,
+        )
+        self.s3c = boto3.client(  # type: ignore
+            "s3",
+            endpoint_url=URL,
+            aws_access_key_id=ACCESS_KEY,
+            aws_secret_access_key=SECRET_KEY,
+        )
+        self.testdir = tempfile.TemporaryDirectory()
+        self.testpath = Path(self.testdir.name)
+        self.buckets = []
+
+    def tearDown(self) -> None:
+        # cleanup any buckets still owned by the test
+        for name in self.buckets:
+            bucket = self.s3.Bucket(name)
+            bucket.objects.delete()
+            bucket.delete()
+
+        self.s3c.close()
+        self.s3.meta.client.close()
+        self.testdir.cleanup()
+
+    def create_bucket(self) -> str:
+        """Create a randomly named bucket and track it for teardown."""
+        name = self.get_random_bucket_name()
+        self.s3c.create_bucket(Bucket=name)
+        self.assert_bucket_exists(name)
+        assert name not in self.buckets
+        self.buckets.append(name)
+        return name
+
+    def get_random_name(self, length: int) -> str:
+        # parameter renamed: 'len' shadowed the builtin
+        return "".join(
+            random.choice(string.ascii_lowercase) for _ in range(length)
+        )
+
+    def get_random_bucket_name(self) -> str:
+        return self.get_random_name(BUCKET_NAME_LEN)
+
+    def get_random_object_name(self) -> str:
+        return self.get_random_name(OBJECT_NAME_LEN)
+
+    def get_random_upload_id(self) -> str:
+        return self.get_random_name(UPLOAD_ID_LEN)
+
+    def gen_multipart(
+        self, objname: str, size: int, partsize: int
+    ) -> MultipartFile:
+        """Write a random file of 'size' bytes plus its individual parts.
+
+        Returns a MultipartFile describing every part file and the md5 of
+        the concatenated whole.
+        """
+        path: Path = self.testpath / objname
+        parts_lst: List[MultipartPart] = []
+
+        nparts = int(size / partsize)
+        last_part_size = size % partsize
+        if last_part_size > 0:
+            nparts += 1
+
+        full_md5 = hashlib.md5()
+        # context manager instead of manual open/close: the file is closed
+        # even if a part write raises
+        with path.open("wb") as out_full:
+            for i in range(nparts):
+                partfile: Path = Path(f"{path}.part.{i}")
+                s = partsize
+                if last_part_size > 0 and i == nparts - 1:
+                    s = last_part_size
+                data = os.urandom(s)
+                with partfile.open("wb") as out_part:
+                    out_part.write(data)
+                out_full.write(data)
+                md5 = hashlib.md5(data)
+                full_md5.update(data)
+                parts_lst.append(
+                    MultipartPart(path=partfile, size=s, md5=md5.hexdigest())
+                )
+
+        return MultipartFile(
+            parts=parts_lst, object=path, md5=full_md5.hexdigest()
+        )
+
+    def gen_random_file(self, objname: str, size: int) -> Tuple[Path, str]:
+        """Write 'size' random bytes to a temp file; return (path, md5)."""
+        path: Path = self.testpath / objname
+        data = os.urandom(size)
+        with path.open("wb") as outfd:
+            outfd.write(data)
+        md5 = hashlib.md5(data)
+        return path, md5.hexdigest()
+
+    def assert_bucket_exists(self, name: str) -> None:
+        res = self.s3c.list_buckets()
+        found = any(b.get("Name") == name for b in res["Buckets"])
+        self.assertTrue(found)
+
+    def test_dne_upload_multipart(self):
+        # uploading a part against an upload id that was never created must
+        # fail with NoSuchUpload
+        bucket_name = self.create_bucket()
+        objname = self.get_random_object_name()
+        upload_id = self.get_random_upload_id()
+        upload = self.s3.MultipartUpload(bucket_name, objname, upload_id)
+        part = upload.Part(1)  # type: ignore
+        has_error = False
+        try:
+            part.upload(Body=b"foobarbaz")
+        except self.s3.meta.client.exceptions.NoSuchUpload:
+            has_error = True
+
+        self.assertTrue(has_error)
+        # deleted here, so drop it from the teardown list to avoid a second
+        # delete against a bucket that no longer exists
+        self.s3c.delete_bucket(Bucket=bucket_name)
+        self.buckets.remove(bucket_name)
+        return
+
+    def test_multipart_upload_download(self):
+        bucket_name = self.create_bucket()
+        objname = self.get_random_object_name()
+        objsize = 100 * 1024**2  # 100 MB
+        objpath, md5 = self.gen_random_file(objname, objsize)
+
+        cfg = boto3.s3.transfer.TransferConfig(
+            multipart_threshold=10 * 1024,  # 10 KB: force multipart transfer
+            max_concurrency=10,
+            multipart_chunksize=10 * 1024**2,  # 10 MB
+            use_threads=True,
+        )
+
+        obj = self.s3.Object(bucket_name, objname)
+        obj.upload_file(objpath.as_posix(), Config=cfg)
+
+        downobj = self.testpath / f"{objname}.down.bin"
+        obj.download_file(downobj.as_posix(), Config=cfg)
+
+        with downobj.open("rb") as fd:
+            down_md5 = hashlib.md5(fd.read())
+
+        self.assertEqual(down_md5.hexdigest(), md5)
+
+    def test_upload_multipart_manual(self):
+        bucket_name = self.create_bucket()
+        objname = self.get_random_object_name()
+        objsize = 100 * 1024**2  # 100 MB
+        partsize = 10 * 1024**2  # 10 MB
+        mp = self.gen_multipart(objname, objsize, partsize)
+        print(
+            f"generated multipart upload, bucket: {bucket_name}, "
+            f"obj: {objname}, path: {mp.object}"
+        )
+
+        res = self.s3c.create_multipart_upload(Bucket=bucket_name, Key=objname)
+        self.assertIn("UploadId", res)
+        self.assertGreater(len(res["UploadId"]), 0)
+
+        parts_lst: List[Dict[str, Any]] = []
+        upload = self.s3.MultipartUpload(bucket_name, objname, res["UploadId"])
+        # part numbers are 1-based per the S3 API
+        for part_num, part_entry in enumerate(mp.parts, start=1):
+            part = upload.Part(part_num)  # type: ignore
+            sz = part_entry.path.stat().st_size
+            print(
+                f"upload part {part_num}/{len(mp.parts)}, "
+                f"md5: {part_entry.md5}, size: {sz}"
+            )
+            with part_entry.path.open("rb") as fd:
+                res = part.upload(Body=fd.read())
+            self.assertIn("ETag", res)
+            etag = res["ETag"]
+            print(f"uploaded part {part_num}/{len(mp.parts)}, etag: {etag}")
+            parts_lst.append({"ETag": etag, "PartNumber": part_num})
+
+        print(f"parts_lst: {parts_lst}")
+        upload.complete(MultipartUpload={"Parts": parts_lst})  # type: ignore
+
+        cfg = boto3.s3.transfer.TransferConfig(
+            multipart_threshold=10 * 1024,  # 10 KB: force multipart transfer
+            max_concurrency=10,
+            multipart_chunksize=10 * 1024**2,  # 10 MB
+            use_threads=True,
+        )
+        downobj = self.testpath / f"{objname}.down.bin"
+        print(f"download object to {str(downobj)}")
+
+        with downobj.open("wb") as fd:
+            self.s3c.download_fileobj(bucket_name, objname, fd, Config=cfg)
+
+        self.assertTrue(downobj.exists())
+        self.assertTrue(downobj.is_file())
+
+        with downobj.open("rb") as fd:
+            md5 = hashlib.md5(fd.read())
+
+        sz = downobj.stat().st_size
+        orig_sz = mp.object.stat().st_size
+        print(f"expected md5: {mp.md5}, size: {orig_sz}")
+        print(f"     got md5: {md5.hexdigest()}, size: {sz}")
+        with mp.object.open("rb") as fd:
+            md5_2 = hashlib.md5(fd.read())
+        print(f"actual md5: {md5_2.hexdigest()}")
+        self.assertEqual(md5.hexdigest(), mp.md5)
+
+    def test_list_ongoing_parts(self):
+        bucket_name = self.create_bucket()
+        objname = self.get_random_object_name()
+        objsize = 100 * 1024**2  # 100 MB
+        partsize = 10 * 1024**2  # 10 MB
+        mp = self.gen_multipart(objname, objsize, partsize)
+        print(
+            f"generated multipart upload, bucket: {bucket_name}, "
+            f"obj: {objname}, path: {mp.object}"
+        )
+        self.assertEqual(len(mp.parts), 10)
+
+        res = self.s3c.create_multipart_upload(Bucket=bucket_name, Key=objname)
+        self.assertIn("UploadId", res)
+        self.assertGreater(len(res["UploadId"]), 0)
+        upload_id = res["UploadId"]
+
+        res = self.s3c.list_parts(
+            Bucket=bucket_name, Key=objname, UploadId=upload_id
+        )
+        if "Parts" in res:
+            # the "Parts" entry may or may not be present if the multipart
+            # upload has zero parts.
+            self.assertEqual(len(res["Parts"]), 0)
+
+        # upload a part; use the id from create_multipart_upload rather than
+        # the one echoed back by list_parts (they carry the same value)
+        upload = self.s3.MultipartUpload(bucket_name, objname, upload_id)
+        part = upload.Part(1)  # type: ignore
+        part_entry = mp.parts[0]
+        part_size = part_entry.path.stat().st_size
+        with part_entry.path.open("rb") as fd:
+            res = part.upload(Body=fd.read())
+        self.assertIn("ETag", res)
+        etag = res["ETag"]
+
+        # check part is listed
+        res = self.s3c.list_parts(
+            Bucket=bucket_name, Key=objname, UploadId=upload_id
+        )
+        self.assertIn("Parts", res)
+        self.assertEqual(len(res["Parts"]), 1)
+        self.assertTrue("IsTruncated" in res and not res["IsTruncated"])
+        res_part = res["Parts"][0]
+        self.assertTrue(
+            "PartNumber" in res_part and res_part["PartNumber"] == 1
+        )
+        self.assertTrue("ETag" in res_part and res_part["ETag"] == etag)
+        self.assertTrue("Size" in res_part and res_part["Size"] == part_size)
+
+    def test_list_multipart_uploads(self):
+        bucket_name = self.create_bucket()
+
+        # we need known object names so we can have deterministic results
+        # later on when obtaining the multiparts list.
+        objname = "aaaa"
+
+        res = self.s3c.list_multipart_uploads(Bucket=bucket_name)
+        self.assertTrue("IsTruncated" in res and not res["IsTruncated"])
+        if "Uploads" in res:
+            # "Uploads" may or may not be present if there are zero multipart
+            # uploads in progress.
+            self.assertEqual(len(res["Uploads"]), 0)
+
+        res = self.s3c.create_multipart_upload(Bucket=bucket_name, Key=objname)
+        self.assertIn("UploadId", res)
+        self.assertGreater(len(res["UploadId"]), 0)
+        upload_id = res["UploadId"]
+
+        res = self.s3c.list_multipart_uploads(Bucket=bucket_name)
+        self.assertTrue("IsTruncated" in res and not res["IsTruncated"])
+        self.assertTrue("Uploads" in res and len(res["Uploads"]) == 1)
+        entry = res["Uploads"][0]
+        self.assertTrue("UploadId" in entry and entry["UploadId"] == upload_id)
+
+        # what about if we have a limit on the number of uploads returned?
+        objname2 = "bbbb"
+        res = self.s3c.create_multipart_upload(
+            Bucket=bucket_name, Key=objname2
+        )
+        self.assertIn("UploadId", res)
+        self.assertGreater(len(res["UploadId"]), 0)
+        upload_id2 = res["UploadId"]
+
+        res = self.s3c.list_multipart_uploads(Bucket=bucket_name, MaxUploads=1)
+        self.assertTrue("IsTruncated" in res and res["IsTruncated"])
+        self.assertTrue("Uploads" in res and len(res["Uploads"]) == 1)
+        self.assertTrue(
+            "NextUploadIdMarker" in res
+            and res["NextUploadIdMarker"] == upload_id
+        )
+        self.assertTrue(
+            "NextKeyMarker" in res and res["NextKeyMarker"] == objname
+        )
+        entry = res["Uploads"][0]
+        self.assertTrue("UploadId" in entry and entry["UploadId"] == upload_id)
+
+        res = self.s3c.list_multipart_uploads(
+            Bucket=bucket_name, MaxUploads=1, KeyMarker=objname
+        )
+        self.assertTrue("IsTruncated" in res and not res["IsTruncated"])
+        self.assertTrue("Uploads" in res and len(res["Uploads"]) == 1)
+        self.assertTrue(
+            "NextUploadIdMarker" in res
+            and res["NextUploadIdMarker"] == upload_id2
+        )
+        self.assertTrue(
+            "NextKeyMarker" in res and res["NextKeyMarker"] == objname2
+        )
+        entry = res["Uploads"][0]
+        self.assertTrue(
+            "UploadId" in entry and entry["UploadId"] == upload_id2
+        )
+
+    def test_abort_multipart_upload(self):
+        bucket_name = self.create_bucket()
+
+        res = self.s3c.list_multipart_uploads(Bucket=bucket_name)
+        self.assertTrue("IsTruncated" in res and not res["IsTruncated"])
+        if "Uploads" in res:
+            # "Uploads" may or may not be present if there are zero multipart
+            # uploads in progress.
+            self.assertEqual(len(res["Uploads"]), 0)
+
+        objname = "aaaa"
+        res = self.s3c.create_multipart_upload(Bucket=bucket_name, Key=objname)
+        self.assertIn("UploadId", res)
+        self.assertGreater(len(res["UploadId"]), 0)
+        upload_id = res["UploadId"]
+
+        res = self.s3c.list_multipart_uploads(Bucket=bucket_name)
+        self.assertTrue("IsTruncated" in res and not res["IsTruncated"])
+        self.assertTrue("Uploads" in res and len(res["Uploads"]) == 1)
+        entry = res["Uploads"][0]
+        self.assertTrue("UploadId" in entry and entry["UploadId"] == upload_id)
+
+        # doesn't return relevant information
+        self.s3c.abort_multipart_upload(
+            Bucket=bucket_name, Key=objname, UploadId=upload_id
+        )
+
+        res = self.s3c.list_multipart_uploads(Bucket=bucket_name)
+        self.assertTrue("IsTruncated" in res and not res["IsTruncated"])
+        if "Uploads" in res:
+            self.assertEqual(len(res["Uploads"]), 0)
+
+        upload = self.s3.MultipartUpload(bucket_name, objname, upload_id)
+        part = upload.Part(1)  # type: ignore
+        has_error = False
+        try:
+            # bytes, not f"foobarbaz": the original used an f-string with no
+            # placeholders and a str where every other Part upload sends bytes
+            part.upload(Body=b"foobarbaz")
+        except self.s3.meta.client.exceptions.NoSuchUpload:
+            has_error = True
+        self.assertTrue(has_error)
+
+        # XXX: this is likely a bug, we should not have a multipart upload if
+        # it was not created via 'create_multipart_upload()', because it was
+        # not inited in the backend.
+        res = self.s3c.list_multipart_uploads(Bucket=bucket_name)
+        self.assertTrue("IsTruncated" in res and not res["IsTruncated"])
+        self.assertTrue("Uploads" in res and len(res["Uploads"]) == 1)
+
+        res = self.s3c.create_multipart_upload(Bucket=bucket_name, Key=objname)
+        self.assertIn("UploadId", res)
+        self.assertGreater(len(res["UploadId"]), 0)
+        self.assertNotEqual(upload_id, res["UploadId"])
+        upload_id2 = res["UploadId"]
+
+        res = self.s3c.list_multipart_uploads(Bucket=bucket_name)
+        self.assertTrue("IsTruncated" in res and not res["IsTruncated"])
+        self.assertTrue("Uploads" in res and len(res["Uploads"]) == 2)
+
+        # doesn't return relevant information
+        self.s3c.abort_multipart_upload(
+            Bucket=bucket_name, Key=objname, UploadId=upload_id2
+        )
+
+        res = self.s3c.list_multipart_uploads(Bucket=bucket_name)
+        self.assertTrue("IsTruncated" in res and not res["IsTruncated"])
+        self.assertTrue("Uploads" in res and len(res["Uploads"]) == 1)
+
+        # ensure bucket is removed; drop it from the teardown list so
+        # tearDown does not try to delete it a second time
+        self.s3c.delete_bucket(Bucket=bucket_name)
+        self.buckets.remove(bucket_name)
+
+        # ensure there are no more multiparts for this bucket, which should
+        # be the case since we expect the bucket to not exist.
+        has_error = False
+        try:
+            res = self.s3c.list_multipart_uploads(Bucket=bucket_name)
+            # we should never reach this point
+            print(f"oops! res = {res}")
+        except self.s3.meta.client.exceptions.NoSuchBucket:
+            has_error = True
+        self.assertTrue(has_error)
diff --git a/tools/tests/test-s3gw-users-rest-api.py b/tools/tests/test-s3gw-users-rest-api.py
new file mode 100755
index 00000000..7d94739e
--- /dev/null
+++ b/tools/tests/test-s3gw-users-rest-api.py
@@ -0,0 +1,80 @@
+import json
+import sys
+import unittest
+
+import requests
+from awsauth import S3Auth
+
+
+class UserRestAPITests(unittest.TestCase):
+    """Smoke test for the admin user REST API exposed by s3gw."""
+
+    ACCESS_KEY = 'test'
+    SECRET_KEY = 'test'
+    URL = 'http://127.0.0.1:7480'
+
+    def setUp(self):
+        self.auth = S3Auth(UserRestAPITests.ACCESS_KEY,
+                           UserRestAPITests.SECRET_KEY, self.URL)
+
+    def test_smoke_test(self):
+        # list users using the metadata endpoint.
+        response = requests.get(self.URL + '/admin/metadata/user',
+                                auth=self.auth)
+        self.assertEqual(200, response.status_code)
+        json_response = json.loads(response.content)
+        self.assertIsInstance(json_response, list)
+        self.assertIn("testid", json_response)
+
+        # list users (we should get only testid (user created at startup))
+        response = requests.get(self.URL + '/admin/user?list', auth=self.auth)
+        self.assertEqual(200, response.status_code)
+        json_response = json.loads(response.content)
+        self.assertIsInstance(json_response, dict)
+        self.assertIsInstance(json_response["keys"], list)
+        self.assertEqual(1, len(json_response["keys"]))
+        self.assertIn("testid", json_response["keys"])
+
+        # add a user
+        response = requests.put(
+            self.URL + '/admin/user?uid=user2&display-name=TEST+NAME',
+            auth=self.auth)
+        self.assertEqual(200, response.status_code)
+        json_response = json.loads(response.content)
+        self.assertEqual("user2", json_response["user_id"])
+        self.assertEqual("TEST NAME", json_response["display_name"])
+        keys = json_response["keys"]
+        self.assertEqual(1, len(keys))
+        self.assertEqual("user2", keys[0]["user"])
+        self.assertNotEqual("", keys[0]["access_key"])
+        self.assertNotEqual("", keys[0]["secret_key"])
+
+        # get info new user
+        response = requests.get(self.URL + '/admin/user?uid=user2',
+                                auth=self.auth)
+        self.assertEqual(200, response.status_code)
+        json_response = json.loads(response.content)
+        self.assertEqual("user2", json_response["user_id"])
+        self.assertEqual("TEST NAME", json_response["display_name"])
+        keys = json_response["keys"]
+        self.assertEqual(1, len(keys))
+        self.assertEqual("user2", keys[0]["user"])
+        self.assertNotEqual("", keys[0]["access_key"])
+        self.assertNotEqual("", keys[0]["secret_key"])
+
+        # list users (we should get testid and user2)
+        response = requests.get(self.URL + '/admin/user?list', auth=self.auth)
+        self.assertEqual(200, response.status_code)
+        json_response = json.loads(response.content)
+        self.assertEqual(2, len(json_response["keys"]))
+        self.assertEqual("testid", json_response["keys"][0])
+        self.assertEqual("user2", json_response["keys"][1])
+
+        # delete user2
+        response = requests.delete(self.URL + '/admin/user?uid=user2',
+                                   auth=self.auth)
+        self.assertEqual(200, response.status_code)
+
+        # list users (we should get only testid)
+        response = requests.get(self.URL + '/admin/user?list', auth=self.auth)
+        self.assertEqual(200, response.status_code)
+        json_response = json.loads(response.content)
+        self.assertEqual(1, len(json_response["keys"]))
+        self.assertEqual("testid", json_response["keys"][0])
+
+
+if __name__ == "__main__":
+    if len(sys.argv) == 2:
+        address_port = sys.argv.pop()
+        UserRestAPITests.URL = 'http://{0}'.format(address_port)
+        unittest.main()
+    else:
+        print("usage: {0} ADDRESS:PORT".format(sys.argv[0]))
diff --git a/tools/tests/test-s3gw-versioning-smoke.py b/tools/tests/test-s3gw-versioning-smoke.py
new file mode 100755
index 00000000..a981fc05
--- /dev/null
+++ b/tools/tests/test-s3gw-versioning-smoke.py
@@ -0,0 +1,283 @@
+# Copyright 2022 SUSE, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import unittest
+import sys
+import boto3, botocore
+import random
+import string
+import tempfile
+import os
+import filecmp
+
+
+class VersioningSmokeTests(unittest.TestCase):
+    """Smoke tests for s3gw bucket versioning (enable, list, delete marker)."""
+
+    ACCESS_KEY = 'test'
+    SECRET_KEY = 'test'
+    URL = 'http://127.0.0.1:7480'
+    BUCKET_NAME_LENGTH = 8
+    OBJECT_NAME_LENGTH = 10
+
+    def setUp(self):
+        # use the class constants (instead of hard-coded "test") so that
+        # overriding ACCESS_KEY/SECRET_KEY actually takes effect
+        self.s3_client = boto3.client(
+            's3',
+            endpoint_url=VersioningSmokeTests.URL,
+            aws_access_key_id=VersioningSmokeTests.ACCESS_KEY,
+            aws_secret_access_key=VersioningSmokeTests.SECRET_KEY)
+
+        self.s3 = boto3.resource(
+            's3',
+            endpoint_url=VersioningSmokeTests.URL,
+            aws_access_key_id=VersioningSmokeTests.ACCESS_KEY,
+            aws_secret_access_key=VersioningSmokeTests.SECRET_KEY)
+
+        self.test_dir = tempfile.TemporaryDirectory()
+
+    def tearDown(self):
+        self.s3_client.close()
+        self.test_dir.cleanup()
+
+    def get_random_name(self, length) -> str:
+        """Return a random lowercase string of the given length."""
+        letters = string.ascii_lowercase
+        return ''.join(random.choice(letters) for _ in range(length))
+
+    def get_random_bucket_name(self) -> str:
+        return self.get_random_name(VersioningSmokeTests.BUCKET_NAME_LENGTH)
+
+    def get_random_object_name(self) -> str:
+        return self.get_random_name(VersioningSmokeTests.OBJECT_NAME_LENGTH)
+
+    def generate_random_file(self, path, size=4):
+        # size passed is in mb
+        size = size * 1024 * 1024
+        with open(path, 'wb') as fout:
+            fout.write(os.urandom(size))
+
+    def assert_bucket_exists(self, bucket_name):
+        response = self.s3_client.list_buckets()
+        found = False
+        for bucket in response['Buckets']:
+            if bucket["Name"] == bucket_name:
+                found = True
+                break
+        self.assertTrue(found)
+
+    def test_create_bucket_enable_versioning(self):
+        bucket_name = self.get_random_bucket_name()
+        self.s3_client.create_bucket(Bucket=bucket_name)
+        self.assert_bucket_exists(bucket_name)
+        # ensure versioning is disabled (default)
+        response = self.s3_client.get_bucket_versioning(Bucket=bucket_name)
+        self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
+        self.assertFalse('Status' in response)
+        self.s3_client.put_bucket_versioning(
+            Bucket=bucket_name,
+            VersioningConfiguration={
+                'MFADelete': 'Disabled',
+                'Status': 'Enabled'})
+        response = self.s3_client.get_bucket_versioning(Bucket=bucket_name)
+        self.assertTrue('Status' in response)
+        self.assertEqual('Enabled', response['Status'])
+
+    def test_put_objects_versioning_enabled(self):
+        bucket_name = self.get_random_bucket_name()
+        self.s3_client.create_bucket(Bucket=bucket_name)
+        self.assert_bucket_exists(bucket_name)
+        self.s3_client.put_bucket_versioning(
+            Bucket=bucket_name,
+            VersioningConfiguration={
+                'MFADelete': 'Disabled',
+                'Status': 'Enabled'})
+        object_name = self.get_random_object_name()
+        test_file_path_1 = os.path.join(self.test_dir.name, 'test_file_1.bin')
+        self.generate_random_file(test_file_path_1)
+        # upload the file
+        self.s3_client.upload_file(test_file_path_1, bucket_name, object_name)
+
+        # get the file and compare with the original
+        test_file_path_1_check = os.path.join(self.test_dir.name,
+                                              'test_file_1_check.bin')
+        self.s3_client.download_file(bucket_name, object_name,
+                                     test_file_path_1_check)
+        self.assertTrue(filecmp.cmp(test_file_path_1, test_file_path_1_check,
+                                    shallow=False))
+
+        # now upload again with different content
+        test_file_path_2 = os.path.join(self.test_dir.name, 'test_file_2.bin')
+        self.generate_random_file(test_file_path_2)
+        self.s3_client.upload_file(test_file_path_2, bucket_name, object_name)
+        test_file_path_2_check = os.path.join(self.test_dir.name,
+                                              'test_file_2_check.bin')
+        self.s3_client.download_file(bucket_name, object_name,
+                                     test_file_path_2_check)
+        self.assertTrue(filecmp.cmp(test_file_path_2, test_file_path_2_check,
+                                    shallow=False))
+
+        # get etag of object
+        response = self.s3_client.head_object(Bucket=bucket_name,
+                                              Key=object_name)
+        self.assertTrue('ETag' in response)
+        etag = response['ETag']
+
+        # check that we have 2 versions
+        # only 1 version should be flagged as the latest
+        response = self.s3_client.list_object_versions(Bucket=bucket_name,
+                                                       Prefix=object_name)
+        self.assertTrue('Versions' in response)
+        self.assertEqual(2, len(response['Versions']))
+        num_latest = 0
+        last_version_id = ''
+        previous_version_id = ''
+        for version in response['Versions']:
+            # both test files have the same size (4 MiB)
+            self.assertEqual(os.path.getsize(test_file_path_1),
+                             version['Size'])
+            self.assertEqual(object_name, version['Key'])
+            self.assertEqual('STANDARD', version['StorageClass'])
+            self.assertEqual({'DisplayName': 'M. Tester', 'ID': 'testid'},
+                             version['Owner'])
+            self.assertNotEqual('null', version['VersionId'])
+            if version['IsLatest']:
+                num_latest += 1
+                last_version_id = version['VersionId']
+                self.assertEqual(etag, version['ETag'])
+            else:
+                previous_version_id = version['VersionId']
+
+        # check that all etags differ
+        for version in response['Versions']:
+            etag = version['ETag']
+            version_id = version['VersionId']
+            for version2 in response['Versions']:
+                version_id2 = version2['VersionId']
+                if version_id2 != version_id:
+                    etag2 = version2['ETag']
+                    self.assertNotEqual(etag, etag2)
+
+        self.assertEqual(1, num_latest)
+        self.assertNotEqual('', last_version_id)
+        self.assertNotEqual('', previous_version_id)
+
+        # download by version_id
+        # download the last version
+        check_version_file = os.path.join(self.test_dir.name,
+                                          'check_version.bin')
+        bucket = self.s3.Bucket(bucket_name)
+        bucket.download_file(
+            object_name,
+            check_version_file,
+            ExtraArgs={"VersionId": last_version_id})
+        self.assertTrue(filecmp.cmp(test_file_path_2, check_version_file,
+                                    shallow=False))
+
+        # download the previous version
+        check_version_file_2 = os.path.join(self.test_dir.name,
+                                            'check_version2.bin')
+        bucket.download_file(
+            object_name,
+            check_version_file_2,
+            ExtraArgs={"VersionId": previous_version_id})
+        self.assertTrue(filecmp.cmp(test_file_path_1, check_version_file_2,
+                                    shallow=False))
+
+        # delete the object
+        self.s3_client.delete_object(Bucket=bucket_name, Key=object_name)
+
+        # the 2 versions survive the delete, but the delete marker is now
+        # the latest entry, so neither version carries the IsLatest flag
+        response = self.s3_client.list_object_versions(Bucket=bucket_name,
+                                                       Prefix=object_name)
+        self.assertTrue('Versions' in response)
+        self.assertEqual(2, len(response['Versions']))
+
+        for version in response['Versions']:
+            self.assertEqual(os.path.getsize(test_file_path_1),
+                             version['Size'])
+            self.assertEqual(object_name, version['Key'])
+            self.assertEqual('STANDARD', version['StorageClass'])
+            self.assertEqual({'DisplayName': 'M. Tester', 'ID': 'testid'},
+                             version['Owner'])
+            self.assertNotEqual('null', version['VersionId'])
+            self.assertFalse(version['IsLatest'])
+
+        self.assertEqual(1, len(response['DeleteMarkers']))
+
+        # try to download the file, a 404 error should be returned
+        check_deleted_file = os.path.join(self.test_dir.name,
+                                          'check_deleted.bin')
+        with self.assertRaises(botocore.exceptions.ClientError) as context:
+            self.s3_client.download_file(bucket_name, object_name,
+                                         check_deleted_file)
+        self.assertTrue('404' in str(context.exception))
+
+        # the last pre-delete version should still be reachable through its
+        # VersionId (the original comment said "previous version", but this
+        # fetches last_version_id, i.e. the second upload)
+        check_version_file_2 = os.path.join(self.test_dir.name,
+                                            'check_version2.bin')
+        bucket.download_file(
+            object_name,
+            check_version_file_2,
+            ExtraArgs={"VersionId": last_version_id})
+        self.assertTrue(filecmp.cmp(test_file_path_2, check_version_file_2,
+                                    shallow=False))
+
+    def test_put_objects_no_versioning(self):
+        bucket_name = self.get_random_bucket_name()
+        self.s3_client.create_bucket(Bucket=bucket_name)
+        self.assert_bucket_exists(bucket_name)
+        object_name = self.get_random_object_name()
+        test_file_path_1 = os.path.join(self.test_dir.name, 'test_file_1.bin')
+        self.generate_random_file(test_file_path_1)
+        # upload the file
+        self.s3_client.upload_file(test_file_path_1, bucket_name, object_name)
+
+        # get the file and compare with the original
+        test_file_path_1_check = os.path.join(self.test_dir.name,
+                                              'test_file_1_check.bin')
+        self.s3_client.download_file(bucket_name, object_name,
+                                     test_file_path_1_check)
+        self.assertTrue(filecmp.cmp(test_file_path_1, test_file_path_1_check,
+                                    shallow=False))
+
+        # now upload again with different content
+        test_file_path_2 = os.path.join(self.test_dir.name, 'test_file_2.bin')
+        self.generate_random_file(test_file_path_2)
+        self.s3_client.upload_file(test_file_path_2, bucket_name, object_name)
+        test_file_path_2_check = os.path.join(self.test_dir.name,
+                                              'test_file_2_check.bin')
+        self.s3_client.download_file(bucket_name, object_name,
+                                     test_file_path_2_check)
+        self.assertTrue(filecmp.cmp(test_file_path_2, test_file_path_2_check,
+                                    shallow=False))
+
+        # get etag of object
+        response = self.s3_client.head_object(Bucket=bucket_name,
+                                              Key=object_name)
+        self.assertTrue('ETag' in response)
+        etag = response['ETag']
+
+        # without versioning there is exactly 1 version, it is the latest,
+        # and its VersionId is the literal 'null'
+        response = self.s3_client.list_object_versions(Bucket=bucket_name,
+                                                       Prefix=object_name)
+        self.assertTrue('Versions' in response)
+        self.assertEqual(1, len(response['Versions']))
+        for version in response['Versions']:
+            self.assertEqual(os.path.getsize(test_file_path_1),
+                             version['Size'])
+            self.assertEqual(object_name, version['Key'])
+            self.assertEqual('STANDARD', version['StorageClass'])
+            self.assertEqual({'DisplayName': 'M. Tester', 'ID': 'testid'},
+                             version['Owner'])
+            self.assertEqual(etag, version['ETag'])
+            self.assertEqual('null', version['VersionId'])
+            self.assertTrue(version['IsLatest'])
+
+        # delete the object
+        self.s3_client.delete_object(Bucket=bucket_name, Key=object_name)
+
+        # we should still have 0 versions and 1 delete marker
+        response = self.s3_client.list_object_versions(Bucket=bucket_name,
+                                                       Prefix=object_name)
+        self.assertTrue('DeleteMarkers' in response)
+        self.assertFalse('Versions' in response)
+        self.assertEqual(1, len(response['DeleteMarkers']))
+
+        for version in response['DeleteMarkers']:
+            self.assertEqual(object_name, version['Key'])
+            self.assertEqual({'DisplayName': 'M. Tester', 'ID': 'testid'},
+                             version['Owner'])
+            self.assertEqual('null', version['VersionId'])
+            self.assertTrue(version['IsLatest'])
+
+        # try to download the file, a 404 error should be returned
+        check_deleted_file = os.path.join(self.test_dir.name,
+                                          'check_deleted.bin')
+        with self.assertRaises(botocore.exceptions.ClientError) as context:
+            self.s3_client.download_file(bucket_name, object_name,
+                                         check_deleted_file)
+        self.assertTrue('404' in str(context.exception))
+
+
+if __name__ == "__main__":
+    if len(sys.argv) == 2:
+        address_port = sys.argv.pop()
+        VersioningSmokeTests.URL = 'http://{0}'.format(address_port)
+        unittest.main()
+    else:
+        print("usage: {0} ADDRESS:PORT".format(sys.argv[0]))