From a6b15015fa344abb1a3c08271f5a2b2827c4d665 Mon Sep 17 00:00:00 2001 From: Egor Tarasov Date: Mon, 30 Sep 2024 13:44:15 +0200 Subject: [PATCH] Enable 3 node deployment (#249) --- .github/workflows/check-pr.yml | 95 ------------ .github/workflows/run-tests.yml | 145 +++++------------- .github/workflows/upload-artifacts.yml | 3 - .golangci.yml | 33 ---- Makefile | 4 +- docs/tests.md | 4 +- e2e/kind-cluster-config.yaml | 19 --- .../data/storage-block-4-2-config-tls.yaml | 115 -------------- ... storage-mirror-3-dc-config-nodeSets.yaml} | 81 ++++++---- ...orage-mirror-3-dc-config-staticCreds.yaml} | 81 ++++++---- .../data/storage-mirror-3-dc-config-tls.yaml | 130 ++++++++++++++++ ...s.yaml => storage-mirror-3-dc-config.yaml} | 81 ++++++---- ...aml => storage-mirror-3-dc-dynconfig.yaml} | 67 ++++---- e2e/tests/smoke_test.go | 45 +++--- e2e/tests/test-objects/objects.go | 8 +- .../controllers/database/controller_test.go | 2 +- .../databasenodeset/controller_test.go | 8 +- .../controllers/monitoring/monitoring_test.go | 2 +- .../remotedatabasenodeset/controller_test.go | 18 +-- .../remotestoragenodeset/controller_test.go | 12 +- .../controllers/storage/controller_test.go | 2 +- .../storagenodeset/controller_test.go | 15 +- samples/minikube/database.yaml | 2 +- samples/minikube/storage.yaml | 2 +- samples/storage-mirror-3dc.yaml | 2 +- 25 files changed, 420 insertions(+), 556 deletions(-) delete mode 100644 .github/workflows/check-pr.yml delete mode 100644 e2e/tests/data/storage-block-4-2-config-tls.yaml rename e2e/tests/data/{storage-block-4-2-config.yaml => storage-mirror-3-dc-config-nodeSets.yaml} (54%) rename e2e/tests/data/{storage-block-4-2-config-staticCreds.yaml => storage-mirror-3-dc-config-staticCreds.yaml} (55%) create mode 100644 e2e/tests/data/storage-mirror-3-dc-config-tls.yaml rename e2e/tests/data/{storage-block-4-2-config-nodeSets.yaml => storage-mirror-3-dc-config.yaml} (54%) rename e2e/tests/data/{storage-block-4-2-dynconfig.yaml => storage-mirror-3-dc-dynconfig.yaml} (60%) diff --git a/.github/workflows/check-pr.yml b/.github/workflows/check-pr.yml deleted file mode 100644 index 7dfb9dec..00000000 --- a/.github/workflows/check-pr.yml +++ /dev/null @@ -1,95 +0,0 @@ -name: check-pr -on: - pull_request_target: - branches: - - 'master' - paths-ignore: - - 'docs/**' - types: - - 'opened' - - 'synchronize' - - 'reopened' - - 'labeled' -jobs: - check-running-allowed: - runs-on: ubuntu-latest - outputs: - result: ${{ steps.check-ownership-membership.outputs.result }} - steps: - - id: check-ownership-membership - uses: actions/github-script@v6 - with: - github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - script: | - // This is used primarily in forks. Repository owner - // should be allowed to run anything. 
- const userLogin = context.payload.pull_request.user.login; - - // How to interpret membership status code: - // https://docs.github.com/en/rest/orgs/members?apiVersion=2022-11-28#check-organization-membership-for-a-user - const isOrgMember = async function () { - try { - const response = await github.rest.orgs.checkMembershipForUser({ - org: context.payload.organization.login, - username: userLogin, - }); - return response.status == 204; - } catch (error) { - if (error.status && error.status == 404) { - return false; - } - throw error; - } - } - - if (context.payload.repository.owner.login == userLogin) { - return true; - } - - if (await isOrgMember()) { - return true; - } - - const labels = context.payload.pull_request.labels; - const okToTestLabel = labels.find( - label => label.name == 'ok-to-test' - ); - return okToTestLabel !== undefined; - - name: comment-if-waiting-on-ok - if: steps.check-ownership-membership.outputs.result == 'false' && - github.event.action == 'opened' - uses: actions/github-script@v6 - with: - script: | - github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: 'Hi! Thank you for contributing!\nThe tests on this PR will run after a maintainer adds an `ok-to-test` label to this PR manually. Thank you for your patience!' - }); - - name: cleanup-test-label - uses: actions/github-script@v6 - with: - script: | - const { owner, repo } = context.repo; - const prNumber = context.payload.pull_request.number; - const labelToRemove = 'ok-to-test'; - try { - const result = await github.rest.issues.removeLabel({ - owner, - repo, - issue_number: prNumber, - name: labelToRemove - }); - } catch(e) { - // ignore the 404 error that arises - // when the label did not exist for the - // organization member - console.log(e); - } - run-tests: - needs: - - check-running-allowed - if: needs.check-running-allowed.outputs.result == 'true' - uses: ./.github/workflows/run-tests.yml - secrets: inherit diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index dd847b36..683976dd 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -1,61 +1,18 @@ -# Implicit requirements -# runner must have `docker` and `curl` installed (true on github-runners) - name: run-tests on: - workflow_call: - workflow_dispatch: + - pull_request + - workflow_dispatch jobs: - start-runner: - runs-on: ubuntu-latest - outputs: - runner-label: ${{ steps.start-yc-runner.outputs.label }} - instance-id: ${{ steps.start-yc-runner.outputs.instance-id }} - steps: - - name: start-yc-runner - id: start-yc-runner - uses: yc-actions/yc-github-runner@v1 - with: - mode: start - yc-sa-json-credentials: ${{ secrets.CI_RUNNER_CREATOR_KEY }} - github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - folder-id: b1gmgbhccra2qca8v5g6 - image-id: fd80o2eikcn22b229tsa - cores: 16 - disk-type: network-ssd-nonreplicated - disk-size: 465GB - memory: 32GB - core-fraction: 100 - subnet-id: e9bu12i8ocv6q8kl83ru - user: yc-admin - ssh-public-key: ${{ secrets.CI_RUNNER_DEBUG_SHH_PUBLIC_KEY }} - smart-checkout: - needs: - - start-runner - runs-on: ${{ needs.start-runner.outputs.runner-label }} - steps: - - name: checkout-when-fork-source - uses: actions/checkout@v3 - if: github.event.pull_request.head.sha != '' - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: checkout-when-this-repo-source - uses: actions/checkout@v3 - if: github.event.pull_request.head.sha == '' lint: concurrency: group: 
lint-golangci-${{ github.head_ref || github.ref_name }} cancel-in-progress: true - needs: - - start-runner - - smart-checkout - runs-on: ${{ needs.start-runner.outputs.runner-label }} + runs-on: ubuntu-latest steps: - - name: set-env-vars - run: | - echo "HOME=/actions-runner" >> $GITHUB_ENV + - name: checkout + uses: actions/checkout@v3 - name: setup-go uses: actions/setup-go@v3 with: @@ -63,47 +20,35 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v2 with: - version: v1.52.2 + version: v1.58.1 code-format-check: concurrency: group: lint-autoformat-${{ github.head_ref || github.ref_name }} cancel-in-progress: true - needs: - - start-runner - - smart-checkout - runs-on: ${{ needs.start-runner.outputs.runner-label }} + runs-on: ubuntu-latest steps: - - name: set-env-vars - run: | - echo "HOME=/actions-runner" >> $GITHUB_ENV + - name: checkout + uses: actions/checkout@v3 - name: setup-go uses: actions/setup-go@v3 with: go-version: '1.20' - - name: Install utilities + - name: install-utilities run: | go install mvdan.cc/gofumpt@v0.5.0 go install github.com/rinchsan/gosimports/cmd/gosimports@v0.3.8 - name: format all files with auto-formatter run: bash ./.github/scripts/format-all-go-code.sh "$PWD" - - name: Check repository diff + - name: check-repository-diff run: bash ./.github/scripts/check-work-copy-equals-to-committed.sh "auto-format broken" run-unit-tests: concurrency: group: run-unit-tests-${{ github.head_ref || github.ref_name }} cancel-in-progress: true - needs: - - start-runner - - smart-checkout - - lint - - code-format-check - runs-on: ${{ needs.start-runner.outputs.runner-label }} - outputs: - result: ${{ steps.run-unit-tests.outputs.result }} + runs-on: ubuntu-latest steps: - - name: set-env-vars - run: | - echo "HOME=/actions-runner" >> $GITHUB_ENV + - name: checkout + uses: actions/checkout@v3 - name: setup-go uses: actions/setup-go@v3 with: @@ -139,18 +84,19 @@ jobs: group: run-e2e-tests-${{ github.head_ref || github.ref_name }} cancel-in-progress: true needs: - - start-runner - - smart-checkout - - lint - - code-format-check - run-unit-tests - runs-on: ${{ needs.start-runner.outputs.runner-label }} - outputs: - result: ${{ steps.run-e2e-tests.outputs.result }} + runs-on: ubuntu-latest steps: - - name: set-env-vars - run: | - echo "HOME=/actions-runner" >> $GITHUB_ENV + - name: maximize-build-space + uses: AdityaGarg8/remove-unwanted-software@v4.1 + with: + remove-android: 'true' + remove-haskell: 'true' + remove-codeql: 'true' + remove-dotnet: 'true' + remove-swapfile: 'true' + - name: checkout + uses: actions/checkout@v3 - name: setup-go uses: actions/setup-go@v3 with: @@ -175,10 +121,6 @@ jobs: echo "$(pwd)" >> $GITHUB_PATH echo "$HOME/ydb/bin" >> $GITHUB_PATH echo "$HOME/go/bin" >> $GITHUB_PATH - - name: configure-system - run: | - sudo sysctl fs.inotify.max_user_instances=1280 - sudo sysctl fs.inotify.max_user_watches=655360 - name: check-dependencies run: | gcc --version @@ -192,7 +134,12 @@ jobs: kind create cluster \ --image=kindest/node:v1.25.3@sha256:cd248d1438192f7814fbca8fede13cfe5b9918746dfa12583976158a834fd5c5 \ --config=./e2e/kind-cluster-config.yaml + kubectl wait --timeout=5m --for=condition=ready node -l worker=true + + kubectl label --overwrite node kind-worker topology.kubernetes.io/zone=fakeZone1 + kubectl label --overwrite node kind-worker2 topology.kubernetes.io/zone=fakeZone2 + kubectl label --overwrite node kind-worker3 topology.kubernetes.io/zone=fakeZone3 - name: build-operator-image uses: docker/build-push-action@v3 with: @@ 
-203,7 +150,7 @@ jobs: tags: kind/ydb-operator:current - name: load-and-deploy-operator run: | - kind load docker-image kind/ydb-operator:current + kind load docker-image kind/ydb-operator:current --nodes kind-worker,kind-worker2,kind-worker3 - name: pull-and-load-kube-webhook-certgen-image uses: nick-fields/retry@v3 with: @@ -212,12 +159,13 @@ jobs: max_attempts: 3 command: | docker pull k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0 - kind load docker-image k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0 + kind load docker-image k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0 --nodes kind-worker,kind-worker2,kind-worker3 - name: pull-and-load-ydb-image run: | - # TODO would be cool to parse YDB image from manifests to avoid duplicating information - docker pull cr.yandex/crptqonuodf51kdj7a7d/ydb:23.3.17 - kind load docker-image cr.yandex/crptqonuodf51kdj7a7d/ydb:23.3.17 + YDB_IMAGE=$(grep "anchor_for_fetching_image_from_workflow" ./e2e/tests/**/*.go | grep -o -E '"cr\.yandex.*"') + YDB_IMAGE=${YDB_IMAGE:1:-1} # strip "" + docker pull $YDB_IMAGE + kind load docker-image $YDB_IMAGE --nodes kind-worker,kind-worker2,kind-worker3 - name: setup-gotestsum run: | go install gotest.tools/gotestsum@v1.12.0 @@ -225,7 +173,7 @@ jobs: id: run-e2e-tests run: | gotestsum --format pkgname --jsonfile log.json -- -v -timeout 3600s -p 1 ./e2e/... -ginkgo.vv - - name: convert-to-human-readable + - name: convert-json-log-to-human-readable run: jq -r '.Output| gsub("[\\n]"; "")' log.json 2>/dev/null 1>log.txt || true - name: artifact-upload-step uses: actions/upload-artifact@v4 @@ -240,21 +188,4 @@ jobs: - name: teardown-k8s-cluster run: | kind delete cluster - stop-runner: - needs: - - start-runner - - lint - - code-format-check - - run-unit-tests - - run-e2e-tests - runs-on: ubuntu-latest - if: always() - steps: - - name: stop-yc-runner - uses: yc-actions/yc-github-runner@v1 - with: - mode: stop - yc-sa-json-credentials: ${{ secrets.CI_RUNNER_CREATOR_KEY }} - github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - label: ${{ needs.start-runner.outputs.runner-label }} - instance-id: ${{ needs.start-runner.outputs.instance-id }} + diff --git a/.github/workflows/upload-artifacts.yml b/.github/workflows/upload-artifacts.yml index e636e988..6979673c 100644 --- a/.github/workflows/upload-artifacts.yml +++ b/.github/workflows/upload-artifacts.yml @@ -1,6 +1,3 @@ -# Implicit requirements -# runner must have `docker` and `curl` installed (true on github-runners) - name: upload-artifacts on: push: diff --git a/.golangci.yml b/.golangci.yml index c605df0f..ba5d9180 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -129,67 +129,34 @@ linters-settings: linters: disable-all: true enable: -# - cyclop - - deadcode - - depguard - dogsled -# - dupl - errcheck - errorlint -# - exhaustive -# - exhaustivestruct -# - forbidigo -# - funlen -# - gci -# - gocognit - goconst - gocritic - gocyclo -# - godot -# - godox # tmp disable due to FIXME & XXX - gofmt # On why gofmt when goimports is enabled - https://github.com/golang/go/issues/21476 - gofumpt - goheader - goimports -# - gomnd -# - gomoddirectives -# - gomodguard - gosec - gosimple - govet - - depguard -# - ifshort -# - ireturn -# - lll # disable due to kubebuilder comments - makezero - misspell - ineffassign - misspell - nakedret - nestif -# - nilnil -# - nlreturn -# - nolintlint -# - prealloc - predeclared - rowserrcheck - - revive - staticcheck - stylecheck - - structcheck -# - tagliatelle -# - testpackage -# - thelper -# - tenv - typecheck - unconvert - unparam 
- unused -# - varnamelen - - varcheck - whitespace -# - wrapcheck -# - wsl issues: # List of regexps of issue texts to exclude, empty list by default. diff --git a/Makefile b/Makefile index 82c10a1e..1b34ec1a 100644 --- a/Makefile +++ b/Makefile @@ -70,8 +70,8 @@ kind-init: kind create cluster --config e2e/kind-cluster-config.yaml --name kind-ydb-operator; \ docker pull k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0; \ kind load docker-image k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0 --name kind-ydb-operator; \ - docker pull cr.yandex/crptqonuodf51kdj7a7d/ydb:23.3.17; \ - kind load docker-image cr.yandex/crptqonuodf51kdj7a7d/ydb:23.3.17 --name kind-ydb-operator + docker pull cr.yandex/crptqonuodf51kdj7a7d/ydb:24.2.7; \ + kind load docker-image cr.yandex/crptqonuodf51kdj7a7d/ydb:24.2.7 --name kind-ydb-operator kind-load: docker tag cr.yandex/yc/ydb-operator:latest kind/ydb-operator:current diff --git a/docs/tests.md b/docs/tests.md index 6996f97f..028cdfc8 100644 --- a/docs/tests.md +++ b/docs/tests.md @@ -88,14 +88,14 @@ kind create cluster \ kubectl config use-context kind-local-kind # Within tests, the following two images are used: -# cr.yandex/crptqonuodf51kdj7a7d/ydb:23.3.17 +# cr.yandex/crptqonuodf51kdj7a7d/ydb: # kind/ydb-operator:current # You have to download the ydb image and build the operator image yourself. Then, explicitly # upload them into the kind cluster. Refer to `./github/e2e.yaml` github workflow which essentially # does the same thing. kind --name local-kind load docker-image kind/ydb-operator:current -kind --name local-kind load docker-image ydb:23.3.17 +kind --name local-kind load docker-image ydb: # Run all tests with disabled concurrency, because there is only one cluster to run tests against go test -p 1 -v ./... 
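With the self-hosted YC runner gone, the e2e job now builds its whole topology inside kind: one control-plane plus exactly three workers, which the workflow labels with fake availability zones before loading the operator and YDB images onto them. For reproducing that locally, a minimal sketch of the cluster config the job assumes is given below (the node names kind-worker, kind-worker2 and kind-worker3 are the defaults kind assigns in a cluster named "kind"; the zone labels are not part of the kind config and are applied afterwards with kubectl, as in the workflow step above, presumably so each of the three storage pods can be scheduled into its own fake zone). The actual change to e2e/kind-cluster-config.yaml follows just below.

kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
  - role: worker        # becomes node kind-worker,  later labeled topology.kubernetes.io/zone=fakeZone1
    labels:
      worker: true
  - role: worker        # becomes node kind-worker2, later labeled fakeZone2
    labels:
      worker: true
  - role: worker        # becomes node kind-worker3, later labeled fakeZone3
    labels:
      worker: true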
diff --git a/e2e/kind-cluster-config.yaml b/e2e/kind-cluster-config.yaml index be227cf7..0a1f7068 100644 --- a/e2e/kind-cluster-config.yaml +++ b/e2e/kind-cluster-config.yaml @@ -1,9 +1,5 @@ kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 -containerdConfigPatches: -- |- - [plugins."io.containerd.grpc.v1.cri".containerd] - snapshotter = "native" nodes: - role: control-plane - role: worker @@ -15,18 +11,3 @@ nodes: - role: worker labels: worker: true -- role: worker - labels: - worker: true -- role: worker - labels: - worker: true -- role: worker - labels: - worker: true -- role: worker - labels: - worker: true -- role: worker - labels: - worker: true diff --git a/e2e/tests/data/storage-block-4-2-config-tls.yaml b/e2e/tests/data/storage-block-4-2-config-tls.yaml deleted file mode 100644 index 2fcd115e..00000000 --- a/e2e/tests/data/storage-block-4-2-config-tls.yaml +++ /dev/null @@ -1,115 +0,0 @@ -static_erasure: block-4-2 -host_configs: - - drive: - - path: SectorMap:1:1 - type: SSD - host_config_id: 1 -domains_config: - domain: - - name: Root - storage_pool_types: - - kind: ssd - pool_config: - box_id: 1 - erasure_species: block-4-2 - kind: ssd - pdisk_filter: - - property: - - type: SSD - vdisk_kind: Default - state_storage: - - ring: - node: [1, 2, 3, 4, 5, 6, 7, 8] - nto_select: 5 - ssid: 1 -table_service_config: - sql_version: 1 -actor_system_config: - executor: - - name: System - threads: 1 - type: BASIC - - name: User - threads: 1 - type: BASIC - - name: Batch - threads: 1 - type: BASIC - - name: IO - threads: 1 - time_per_mailbox_micro_secs: 100 - type: IO - - name: IC - spin_threshold: 10 - threads: 4 - time_per_mailbox_micro_secs: 100 - type: BASIC - scheduler: - progress_threshold: 10000 - resolution: 256 - spin_threshold: 0 -blob_storage_config: - service_set: - groups: - - erasure_species: block-4-2 - rings: - - fail_domains: - - vdisk_locations: - - node_id: 1 - pdisk_category: SSD - path: SectorMap:1:1 - - vdisk_locations: - - node_id: 2 - pdisk_category: SSD - path: SectorMap:1:1 - - vdisk_locations: - - node_id: 3 - pdisk_category: SSD - path: SectorMap:1:1 - - vdisk_locations: - - node_id: 4 - pdisk_category: SSD - path: SectorMap:1:1 - - vdisk_locations: - - node_id: 5 - pdisk_category: SSD - path: SectorMap:1:1 - - vdisk_locations: - - node_id: 6 - pdisk_category: SSD - path: SectorMap:1:1 - - vdisk_locations: - - node_id: 7 - pdisk_category: SSD - path: SectorMap:1:1 - - vdisk_locations: - - node_id: 8 - pdisk_category: SSD - path: SectorMap:1:1 -channel_profile_config: - profile: - - channel: - - erasure_species: block-4-2 - pdisk_category: 1 - storage_pool_kind: ssd - - erasure_species: block-4-2 - pdisk_category: 1 - storage_pool_kind: ssd - - erasure_species: block-4-2 - pdisk_category: 1 - storage_pool_kind: ssd - profile_id: 0 -grpc_config: - start_grpc_proxy: true - ssl_port: 2135 - ca: /etc/ssl/certs/ca-certificates.crt - cert: /tls/grpc/tls.crt - key: /tls/grpc/tls.key - grpc_memory_quota_bytes: '1073741824' - host: '[::]' - keep_alive_enable: true - keep_alive_idle_timeout_trigger_sec: 90 - keep_alive_max_probe_count: 3 - keep_alive_probe_interval_sec: 10 - services: [legacy, yql, scripting, cms, discovery, monitoring, import, export, locking, maintenance] - streaming_config: {enable_output_streams: true} diff --git a/e2e/tests/data/storage-block-4-2-config.yaml b/e2e/tests/data/storage-mirror-3-dc-config-nodeSets.yaml similarity index 54% rename from e2e/tests/data/storage-block-4-2-config.yaml rename to 
e2e/tests/data/storage-mirror-3-dc-config-nodeSets.yaml index 831fa5f5..0384fb6b 100644 --- a/e2e/tests/data/storage-block-4-2-config.yaml +++ b/e2e/tests/data/storage-mirror-3-dc-config-nodeSets.yaml @@ -1,8 +1,12 @@ -static_erasure: block-4-2 +static_erasure: mirror-3-dc host_configs: - drive: - path: SectorMap:1:1 type: SSD + - path: SectorMap:2:1 + type: SSD + - path: SectorMap:3:1 + type: SSD host_config_id: 1 domains_config: domain: @@ -11,47 +15,52 @@ domains_config: - kind: ssd pool_config: box_id: 1 - erasure_species: block-4-2 + erasure_species: mirror-3-dc kind: ssd + geometry: + realm_level_begin: 10 + realm_level_end: 20 + domain_level_begin: 10 + domain_level_end: 256 pdisk_filter: - property: - - type: SSD + - type: SSD # device type to match host_configs.drive.type vdisk_kind: Default state_storage: - ring: - node: [1, 2, 3, 4, 5, 6, 7, 8] - nto_select: 5 + node: [1, 2, 3] + nto_select: 3 ssid: 1 table_service_config: sql_version: 1 actor_system_config: - executor: - - name: System + executor: + - name: System threads: 1 type: BASIC - - name: User + - name: User threads: 1 type: BASIC - - name: Batch - threads: 1 + - name: Batch + threads: 1 type: BASIC - - name: IO + - name: IO threads: 1 time_per_mailbox_micro_secs: 100 type: IO - - name: IC + - name: IC spin_threshold: 10 - threads: 4 + threads: 4 time_per_mailbox_micro_secs: 100 type: BASIC scheduler: progress_threshold: 10000 resolution: 256 spin_threshold: 0 -blob_storage_config: +blob_storage_config: # configuration of static blobstorage group. service_set: groups: - - erasure_species: block-4-2 + - erasure_species: mirror-3-dc rings: - fail_domains: - vdisk_locations: @@ -59,44 +68,50 @@ blob_storage_config: pdisk_category: SSD path: SectorMap:1:1 - vdisk_locations: - - node_id: 2 + - node_id: 1 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:2:1 - vdisk_locations: - - node_id: 3 + - node_id: 1 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:3:1 + - fail_domains: - vdisk_locations: - - node_id: 4 + - node_id: 2 pdisk_category: SSD path: SectorMap:1:1 - vdisk_locations: - - node_id: 5 + - node_id: 2 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:2:1 - vdisk_locations: - - node_id: 6 + - node_id: 2 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:3:1 + - fail_domains: - vdisk_locations: - - node_id: 7 + - node_id: 3 pdisk_category: SSD path: SectorMap:1:1 - vdisk_locations: - - node_id: 8 + - node_id: 3 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:2:1 + - vdisk_locations: + - node_id: 3 + pdisk_category: SSD + path: SectorMap:3:1 channel_profile_config: profile: - channel: - - erasure_species: block-4-2 - pdisk_category: 1 + - erasure_species: mirror-3-dc + pdisk_category: 0 storage_pool_kind: ssd - - erasure_species: block-4-2 - pdisk_category: 1 + - erasure_species: mirror-3-dc + pdisk_category: 0 storage_pool_kind: ssd - - erasure_species: block-4-2 - pdisk_category: 1 + - erasure_species: mirror-3-dc + pdisk_category: 0 storage_pool_kind: ssd profile_id: 0 grpc_config: diff --git a/e2e/tests/data/storage-block-4-2-config-staticCreds.yaml b/e2e/tests/data/storage-mirror-3-dc-config-staticCreds.yaml similarity index 55% rename from e2e/tests/data/storage-block-4-2-config-staticCreds.yaml rename to e2e/tests/data/storage-mirror-3-dc-config-staticCreds.yaml index f6cc762b..b6f5ea20 100644 --- a/e2e/tests/data/storage-block-4-2-config-staticCreds.yaml +++ b/e2e/tests/data/storage-mirror-3-dc-config-staticCreds.yaml @@ -1,8 +1,12 @@ -static_erasure: 
block-4-2 +static_erasure: mirror-3-dc host_configs: - drive: - path: SectorMap:1:1 type: SSD + - path: SectorMap:2:1 + type: SSD + - path: SectorMap:3:1 + type: SSD host_config_id: 1 domains_config: security_config: @@ -13,47 +17,52 @@ domains_config: - kind: ssd pool_config: box_id: 1 - erasure_species: block-4-2 + erasure_species: mirror-3-dc kind: ssd + geometry: + realm_level_begin: 10 + realm_level_end: 20 + domain_level_begin: 10 + domain_level_end: 256 pdisk_filter: - property: - - type: SSD + - type: SSD # device type to match host_configs.drive.type vdisk_kind: Default state_storage: - ring: - node: [1, 2, 3, 4, 5, 6, 7, 8] - nto_select: 5 + node: [1, 2, 3] + nto_select: 3 ssid: 1 table_service_config: sql_version: 1 actor_system_config: - executor: - - name: System + executor: + - name: System threads: 1 type: BASIC - - name: User + - name: User threads: 1 type: BASIC - - name: Batch - threads: 1 + - name: Batch + threads: 1 type: BASIC - - name: IO + - name: IO threads: 1 time_per_mailbox_micro_secs: 100 type: IO - - name: IC + - name: IC spin_threshold: 10 - threads: 4 + threads: 4 time_per_mailbox_micro_secs: 100 type: BASIC scheduler: progress_threshold: 10000 resolution: 256 spin_threshold: 0 -blob_storage_config: +blob_storage_config: # configuration of static blobstorage group. service_set: groups: - - erasure_species: block-4-2 + - erasure_species: mirror-3-dc rings: - fail_domains: - vdisk_locations: @@ -61,44 +70,50 @@ blob_storage_config: pdisk_category: SSD path: SectorMap:1:1 - vdisk_locations: - - node_id: 2 + - node_id: 1 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:2:1 - vdisk_locations: - - node_id: 3 + - node_id: 1 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:3:1 + - fail_domains: - vdisk_locations: - - node_id: 4 + - node_id: 2 pdisk_category: SSD path: SectorMap:1:1 - vdisk_locations: - - node_id: 5 + - node_id: 2 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:2:1 - vdisk_locations: - - node_id: 6 + - node_id: 2 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:3:1 + - fail_domains: - vdisk_locations: - - node_id: 7 + - node_id: 3 pdisk_category: SSD path: SectorMap:1:1 - vdisk_locations: - - node_id: 8 + - node_id: 3 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:2:1 + - vdisk_locations: + - node_id: 3 + pdisk_category: SSD + path: SectorMap:3:1 channel_profile_config: profile: - channel: - - erasure_species: block-4-2 - pdisk_category: 1 + - erasure_species: mirror-3-dc + pdisk_category: 0 storage_pool_kind: ssd - - erasure_species: block-4-2 - pdisk_category: 1 + - erasure_species: mirror-3-dc + pdisk_category: 0 storage_pool_kind: ssd - - erasure_species: block-4-2 - pdisk_category: 1 + - erasure_species: mirror-3-dc + pdisk_category: 0 storage_pool_kind: ssd profile_id: 0 grpc_config: diff --git a/e2e/tests/data/storage-mirror-3-dc-config-tls.yaml b/e2e/tests/data/storage-mirror-3-dc-config-tls.yaml new file mode 100644 index 00000000..2d9093e2 --- /dev/null +++ b/e2e/tests/data/storage-mirror-3-dc-config-tls.yaml @@ -0,0 +1,130 @@ +static_erasure: mirror-3-dc +host_configs: + - drive: + - path: SectorMap:1:1 + type: SSD + - path: SectorMap:2:1 + type: SSD + - path: SectorMap:3:1 + type: SSD + host_config_id: 1 +domains_config: + domain: + - name: Root + storage_pool_types: + - kind: ssd + pool_config: + box_id: 1 + erasure_species: mirror-3-dc + kind: ssd + geometry: + realm_level_begin: 10 + realm_level_end: 20 + domain_level_begin: 10 + domain_level_end: 256 + pdisk_filter: + - 
property: + - type: SSD # device type to match host_configs.drive.type + vdisk_kind: Default + state_storage: + - ring: + node: [1, 2, 3] + nto_select: 3 + ssid: 1 +table_service_config: + sql_version: 1 +actor_system_config: + executor: + - name: System + threads: 1 + type: BASIC + - name: User + threads: 1 + type: BASIC + - name: Batch + threads: 1 + type: BASIC + - name: IO + threads: 1 + time_per_mailbox_micro_secs: 100 + type: IO + - name: IC + spin_threshold: 10 + threads: 4 + time_per_mailbox_micro_secs: 100 + type: BASIC + scheduler: + progress_threshold: 10000 + resolution: 256 + spin_threshold: 0 +blob_storage_config: # configuration of static blobstorage group. + service_set: + groups: + - erasure_species: mirror-3-dc + rings: + - fail_domains: + - vdisk_locations: + - node_id: 1 + pdisk_category: SSD + path: SectorMap:1:1 + - vdisk_locations: + - node_id: 1 + pdisk_category: SSD + path: SectorMap:2:1 + - vdisk_locations: + - node_id: 1 + pdisk_category: SSD + path: SectorMap:3:1 + - fail_domains: + - vdisk_locations: + - node_id: 2 + pdisk_category: SSD + path: SectorMap:1:1 + - vdisk_locations: + - node_id: 2 + pdisk_category: SSD + path: SectorMap:2:1 + - vdisk_locations: + - node_id: 2 + pdisk_category: SSD + path: SectorMap:3:1 + - fail_domains: + - vdisk_locations: + - node_id: 3 + pdisk_category: SSD + path: SectorMap:1:1 + - vdisk_locations: + - node_id: 3 + pdisk_category: SSD + path: SectorMap:2:1 + - vdisk_locations: + - node_id: 3 + pdisk_category: SSD + path: SectorMap:3:1 +channel_profile_config: + profile: + - channel: + - erasure_species: mirror-3-dc + pdisk_category: 0 + storage_pool_kind: ssd + - erasure_species: mirror-3-dc + pdisk_category: 0 + storage_pool_kind: ssd + - erasure_species: mirror-3-dc + pdisk_category: 0 + storage_pool_kind: ssd + profile_id: 0 +grpc_config: + start_grpc_proxy: true + ssl_port: 2135 + ca: /etc/ssl/certs/ca-certificates.crt + cert: /tls/grpc/tls.crt + key: /tls/grpc/tls.key + grpc_memory_quota_bytes: '1073741824' + host: '[::]' + keep_alive_enable: true + keep_alive_idle_timeout_trigger_sec: 90 + keep_alive_max_probe_count: 3 + keep_alive_probe_interval_sec: 10 + services: [legacy, yql, scripting, cms, discovery, monitoring, import, export, locking, maintenance] + streaming_config: {enable_output_streams: true} diff --git a/e2e/tests/data/storage-block-4-2-config-nodeSets.yaml b/e2e/tests/data/storage-mirror-3-dc-config.yaml similarity index 54% rename from e2e/tests/data/storage-block-4-2-config-nodeSets.yaml rename to e2e/tests/data/storage-mirror-3-dc-config.yaml index 831fa5f5..0384fb6b 100644 --- a/e2e/tests/data/storage-block-4-2-config-nodeSets.yaml +++ b/e2e/tests/data/storage-mirror-3-dc-config.yaml @@ -1,8 +1,12 @@ -static_erasure: block-4-2 +static_erasure: mirror-3-dc host_configs: - drive: - path: SectorMap:1:1 type: SSD + - path: SectorMap:2:1 + type: SSD + - path: SectorMap:3:1 + type: SSD host_config_id: 1 domains_config: domain: @@ -11,47 +15,52 @@ domains_config: - kind: ssd pool_config: box_id: 1 - erasure_species: block-4-2 + erasure_species: mirror-3-dc kind: ssd + geometry: + realm_level_begin: 10 + realm_level_end: 20 + domain_level_begin: 10 + domain_level_end: 256 pdisk_filter: - property: - - type: SSD + - type: SSD # device type to match host_configs.drive.type vdisk_kind: Default state_storage: - ring: - node: [1, 2, 3, 4, 5, 6, 7, 8] - nto_select: 5 + node: [1, 2, 3] + nto_select: 3 ssid: 1 table_service_config: sql_version: 1 actor_system_config: - executor: - - name: System + executor: + - name: 
System threads: 1 type: BASIC - - name: User + - name: User threads: 1 type: BASIC - - name: Batch - threads: 1 + - name: Batch + threads: 1 type: BASIC - - name: IO + - name: IO threads: 1 time_per_mailbox_micro_secs: 100 type: IO - - name: IC + - name: IC spin_threshold: 10 - threads: 4 + threads: 4 time_per_mailbox_micro_secs: 100 type: BASIC scheduler: progress_threshold: 10000 resolution: 256 spin_threshold: 0 -blob_storage_config: +blob_storage_config: # configuration of static blobstorage group. service_set: groups: - - erasure_species: block-4-2 + - erasure_species: mirror-3-dc rings: - fail_domains: - vdisk_locations: @@ -59,44 +68,50 @@ blob_storage_config: pdisk_category: SSD path: SectorMap:1:1 - vdisk_locations: - - node_id: 2 + - node_id: 1 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:2:1 - vdisk_locations: - - node_id: 3 + - node_id: 1 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:3:1 + - fail_domains: - vdisk_locations: - - node_id: 4 + - node_id: 2 pdisk_category: SSD path: SectorMap:1:1 - vdisk_locations: - - node_id: 5 + - node_id: 2 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:2:1 - vdisk_locations: - - node_id: 6 + - node_id: 2 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:3:1 + - fail_domains: - vdisk_locations: - - node_id: 7 + - node_id: 3 pdisk_category: SSD path: SectorMap:1:1 - vdisk_locations: - - node_id: 8 + - node_id: 3 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:2:1 + - vdisk_locations: + - node_id: 3 + pdisk_category: SSD + path: SectorMap:3:1 channel_profile_config: profile: - channel: - - erasure_species: block-4-2 - pdisk_category: 1 + - erasure_species: mirror-3-dc + pdisk_category: 0 storage_pool_kind: ssd - - erasure_species: block-4-2 - pdisk_category: 1 + - erasure_species: mirror-3-dc + pdisk_category: 0 storage_pool_kind: ssd - - erasure_species: block-4-2 - pdisk_category: 1 + - erasure_species: mirror-3-dc + pdisk_category: 0 storage_pool_kind: ssd profile_id: 0 grpc_config: diff --git a/e2e/tests/data/storage-block-4-2-dynconfig.yaml b/e2e/tests/data/storage-mirror-3-dc-dynconfig.yaml similarity index 60% rename from e2e/tests/data/storage-block-4-2-dynconfig.yaml rename to e2e/tests/data/storage-mirror-3-dc-dynconfig.yaml index d45e523c..f4f7c06f 100644 --- a/e2e/tests/data/storage-block-4-2-dynconfig.yaml +++ b/e2e/tests/data/storage-mirror-3-dc-dynconfig.yaml @@ -6,11 +6,15 @@ allowed_labels: {} selector_config: [] config: yaml_config_enabled: true - static_erasure: block-4-2 + static_erasure: mirror-3-dc host_configs: - drive: - path: SectorMap:1:1 type: SSD + - path: SectorMap:2:1 + type: SSD + - path: SectorMap:3:1 + type: SSD host_config_id: 1 domains_config: domain: @@ -19,16 +23,21 @@ config: - kind: ssd pool_config: box_id: 1 - erasure_species: block-4-2 + erasure_species: mirror-3-dc kind: ssd + geometry: + realm_level_begin: 10 + realm_level_end: 20 + domain_level_begin: 10 + domain_level_end: 256 pdisk_filter: - property: - - type: SSD + - type: SSD # device type to match host_configs.drive.type vdisk_kind: Default state_storage: - ring: - node: [1, 2, 3, 4, 5, 6, 7, 8] - nto_select: 5 + node: [1, 2, 3] + nto_select: 3 ssid: 1 table_service_config: sql_version: 1 @@ -56,55 +65,61 @@ config: progress_threshold: 10000 resolution: 256 spin_threshold: 0 - blob_storage_config: + blob_storage_config: # configuration of static blobstorage group. 
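# Editor's note: the four mirror-3-dc configs above (plain, nodeSets, staticCreds, TLS) and this
# dynconfig all switch to the same static-group layout: one ring per node, three fail domains per
# ring, each backed by one of that node's three SectorMap pdisks, with state_storage shrunk to
# nodes [1, 2, 3] and nto_select: 3. The added geometry block appears to widen the allowed
# fail-realm/fail-domain levels so a mirror-3-dc group can be assembled from only three hosts.
# Condensed, as a reading aid only (not a complete config):
#
#   static_erasure: mirror-3-dc
#   host_configs:
#     - host_config_id: 1
#       drive:                                    # three in-memory SectorMap pdisks per node
#         - { path: "SectorMap:1:1", type: SSD }
#         - { path: "SectorMap:2:1", type: SSD }
#         - { path: "SectorMap:3:1", type: SSD }
#   blob_storage_config:
#     service_set:
#       groups:
#         - erasure_species: mirror-3-dc
#           rings:                                # ring N corresponds to node N
#             - fail_domains:
#                 - vdisk_locations: [{ node_id: 1, pdisk_category: SSD, path: "SectorMap:1:1" }]
#                 - vdisk_locations: [{ node_id: 1, pdisk_category: SSD, path: "SectorMap:2:1" }]
#                 - vdisk_locations: [{ node_id: 1, pdisk_category: SSD, path: "SectorMap:3:1" }]
#             # rings for node_id 2 and 3 repeat the same three-domain pattern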
service_set: groups: - - erasure_species: block-4-2 + - erasure_species: mirror-3-dc rings: - fail_domains: - vdisk_locations: - - node_id: storage-0 + - node_id: 1 pdisk_category: SSD path: SectorMap:1:1 - vdisk_locations: - - node_id: storage-1 + - node_id: 1 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:2:1 - vdisk_locations: - - node_id: storage-2 + - node_id: 1 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:3:1 + - fail_domains: - vdisk_locations: - - node_id: storage-3 + - node_id: 2 pdisk_category: SSD path: SectorMap:1:1 - vdisk_locations: - - node_id: storage-4 + - node_id: 2 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:2:1 - vdisk_locations: - - node_id: storage-5 + - node_id: 2 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:3:1 + - fail_domains: - vdisk_locations: - - node_id: storage-6 + - node_id: 3 pdisk_category: SSD path: SectorMap:1:1 - vdisk_locations: - - node_id: storage-7 + - node_id: 3 pdisk_category: SSD - path: SectorMap:1:1 + path: SectorMap:2:1 + - vdisk_locations: + - node_id: 3 + pdisk_category: SSD + path: SectorMap:3:1 channel_profile_config: profile: - channel: - - erasure_species: block-4-2 - pdisk_category: 1 + - erasure_species: mirror-3-dc + pdisk_category: 0 storage_pool_kind: ssd - - erasure_species: block-4-2 - pdisk_category: 1 + - erasure_species: mirror-3-dc + pdisk_category: 0 storage_pool_kind: ssd - - erasure_species: block-4-2 - pdisk_category: 1 + - erasure_species: mirror-3-dc + pdisk_category: 0 storage_pool_kind: ssd profile_id: 0 grpc_config: diff --git a/e2e/tests/smoke_test.go b/e2e/tests/smoke_test.go index 0af88c3a..c0ac3c29 100644 --- a/e2e/tests/smoke_test.go +++ b/e2e/tests/smoke_test.go @@ -37,7 +37,7 @@ import ( const ( Timeout = time.Second * 600 - Interval = time.Second * 5 + Interval = time.Second * 2 ) func podIsReady(conditions []corev1.PodCondition) bool { @@ -143,7 +143,7 @@ func checkPodsRunningAndReady(ctx context.Context, podLabelKey, podLabelValue st g.Expect(podIsReady(pod.Status.Conditions)).Should(BeTrue()) } return true - }, 30*time.Second, Interval).Should(BeTrue()) + }, test.Timeout, test.Interval).Should(BeTrue()) } func bringYdbCliToPod(podName, podNamespace string) { @@ -152,6 +152,7 @@ func bringYdbCliToPod(podName, podNamespace string) { "-n", podNamespace, "cp", + // This implicitly relies on 'ydb' cli binary installed in your system fmt.Sprintf("%v/ydb/bin/ydb", os.ExpandEnv("$HOME")), fmt.Sprintf("%v:/tmp/ydb", podName), } @@ -248,7 +249,7 @@ func portForward(ctx context.Context, svcName string, svcNamespace string, port return fmt.Errorf("kubectl port-forward stderr: %s", content) } return nil - }, 60*time.Second, Interval).Should(BeNil()) + }, Timeout, test.Interval).Should(BeNil()) } var _ = Describe("Operator smoke test", func() { @@ -259,7 +260,7 @@ var _ = Describe("Operator smoke test", func() { var databaseSample *v1alpha1.Database BeforeEach(func() { - storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-block-4-2-config.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-mirror-3-dc-config.yaml")) databaseSample = testobjects.DefaultDatabase() ctx = context.Background() @@ -431,7 +432,7 @@ var _ = Describe("Operator smoke test", func() { })).Should(Succeed()) return len(storagePods.Items) == int(storageSample.Spec.Nodes) - }, 20*time.Second, Interval).Should(BeTrue()) + }, test.Timeout, test.Interval).Should(BeTrue()) By("deleting a StatefulSet...") statefulSet := v1.StatefulSet{} 
@@ -458,7 +459,7 @@ var _ = Describe("Operator smoke test", func() { })).Should(Succeed()) return len(storagePods.Items) == 0 - }, 20*time.Second, Interval).Should(BeTrue()) + }, test.Timeout, test.Interval).Should(BeTrue()) By("setting storage freeze back to Running...") storage = v1alpha1.Storage{} @@ -493,19 +494,19 @@ var _ = Describe("Operator smoke test", func() { It("create storage and database with nodeSets", func() { By("issuing create commands...") - storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-block-4-2-config-nodeSets.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-mirror-3-dc-config-nodeSets.yaml")) testNodeSetName := "nodeset" - for idx := 1; idx <= 2; idx++ { + for idx := 1; idx <= 3; idx++ { storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, v1alpha1.StorageNodeSetSpecInline{ Name: testNodeSetName + "-" + strconv.Itoa(idx), StorageNodeSpec: v1alpha1.StorageNodeSpec{ - Nodes: 4, + Nodes: 1, }, }) databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, v1alpha1.DatabaseNodeSetSpecInline{ Name: testNodeSetName + "-" + strconv.Itoa(idx), DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 4, + Nodes: 1, }, }) } @@ -532,18 +533,24 @@ var _ = Describe("Operator smoke test", func() { database := v1alpha1.Database{} databasePods := corev1.PodList{} - By("delete nodeSetSpec inline to check inheritance...") + By("modify nodeSetSpec inline to check inheritance...") Eventually(func(g Gomega) error { g.Expect(k8sClient.Get(ctx, types.NamespacedName{ Name: databaseSample.Name, Namespace: testobjects.YdbNamespace, }, &database)).Should(Succeed()) - database.Spec.Nodes = 4 + database.Spec.Nodes = 2 database.Spec.NodeSets = []v1alpha1.DatabaseNodeSetSpecInline{ { Name: testNodeSetName + "-" + strconv.Itoa(1), DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 4, + Nodes: 1, + }, + }, + { + Name: testNodeSetName + "-" + strconv.Itoa(2), + DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ + Nodes: 1, }, }, } @@ -578,7 +585,7 @@ var _ = Describe("Operator smoke test", func() { It("operatorConnection check, create storage with default staticCredentials", func() { By("issuing create commands...") - storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-block-4-2-config-staticCreds.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-mirror-3-dc-config-staticCreds.yaml")) storageSample.Spec.OperatorConnection = &v1alpha1.ConnectionOptions{ StaticCredentials: &v1alpha1.StaticCredentialsAuth{ Username: "root", @@ -639,7 +646,7 @@ var _ = Describe("Operator smoke test", func() { }() By("create storage...") - storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-block-4-2-config-tls.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-mirror-3-dc-config-tls.yaml")) storageSample.Spec.Service.GRPC.TLSConfiguration.Enabled = true storageSample.Spec.Service.GRPC.TLSConfiguration.Certificate = corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{Name: testobjects.CertificateSecretName}, @@ -738,7 +745,7 @@ var _ = Describe("Operator smoke test", func() { return false } return !foundStorage.DeletionTimestamp.IsZero() - }, test.Timeout, test.Interval).Should(BeTrue()) + }, Timeout, test.Interval).Should(BeTrue()) By("checking that Storage is present in cluster...") Consistently(func() error { @@ -761,12 +768,12 @@ var _ = Describe("Operator smoke test", func() { 
Namespace: testobjects.YdbNamespace, }, &foundStorage) return apierrors.IsNotFound(err) - }, test.Timeout, test.Interval).Should(BeTrue()) + }, Timeout, test.Interval).Should(BeTrue()) }) It("check storage with dynconfig", func() { By("create storage...") - storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-block-4-2-dynconfig.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-mirror-3-dc-dynconfig.yaml")) Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) defer func() { @@ -839,7 +846,7 @@ var _ = Describe("Operator smoke test", func() { transport := &http.Transport{TLSClientConfig: tlsConfig} client := &http.Client{ Transport: transport, - Timeout: 10 * time.Second, + Timeout: test.Timeout, } resp, err := client.Get(url) if err != nil { diff --git a/e2e/tests/test-objects/objects.go b/e2e/tests/test-objects/objects.go index 17d30094..c369d4cc 100644 --- a/e2e/tests/test-objects/objects.go +++ b/e2e/tests/test-objects/objects.go @@ -11,7 +11,7 @@ import ( ) const ( - YdbImage = "cr.yandex/crptqonuodf51kdj7a7d/ydb:23.3.17" + YdbImage = "cr.yandex/crptqonuodf51kdj7a7d/ydb:24.2.7" // anchor_for_fetching_image_from_workflow YdbNamespace = "ydb" StorageName = "storage" DatabaseName = "database" @@ -59,7 +59,7 @@ func DefaultStorage(storageYamlConfigPath string) *v1alpha1.Storage { StorageClusterSpec: v1alpha1.StorageClusterSpec{ Domain: DefaultDomain, OperatorSync: true, - Erasure: "block-4-2", + Erasure: "mirror-3-dc", Image: &v1alpha1.PodImage{ Name: YdbImage, PullPolicyName: &defaultPolicy, @@ -90,7 +90,7 @@ func DefaultStorage(storageYamlConfigPath string) *v1alpha1.Storage { }, }, StorageNodeSpec: v1alpha1.StorageNodeSpec{ - Nodes: 8, + Nodes: 3, DataStore: []corev1.PersistentVolumeClaimSpec{}, Resources: &corev1.ResourceRequirements{}, @@ -156,7 +156,7 @@ func DefaultDatabase() *v1alpha1.Database { }, }, DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 8, + Nodes: 3, Resources: &v1alpha1.DatabaseResources{ StorageUnits: []v1alpha1.StorageUnit{ { diff --git a/internal/controllers/database/controller_test.go b/internal/controllers/database/controller_test.go index ff70c105..15333aec 100644 --- a/internal/controllers/database/controller_test.go +++ b/internal/controllers/database/controller_test.go @@ -62,7 +62,7 @@ var _ = Describe("Database controller medium tests", func() { }, } Expect(k8sClient.Create(ctx, &namespace)).Should(Succeed()) - storageSample = *testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + storageSample = *testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) Expect(k8sClient.Create(ctx, &storageSample)).Should(Succeed()) By("checking that Storage created on local cluster...") diff --git a/internal/controllers/databasenodeset/controller_test.go b/internal/controllers/databasenodeset/controller_test.go index 819c7ba6..47beb813 100644 --- a/internal/controllers/databasenodeset/controller_test.go +++ b/internal/controllers/databasenodeset/controller_test.go @@ -71,7 +71,7 @@ var _ = Describe("DatabaseNodeSet controller medium tests", func() { } Expect(k8sClient.Create(ctx, &namespace)).Should(Succeed()) - storageSample = *testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + storageSample = *testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", 
"storage-mirror-3-dc-config.yaml")) Expect(k8sClient.Create(ctx, &storageSample)).Should(Succeed()) By("checking that Storage created on local cluster...") @@ -103,7 +103,7 @@ var _ = Describe("DatabaseNodeSet controller medium tests", func() { databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, v1alpha1.DatabaseNodeSetSpecInline{ Name: testNodeSetName, DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 4, + Nodes: 1, }, }) @@ -160,7 +160,7 @@ var _ = Describe("DatabaseNodeSet controller medium tests", func() { testNodeSetLabel: "true", }, DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 2, + Nodes: 1, }, }) @@ -170,7 +170,7 @@ var _ = Describe("DatabaseNodeSet controller medium tests", func() { v1alpha1.AnnotationDataCenter: "envtest", }, DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 2, + Nodes: 1, }, }) diff --git a/internal/controllers/monitoring/monitoring_test.go b/internal/controllers/monitoring/monitoring_test.go index edc049e0..5a31d55c 100644 --- a/internal/controllers/monitoring/monitoring_test.go +++ b/internal/controllers/monitoring/monitoring_test.go @@ -119,7 +119,7 @@ func createMockDBAndSvc() { func createMockStorageAndSvc() { GinkgoHelper() - stor := testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + stor := testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) Expect(k8sClient.Create(ctx, stor)).Should(Succeed()) stor.Status.State = StorageReady diff --git a/internal/controllers/remotedatabasenodeset/controller_test.go b/internal/controllers/remotedatabasenodeset/controller_test.go index 2e7ca0a8..c401b4f9 100644 --- a/internal/controllers/remotedatabasenodeset/controller_test.go +++ b/internal/controllers/remotedatabasenodeset/controller_test.go @@ -206,12 +206,12 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { var databaseSample *v1alpha1.Database BeforeEach(func() { - storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) databaseSample = testobjects.DefaultDatabase() databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, v1alpha1.DatabaseNodeSetSpecInline{ Name: testNodeSetName + "-local", DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 4, + Nodes: 1, }, }) databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, v1alpha1.DatabaseNodeSetSpecInline{ @@ -220,7 +220,7 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { Cluster: testRemoteCluster, }, DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 4, + Nodes: 1, }, }) databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, v1alpha1.DatabaseNodeSetSpecInline{ @@ -229,7 +229,7 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { Cluster: testRemoteCluster, }, DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 4, + Nodes: 1, }, }) @@ -445,7 +445,7 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { Cluster: testRemoteCluster, }, StorageNodeSpec: v1alpha1.StorageNodeSpec{ - Nodes: 4, + Nodes: 1, }, }) @@ -570,7 +570,7 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { { Name: testNodeSetName + "-local", DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 4, + Nodes: 2, }, }, { @@ -579,7 +579,7 @@ var _ = 
Describe("RemoteDatabaseNodeSet controller tests", func() { Cluster: testRemoteCluster, }, DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 4, + Nodes: 1, }, }, } @@ -823,7 +823,7 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { { Name: testNodeSetName + "-local", DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 4, + Nodes: 2, }, }, { @@ -832,7 +832,7 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { Cluster: testRemoteCluster, }, DatabaseNodeSpec: v1alpha1.DatabaseNodeSpec{ - Nodes: 4, + Nodes: 1, }, }, } diff --git a/internal/controllers/remotestoragenodeset/controller_test.go b/internal/controllers/remotestoragenodeset/controller_test.go index 28882efe..e1195aff 100644 --- a/internal/controllers/remotestoragenodeset/controller_test.go +++ b/internal/controllers/remotestoragenodeset/controller_test.go @@ -179,11 +179,11 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { var storageSample *v1alpha1.Storage BeforeEach(func() { - storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, v1alpha1.StorageNodeSetSpecInline{ Name: testNodeSetName + "-local", StorageNodeSpec: v1alpha1.StorageNodeSpec{ - Nodes: 4, + Nodes: 1, }, }) storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, v1alpha1.StorageNodeSetSpecInline{ @@ -192,7 +192,7 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { Cluster: testRemoteCluster, }, StorageNodeSpec: v1alpha1.StorageNodeSpec{ - Nodes: 2, + Nodes: 1, }, }) storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, v1alpha1.StorageNodeSetSpecInline{ @@ -201,7 +201,7 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { Cluster: testRemoteCluster, }, StorageNodeSpec: v1alpha1.StorageNodeSpec{ - Nodes: 2, + Nodes: 1, }, }) @@ -569,7 +569,7 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { { Name: testNodeSetName + "-local", StorageNodeSpec: v1alpha1.StorageNodeSpec{ - Nodes: 6, + Nodes: 2, }, }, { @@ -578,7 +578,7 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { Cluster: testRemoteCluster, }, StorageNodeSpec: v1alpha1.StorageNodeSpec{ - Nodes: 2, + Nodes: 1, }, }, } diff --git a/internal/controllers/storage/controller_test.go b/internal/controllers/storage/controller_test.go index fb364be3..ded230bb 100644 --- a/internal/controllers/storage/controller_test.go +++ b/internal/controllers/storage/controller_test.go @@ -62,7 +62,7 @@ var _ = Describe("Storage controller medium tests", func() { }) It("Checking field propagation to objects", func() { - storageSample := testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + storageSample := testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) tmpFilesDir := "/tmp/mounted_volume" testVolumeName := "sample-volume" diff --git a/internal/controllers/storagenodeset/controller_test.go b/internal/controllers/storagenodeset/controller_test.go index 230fc2f0..c93149e6 100644 --- a/internal/controllers/storagenodeset/controller_test.go +++ b/internal/controllers/storagenodeset/controller_test.go @@ -62,15 +62,16 @@ var _ = Describe("StorageNodeSet controller medium 
tests", func() { }) It("Check controller operation through nodeSetSpec inline spec in Storage object", func() { - storageSample := testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + storageSample := testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) // Test create inline nodeSetSpec in Storage object testNodeSetName := "nodeset" - for idx := 1; idx <= 4; idx++ { + storageNodeSetAmount := 3 + for idx := 1; idx <= storageNodeSetAmount; idx++ { storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, v1alpha1.StorageNodeSetSpecInline{ Name: testNodeSetName + "-" + strconv.Itoa(idx), StorageNodeSpec: v1alpha1.StorageNodeSpec{ - Nodes: 2, + Nodes: 1, }, }) } @@ -84,14 +85,14 @@ var _ = Describe("StorageNodeSet controller medium tests", func() { ))).Should(Succeed()) foundStorageNodeSet := make(map[int]bool) for _, storageNodeSet := range storageNodeSets.Items { - for idxNodeSet := 1; idxNodeSet <= 4; idxNodeSet++ { + for idxNodeSet := 1; idxNodeSet <= storageNodeSetAmount; idxNodeSet++ { if storageNodeSet.Name == testobjects.StorageName+"-"+testNodeSetName+"-"+strconv.Itoa(idxNodeSet) { foundStorageNodeSet[idxNodeSet] = true break } } } - for idxNodeSet := 1; idxNodeSet <= 4; idxNodeSet++ { + for idxNodeSet := 1; idxNodeSet <= storageNodeSetAmount; idxNodeSet++ { if !foundStorageNodeSet[idxNodeSet] { return false } @@ -107,14 +108,14 @@ var _ = Describe("StorageNodeSet controller medium tests", func() { ))).Should(Succeed()) foundStatefulSet := make(map[int]bool) for _, statefulSet := range storageStatefulSets.Items { - for idxNodeSet := 1; idxNodeSet <= 4; idxNodeSet++ { + for idxNodeSet := 1; idxNodeSet <= storageNodeSetAmount; idxNodeSet++ { if statefulSet.Name == testobjects.StorageName+"-"+testNodeSetName+"-"+strconv.Itoa(idxNodeSet) { foundStatefulSet[idxNodeSet] = true break } } } - for idxNodeSet := 1; idxNodeSet <= 4; idxNodeSet++ { + for idxNodeSet := 1; idxNodeSet <= storageNodeSetAmount; idxNodeSet++ { if !foundStatefulSet[idxNodeSet] { return false } diff --git a/samples/minikube/database.yaml b/samples/minikube/database.yaml index 0eab7436..af2c61f1 100644 --- a/samples/minikube/database.yaml +++ b/samples/minikube/database.yaml @@ -4,7 +4,7 @@ metadata: name: database-minikube-sample spec: image: - name: cr.yandex/crptqonuodf51kdj7a7d/ydb:23.3.17 + name: cr.yandex/crptqonuodf51kdj7a7d/ydb:24.2.7 nodes: 1 domain: Root service: diff --git a/samples/minikube/storage.yaml b/samples/minikube/storage.yaml index 43d2db5a..0c407115 100644 --- a/samples/minikube/storage.yaml +++ b/samples/minikube/storage.yaml @@ -5,7 +5,7 @@ metadata: spec: dataStore: [] image: - name: cr.yandex/crptqonuodf51kdj7a7d/ydb:23.3.17 + name: cr.yandex/crptqonuodf51kdj7a7d/ydb:24.2.7 nodes: 1 domain: Root erasure: none diff --git a/samples/storage-mirror-3dc.yaml b/samples/storage-mirror-3dc.yaml index 7b1d42d3..b4d0801a 100644 --- a/samples/storage-mirror-3dc.yaml +++ b/samples/storage-mirror-3dc.yaml @@ -11,7 +11,7 @@ spec: requests: storage: 80Gi image: - name: cr.yandex/crptqonuodf51kdj7a7d/ydb:23.3.17 + name: cr.yandex/crptqonuodf51kdj7a7d/ydb:24.2.7 nodes: 9 erasure: mirror-3-dc configuration: |-