diff --git a/.github/workflows/k8s-ci.yml b/.github/workflows/k8s-ci.yml index 1dab033c1..e1cf21cd0 100644 --- a/.github/workflows/k8s-ci.yml +++ b/.github/workflows/k8s-ci.yml @@ -28,24 +28,11 @@ jobs: run: nix-shell ./scripts/helm/shell.nix --run "echo" - name: Pre-populate pytest nix-shell run: nix-shell ./scripts/python/shell.nix --run "echo" - - name: Build binaries and images - id: build - run: | - TAG=$(nix-shell ./scripts/helm/shell.nix --run './scripts/python/generate-test-tag.sh') - TEST_DIR=$(realpath $(mktemp -d ./test-dir-XXXXXX)) - nix-shell ./scripts/helm/shell.nix --run "./scripts/python/tag-chart.sh $TAG" - RUSTFLAGS="-C debuginfo=0 -C strip=debuginfo" ./scripts/release.sh --tag $TAG --build-binary-out $TEST_DIR --no-static-linking --skip-publish --debug - echo "tag=$TAG" >> $GITHUB_OUTPUT - echo "bin=$TEST_DIR" >> $GITHUB_OUTPUT - name: BootStrap k8s cluster - run: | - nix-shell ./scripts/k8s/shell.nix --run "./scripts/k8s/deployer.sh start --label" - - name: Load images to Kind cluster - run: nix-shell ./scripts/k8s/shell.nix --run "./scripts/k8s/load-images-to-kind.sh --tag ${{ steps.build.outputs.tag }} --trim-debug-suffix" + run: nix-shell ./scripts/k8s/shell.nix --run "./scripts/k8s/deployer.sh start --label" - name: Run Pytests run: | - export UPGRADE_TARGET_VERSION=${{ steps.build.outputs.tag }} - export TEST_DIR=${{ steps.build.outputs.bin }} + export REUSE_CLUSTER=1 nix-shell ./scripts/python/shell.nix --run "./scripts/python/test.sh" - name: The job has failed if: ${{ failure() }} diff --git a/.gitignore b/.gitignore index 6398c18f6..3ef6e280e 100644 --- a/.gitignore +++ b/.gitignore @@ -24,4 +24,5 @@ __pycache__ # Pytest assets /test-dir-* tests/bdd/venv -pytest.log \ No newline at end of file +pytest.log +/tests/bdd/chart-vnext/ diff --git a/nix/pkgs/images/default.nix b/nix/pkgs/images/default.nix index f614faeeb..d4c0e6c8a 100644 --- a/nix/pkgs/images/default.nix +++ b/nix/pkgs/images/default.nix @@ -26,7 +26,7 @@ let } // config; }; 
build-exporter-image = { buildType }: { - io-engine = build-extensions-image rec{ + io-engine = build-extensions-image rec { inherit buildType; package = extensions.${buildType}.metrics.exporter.io-engine; pname = package.pname; @@ -54,10 +54,12 @@ let # todo: handle this properly? # Script doesn't need to be used with main branch `--alias-tag `. # The repo chart is already prepared. - if [[ "$(semver validate ${tag})" == "valid" ]] && - [[ ! ${tag} =~ ^(v?[0-9]+\.[0-9]+\.[0-9]+-0-(main|release)-unstable(-[0-9]+){6}-0)$ ]]; then - CHART_FILE=build/chart/Chart.yaml build/scripts/helm/publish-chart-yaml.sh --app-tag ${tag} --override-index "" - fi + # if [[ "$(semver validate ${tag})" == "valid" ]] && + # [[ ! ${tag} =~ ^(v?[0-9]+\.[0-9]+\.[0-9]+-0-(main|release)-unstable(-[0-9]+){6}-0)$ ]]; then + # CHART_FILE=build/chart/Chart.yaml build/scripts/helm/publish-chart-yaml.sh --app-tag ${tag} --override-index "" + # fi + # TODO: add chart source override or just reuse existing chart and git restore... + CHART_FILE=build/chart/Chart.yaml build/scripts/helm/publish-chart-yaml.sh --app-tag ${tag} --override-index "" # This modifies the build helm chart in-place with missing values of the # dependent charts, i.e. the values from the dependent helm charts which diff --git a/scripts/k8s/deployer.sh b/scripts/k8s/deployer.sh index a08bc423a..c97bbff2a 100755 --- a/scripts/k8s/deployer.sh +++ b/scripts/k8s/deployer.sh @@ -16,6 +16,7 @@ KUBECTL="kubectl" DOCKER="docker" HUGE_PAGES=1800 LABEL= +CLEANUP="false" SUDO=${SUDO:-"sudo"} help() { @@ -31,6 +32,7 @@ Options: --dry-run Don't do anything, just output steps. --hugepages Add 2MiB hugepages (Default: $HUGE_PAGES). --label Label worker nodes with the io-engine selector. + --cleanup Prior to starting, stops the running instance of the deployer. Command: start Start the k8s cluster. 
@@ -90,6 +92,9 @@ while [ "$#" -gt 0 ]; do --label) LABEL="true" shift;; + --cleanup) + CLEANUP="true" + shift;; --hugepages) shift test $# -lt 1 && die "Missing hugepage number" @@ -115,12 +120,14 @@ if [ -z "$COMMAND" ]; then die "No command specified!" fi -if [ "$COMMAND" = "stop" ]; then - if command -v nvme 2>dev/null; then +if [ "$COMMAND" = "stop" ] || [ "$CLEANUP" = "true" ]; then + if command -v nvme &>/dev/null; then $SUDO nvme disconnect-all fi $KIND delete cluster - exit 0 + if [ "$COMMAND" = "stop" ]; then + exit 0 + fi fi "$SCRIPT_DIR"/setup-io-prereq.sh --hugepages "$HUGE_PAGES" --nvme-tcp $DRY_RUN diff --git a/scripts/python/tag-chart.sh b/scripts/python/tag-chart.sh index 63dd74bff..4d158abf6 100755 --- a/scripts/python/tag-chart.sh +++ b/scripts/python/tag-chart.sh @@ -15,7 +15,7 @@ fi CHART_VERSION=${1#v} IMAGE_TAG="v$CHART_VERSION" -CHART_DIR="$ROOT_DIR/chart" +CHART_DIR=${CHART_DIR:-"$ROOT_DIR/chart"} # TODO: tests should work with its own copy of the chart. Shouldn't modify the chart. # chart/Chart.yaml yq_ibl " diff --git a/scripts/python/upgrade-test-helper.sh b/scripts/python/upgrade-test-helper.sh new file mode 100755 index 000000000..f93ec764a --- /dev/null +++ b/scripts/python/upgrade-test-helper.sh @@ -0,0 +1,112 @@ +#!/usr/bin/env bash + +# "Fork" the local chart into a separate location and build the container images and the plugin binary +# with a given TAG (or figures out the tag using $SCRIPT_DIR/build-upgrade-images.sh). +# This is used by the upgrade test + +SCRIPT_DIR="$(dirname "$(realpath "${BASH_SOURCE[0]:-"$0"}")")" +ROOT_DIR="$SCRIPT_DIR/../.." + +# Imports +source "$ROOT_DIR"/scripts/utils/log.sh + +set -euo pipefail + +TAG= +CHART_VNEXT= +CHART="$ROOT_DIR/chart" +CHART_FORK="false" +IMAGE_BUILD="false" +IMAGE_LOAD="false" + +# Print usage options for this script. +print_help() { + cat < The tag for the vnext chart. + Default: Automatically figured out. 
+  --chart <path>        The path where the chart will be copied to and modified for the vnext tag.
+                        Default: \$workspace_root/tests/bdd/chart-vnext.
+  --fork                Forks the vnext chart from "$CHART" into CHART_VNEXT.
+  --build               Builds the container images and the plugin binary.
+  --load                Loads the images into the kind cluster.
+
+Examples:
+  $(basename "${0}") --fork
+
+The kubectl-mayastor binary will be built at CHART_VNEXT/kubectl-plugin/bin/kubectl-mayastor
+EOF
+}
+
+# Parse args.
+while test $# -gt 0; do
+  arg="$1"
+  case "$arg" in
+    --tag)
+      # The value is the NEXT positional ($2); the loop-bottom shift then
+      # consumes it. Reading $1 here would store the literal flag "--tag".
+      TAG="$2"
+      shift;;
+    --chart)
+      CHART_VNEXT="$2"
+      shift;;
+    --fork)
+      CHART_FORK="true"
+      ;;
+    --build)
+      IMAGE_BUILD="true"
+      ;;
+    --load)
+      IMAGE_LOAD="true"
+      ;;
+    -h* | --help*)
+      print_help
+      exit 0
+      ;;
+    *)
+      print_help
+      log_fatal "unexpected argument '$arg'" 1
+      ;;
+  esac
+  shift
+done
+
+if [ "$(kubectl config current-context)" != "kind-kind" ]; then
+  log_fatal "Only Supported on Kind Clusters!"
+fi
+
+if [ -z "$TAG" ]; then
+  TAG="$("$SCRIPT_DIR"/generate-test-tag.sh)"
+fi
+
+if [ -z "$CHART_VNEXT" ]; then
+  CHART_VNEXT="$ROOT_DIR/tests/bdd/chart-vnext"
+fi
+KUBECTL_MAYASTOR="$CHART_VNEXT/kubectl-plugin/bin/kubectl-mayastor"
+
+# Ensure the chart vnext is created, copied from the original
+if [ "$CHART_FORK" = "true" ]; then
+  mkdir -p "$CHART_VNEXT"
+  rm -r "${CHART_VNEXT:?}"/*
+  cp -r "$CHART/." "${CHART_VNEXT:?}"
+
+  # Tag the vnext chart
+  CHART_DIR="$CHART_VNEXT" "$SCRIPT_DIR"/tag-chart.sh "$TAG"
+fi
+
+# Build the vnext images and kubectl-binary (in debug mode)
+if [ "$IMAGE_BUILD" = "true" ]; then
+  RUSTFLAGS="-C debuginfo=0 -C strip=debuginfo" "$ROOT_DIR"/scripts/release.sh --tag "$TAG" --build-binary-out "$CHART_VNEXT" --no-static-linking --skip-publish --debug
+
+  # Ensure binary is on the correct version
+  PLUGIN_VERSION="$($KUBECTL_MAYASTOR --version)"
+  if [[ ! "$PLUGIN_VERSION" =~ ^Kubectl\ Plugin\ \(kubectl-mayastor\).*\($TAG\+0\)$ ]]; then
+    log_fatal "The built kubectl-plugin reports version $PLUGIN_VERSION but we want $TAG"
+  fi
+fi
+
+# Load the images into the kind cluster
+if [ "$IMAGE_LOAD" = "true" ]; then
+  "$ROOT_DIR"/scripts/k8s/load-images-to-kind.sh --tag "$TAG" --trim-debug-suffix
+fi
diff --git a/tests/bdd/common/__init__.py b/tests/bdd/common/__init__.py
new file mode 100644
index 000000000..0279e1598
--- /dev/null
+++ b/tests/bdd/common/__init__.py
@@ -0,0 +1,62 @@
+import logging
+import os
+import subprocess
+
+logger = logging.getLogger(__name__)
+
+
+def root_dir():
+    file_path = os.path.abspath(__file__)
+    return file_path.split("tests/bdd")[0]
+
+
+def chart_vnext():
+    vnext = os.getenv("CHART_VNEXT")
+    if vnext is not None:
+        return vnext
+    return os.path.join(root_dir(), "./tests/bdd/chart-vnext")
+
+
+def run(
+    command: str,
+    args: list[str] = None,
+    absolute=False,
+    capture_output=True,
+    log_run=True,
+    **kwargs,
+):
+    if absolute:
+        command = [command]
+    else:
+        command = [os.path.join(root_dir(), command)]
+    if args is not None:
+        command.extend(args)
+    if log_run:
+        logger.info(f"Running '{command}'")
+    else:
+        logger.debug(f"Running '{command}'")
+    try:
+        result = subprocess.run(
+            command, capture_output=capture_output, check=True, text=True, **kwargs
+        )
+        logger.debug(
+            f"Command '{command}' completed with:\nStdErr Output: {result.stderr}\nStdOut Output: {result.stdout}"
+        )
+        return result.stdout.strip()
+
+    except subprocess.CalledProcessError as e:
+        logger.error(
+            f"Command '{command}' failed with exit code {e.returncode}\nStdErr Output: {e.stderr}\nStdOut Output: {e.stdout}"
+        )
+        raise e
+
+    except Exception as e:
+        logger.error(f"An unexpected error occurred: {e}")
+        raise e
+
+
+def env_cleanup():
+    clean = os.getenv("CLEAN")
+    if clean is not None and clean.lower() in ("no", "false", "f", "0"):
+        return False
+    return True
diff --git a/tests/bdd/common/helm.py b/tests/bdd/common/helm.py
index 1a3cd1209..6f1afce76 100644
--- a/tests/bdd/common/helm.py
+++ b/tests/bdd/common/helm.py
@@ -5,8 +5,9 @@
 from enum import Enum
 from shutil import which
 
+import common
+from common import root_dir, run
 from common.environment import get_env
-from common.repo import root_dir, run_script
 
 logger = logging.getLogger(__name__)
 
@@ -14,25 +15,10 @@
 def repo_ls():
-    try:
-        result = subprocess.run(
-            [helm_bin, "repo", "ls", "-o", "json"],
-            capture_output=True,
-            check=True,
-            text=True,
-        )
-        return json.loads(result.stdout.strip())
-
-    except subprocess.CalledProcessError as e:
-        logger.error(
-            f"Error: command 'helm repo ls -o json' failed with exit code {e.returncode}"
-        )
-        logger.error(f"Error Output: {e.stderr}")
-        return None
-
-    except Exception as e:
-        logger.error(f"An unexpected error occurred: {e}")
-        return None
+    result = common.run(
+        helm_bin, ["repo", "ls", "-o", "json"],
+    )
+    return json.loads(result)
 
 
 def repo_add_mayastor():
@@ -42,43 +28,29 @@
         if r["url"] == "https://openebs.github.io/mayastor-extensions":
             return r["name"]
 
-    try:
-        repo_name = "mayastor"
-        subprocess.run(
-            [
-                helm_bin,
-                "repo",
-                "add",
-                repo_name,
-                "https://openebs.github.io/mayastor-extensions",
-            ],
-            capture_output=True,
-            check=True,
-            text=True,
-        )
-
-        subprocess.run(
-            [
-                helm_bin,
-                "repo",
-                "update",
-            ],
-            capture_output=True,
-            check=True,
-            text=True,
-        )
-        return repo_name
-
-    except subprocess.CalledProcessError as e:
-        logger.error(
-            f"Error: command 'helm repo add mayastor https://openebs.github.io/mayastor-extensions' failed with exit code {e.returncode}"
-        )
-        logger.error(f"Error Output: {e.stderr}")
-        return None
+    repo_name = "mayastor"
+    common.run(helm_bin, [
+        "repo",
+        "add",
+        repo_name,
+        "https://openebs.github.io/mayastor-extensions",
+    ])
 
-    except Exception as e:
-        logger.error(f"An unexpected error occurred: {e}")
-        return None
+    subprocess.run(
+        [
+            helm_bin,
+            "repo",
+            "update",
+        ],
+ capture_output=True, + check=True, + text=True, + ) + common.run(helm_bin, [ + "repo", + "update", + ]) + return repo_name def latest_chart_so_far(version=None): @@ -90,38 +62,21 @@ def latest_chart_so_far(version=None): version = v repo_name = repo_add_mayastor() - assert repo_name is not None - helm_search_command = [ + stdout = common.run( helm_bin, - "search", - "repo", - repo_name + "/mayastor", - "--version", - "<" + version, - "-o", - "json", - ] - try: - result = subprocess.run( - helm_search_command, - capture_output=True, - check=True, - text=True, - ) - result_chart_info = json.loads(result.stdout.strip()) - return result_chart_info[0]["version"] - - except subprocess.CalledProcessError as e: - logger.error( - f"Error: command {helm_search_command} failed with exit code {e.returncode}" - ) - logger.error(f"Error Output: {e.stderr}") - return None - - except Exception as e: - logger.error(f"An unexpected error occurred: {e}") - return None + [ + "search", + "repo", + repo_name + "/mayastor", + "--version", + "<" + version, + "-o", + "json", + ] + ) + result_chart_info = json.loads(stdout) + return result_chart_info[0]["version"] class ChartSource(Enum): @@ -152,8 +107,7 @@ def __init__(self): self.namespace = "mayastor" def get_metadata_mayastor(self): - command = [ - helm_bin, + args = [ "get", "metadata", "mayastor", @@ -162,25 +116,7 @@ def get_metadata_mayastor(self): "-o", "json", ] - try: - result = subprocess.run( - command, - capture_output=True, - check=True, - text=True, - ) - return json.loads(result.stdout.strip()) - - except subprocess.CalledProcessError as e: - logger.error( - f"Error: command '{command}' failed with exit code {e.returncode}" - ) - logger.error(f"Error Output: {e.stderr}") - raise e - - except Exception as e: - logger.error(f"An unexpected error occurred: {e}") - raise e + return json.loads(common.run(helm_bin, args)) def get_deployed(self, release: str): """ @@ -192,7 +128,6 @@ def get_deployed(self, release: str): str: A 
newline-separated string of deployed release names, or None if an error occurs. """ args = [ - helm_bin, "ls", "-n", self.namespace, @@ -200,25 +135,7 @@ def get_deployed(self, release: str): f"--filter=^{release}$", "-o=json", ] - try: - result = subprocess.run( - args, - capture_output=True, - check=True, - text=True, - ) - return result.stdout.strip() - - except subprocess.CalledProcessError as e: - logger.error( - f"command '{args}' failed with exit code {e.returncode}" - ) - logger.error(f"Error Output: {e.stderr}") - raise e - - except Exception as e: - logger.error(f"An unexpected error occurred: {e}") - raise e + return common.run(helm_bin, args) def install_mayastor(self, source: ChartSource, version=None): output_json = json.loads(self.get_deployed("mayastor")) @@ -227,7 +144,9 @@ def install_mayastor(self, source: ChartSource, version=None): logger.warning( f"Helm release 'mayastor' already exists in the 'mayastor' namespace @ v{current_version}." ) - assert current_version == version, f"Wanted to install {version}, but {current_version} already installed" + assert ( + current_version == version + ), f"Wanted to install {version}, but {current_version} already installed" return install_command = [] @@ -266,4 +185,4 @@ def install_mayastor(self, source: ChartSource, version=None): def generate_test_tag(): - return run_script("scripts/python/generate-test-tag.sh") + return run("scripts/python/generate-test-tag.sh") diff --git a/tests/bdd/common/k8s_deployer.py b/tests/bdd/common/k8s_deployer.py new file mode 100644 index 000000000..d4bdac7da --- /dev/null +++ b/tests/bdd/common/k8s_deployer.py @@ -0,0 +1,48 @@ +import logging +import os + +import common +from common import run + +logger = logging.getLogger(__name__) + + +def deployer(): + return "./scripts/k8s/deployer.sh" + + +def start(workers: int): + if carry_on(): + try: + common.run( + "helm", + [ + "uninstall", + "mayastor", + "-n=mayastor", + "--ignore-not-found", + "--wait", + ], + absolute=True, 
+ ) + common.run( + "kubectl", ["delete", "jobs", "-n=mayastor", "--all"], absolute=True + ) + return + except: + pass + + run(deployer(), ["start", "--label", "--cleanup", f"--workers={workers}"]) + + +def stop(): + if common.env_cleanup(): + run(deployer(), ["stop"]) + + +def carry_on(): + clean = os.getenv("REUSE_CLUSTER") + if clean is not None and clean.lower() in ("yes", "true", "y", "1"): + cluster = common.run("kind", ["get", "clusters"], absolute=True, log_run=False) + return cluster == "kind" + return False diff --git a/tests/bdd/common/kubectl_mayastor.py b/tests/bdd/common/kubectl_mayastor.py index 7caad8ce5..ad655ef03 100644 --- a/tests/bdd/common/kubectl_mayastor.py +++ b/tests/bdd/common/kubectl_mayastor.py @@ -1,45 +1,19 @@ import logging import os -import subprocess -from shutil import which -from common.environment import get_env +import common logger = logging.getLogger(__name__) -def get_bin_path(): - bins = get_env("TEST_DIR") - if bins: - return os.path.join(bins, "kubectl-plugin/bin/kubectl-mayastor") - bin = which("kubectl-mayastor") - if bin is None: - msg = f"Failed to find kubectl-mayastor binary" - logging.error(msg) - raise Exception(msg) - return bin - - -def kubectl_mayastor(args: list[str]): - command = [get_bin_path()] - command.extend(args) - logger.info(f"Running kubectl-mayastor: {command}") - - try: - result = subprocess.run( - command, - capture_output=True, - check=True, - text=True, - ) - logger.debug(f"Error Output: {result.stderr}\nOut Output: {result.stdout}") - return result.stdout.strip() - - except subprocess.CalledProcessError as e: - logger.error( - f"Error: command '{command}' failed with exit code {e.returncode}\nError Output: {e.stderr}\nOut Output: {e.stdout}") - raise e - - except Exception as e: - logger.error(f"An unexpected error occurred whilst running kubectl-mayastor: {e}") - raise e +def plugin_vnext(): + chart_vnext = common.chart_vnext() + return os.path.join(chart_vnext, 
"kubectl-plugin/bin/kubectl-mayastor") + + +def upgrade_vnext(): + run(["upgrade"], log_run=True) + + +def run(args: list[str], log_run=False): + return common.run(plugin_vnext(), args, log_run=log_run) diff --git a/tests/bdd/features/test_upgrade.py b/tests/bdd/features/test_upgrade.py index e65d948ab..8799e3cb7 100644 --- a/tests/bdd/features/test_upgrade.py +++ b/tests/bdd/features/test_upgrade.py @@ -1,12 +1,14 @@ """Upgrade feature tests.""" + import json import logging +import common import pytest - +from common import k8s_deployer from common.environment import get_env from common.helm import ChartSource, HelmReleaseClient, latest_chart_so_far -from common.kubectl_mayastor import kubectl_mayastor +from common.kubectl_mayastor import upgrade_vnext from common.repo import run_script from kubernetes import client, config from pytest_bdd import given, scenario, then, when @@ -22,26 +24,56 @@ def test_upgrade_to_vnext(): """Upgrading to the local chart as v-next.""" +@given("a 2-worker node kind kubernetes cluster") +def _(): + """a 2-worker node kind kubernetes cluster.""" + k8s_deployer.start(workers=2) + yield + k8s_deployer.stop() + + @given("the latest mayastor helm chart is installed") def the_latest_mayastor_is_installed(latest_chart_version): """the latest mayastor helm chart is installed.""" helm.install_mayastor(ChartSource.HOSTED, latest_chart_version) -@when("a kubectl mayastor upgrade command is issued") -def a_kubectl_mayastor_upgrade_command_is_issued(): - """a kubectl mayastor upgrade command is issued.""" - kubectl_mayastor(["upgrade"]) - - -@then("all io-engine nodes shall be listed by kubectl-mayastor") +@given("all io-engine nodes shall be listed by kubectl-mayastor") def all_io_engine_nodes_shall_be_listed(latest_chart_version): """all io-engine nodes shall be listed by kubectl-mayastor.""" wait_rest_nodes_version(latest_chart_version) -@then("eventually the installed chart should be upgraded to the kubectl mayastor plugin's version") -def 
eventually_the_installed_chart_should_be_upgraded_to_the_kubectl_mayastor_plugins_version(latest_chart_version): +@given("a v-next chart is prepared") +def _(): + """a v-next chart is prepared.""" + common.run("./scripts/python/upgrade-test-helper.sh", ["--fork"]) + + +@given("the images and plugin are built for v-next") +def _(): + """the images and plugin are built for v-next.""" + common.run("./scripts/python/upgrade-test-helper.sh", ["--build"]) + + +@given("the images are loadable from the cluster") +def _(): + """the images are loadable from the cluster.""" + common.run("./scripts/python/upgrade-test-helper.sh", ["--load"]) + + +@when("a kubectl mayastor upgrade command is issued") +def a_kubectl_mayastor_upgrade_command_is_issued(): + """a kubectl mayastor upgrade command is issued.""" + upgrade_vnext() + + +@then( + "eventually the installed chart should be upgraded to the kubectl mayastor plugin's version" +) +def eventually_the_installed_chart_should_be_upgraded_to_the_kubectl_mayastor_plugins_version( + latest_chart_version, +): """the installed chart should be upgraded to the kubectl mayastor plugin's version.""" upgrade_target_version = get_env("UPGRADE_TARGET_VERSION") @@ -87,8 +119,10 @@ def data_plane_upgrade_succeeded(not_target_tag): ) io_engines = list( filter( - lambda pod: any(container.name == 'io-engine' for container in pod.spec.containers), - pods.items + lambda pod: any( + container.name == "io-engine" for container in pod.spec.containers + ), + pods.items, ) ) if len(io_engines) == 0: @@ -137,22 +171,34 @@ def latest_chart_version(): ) def wait_rest_nodes_version(version, match=True): config.load_kube_config() - nodes = client.CoreV1Api().list_node( - label_selector="openebs.io/engine=mayastor" - ) + nodes = client.CoreV1Api().list_node(label_selector="openebs.io/engine=mayastor") k8s_nodes = len(nodes.items) - rest_nodes = json.loads(kubectl_mayastor(["get", "nodes", "-o=json"])) + rest_nodes = json.loads( + 
common.kubectl_mayastor.run(["get", "nodes", "-o=json"], log_run=True) + ) rest_io_engines = len(rest_nodes) - assert k8s_nodes == rest_io_engines, f"Found {k8s_nodes} k8s nodes with the io-engine label, but only {rest_io_engines} nodes from kubectl-mayastor" + assert ( + k8s_nodes == rest_io_engines + ), f"Found {k8s_nodes} k8s nodes with the io-engine label, but only {rest_io_engines} nodes from kubectl-mayastor" - assert all(node["spec"]["version"] == node["state"]["version"] for node in rest_nodes) + assert all( + node["spec"]["version"] == node["state"]["version"] for node in rest_nodes + ) version_stripped = version.strip("v") if match: - all_on_version = all(node["spec"]["version"].strip("v") == version_stripped for node in rest_nodes) + all_on_version = all( + node["spec"]["version"].strip("v") == version_stripped + for node in rest_nodes + ) assert all_on_version, f"Not all nodes on the version v{version_stripped}" else: - all_not_on_version = all(node["spec"]["version"].strip("v") != version_stripped for node in rest_nodes) - assert all_not_on_version, f"Some of the nodes are still on the version v{version_stripped}" + all_not_on_version = all( + node["spec"]["version"].strip("v") != version_stripped + for node in rest_nodes + ) + assert ( + all_not_on_version + ), f"Some of the nodes are still on the version v{version_stripped}" diff --git a/tests/bdd/features/upgrade.feature b/tests/bdd/features/upgrade.feature index 3ec41f8b7..42945847e 100644 --- a/tests/bdd/features/upgrade.feature +++ b/tests/bdd/features/upgrade.feature @@ -1,8 +1,12 @@ Feature: Upgrade Background: - Given the latest mayastor helm chart is installed - Then all io-engine nodes shall be listed by kubectl-mayastor + Given a 2-worker node kind kubernetes cluster + And a v-next chart is prepared + And the images and plugin are built for v-next + And the images are loadable from the cluster + And the latest mayastor helm chart is installed + And all io-engine nodes shall be listed by 
kubectl-mayastor Scenario: Upgrading to the local chart as v-next When a kubectl mayastor upgrade command is issued