From d2ec209dc1b081e513447fad615371a6e9f274a2 Mon Sep 17 00:00:00 2001
From: Tuomo Tanskanen
Date: Thu, 12 Dec 2024 14:24:00 +0200
Subject: [PATCH] cleanup and reformat bash syntax in 04 script

Clean up and reformat bash syntax in the 04 script.

This also requires fixes to lib/common.sh, but to keep the PR
contained, I took the utility functions out of it and put them into a
new lib/utils.sh instead.

Also fixed some leftover issues I missed the first time around in the
01, 02 and 03 scripts, for consistency.

Signed-off-by: Tuomo Tanskanen
---
 01_prepare_host.sh                     |  12 +-
 02_configure_host.sh                   |  50 +--
 03_launch_mgmt_cluster.sh              |  46 +--
 04_verify.sh                           | 421 ++++++++++++-------------
 cluster_cleanup.sh                     |   4 +-
 config_example.sh                      |   4 +-
 host_cleanup.sh                        |  13 +-
 lib/common.sh                          | 155 +--------
 lib/utils.sh                           | 139 ++++++++
 openstackclient.sh                     |   2 +-
 tests/test.sh                          |  27 +-
 ubuntu_bridge_network_configuration.sh |   8 +-
 vbmc.sh                                |   4 +-
 13 files changed, 431 insertions(+), 454 deletions(-)
 create mode 100644 lib/utils.sh

diff --git a/01_prepare_host.sh b/01_prepare_host.sh
index ac3bf1fb4..bf34b95ae 100755
--- a/01_prepare_host.sh
+++ b/01_prepare_host.sh
@@ -3,9 +3,9 @@
 set -eux
 
 # shellcheck disable=SC1091
-source lib/logging.sh
+. lib/logging.sh
 # shellcheck disable=SC1091
-source lib/common.sh
+. lib/common.sh
 
 if [[ "${EUID}" -eq 0 ]]; then
     echo "Please run 'make' as a non-root user"
@@ -28,7 +28,7 @@ if [[ "${OS}" = "ubuntu" ]]; then
         ubuntu24) sudo update-alternatives --install /usr/bin/python python /usr/bin/python3.12 1 ;;
         *) ;;
-        esac
+    esac
 
 elif [[ "${OS}" = "centos" ]] || [[ "${OS}" = "rhel" ]]; then
     sudo dnf upgrade -y --nobest
 
@@ -53,14 +53,14 @@ fi
 # NOTE(tuminoid) lib/releases.sh must be after the jq and python installation
 # TODO: fix all of the lib/ scripts not to actually run code, but only define functions
 # shellcheck disable=SC1091
-source lib/releases.sh
+. lib/releases.sh
 # shellcheck disable=SC1091
-source lib/download.sh
+. lib/download.sh
 
 # NOTE(fmuyassarov) Make sure to source before runnig install-package-playbook.yml
 # because there are some vars exported in network.sh and used by
 # install-package-playbook.yml.
 # shellcheck disable=SC1091
-source lib/network.sh
+. lib/network.sh
 # NOTE(dtantsur): system-site-packages is required because of certain Python
 # packages that cannot be pip-installed (firewalld, selinux, etc).
diff --git a/02_configure_host.sh b/02_configure_host.sh
index 64156c1bf..9f6da44e4 100755
--- a/02_configure_host.sh
+++ b/02_configure_host.sh
@@ -1,17 +1,19 @@
 #!/usr/bin/env bash
+
 set -eux
 
 # shellcheck disable=SC1091
-source lib/logging.sh
+. lib/logging.sh
+# shellcheck disable=SC1091
+. lib/common.sh
 # shellcheck disable=SC1091
-source lib/common.sh
+. lib/network.sh
 # shellcheck disable=SC1091
-source lib/network.sh
+. lib/releases.sh
 # shellcheck disable=SC1091
-source lib/releases.sh
-# pre-pull node and container images
+. lib/image_prepull.sh
 # shellcheck disable=SC1091
-source lib/image_prepull.sh
+. 
lib/utils.sh # cleanup ci config file if it exists from earlier run rm -f "${CI_CONFIG_FILE}" @@ -30,7 +32,7 @@ elif grep -q svm /proc/cpuinfo; then fi # Clean, copy and extract local IPA -if [[ "${USE_LOCAL_IPA}" == "true" ]]; then +if [[ "${USE_LOCAL_IPA}" = "true" ]]; then sudo rm -f "${IRONIC_DATA_DIR}/html/images/ironic-python-agent*" sudo cp "${LOCAL_IPA_PATH}/ironic-python-agent.tar" "${IRONIC_DATA_DIR}/html/images" sudo tar --extract --file "${IRONIC_DATA_DIR}/html/images/ironic-python-agent.tar" \ @@ -94,7 +96,7 @@ init_minikube() fi } -if [[ "${EPHEMERAL_CLUSTER}" == "minikube" ]]; then +if [[ "${EPHEMERAL_CLUSTER}" = "minikube" ]]; then init_minikube fi @@ -137,19 +139,19 @@ EOF sudo virsh pool-autostart default fi -if [[ "${OS}" == "ubuntu" ]]; then +if [[ "${OS}" = "ubuntu" ]]; then # source ubuntu_bridge_network_configuration.sh # shellcheck disable=SC1091 source ubuntu_bridge_network_configuration.sh # shellcheck disable=SC1091 source disable_apparmor_driver_libvirtd.sh else - if [[ "${MANAGE_PRO_BRIDGE}" == "y" ]]; then + if [[ "${MANAGE_PRO_BRIDGE}" = "y" ]]; then # Adding an IP address in the libvirt definition for this network results in # dnsmasq being run, we don't want that as we have our own dnsmasq, so set # the IP address here if [[ ! -e /etc/NetworkManager/system-connections/provisioning.nmconnection ]]; then - if [[ "${BARE_METAL_PROVISIONER_SUBNET_IPV6_ONLY}" == "true" ]]; then + if [[ "${BARE_METAL_PROVISIONER_SUBNET_IPV6_ONLY}" = "true" ]]; then sudo tee -a /etc/NetworkManager/system-connections/provisioning.nmconnection <> "${WORKING_DIR}/bmhosts_crs.yaml" - i=$((i+1)) + i=$((i + 1)) done } @@ -412,7 +414,9 @@ apply_bm_hosts() list_nodes | make_bm_hosts if [[ -n "$(list_nodes)" ]]; then echo "bmhosts_crs.yaml is applying" - while ! kubectl apply -f "${WORKING_DIR}/bmhosts_crs.yaml" -n "${namespace}" &>/dev/null; do + # this is most useless loop ever - if it partially fails, it'll never be + # able to recover anyways... but keeping it for some edge cases where it may help + while ! 
kubectl apply -f "${WORKING_DIR}/bmhosts_crs.yaml" -n "${namespace}" >/dev/null; do sleep 3 done echo "bmhosts_crs.yaml is successfully applied" @@ -687,23 +691,24 @@ build_ipxe_firmware() if [[ "${IPXE_ENABLE_TLS}" = "true" ]]; then export IPXE_ENABLE_TLS_CENV_ARG="IPXE_ENABLE_TLS=true" - certs_mounts+=("-v ${IPXE_CACERT_FILE}:/certs/ca/ipxe/tls.crt") - certs_mounts+=("-v ${IPXE_CERT_FILE}:/certs/ipxe/tls.crt") - certs_mounts+=("-v ${IPXE_KEY_FILE}:/certs/ipxe/tls.key ") + certs_mounts+=(-v "${IPXE_CACERT_FILE}:/certs/ca/ipxe/tls.crt") + certs_mounts+=(-v "${IPXE_CERT_FILE}:/certs/ipxe/tls.crt") + certs_mounts+=(-v "${IPXE_KEY_FILE}:/certs/ipxe/tls.key") fi if [[ "${IPXE_ENABLE_IPV6}" = "true" ]]; then export IPXE_ENABLE_IPV6_CENV_ARG="IPXE_ENABLE_IPV6=true" fi - # shellcheck disable=SC2086,SC2068 + # shellcheck disable=SC2086 sudo "${CONTAINER_RUNTIME}" run \ --net host \ - --name ipxe-builder ${POD_NAME} \ + --name ipxe-builder \ + ${POD_NAME} \ -e "${IPXE_ENABLE_TLS_CENV_ARG}" \ -e "${IPXE_ENABLE_IPV6_CENV_ARG}" \ -e "IRONIC_IP=${IRONIC_HOST_IP}" \ - ${certs_mounts[@]} \ + "${certs_mounts[@]}" \ -v "${IRONIC_DATA_DIR}":/shared \ "${ipxe_builder_image}" } @@ -731,8 +736,8 @@ patch_clusterctl launch_cluster_api_provider_metal3 BMO_NAME_PREFIX="${NAMEPREFIX}" launch_baremetal_operator -launch_ironic_standalone_operator if [[ "${USE_IRSO}" = true ]]; then + launch_ironic_standalone_operator launch_ironic_via_irso else launch_ironic @@ -757,7 +762,6 @@ if [[ "${SKIP_APPLY_BMH:-false}" = "true" ]]; then list_nodes | make_bm_hosts popd else - # this is coming from lib/common.sh # shellcheck disable=SC2153 apply_bm_hosts "${NAMESPACE}" fi diff --git a/04_verify.sh b/04_verify.sh index e0ffebd5e..2f4f6b870 100755 --- a/04_verify.sh +++ b/04_verify.sh @@ -4,308 +4,285 @@ # code called via iterate() to false trigger SC2317 # shellcheck disable=SC2317 +# do not set -e, we want to process all failures, not just one set -u +export FAILS=0 + +# shellcheck disable=SC1091 +. lib/logging.sh # shellcheck disable=SC1091 -source lib/logging.sh +. lib/common.sh # shellcheck disable=SC1091 -source lib/common.sh +. lib/utils.sh # shellcheck disable=SC1091 -source lib/network.sh +. lib/network.sh # shellcheck disable=SC1091 -source lib/images.sh +. 
lib/images.sh + +BMO_RUN_LOCAL="${BMO_RUN_LOCAL:-false}" +CAPM3_RUN_LOCAL="${CAPM3_RUN_LOCAL:-false}" -if [ "${EPHEMERAL_CLUSTER}" == "tilt" ]; then - exit 0 +KUBECONFIG="${KUBECONFIG:-${HOME}/.kube/config}" +declare -a EXPTD_V1ALPHAX_V1BETAX_CRDS=( + clusters.cluster.x-k8s.io + kubeadmconfigs.bootstrap.cluster.x-k8s.io + kubeadmconfigtemplates.bootstrap.cluster.x-k8s.io + machinedeployments.cluster.x-k8s.io + machines.cluster.x-k8s.io + machinesets.cluster.x-k8s.io + baremetalhosts.metal3.io +) +declare -a EXPTD_DEPLOYMENTS=( + capm3-system:capm3-controller-manager + capi-system:capi-controller-manager + capi-kubeadm-bootstrap-system:capi-kubeadm-bootstrap-controller-manager + capi-kubeadm-control-plane-system:capi-kubeadm-control-plane-controller-manager + baremetal-operator-system:baremetal-operator-controller-manager +) +declare -a EXPTD_RS=( + cluster.x-k8s.io/provider:infrastructure-metal3:capm3-system:2 + cluster.x-k8s.io/provider:cluster-api:capi-system:1 + cluster.x-k8s.io/provider:bootstrap-kubeadm:capi-kubeadm-bootstrap-system:1 + cluster.x-k8s.io/provider:control-plane-kubeadm:capi-kubeadm-control-plane-system:1 +) +declare -a BRIDGES=( + provisioning + external +) +declare -a EXPTD_CONTAINERS=( + httpd-infra + registry + vbmc + sushy-tools +) + +if [[ "${EPHEMERAL_CLUSTER}" = "tilt" ]]; then + exit 0 fi -check_bm_hosts() { - local FAILS_CHECK="${FAILS}" - local NAME ADDRESS USER PASSWORD MAC CRED_NAME CRED_SECRET - local BARE_METAL_HOSTS BARE_METAL_HOST BARE_METAL_VMS BARE_METAL_VMNAME BARE_METAL_VM_IFACES - NAME="${1}" - ADDRESS="${2}" - USER="${3}" - PASSWORD="${4}" - MAC="${5}" - BARE_METAL_HOSTS="$(kubectl --kubeconfig "${KUBECONFIG}" get baremetalhosts\ - -n metal3 -o json)" - BARE_METAL_VMS="$(sudo virsh list --all)" - BARE_METAL_VMNAME="${NAME//-/_}" +check_bm_hosts() +{ + local name="$1" + local address="$2" + local user="$3" + local password="$4" + local mac="$5" + + local cred_name cred_secret + local bare_metal_hosts bare_metal_host bare_metal_vms bare_metal_vmname bare_metal_vm_ifaces + + bare_metal_hosts="$(kubectl --kubeconfig "${KUBECONFIG}" get baremetalhosts \ + -n metal3 -o json)" + bare_metal_vms="$(sudo virsh list --all)" + bare_metal_vmname="${name//-/_}" # Skip BMH verification if not applied if [[ "${SKIP_APPLY_BMH:-false}" != "true" ]]; then - # Verify BM host exists - RESULT_STR="${NAME} Baremetalhost exist" - echo "${BARE_METAL_HOSTS}" | grep -w "${NAME}" > /dev/null - process_status $? - - BARE_METAL_HOST="$(echo "${BARE_METAL_HOSTS}" | \ - jq ' .items[] | select(.metadata.name=="'"${NAME}"'" )')" - - # Verify addresses of the host - RESULT_STR="${NAME} Baremetalhost address correct" - equals "$(echo "${BARE_METAL_HOST}" | jq -r '.spec.bmc.address')" "${ADDRESS}" - - RESULT_STR="${NAME} Baremetalhost mac address correct" - equals "$(echo "${BARE_METAL_HOST}" | jq -r '.spec.bootMACAddress')" \ - "${MAC}" - - # Verify BM host status - RESULT_STR="${NAME} Baremetalhost status OK" - equals "$(echo "${BARE_METAL_HOST}" | jq -r '.status.operationalStatus')" \ - "OK" - - # Verify credentials exist - RESULT_STR="${NAME} Baremetalhost credentials secret exist" - CRED_NAME="$(echo "${BARE_METAL_HOST}" | jq -r '.spec.bmc.credentialsName')" - CRED_SECRET="$(kubectl get secret "${CRED_NAME}" -n metal3 -o json | \ - jq '.data')" - process_status $? 
- - # Verify credentials correct - RESULT_STR="${NAME} Baremetalhost password correct" - equals "$(echo "${CRED_SECRET}" | jq -r '.password' | \ - base64 --decode)" "${PASSWORD}" - - RESULT_STR="${NAME} Baremetalhost user correct" - equals "$(echo "${CRED_SECRET}" | jq -r '.username' | \ - base64 --decode)" "${USER}" + # Verify BM host exists + echo "${bare_metal_hosts}" | grep -w "${name}" > /dev/null + process_status $? "${name} Baremetalhost exist" + + bare_metal_host="$(echo "${bare_metal_hosts}" | \ + jq ' .items[] | select(.metadata.name=="'"${name}"'" )')" + + # Verify addresses of the host + equals "$(echo "${bare_metal_host}" | jq -r '.spec.bmc.address')" "${address}" \ + "${name} Baremetalhost address correct" + + equals "$(echo "${bare_metal_host}" | jq -r '.spec.bootMACAddress')" \ + "${mac}" "${name} Baremetalhost mac address correct" + + # Verify BM host status + equals "$(echo "${bare_metal_host}" | jq -r '.status.operationalStatus')" \ + "OK" "${name} Baremetalhost status OK" + + # Verify credentials exist + cred_name="$(echo "${bare_metal_host}" | jq -r '.spec.bmc.credentialsName')" + cred_secret="$(kubectl get secret "${cred_name}" -n metal3 -o json | \ + jq '.data')" + process_status $? "${name} Baremetalhost credentials secret exist" + + # Verify credentials correct + equals "$(echo "${cred_secret}" | jq -r '.password' | \ + base64 --decode)" "${password}" "${name} Baremetalhost password correct" + + equals "$(echo "${cred_secret}" | jq -r '.username' | \ + base64 --decode)" "${user}" "${name} Baremetalhost user correct" fi - + # Verify the VM was created - RESULT_STR="${NAME} Baremetalhost VM exist" - echo "${BARE_METAL_VMS} "| grep -w "${BARE_METAL_VMNAME}" > /dev/null - process_status $? + echo "${bare_metal_vms} "| grep -w "${bare_metal_vmname}" > /dev/null + process_status $? "${name} Baremetalhost VM exist" #Verify the VMs interfaces - BARE_METAL_VM_IFACES="$(sudo virsh domiflist "${BARE_METAL_VMNAME}")" - for bridge in ${BRIDGES}; do - RESULT_STR="${NAME} Baremetalhost VM interface ${bridge} exist" - echo "${BARE_METAL_VM_IFACES}" | grep -w "${bridge}" > /dev/null - process_status $? + bare_metal_vm_ifaces="$(sudo virsh domiflist "${bare_metal_vmname}")" + for bridge in "${BRIDGES[@]}"; do + echo "${bare_metal_vm_ifaces}" | grep -w "${bridge}" > /dev/null + process_status $? "${name} Baremetalhost VM interface ${bridge} exist" done # Skip introspection verification in no BMH applied if [[ "${SKIP_APPLY_BMH:-false}" != "true" ]]; then - #Verify the introspection completed successfully - RESULT_STR="${NAME} Baremetalhost introspecting completed" - is_in "$(echo "${BARE_METAL_HOST}" | jq -r '.status.provisioning.state')" \ - "ready available" + # Verify the introspection completed successfully + is_in "$(echo "${bare_metal_host}" | jq -r '.status.provisioning.state')" \ + "ready available" "${name} Baremetalhost introspecting completed" fi echo "" - - return "$((FAILS-FAILS_CHECK))" } - # Verify that a resource exists in a type -check_k8s_entity() { - local FAILS_CHECK="${FAILS}" - local ENTITY - local TYPE="${1}" - shift - for name in "${@}"; do - # Check entity exists - RESULT_STR="${TYPE} ${name} created" - NS="$(echo "${name}" | cut -d ':' -f1)" - NAME="$(echo "${name}" | cut -d ':' -f2)" - ENTITY="$(kubectl --kubeconfig "${KUBECONFIG}" get "${TYPE}" "${NAME}" \ - -n "${NS}" -o json)" - process_status $? 
- - # Check the replicabaremetalclusters - if [[ "${BMO_RUN_LOCAL}" != true ]] && [[ "${CAPM3_RUN_LOCAL}" != true ]] - then - RESULT_STR="${name} ${TYPE} replicas correct" - equals "$(echo "${ENTITY}" | jq -r '.status.readyReplicas')" \ - "$(echo "${ENTITY}" | jq -r '.status.replicas')" - fi - done - - return "$((FAILS-FAILS_CHECK))" -} - - -# Verify that a resource exists in a type -check_k8s_rs() { - local FAILS_CHECK="${FAILS}" - local ENTITY - for name in "${@}"; do - # Check entity exists - LABEL="$(echo "$name" | cut -f1 -d:)" - NAME="$(echo "$name" | cut -f2 -d:)" - NS="$(echo "${name}" | cut -d ':' -f3)" - NB="$(echo "${name}" | cut -d ':' -f4)" - ENTITIES="$(kubectl --kubeconfig "${KUBECONFIG}" get replicasets \ - -l "${LABEL}"="${NAME}" -n "${NS}" -o json)" - NB_ENTITIES="$(echo "$ENTITIES" | jq -r '.items | length')" - RESULT_STR="Replica sets with label ${LABEL}=${NAME} created" - equals "${NB_ENTITIES}" "${NB}" - - # Check the replicas - if [[ "${BMO_RUN_LOCAL}" != true ]] && [[ "${CAPM3_RUN_LOCAL}" != true ]] - then - for i in $(seq 0 $((NB_ENTITIES-1))); do - RESULT_STR="${NAME} replicas correct for replica set ${i}" - equals "$(echo "${ENTITIES}" | jq -r ".items[${i}].status.readyReplicas")" \ - "$(echo "${ENTITIES}" | jq -r ".items[${i}].status.replicas")" - done - fi - done - - return "$((FAILS-FAILS_CHECK))" +check_k8s_entity() +{ + local type="$1" + local entity ns name + + shift + for item in "$@"; do + # Check entity exists + ns="$(echo "${item}" | cut -d ':' -f1)" + name="$(echo "${item}" | cut -d ':' -f2)" + entity="$(kubectl --kubeconfig "${KUBECONFIG}" get "${type}" "${name}" \ + -n "${ns}" -o json)" + process_status $? "${type} ${name} created" + + # Check the replicabaremetalclusters + if [[ "${BMO_RUN_LOCAL}" != true ]] && [[ "${CAPM3_RUN_LOCAL}" != true ]]; then + equals "$(echo "${entity}" | jq -r '.status.readyReplicas')" \ + "$(echo "${entity}" | jq -r '.status.replicas')" \ + "${name} ${type} replicas correct" + fi + done } - # Verify that a resource exists in a type -check_k8s_pods() { - local FAILS_CHECK="${FAILS}" - local ENTITY - local NS="${2:-metal3}" - for name in "${@}"; do - # Check entity exists - LABEL=$(echo "$name" | cut -f1 -d:); - NAME=$(echo "$name" | cut -f2 -d:); - - ENTITY="$(kubectl --kubeconfig "${KUBECONFIG}" get pods \ - -l "${LABEL}"="${NAME}" -n "${NS}" -o json | jq '.items[0]')" - RESULT_STR="Pod ${NAME} created" - differs "${ENTITY}" "null" - done - - return "$((FAILS-FAILS_CHECK))" +check_k8s_rs() +{ + local label name ns nb entities nb_entities + + for item in "$@"; do + # Check entity exists + label="$(echo "${item}" | cut -f1 -d:)" + name="$(echo "${item}" | cut -f2 -d:)" + ns="$(echo "${item}" | cut -f3 -d:)" + nb="$(echo "${item}" | cut -f4 -d:)" + entities="$(kubectl --kubeconfig "${KUBECONFIG}" get replicasets \ + -l "${label}"="${name}" -n "${ns}" -o json)" + nb_entities="$(echo "${entities}" | jq -r '.items | length')" + equals "${nb_entities}" "${nb}" "Replica sets with label ${label}=${name} created" + + # Check the replicas + if [[ "${BMO_RUN_LOCAL}" != true ]] && [[ "${CAPM3_RUN_LOCAL}" != true ]]; then + for i in $(seq 0 $((nb_entities-1))); do + equals "$(echo "${entities}" | jq -r ".items[${i}].status.readyReplicas")" \ + "$(echo "${entities}" | jq -r ".items[${i}].status.replicas")" \ + "${name} replicas correct for replica set ${i}" + done + fi + done } # Verify a container is running -check_container(){ - local NAME="$1" - RESULT_STR="Container ${NAME} running" - sudo "${CONTAINER_RUNTIME}" ps | grep -w 
"$NAME$" > /dev/null - process_status $? - return $? +check_container() +{ + local name="$1" + local return_status + + sudo "${CONTAINER_RUNTIME}" ps | grep -w "${name}$" > /dev/null + return_status="$?" + process_status "${return_status}" "Container ${name} running" + return "${return_status}" } -KUBECONFIG="${KUBECONFIG:-${HOME}/.kube/config}" -EXPTD_V1ALPHAX_V1BETAX_CRDS="clusters.cluster.x-k8s.io \ - kubeadmconfigs.bootstrap.cluster.x-k8s.io \ - kubeadmconfigtemplates.bootstrap.cluster.x-k8s.io \ - machinedeployments.cluster.x-k8s.io \ - machines.cluster.x-k8s.io \ - machinesets.cluster.x-k8s.io \ - baremetalhosts.metal3.io" -EXPTD_DEPLOYMENTS="capm3-system:capm3-controller-manager \ - capi-system:capi-controller-manager \ - capi-kubeadm-bootstrap-system:capi-kubeadm-bootstrap-controller-manager \ - capi-kubeadm-control-plane-system:capi-kubeadm-control-plane-controller-manager \ - baremetal-operator-system:baremetal-operator-controller-manager" -EXPTD_RS="cluster.x-k8s.io/provider:infrastructure-metal3:capm3-system:2 \ - cluster.x-k8s.io/provider:cluster-api:capi-system:1 \ - cluster.x-k8s.io/provider:bootstrap-kubeadm:capi-kubeadm-bootstrap-system:1 \ - cluster.x-k8s.io/provider:control-plane-kubeadm:capi-kubeadm-control-plane-system:1" -BRIDGES="provisioning external" -EXPTD_CONTAINERS="httpd-infra registry vbmc sushy-tools" - -FAILS=0 -BMO_RUN_LOCAL="${BMO_RUN_LOCAL:-false}" -CAPM3_RUN_LOCAL="${CAPM3_RUN_LOCAL:-false}" - +# +# start verifying stuff +# # Verify networking -for bridge in ${BRIDGES}; do - RESULT_STR="Network ${bridge} exists" - ip link show dev "${bridge}" > /dev/null - process_status $? "Network ${bridge} exists" +for bridge in "${BRIDGES[@]}"; do + ip link show dev "${bridge}" > /dev/null + process_status $? "Network ${bridge} exists" done # Verify Kubernetes cluster is reachable -RESULT_STR="Kubernetes cluster reachable" kubectl version > /dev/null -process_status $? +process_status $? "Kubernetes cluster reachable" echo "" # Verify that the CRDs exist -RESULT_STR="Fetch CRDs" CRDS="$(kubectl --kubeconfig "${KUBECONFIG}" get crds)" process_status $? "Fetch CRDs" -LIST_OF_CRDS=("${EXPTD_V1ALPHAX_V1BETAX_CRDS}") - # shellcheck disable=SC2068 -for name in ${LIST_OF_CRDS[@]}; do - RESULT_STR="CRD ${name} created" - echo "${CRDS}" | grep -w "${name}" > /dev/null - process_status $? +for name in "${EXPTD_V1ALPHAX_V1BETAX_CRDS[@]}"; do + echo "${CRDS}" | grep -w "${name}" >/dev/null + process_status $? "CRD ${name} created" done echo "" # Verify v1beta1 Operators, Deployments, Replicasets -iterate check_k8s_entity deployments "${EXPTD_DEPLOYMENTS}" -iterate check_k8s_rs "${EXPTD_RS}" +iterate check_k8s_entity deployments "${EXPTD_DEPLOYMENTS[@]}" +iterate check_k8s_rs "${EXPTD_RS[@]}" # Skip verification related to virsh when running with fakeIPA -if [[ "${NODES_PLATFORM}" == "fake" ]]; then - echo "Skipping virsh nodes verification on fake vm platform" - exit 0 +if [[ "${NODES_PLATFORM}" = "fake" ]]; then + echo "Skipping virsh nodes verification on fake vm platform" + exit 0 fi # Verify the baremetal hosts ## Fetch the BM CRs -RESULT_STR="Fetch Baremetalhosts" -kubectl --kubeconfig "${KUBECONFIG}" get baremetalhosts -n metal3 -o json \ - > /dev/null -process_status $? +kubectl --kubeconfig "${KUBECONFIG}" get baremetalhosts -n metal3 -o json >/dev/null +process_status $? "Fetch Baremetalhosts" ## Fetch the VMs -RESULT_STR="Fetch Baremetalhosts VMs" -sudo virsh list --all > /dev/null -process_status $? +sudo virsh list --all >/dev/null +process_status $? 
"Fetch Baremetalhosts VMs" echo "" ## Verify if [[ -n "$(list_nodes)" ]]; then - while read -r name address user password mac; do - iterate check_bm_hosts "${name}" "${address}" "${user}" \ - "${password}" "${mac}" - echo "" - done <<< "$(list_nodes)" + while read -r name address user password mac; do + iterate check_bm_hosts "${name}" "${address}" "${user}" \ + "${password}" "${mac}" + echo "" + done <<< "$(list_nodes)" fi # Verify that the operator are running locally -if [[ "${BMO_RUN_LOCAL}" == true ]]; then - RESULT_STR="Baremetal operator locally running" - pgrep "operator-sdk" > /dev/null 2> /dev/null - process_status $? +if [[ "${BMO_RUN_LOCAL}" = true ]]; then + pgrep "operator-sdk" > /dev/null 2> /dev/null + process_status $? "Baremetal operator locally running" fi -if [[ "${CAPM3_RUN_LOCAL}" == true ]]; then - # shellcheck disable=SC2034 - RESULT_STR="CAPI operator locally running" - pgrep -f "go run ./main.go" > /dev/null 2> /dev/null - process_status $? + +if [[ "${CAPM3_RUN_LOCAL}" = true ]]; then + # shellcheck disable=SC2034 + pgrep -f "go run ./main.go" > /dev/null 2> /dev/null + process_status $? "CAPI operator locally running" fi -if [[ "${BMO_RUN_LOCAL}" == true ]] || [[ "${CAPM3_RUN_LOCAL}" == true ]]; then - echo "" + +if [[ "${BMO_RUN_LOCAL}" = true ]] || [[ "${CAPM3_RUN_LOCAL}" = true ]]; then + echo "" fi -for container in ${EXPTD_CONTAINERS}; do - iterate check_container "$container" +for container in "${EXPTD_CONTAINERS[@]}"; do + iterate check_container "${container}" done - IRONIC_NODES_ENDPOINT="${IRONIC_URL}nodes" status="$(curl -sk -o /dev/null -I -w "%{http_code}" "${IRONIC_NODES_ENDPOINT}")" -if [[ $status == 200 ]]; then - echo "⚠️ ⚠️ ⚠️ WARNING: Ironic endpoint is exposed for unauthenticated users" +if [[ "${status}" -eq 200 ]]; then + echo "WARNING: Ironic endpoint is exposed for unauthenticated users" exit 1 -elif [[ $status == 401 ]]; then +elif [[ "${status}" -eq 401 ]]; then echo "OK - Ironic endpoint is secured" else - echo "FAIL- got $status from ${IRONIC_NODES_ENDPOINT}, expected 401" + echo "FAIL- got ${status} from ${IRONIC_NODES_ENDPOINT}, expected 401" exit 1 fi echo "" -echo -e "\nNumber of failures : $FAILS" +echo -e "\nNumber of failures: ${FAILS}" exit "${FAILS}" diff --git a/cluster_cleanup.sh b/cluster_cleanup.sh index 78f6077e0..70858375e 100755 --- a/cluster_cleanup.sh +++ b/cluster_cleanup.sh @@ -3,15 +3,17 @@ set -eux # shellcheck disable=SC1091 -source lib/common.sh +. lib/common.sh # Delete cluster if [[ "${EPHEMERAL_CLUSTER}" = "kind" ]] || [[ "${EPHEMERAL_CLUSTER}" = "tilt" ]]; then sudo su -l -c "kind delete cluster || true" "${USER}" + # Kill and remove the running ironic containers if [[ -x "${BMOPATH}/tools/remove_local_ironic.sh" ]]; then "${BMOPATH}"/tools/remove_local_ironic.sh fi + if [[ "${EPHEMERAL_CLUSTER}" = "tilt" ]]; then pushd "${CAPM3PATH}" pgrep tilt | xargs kill || true diff --git a/config_example.sh b/config_example.sh index 35b7cc904..2204f3ec6 100644 --- a/config_example.sh +++ b/config_example.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Choose whether the "external" libvirt network will use IPv4, IPv6, or IPv4+IPv6. 
@@ -207,7 +207,7 @@
 # Uncomment the line below to build ironic-image from source
 # export IRONIC_FROM_SOURCE="true"
 
-# Skip applying BMHs 
+# Skip applying BMHs
 # export SKIP_APPLY_BMH="true"
 
 # To enable FakeIPA and run dev-env on a fake platform
diff --git a/host_cleanup.sh b/host_cleanup.sh
index 54173f4b5..10af9499c 100755
--- a/host_cleanup.sh
+++ b/host_cleanup.sh
@@ -1,12 +1,13 @@
 #!/usr/bin/env bash
-set -x
+
+set -eux
 
 # shellcheck disable=SC1091
-source lib/logging.sh
+. lib/logging.sh
 # shellcheck disable=SC1091
-source lib/common.sh
+. lib/common.sh
 # shellcheck disable=SC1091
-source lib/network.sh
+. lib/network.sh
 
 # Kill and remove the running ironic containers
 remove_ironic_containers
@@ -18,7 +19,7 @@ sudo "${CONTAINER_RUNTIME}" rm -f fake-ipa 2>/dev/null
 if [[ "${CONTAINER_RUNTIME}" = "podman" ]]; then
     for pod in ironic-pod infra-pod; do
         if sudo "${CONTAINER_RUNTIME}" pod exists "${pod}"; then
-            sudo "${CONTAINER_RUNTIME}" pod rm "${pod}" -f 
+            sudo "${CONTAINER_RUNTIME}" pod rm "${pod}" -f
         fi
     done
 fi
@@ -57,7 +58,7 @@ ANSIBLE_FORCE_COLOR=true "${ANSIBLE}-playbook" \
 # There was a bug in this file, it may need to be recreated.
 if [[ "${OS}" = "centos" ]] || [[ "${OS}" = "rhel" ]]; then
     sudo rm -rf /etc/NetworkManager/conf.d/dnsmasq.conf
-    if [[ "${MANAGE_PRO_BRIDGE}" == "y" ]]; then
+    if [[ "${MANAGE_PRO_BRIDGE}" = "y" ]]; then
         sudo nmcli con delete ironic-peer
         sudo nmcli con delete "${BARE_METAL_PROVISIONER_INTERFACE}"
         sudo nmcli con delete provisioning
diff --git a/lib/common.sh b/lib/common.sh
index 0eeaa20b3..1650e30a2 100644
--- a/lib/common.sh
+++ b/lib/common.sh
@@ -1,7 +1,6 @@
-#!/bin/bash
-
-[[ ! "${PATH}" =~ .*(:|^)(/usr/local/go/bin)(:|$).* ]] && export PATH="$PATH:/usr/local/go/bin"
+#!/usr/bin/env bash
+SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}" )/.." && pwd)"
 
 USER="$(whoami)"
 export USER
 
@@ -13,11 +12,12 @@ if [[ "$USER" != "root" ]]; then
     fi
 fi
 
+if [[ ! "${PATH}" =~ .*(:|^)(/usr/local/go/bin)(:|$).* ]]; then
+    export PATH="${PATH}:/usr/local/go/bin"
+fi
 eval "$(go env)"
 export GOPATH="${GOPATH:-/home/$(whoami)/go}"
 
-SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
-
 # Get variables from the config file
 if [[ -z "${CONFIG:-}" ]]; then
     # See if there's a config_$USER.sh in the SCRIPTDIR
@@ -94,30 +94,6 @@ export SSH_PUB_KEY_CONTENT
 
 FILESYSTEM="${FILESYSTEM:=/}"
 
-# Reusable repository cloning function
-clone_repo() {
-    local REPO_URL="$1"
-    local REPO_BRANCH="$2"
-    local REPO_PATH="$3"
-    local REPO_COMMIT="${4:-HEAD}"
-
-    if [[ -d "${REPO_PATH}" ]] && [[ "${FORCE_REPO_UPDATE}" = "true" ]]; then
-        rm -rf "${REPO_PATH}"
-    fi
-    if [[ ! 
-d "${REPO_PATH}" ]]; then - pushd "${M3PATH}" || exit - if [[ "${REPO_COMMIT}" = "HEAD" ]]; then - git clone --depth 1 --branch "${REPO_BRANCH}" "${REPO_URL}" \ - "${REPO_PATH}" - else - git clone --branch "${REPO_BRANCH}" "${REPO_URL}" "${REPO_PATH}" - pushd "${REPO_PATH}" || exit - git checkout "${REPO_COMMIT}" - popd || exit - fi - popd || exit - fi -} # Configure common environment variables CAPM3_VERSION_LIST="v1beta1" @@ -400,8 +376,6 @@ fi SKIP_RETRIES="${SKIP_RETRIES:-false}" TEST_TIME_INTERVAL="${TEST_TIME_INTERVAL:-10}" TEST_MAX_TIME="${TEST_MAX_TIME:-240}" -FAILS=0 -RESULT_STR="" BMO_ROLLOUT_WAIT="${BMO_ROLLOUT_WAIT:-5}" IRONIC_ROLLOUT_WAIT="${IRONIC_ROLLOUT_WAIT:-10}" @@ -487,125 +461,6 @@ list_nodes() { | sed 's/"//g' } -# -# Iterate a command until it runs successfully or exceeds the maximum retries -# -# Inputs: -# - the command to run -# -iterate(){ - local RUNS=0 - local COMMAND="$*" - local TMP_RET TMP_RET_CODE - TMP_RET="$(${COMMAND})" - TMP_RET_CODE="$?" - - until [[ "${TMP_RET_CODE}" = 0 ]] || [[ "${SKIP_RETRIES}" = true ]] - do - if [[ "${RUNS}" = "0" ]]; then - echo " - Waiting for task completion (up to" \ - "$((TEST_TIME_INTERVAL*TEST_MAX_TIME)) seconds)" \ - " - Command: '${COMMAND}'" - fi - RUNS="$((RUNS+1))" - if [[ "${RUNS}" = "${TEST_MAX_TIME}" ]]; then - break - fi - sleep "${TEST_TIME_INTERVAL}" - # shellcheck disable=SC2068 - TMP_RET="$(${COMMAND})" - TMP_RET_CODE="$?" - done - FAILS="$((FAILS+TMP_RET_CODE))" - echo "${TMP_RET}" - return "${TMP_RET_CODE}" -} - -# -# Retry a command until it runs successfully or exceeds the maximum retries -# -# Inputs: -# - the command to run -# -retry() -{ - local retries=10 - local i - for i in $(seq 1 "${retries}"); do - if "${@}"; then - return 0 - fi - echo "Retrying... ${i}/${retries}" - sleep 5 - done - return 1 -} - - -# -# Check the return code -# -# Inputs: -# - return code to check -# - message to print -# -process_status(){ - if [[ "${1}" = 0 ]]; then - echo "OK - ${RESULT_STR}" - return 0 - else - echo "FAIL - ${RESULT_STR}" - FAILS="$((FAILS+1))" - return 1 - fi -} - -# -# Compare if the two inputs are the same and log -# -# Inputs: -# - first input to compare -# - second input to compare -# -equals(){ - [[ "${1}" = "${2}" ]]; RET_CODE="$?" - if ! process_status "${RET_CODE}" ; then - echo " expected ${2}, got ${1}" - fi - return ${RET_CODE} -} - -# -# Compare the substring to the string and log -# -# Inputs: -# - Substring to look for -# - String to look for the substring in -# -is_in(){ - [[ "${2}" =~ .*(${1}).* ]]; RET_CODE="$?" - if ! process_status "${RET_CODE}" ; then - echo " expected ${1} to be in ${2}" - fi - return ${RET_CODE} -} - - -# -# Check if the two inputs differ and log -# -# Inputs: -# - first input to compare -# - second input to compare -# -differs(){ - [[ "${1}" != "${2}" ]]; RET_CODE="$?" - if ! process_status "${RET_CODE}" ; then - echo " expected to be different from ${2}, got ${1}" - fi - return ${RET_CODE} -} - # # Kill and remove the infra containers # diff --git a/lib/utils.sh b/lib/utils.sh new file mode 100644 index 000000000..50a3222dc --- /dev/null +++ b/lib/utils.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +# common util functions, separated from common.sh + +# Reusable repository cloning function +clone_repo() +{ + local repo_url="$1" + local repo_branch="$2" + local repo_path="$3" + local repo_commit="${4:-HEAD}" + + if [[ -d "${repo_path}" ]] && [[ "${FORCE_REPO_UPDATE}" = "true" ]]; then + rm -rf "${repo_path}" + fi + + if [[ ! 
-d "${repo_path}" ]]; then
+        pushd "${M3PATH}" || exit
+        if [[ "${repo_commit}" = "HEAD" ]]; then
+            git clone --depth 1 --branch "${repo_branch}" "${repo_url}" \
+                "${repo_path}"
+        else
+            git clone --branch "${repo_branch}" "${repo_url}" "${repo_path}"
+            pushd "${repo_path}" || exit
+            git checkout "${repo_commit}"
+            popd || exit
+        fi
+        popd || exit
+    fi
+}
+
+#
+# Iterate a command until it runs successfully or exceeds the maximum retries
+#
+# Inputs:
+# - the command to run
+#
+iterate()
+{
+    local runs=0
+    local command="$*"
+
+    until "$@" || [[ "${SKIP_RETRIES}" = true ]]; do
+        if [[ "${runs}" = "0" ]]; then
+            echo " - Waiting for task completion (up to" \
+                "$((TEST_TIME_INTERVAL*TEST_MAX_TIME)) seconds)" \
+                " - Command: '${command}'"
+        fi
+        runs="$((runs + 1))"
+        if [[ "${runs}" -ge "${TEST_MAX_TIME}" ]]; then
+            return 1
+        fi
+        sleep "${TEST_TIME_INTERVAL}"
+    done
+
+    return $?
+}
+
+
+#
+# Retry a command until it runs successfully or exceeds the maximum retries
+#
+# Inputs:
+# - the command to run
+#
+retry()
+{
+    local retries=10
+    local i
+    for i in $(seq 1 "${retries}"); do
+        if "${@}"; then
+            return 0
+        fi
+        echo "Retrying... ${i}/${retries}"
+        sleep 5
+    done
+    return 1
+}
+
+
+#
+# Check the return code
+#
+# Inputs:
+# - return code to check
+# - message to print
+#
+process_status()
+{
+    local retcode="$1"
+    local message="${2:-}"
+
+    if [[ "${retcode}" -eq 0 ]]; then
+        if [[ -n "${message}" ]]; then
+            echo "OK - ${message}"
+        fi
+        return 0
+    fi
+
+    if [[ -n "${message}" ]]; then
+        echo "FAIL - ${message}"
+    else
+        echo -n "FAIL - "
+    fi
+
+    FAILS=$((FAILS + 1))
+    return 1
+}
+
+#
+# Compare if the two inputs are the same and log
+#
+# Inputs:
+# - first input to compare
+# - second input to compare
+#
+equals()
+{
+    local retval=0
+    [[ "${1}" = "${2}" ]] || retval=1
+    if ! process_status "${retval}"; then
+        echo " expected ${2}, got ${1}"
+    fi
+}
+
+#
+# Compare the substring to the string and log
+#
+# Inputs:
+# - Substring to look for
+# - String to look for the substring in
+#
+is_in()
+{
+    local retval=0
+    [[ "${2}" =~ .*(${1}).* ]] || retval=1
+    if ! process_status "${retval}"; then
+        echo " expected ${1} to be in ${2}"
+    fi
+}
diff --git a/openstackclient.sh b/openstackclient.sh
index 658a2a2ea..d85b02f8d 100755
--- a/openstackclient.sh
+++ b/openstackclient.sh
@@ -4,7 +4,7 @@ set -eu
 
 # shellcheck disable=SC2312
 DIR="$(dirname "$(readlink -f "$0")")"
-# shellcheck source=lib/common.sh
+# shellcheck disable=SC1091
 source "${DIR}/lib/common.sh"
 
 if [[ -d "${PWD}/_clouds_yaml" ]]; then
diff --git a/tests/test.sh b/tests/test.sh
index 6248f4928..f4af55433 100755
--- a/tests/test.sh
+++ b/tests/test.sh
@@ -1,33 +1,30 @@
-#!/bin/bash
-set -xe
+#!/usr/bin/env bash
+
+set -eux
 
 METAL3_DIR="$(dirname "$(readlink -f "${0}")")/.."
 
-# shellcheck disable=SC1090,SC1091
-source "${METAL3_DIR}/lib/common.sh"
+# shellcheck disable=SC1091
+. "${METAL3_DIR}/lib/common.sh"
+# shellcheck disable=SC1091
+. "${METAL3_DIR}/lib/utils.sh"
 
 export ACTION="ci_test_provision"
-
 "${METAL3_DIR}"/tests/run.sh
 
 # Manifest collection before pivot
 "${METAL3_DIR}"/tests/scripts/fetch_manifests.sh
-
 export ACTION="pivoting"
-
 "${METAL3_DIR}"/tests/run.sh
-
 "${METAL3_DIR}"/tests/scripts/fetch_target_logs.sh
+
 # Manifest collection after pivot
 "${METAL3_DIR}"/tests/scripts/fetch_manifests.sh
-
 export ACTION="repivoting"
-
 "${METAL3_DIR}"/tests/run.sh
 
 # wait until status of Metal3Machine is rebuilt
-while [[ -z "${status:-}" ]]
-do
+while [[ -z "${status:-}" ]]; do
     status=$(kubectl get m3m -n "${NAMESPACE}" -o=jsonpath="{.items[*]['status.ready']}")
     sleep 1s
 done
@@ -35,15 +32,15 @@ done
 # Manifest collection after re-pivot
 "${METAL3_DIR}"/tests/scripts/fetch_manifests.sh
 
-kubectl get secrets "${CLUSTER_NAME}-kubeconfig" -n "${NAMESPACE}" -o json | jq -r '.data.value'| base64 -d > "/tmp/kubeconfig-${CLUSTER_NAME}.yaml"
+kubectl get secrets "${CLUSTER_NAME}-kubeconfig" -n "${NAMESPACE}" -o json \
+    | jq -r '.data.value'| base64 -d > "/tmp/kubeconfig-${CLUSTER_NAME}.yaml"
 
 NUM_DEPLOYED_NODES="$(kubectl get nodes --kubeconfig "/tmp/kubeconfig-${CLUSTER_NAME}.yaml" | grep -c -w Ready)"
 process_status $? "Fetch number of deployed nodes"
 
-if [ "${NUM_DEPLOYED_NODES}" -ne "$((CONTROL_PLANE_MACHINE_COUNT + WORKER_MACHINE_COUNT))" ]; then
+if [[ "${NUM_DEPLOYED_NODES}" -ne "$((CONTROL_PLANE_MACHINE_COUNT + WORKER_MACHINE_COUNT))" ]]; then
     echo "Failed with incorrect number of nodes deployed"
     exit 1
 fi
 
 export ACTION="ci_test_deprovision"
-
 "${METAL3_DIR}"/tests/run.sh
diff --git a/ubuntu_bridge_network_configuration.sh b/ubuntu_bridge_network_configuration.sh
index 307299a49..9c026e482 100755
--- a/ubuntu_bridge_network_configuration.sh
+++ b/ubuntu_bridge_network_configuration.sh
@@ -3,11 +3,11 @@
 set -eux
 
 # shellcheck disable=SC1091
-source lib/logging.sh
+. lib/logging.sh
 # shellcheck disable=SC1091
-source lib/common.sh
+. lib/common.sh
 # shellcheck disable=SC1091
-source lib/network.sh
+. lib/network.sh
 
 if [[ "${MANAGE_PRO_BRIDGE}" = "y" ]]; then
     # Adding an IP address in the libvirt definition for this network results in
@@ -24,7 +24,7 @@ if [[ "${MANAGE_PRO_BRIDGE}" = "y" ]]; then
     sudo ip link set provisioning up
     if [[ "${BARE_METAL_PROVISIONER_SUBNET_IPV6_ONLY}" = "true" ]]; then
         sudo ip -6 addr add "${BARE_METAL_PROVISIONER_IP}"/"${BARE_METAL_PROVISIONER_CIDR}" dev ironicendpoint
-    else 
+    else
         sudo ip addr add dev ironicendpoint "${BARE_METAL_PROVISIONER_IP}"/"${BARE_METAL_PROVISIONER_CIDR}"
     fi
     sudo brctl addif provisioning ironic-peer
diff --git a/vbmc.sh b/vbmc.sh
index 1e4cbbfb5..1a6f62255 100755
--- a/vbmc.sh
+++ b/vbmc.sh
@@ -2,7 +2,7 @@
 
 # shellcheck disable=SC2312
 DIR="$(dirname "$(readlink -f "$0")")"
-# shellcheck source=lib/common.sh
-source "${DIR}/lib/common.sh"
+# shellcheck disable=SC1091
+. "${DIR}/lib/common.sh"
 
 sudo "${CONTAINER_RUNTIME}" exec -ti vbmc vbmc "$@"