diff --git a/roles/validations/defaults/main.yml b/roles/validations/defaults/main.yml
index 4588d241bb..9657c2d352 100644
--- a/roles/validations/defaults/main.yml
+++ b/roles/validations/defaults/main.yml
@@ -45,6 +45,8 @@ cifmw_validations_namespace: "openstack"
 cifmw_validations_hotfixed_edpm_nova_compute_image: quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified
 cifmw_validations_custom_nova_service: "nova-custom-ceph"
+cifmw_validations_xml_status_file_dir: "{{ cifmw_validations_basedir }}/tests/validations"
+

 # variables needed for scaledown
 cifmw_validations_edpm_scale_down_hostname: compute-2.ctlplane.example.com
 cifmw_validations_edpm_scale_down_nodename: edpm-compute-2
diff --git a/roles/validations/tasks/edpm/hotfix.yml b/roles/validations/tasks/edpm/hotfix.yml
index 750827c68e..35f3a2847d 100644
--- a/roles/validations/tasks/edpm/hotfix.yml
+++ b/roles/validations/tasks/edpm/hotfix.yml
@@ -6,86 +6,96 @@

 # Since jobs use different names for their OpenStackDataPlaneNodeSet's, we need to dynamically
 # determine the name of the currently deployed one
-- name: Determine name of deployed NodeSet
-  environment:
-    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
-    PATH: "{{ cifmw_path }}"
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc get osdpns -n {{ cifmw_validations_namespace }} --no-headers -o custom-columns=":metadata.name"
-  register: deployed_nodeset_name
+- name: Set validator status to passed
+  ansible.builtin.set_fact:
+    validator_status: "passed"
+
+- name: Validation block
+  block:
+    - name: Determine name of deployed NodeSet
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc get osdpns -n {{ cifmw_validations_namespace }} --no-headers -o custom-columns=":metadata.name"
+      register: deployed_nodeset_name

 # Simulate hotfixed image by update edpm_nova_compute_image to a different, non-default value
-- name: Update edpm_nova_compute_image value
-  environment:
-    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
-    PATH: "{{ cifmw_path }}"
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc patch -n {{ cifmw_validations_namespace }} osdpns/"{{ deployed_nodeset_name.stdout | trim }}" --type=merge -p '{"spec": {"nodeTemplate": {"ansible": {"ansibleVars": {"edpm_nova_compute_image": "{{ cifmw_validations_hotfixed_edpm_nova_compute_image }}"}}}}}'
+    - name: Update edpm_nova_compute_image value
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc patch -n {{ cifmw_validations_namespace }} osdpns/"{{ deployed_nodeset_name.stdout | trim }}" --type=merge -p '{"spec": {"nodeTemplate": {"ansible": {"ansibleVars": {"edpm_nova_compute_image": "{{ cifmw_validations_hotfixed_edpm_nova_compute_image }}"}}}}}'

 # loop check the status of the openstackdataplanenodeset until it is either SetupReady,
 # or reaches a defined timeout value.
-- name: Wait for nodeset to be SetupReady again
-  environment:
-    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
-    PATH: "{{ cifmw_path }}"
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc wait osdpns "{{ deployed_nodeset_name.stdout | trim }}"
-      --namespace={{ cifmw_validations_namespace }}
-      --for=condition=SetupReady
-      --timeout={{ cifmw_validations_timeout }}m
+    - name: Wait for nodeset to be SetupReady again
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc wait osdpns "{{ deployed_nodeset_name.stdout | trim }}"
+          --namespace={{ cifmw_validations_namespace }}
+          --for=condition=SetupReady
+          --timeout={{ cifmw_validations_timeout }}m

 # Create a new OpenStackDataPlaneDeployment to apply the hotfixed image
-- name: Create openstackdataplanedeployment to rollout changes
-  environment:
-    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
-    PATH: "{{ cifmw_path }}"
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: |
-      oc apply -f - <
-      oc wait openstackdataplanedeployment edpm-hotfix
-      --namespace={{ cifmw_validations_namespace }}
-      --for=condition=ready
-      --timeout={{ cifmw_validations_timeout }}m
+    - name: Wait for deployment to be complete
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc wait openstackdataplanedeployment edpm-hotfix
+          --namespace={{ cifmw_validations_namespace }}
+          --for=condition=ready
+          --timeout={{ cifmw_validations_timeout }}m

 # Collect running image to assert the hotfix was applied
-- name: Collect the image currently used by nova_compute on the edpm node
-  ansible.builtin.shell:
-    cmd: >-
-      set -o pipefail && sudo podman inspect nova_compute | jq '.[].ImageName' | tr -d '"'
-  delegate_to: "{{ cifmw_validations_edpm_check_node }}"
-  register: post_change_nova_compute_image
+    - name: Collect the image currently used by nova_compute on the edpm node
+      ansible.builtin.shell:
+        cmd: >-
+          set -o pipefail && sudo podman inspect nova_compute | jq '.[].ImageName' | tr -d '"'
+      delegate_to: "{{ cifmw_validations_edpm_check_node }}"
+      register: post_change_nova_compute_image

 # these assertions will determine whether this job has ultimately been successful or not.
 # here, we should list all of the assertions required to satisfy our feature requirements.
-- name: Assert state change reflects our requirements
-  ansible.builtin.assert:
-    that:
-      - cifmw_validations_hotfixed_edpm_nova_compute_image == post_change_nova_compute_image.stdout
+    - name: Assert state change reflects our requirements
+      ansible.builtin.assert:
+        that:
+          - cifmw_validations_hotfixed_edpm_nova_compute_image == post_change_nova_compute_image.stdout
+  rescue:
+    - name: Set validator status to failed
+      ansible.builtin.set_fact:
+        validator_status: "Validator failed task: {{ ansible_failed_task.name }}, Validator failed reason: {{ ansible_failed_result.msg}}"
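For readers unfamiliar with the pattern introduced above, the following minimal sketch (illustrative task names, not part of the patch) shows how the block/rescue wrapper records a failure instead of aborting the play: any error raised inside `block` drops into `rescue`, where `ansible_failed_task` and `ansible_failed_result` describe the task that failed.

```yaml
# Minimal sketch of the validator_status pattern, with a deliberately failing step.
- name: Set validator status to passed
  ansible.builtin.set_fact:
    validator_status: "passed"

- name: Validation block
  block:
    - name: Some validation step (hypothetical)
      ansible.builtin.command:
        cmd: /bin/false
  rescue:
    - name: Set validator status to failed
      ansible.builtin.set_fact:
        validator_status: "Validator failed task: {{ ansible_failed_task.name }}, Validator failed reason: {{ ansible_failed_result.msg }}"
```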
diff --git a/roles/validations/tasks/edpm/hugepages_and_reboot.yml b/roles/validations/tasks/edpm/hugepages_and_reboot.yml
index 9a2de54343..46ebe40cb8 100644
--- a/roles/validations/tasks/edpm/hugepages_and_reboot.yml
+++ b/roles/validations/tasks/edpm/hugepages_and_reboot.yml
@@ -8,139 +8,149 @@

 # Collect facts from the node in question. this will give us an initial state prior to our changes
 # that can be compared with the end state.
-- name: Collect initial state of edpm node
-  ansible.builtin.gather_facts:
-  delegate_to: "{{ cifmw_validations_edpm_check_node }}"
-  register: initial_node_state
+- name: Set validator status to passed
+  ansible.builtin.set_fact:
+    validator_status: "passed"
+
+- name: Validation block
+  block:
+    - name: Collect initial state of edpm node
+      ansible.builtin.gather_facts:
+      delegate_to: "{{ cifmw_validations_edpm_check_node }}"
+      register: initial_node_state

 # Since jobs use different names for their OpenStackDataPlaneNodeSet's, we need to dynamically
 # determine the name of the currently deployed one
-- name: Determine name of deployed NodeSet
-  environment:
-    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
-    PATH: "{{ cifmw_path }}"
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc get -n {{ cifmw_validations_namespace }} osdpns --no-headers -o custom-columns=":metadata.name"
-  register: deployed_nodeset_name
+    - name: Determine name of deployed NodeSet
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc get -n {{ cifmw_validations_namespace }} osdpns --no-headers -o custom-columns=":metadata.name"
+      register: deployed_nodeset_name

 # collect initial confighash from the nodeset
-- name: Collecting initial confighash from the nodeset
-  environment:
-    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
-    PATH: "{{ cifmw_path }}"
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      set -o pipefail && oc get -n {{ cifmw_validations_namespace }} osdpns "{{ deployed_nodeset_name.stdout | trim }}" -o jsonpath='{.status.configHash}'
-  register: initial_config_hash
+    - name: Collecting initial confighash from the nodeset
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          set -o pipefail && oc get -n {{ cifmw_validations_namespace }} osdpns "{{ deployed_nodeset_name.stdout | trim }}" -o jsonpath='{.status.configHash}'
+      register: initial_config_hash

-- name: Update hugepages value
-  environment:
-    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
-    PATH: "{{ cifmw_path }}"
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc patch -n {{ cifmw_validations_namespace }} osdpns/"{{ deployed_nodeset_name.stdout | trim }}" --type=merge -p '{"spec": {"nodeTemplate": {"ansible": {"ansibleVars": {"edpm_kernel_hugepages": {"2048": {"count": 10, "default": true}, "4048": {"count": 10}}}}}}}'
+    - name: Update hugepages value
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc patch -n {{ cifmw_validations_namespace }} osdpns/"{{ deployed_nodeset_name.stdout | trim }}" --type=merge -p '{"spec": {"nodeTemplate": {"ansible": {"ansibleVars": {"edpm_kernel_hugepages": {"2048": {"count": 10, "default": true}, "4048": {"count": 10}}}}}}}'

 # loop check the status of the openstackdataplanenodeset until it is either SetupReady,
 # or reaches a defined timeout value.
-- name: Wait for nodeset to be SetupReady again
-  environment:
-    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
-    PATH: "{{ cifmw_path }}"
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc wait osdpns "{{ deployed_nodeset_name.stdout | trim }}"
-      --namespace={{ cifmw_validations_namespace }}
-      --for=condition=SetupReady
-      --timeout={{ cifmw_validations_timeout }}m
+    - name: Wait for nodeset to be SetupReady again
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc wait osdpns "{{ deployed_nodeset_name.stdout | trim }}"
+          --namespace={{ cifmw_validations_namespace }}
+          --for=condition=SetupReady
+          --timeout={{ cifmw_validations_timeout }}m

 # Define an ad-hoc reboot service for our node reboot
-- name: Create ad-hoc reboot service
-  environment:
-    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
-    PATH: "{{ cifmw_path }}"
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: |
-      oc apply -f - <
-      oc wait openstackdataplanedeployment edpm-hugepages-update
-      --namespace={{ cifmw_validations_namespace }}
-      --for=condition=ready
-      --timeout={{ cifmw_validations_timeout }}m
+    - name: Wait for deployment to be complete
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc wait openstackdataplanedeployment edpm-hugepages-update
+          --namespace={{ cifmw_validations_namespace }}
+          --for=condition=ready
+          --timeout={{ cifmw_validations_timeout }}m

 # Collect final confighash from the nodeset
-- name: Collecting final confighash from the nodeset
-  environment:
-    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
-    PATH: "{{ cifmw_path }}"
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      set -o pipefail && oc get -n {{ cifmw_validations_namespace }} osdpns "{{ deployed_nodeset_name.stdout | trim }}" -o jsonpath='{.status.configHash}'
-  register: post_change_config_hash
+    - name: Collecting final confighash from the nodeset
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          set -o pipefail && oc get -n {{ cifmw_validations_namespace }} osdpns "{{ deployed_nodeset_name.stdout | trim }}" -o jsonpath='{.status.configHash}'
+      register: post_change_config_hash

 # gather facts again to compare against the initial state.
-- name: Collect post change state of edpm node
-  ansible.builtin.gather_facts:
-  delegate_to: "{{ cifmw_validations_edpm_check_node }}"
-  register: post_change_node_state
+    - name: Collect post change state of edpm node
+      ansible.builtin.gather_facts:
+      delegate_to: "{{ cifmw_validations_edpm_check_node }}"
+      register: post_change_node_state

 # these assertions will determine whether this job has ultimately been successful or not.
 # here, we should list all of the assertions required to satisfy our feature requirements.
-- name: Assert state change reflects our requirements
-  ansible.builtin.assert:
-    that:
-      - initial_config_hash.stdout != post_change_config_hash.stdout
-      - initial_node_state.ansible_facts.ansible_cmdline != post_change_node_state.ansible_facts.ansible_cmdline
+    - name: Assert state change reflects our requirements
+      ansible.builtin.assert:
+        that:
+          - initial_config_hash.stdout != post_change_config_hash.stdout
+          - initial_node_state.ansible_facts.ansible_cmdline != post_change_node_state.ansible_facts.ansible_cmdline
+  rescue:
+    - name: Set validator status to failed
+      ansible.builtin.set_fact:
+        validator_status: "Validator failed task: {{ ansible_failed_task.name }}, Validator failed reason: {{ ansible_failed_result.msg}}"
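The hugepages assertion above only checks that the kernel command line changed across the reboot. A stricter, purely illustrative check (not part of the patch) could confirm that hugepage parameters actually appear in the post-reboot facts; `ansible_cmdline` is a dict keyed by kernel argument, so a key-membership test is enough.

```yaml
# Illustrative only: assumes the reboot applied the hugepages requested by the oc patch above.
- name: Check hugepages landed on the kernel command line (hypothetical extra assert)
  ansible.builtin.assert:
    that:
      - "'hugepagesz' in post_change_node_state.ansible_facts.ansible_cmdline"
      - "'hugepages' in post_change_node_state.ansible_facts.ansible_cmdline"
```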
diff --git a/roles/validations/tasks/edpm/scaledown.yml b/roles/validations/tasks/edpm/scaledown.yml
index 35b08a448a..60ace9504d 100644
--- a/roles/validations/tasks/edpm/scaledown.yml
+++ b/roles/validations/tasks/edpm/scaledown.yml
@@ -6,76 +6,87 @@

 # Since jobs use different names for their OpenStackDataPlaneNodeSet's, we need to dynamically
 # determine the name of the currently deployed one
-- name: Determine name of deployed NodeSet
-  environment:
-    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
-    PATH: "{{ cifmw_path }}"
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc get osdpns -n {{ cifmw_validations_namespace }} --no-headers -o custom-columns=":metadata.name"
-  register: deployed_nodeset_name
+- name: Set validator status to passed
+  ansible.builtin.set_fact:
+    validator_status: "passed"

-- name: Verify compute being removed is present before scale down
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack hypervisor list
-  register: hypervisors_before_scale_down
-  failed_when: '"{{ cifmw_validations_edpm_scale_down_hostname }}" not in hypervisors_before_scale_down.stdout'
+- name: Validation block
+  block:

-- name: Disable nova-compute for node being removed
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack compute service set {{ cifmw_validations_edpm_scale_down_hostname }} nova-compute --disable
+    - name: Determine name of deployed NodeSet
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc get osdpns -n {{ cifmw_validations_namespace }} --no-headers -o custom-columns=":metadata.name"
+      register: deployed_nodeset_name

-- name: Disable ovn and nova-compute containers on node being removed
-  ansible.builtin.shell:
-    cmd: >-
-      set -o pipefail && systemctl stop edpm_ovn_controller && systemctl stop edpm_ovn_metadata_agent && systemctl stop edpm_nova_compute
-  become: true
-  delegate_to: "{{ cifmw_validations_edpm_scale_down_hostname.split('.')[0] }}"
+    - name: Verify compute being removed is present before scale down
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack hypervisor list
+      register: hypervisors_before_scale_down
+      failed_when: '"{{ cifmw_validations_edpm_scale_down_hostname }}" not in hypervisors_before_scale_down.stdout'

-- name: Get ovn controller id of host to be removed
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack network agent list --host {{ cifmw_validations_edpm_scale_down_hostname }} | grep "OVN Controller agent" | awk '{print $2}'
-  register: remove_ovn_id
+    - name: Disable nova-compute for node being removed
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack compute service set {{ cifmw_validations_edpm_scale_down_hostname }} nova-compute --disable

-- name: Delete network agent for compute being removed
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack network agent delete {{ remove_ovn_id.stdout }}
+    - name: Disable ovn and nova-compute containers on node being removed
+      ansible.builtin.shell:
+        cmd: >-
+          set -o pipefail && systemctl stop edpm_ovn_controller && systemctl stop edpm_ovn_metadata_agent && systemctl stop edpm_nova_compute
+      become: true
+      delegate_to: "{{ cifmw_validations_edpm_scale_down_hostname.split('.')[0] }}"

-- name: Get compute service id of host to be removed
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack compute service list --host {{ cifmw_validations_edpm_scale_down_hostname }} | grep "nova-compute" | awk '{print $2}'
-  register: remove_compute_service_id
+    - name: Get ovn controller id of host to be removed
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack network agent list --host {{ cifmw_validations_edpm_scale_down_hostname }} | grep "OVN Controller agent" | awk '{print $2}'
+      register: remove_ovn_id

-- name: Delete compute service
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack compute service delete {{ remove_compute_service_id.stdout }}
+    - name: Delete network agent for compute being removed
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack network agent delete {{ remove_ovn_id.stdout }}

-- name: Verify compute was removed
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack hypervisor list
-  register: hypervisors_after_scale_down
-  failed_when: '"{{ cifmw_validations_edpm_scale_down_hostname }}" in hypervisors_after_scale_down.stdout'
+    - name: Get compute service id of host to be removed
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack compute service list --host {{ cifmw_validations_edpm_scale_down_hostname }} | grep "nova-compute" | awk '{print $2}'
+      register: remove_compute_service_id

-- name: Patch nodeset to remove node
-  environment:
-    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
-    PATH: "{{ cifmw_path }}"
-  cifmw.general.ci_script:
-    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
-    script: >-
-      oc patch -n {{ cifmw_validations_namespace }} osdpns/"{{ deployed_nodeset_name.stdout | trim}}" --type=json --patch '[{ "op": "remove", "path": "/spec/nodes/{{ cifmw_validations_edpm_scale_down_nodename }}" }]'
+    - name: Delete compute service
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack compute service delete {{ remove_compute_service_id.stdout }}
+
+    - name: Verify compute was removed
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack hypervisor list
+      register: hypervisors_after_scale_down
+      failed_when: '"{{ cifmw_validations_edpm_scale_down_hostname }}" in hypervisors_after_scale_down.stdout'
+
+    - name: Patch nodeset to remove node
+      environment:
+        KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+        PATH: "{{ cifmw_path }}"
+      cifmw.general.ci_script:
+        output_dir: "{{ cifmw_validations_basedir }}/artifacts"
+        script: >-
+          oc patch -n {{ cifmw_validations_namespace }} osdpns/"{{ deployed_nodeset_name.stdout | trim}}" --type=json --patch '[{ "op": "remove", "path": "/spec/nodes/{{ cifmw_validations_edpm_scale_down_nodename }}" }]'
+  rescue:
+    - name: Set validator status to failed
+      ansible.builtin.set_fact:
+        validator_status: "Validator failed task: {{ ansible_failed_task.name }}, Validator failed reason: {{ ansible_failed_result.msg}}"
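The scaledown tasks above extract IDs by piping the openstack client output through grep and awk. An equivalent, arguably less fragile variant (shown only as an illustration, not part of the patch) lets the client emit the ID directly with its value formatter:

```yaml
# Illustrative variant of "Get compute service id of host to be removed".
- name: Get compute service id of host to be removed (formatter variant)
  cifmw.general.ci_script:
    output_dir: "{{ cifmw_validations_basedir }}/artifacts"
    script: >-
      oc -n {{ cifmw_validations_namespace }} rsh openstackclient
      openstack compute service list
      --host {{ cifmw_validations_edpm_scale_down_hostname }}
      --service nova-compute -f value -c ID
  register: remove_compute_service_id
```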
diff --git a/roles/validations/tasks/main.yml b/roles/validations/tasks/main.yml
index ea82763b8b..5f297661b2 100644
--- a/roles/validations/tasks/main.yml
+++ b/roles/validations/tasks/main.yml
@@ -25,6 +25,18 @@
     - artifacts
     - logs

+- name: Initialize variables needed for generating polarion xml file
+  ansible.builtin.set_fact:
+    validations_executed: 0
+    validations_failed: 0
+    validations_errors: 0
+    xml_line_list: []
+
+- name: Get validations start time from system
+  ansible.builtin.command:
+    cmd: date +%Y-%m-%d-%H:%M:%S.%3N
+  register: validations_start_time
+
 # We can execute all defined validations when cifmw_validation_run_all is defined.
 # Else, we will skip this and run only the explicitly defined validations from
 # cifmw_validations_list
@@ -42,7 +54,7 @@

 - name: Run all found validations
   ansible.builtin.include_tasks:
-    file: "{{ item.path }}"
+    file: xmlcreate.yml
   loop: "{{ found_validations.files }}"

 - name: Run selected validations
@@ -56,5 +68,34 @@
     failed_when: not validation_exists.stat.exists

 - name: Run validations
-  ansible.builtin.include_tasks: "{{ item }}"
+  ansible.builtin.include_tasks:
+    file: xmlcreate.yml
   loop: "{{ cifmw_validations_list }}"
+
+- name: Get validations end time from system
+  ansible.builtin.command:
+    cmd: date +%Y-%m-%d-%H:%M:%S.%3N
+  register: validations_end_time
+
+- name: Calculate total validations run time
+  ansible.builtin.set_fact:
+    validations_run_time: "{{((validations_end_time.stdout | to_datetime('%Y-%m-%d-%H:%M:%S.%f')) - (validations_start_time.stdout | to_datetime('%Y-%m-%d-%H:%M:%S.%f'))).total_seconds()}}"
+
+- name: Write first line of xml results file
+  ansible.builtin.shell: |
+    mkdir -p "{{ cifmw_validations_xml_status_file_dir }}"
+    echo "" >> {{ cifmw_validations_xml_status_file_dir }}/validations_results.xml
+
+- name: Write individual results to xml file
+  ansible.builtin.shell: |
+    echo '{{ item }}' >> {{ cifmw_validations_xml_status_file_dir }}/validations_results.xml
+  loop: '{{ xml_line_list }}'
+
+- name: Write last line to xml results file
+  ansible.builtin.shell: |
+    echo "" >> {{ cifmw_validations_xml_status_file_dir }}/validations_results.xml
+
+- name: Fail job when validations fail
+  ansible.builtin.fail:
+    msg: "One or more validations failed"
+  when: validations_failed | int > 0
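The total run-time calculation above relies on Jinja's `to_datetime` filter: both timestamps are parsed with the same format string and subtracted, giving a timedelta whose `total_seconds()` becomes the recorded duration. A self-contained sketch of the same expression (literal timestamps are hypothetical):

```yaml
# Standalone illustration of the elapsed-time expression used in main.yml.
- name: Show elapsed seconds between two timestamps
  vars:
    time_format: '%Y-%m-%d-%H:%M:%S.%f'
    start_ts: '2024-01-01-10:00:00.000'
    end_ts: '2024-01-01-10:00:05.250'
  ansible.builtin.debug:
    msg: "{{ ((end_ts | to_datetime(time_format)) - (start_ts | to_datetime(time_format))).total_seconds() }}"
```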
diff --git a/roles/validations/tasks/xmlcreate.yml b/roles/validations/tasks/xmlcreate.yml
new file mode 100644
index 0000000000..27a7548491
--- /dev/null
+++ b/roles/validations/tasks/xmlcreate.yml
@@ -0,0 +1,58 @@
+---
+- name: Get validation start time from system
+  ansible.builtin.command:
+    cmd: date +%Y-%m-%d-%H:%M:%S.%3N
+  register: individual_validation_start_time
+
+- name: Run a validation
+  ansible.builtin.include_tasks: "{{ item }}"
+
+- name: Get validation end time from system
+  ansible.builtin.command:
+    cmd: date +%Y-%m-%d-%H:%M:%S.%3N
+  register: individual_validation_end_time
+
+- name: Calculate validation total run time
+  ansible.builtin.set_fact:
+    individual_validator_run_time: "{{((individual_validation_end_time.stdout | to_datetime('%Y-%m-%d-%H:%M:%S.%f')) - (individual_validation_start_time.stdout | to_datetime('%Y-%m-%d-%H:%M:%S.%f'))).total_seconds()}}"
+
+- name: Increment test count after validation execution
+  ansible.builtin.set_fact:
+    validations_executed: "{{ validations_executed | int + 1 }}"
+
+- name: Make new xml line list entry
+  ansible.builtin.set_fact:
+    new_xml_line_list_entry: [' ']
+  when: '"passed" in validator_status'
+
+- name: Add new line to xml line list
+  ansible.builtin.set_fact:
+    xml_line_list: "{{ xml_line_list + new_xml_line_list_entry }}"
+  when: '"passed" in validator_status'
+
+- name: Add testcase and failure reason to xml line list when a validation fails
+  when: '"failed" in validator_status'
+  block:
+    - name: Make new xml line list entry
+      ansible.builtin.set_fact:
+        new_xml_line_list_entry: [' ']
+
+    - name: Add new line to xml line list
+      ansible.builtin.set_fact:
+        xml_line_list: "{{ xml_line_list + new_xml_line_list_entry }}"
+
+    - name: Make a new failure reason xml line list entry
+      ansible.builtin.set_fact:
+        new_xml_line_list_entry: [' ']
+
+    - name: Add new failure reason line to xml line list
+      ansible.builtin.set_fact:
+        xml_line_list: "{{ xml_line_list + new_xml_line_list_entry }}"
+
+    - name: Add testcase end to xml line list
+      ansible.builtin.set_fact:
+        xml_line_list: "{{ xml_line_list + [' '] }}"
+
+    - name: Increment failure count
+      ansible.builtin.set_fact:
+        validations_failed: "{{ validations_failed | int + 1 }}"
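Finally, a hypothetical invocation sketch (playbook layout and the listed path are assumptions, not part of the patch): with these changes, each entry in `cifmw_validations_list` is routed through `xmlcreate.yml`, the run leaves a `validations_results.xml` under `cifmw_validations_xml_status_file_dir`, and the job still fails if any validation failed.

```yaml
# Hypothetical playbook snippet exercising the role with an explicit validation list.
- name: Run selected validations and collect the XML report
  hosts: localhost
  vars:
    cifmw_validations_list:
      - roles/validations/tasks/edpm/hotfix.yml   # illustrative path
  roles:
    - role: validations
```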