diff --git a/.github/path-filters.yml b/.github/path-filters.yml
index b9815d9d6..02d0ab609 100644
--- a/.github/path-filters.yml
+++ b/.github/path-filters.yml
@@ -1,5 +1,5 @@
 # This file is a list of path filters for the PR workflow in .github/workflows/stackhpc-pull-request.yml.
-aio:
+aio: &aio
   - '.automation'
   - '.automation.conf/config.sh'
   - '.automation.conf/tempest/load-lists/default'
@@ -20,6 +20,11 @@ aio:
   - 'kayobe-env'
   - 'requirements.txt'
   - 'terraform/aio/**'
-check-tags:
+check-tags: &check-tags
   - '.github/workflows/stackhpc-check-tags.yml'
   - 'etc/kayobe/kolla-image-tags.yml'
+  - 'etc/kayobe/pulp.yml'
+  - 'tools/kolla-images.py'
+build-kayobe-image:
+  - *aio
+  - *check-tags
diff --git a/.github/workflows/multinode-inputs.py b/.github/workflows/multinode-inputs.py
index b862cbf91..c64b73544 100644
--- a/.github/workflows/multinode-inputs.py
+++ b/.github/workflows/multinode-inputs.py
@@ -33,7 +33,8 @@ class Scenario:
 UBUNTU_JAMMY = OSRelease("ubuntu", "jammy", "ubuntu")
 # NOTE(upgrade): Add supported releases here.
 OPENSTACK_RELEASES = [
-    OpenStackRelease("2023.1", "zed", [ROCKY_9, UBUNTU_JAMMY])
+    OpenStackRelease("2024.1", "2023.1", [ROCKY_9, UBUNTU_JAMMY]),
+    OpenStackRelease("2023.1", "zed", [ROCKY_9, UBUNTU_JAMMY]),
 ]
 
 NEUTRON_PLUGINS = ["ovs", "ovn"]
diff --git a/.github/workflows/stackhpc-check-tags.yml b/.github/workflows/stackhpc-check-tags.yml
index 4016c00e9..db2383e21 100644
--- a/.github/workflows/stackhpc-check-tags.yml
+++ b/.github/workflows/stackhpc-check-tags.yml
@@ -46,6 +46,15 @@ jobs:
         run: |
           docker image pull $KAYOBE_IMAGE
 
+      - name: Check kolla-images.py image map and tag hierarchy
+        run: |
+          docker run -t --rm \
+            -v $(pwd):/stack/kayobe-automation-env/src/kayobe-config \
+            -e KAYOBE_ENVIRONMENT -e KAYOBE_VAULT_PASSWORD -e KAYOBE_AUTOMATION_SSH_PRIVATE_KEY \
+            $KAYOBE_IMAGE \
+            /stack/kayobe-automation-env/src/kayobe-config/.automation/pipeline/playbook-run.sh \
+            '$KAYOBE_CONFIG_PATH/ansible/check-kolla-images-py.yml'
+
       - name: Check container image tags
         run: |
           docker run -t --rm \
diff --git a/.github/workflows/stackhpc-multinode-periodic.yml b/.github/workflows/stackhpc-multinode-periodic.yml
index 341fbe55b..cb94705bc 100644
--- a/.github/workflows/stackhpc-multinode-periodic.yml
+++ b/.github/workflows/stackhpc-multinode-periodic.yml
@@ -35,7 +35,7 @@ jobs:
     name: Multinode periodic
     needs:
       - generate-inputs
-    uses: stackhpc/stackhpc-openstack-gh-workflows/.github/workflows/multinode.yml@1.0.1
+    uses: stackhpc/stackhpc-openstack-gh-workflows/.github/workflows/multinode.yml@1.1.0
     with:
       multinode_name: mn-prdc-${{ github.run_id }}
       os_distribution: ${{ needs.generate-inputs.outputs.os_distribution }}
diff --git a/.github/workflows/stackhpc-multinode.yml b/.github/workflows/stackhpc-multinode.yml
index 2ca96a50c..5c8b67d1e 100644
--- a/.github/workflows/stackhpc-multinode.yml
+++ b/.github/workflows/stackhpc-multinode.yml
@@ -52,7 +52,7 @@ name: Multinode
 jobs:
   multinode:
     name: Multinode
-    uses: stackhpc/stackhpc-openstack-gh-workflows/.github/workflows/multinode.yml@1.0.1
+    uses: stackhpc/stackhpc-openstack-gh-workflows/.github/workflows/multinode.yml@1.1.0
     with:
       multinode_name: ${{ inputs.multinode_name }}
       os_distribution: ${{ inputs.os_distribution }}
diff --git a/.github/workflows/stackhpc-pull-request.yml b/.github/workflows/stackhpc-pull-request.yml
index 196ecc3a3..aba6bacb7 100644
--- a/.github/workflows/stackhpc-pull-request.yml
+++ b/.github/workflows/stackhpc-pull-request.yml
@@ -20,6 +20,7 @@ jobs:
     if: github.repository == 'stackhpc/stackhpc-kayobe-config'
     outputs:
       aio: ${{ steps.changes.outputs.aio }}
+      build-kayobe-image: ${{ steps.changes.outputs.build-kayobe-image }}
       check-tags: ${{ steps.changes.outputs.check-tags }}
     steps:
       - name: GitHub Checkout
@@ -74,7 +75,7 @@ jobs:
       - check-changes
     uses: ./.github/workflows/stackhpc-build-kayobe-image.yml
     with:
-      if: ${{ needs.check-changes.outputs.aio == 'true' }}
+      if: ${{ needs.check-changes.outputs.build-kayobe-image == 'true' }}
     if: github.repository == 'stackhpc/stackhpc-kayobe-config'
 
   check-tags:
diff --git a/etc/kayobe/ansible/check-kolla-images-py.yml b/etc/kayobe/ansible/check-kolla-images-py.yml
new file mode 100644
index 000000000..ba934a222
--- /dev/null
+++ b/etc/kayobe/ansible/check-kolla-images-py.yml
@@ -0,0 +1,35 @@
+---
+- name: Check kolla-images.py image map and tag hierarchy
+  hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: Create a temporary directory
+      ansible.builtin.tempfile:
+        state: directory
+        suffix: kolla-ansible
+      register: tempdir_result
+
+    - name: Clone Kolla Ansible repository
+      ansible.builtin.git:
+        repo: "{{ stackhpc_kolla_ansible_source_url }}"
+        version: "{{ stackhpc_kolla_ansible_source_version }}"
+        dest: "{{ tempdir_result.path }}"
+
+    - name: Check image mapping
+      ansible.builtin.command:
+        cmd: >-
+          {{ kayobe_config_path }}/../../tools/kolla-images.py
+          check-image-map
+          --kolla-ansible-path {{ tempdir_result.path }}
+
+    - name: Check tag hierarchy
+      ansible.builtin.command:
+        cmd: >-
+          {{ kayobe_config_path }}/../../tools/kolla-images.py
+          check-hierarchy
+          --kolla-ansible-path {{ tempdir_result.path }}
+
+    - name: Remove temporary directory
+      ansible.builtin.file:
+        path: "{{ tempdir_result.path }}"
+        state: absent
diff --git a/etc/kayobe/ansible/check-tags.yml b/etc/kayobe/ansible/check-tags.yml
index dc429a7cd..bdfb294da 100644
--- a/etc/kayobe/ansible/check-tags.yml
+++ b/etc/kayobe/ansible/check-tags.yml
@@ -4,6 +4,7 @@
 
 - name: Check whether tags exist in Pulp container registry
   hosts: localhost
+  gather_facts: false
   tasks:
     - name: Query images and tags
       command:
diff --git a/etc/kayobe/ansible/fix-hostname.yml b/etc/kayobe/ansible/fix-hostname.yml
index dc3c92e32..ca5bd8883 100644
--- a/etc/kayobe/ansible/fix-hostname.yml
+++ b/etc/kayobe/ansible/fix-hostname.yml
@@ -21,3 +21,10 @@
         cmd: hostnamectl set-hostname "{{ inventory_hostname }}"
       when: current_hostname.stdout != inventory_hostname
       become: true
+
+- name: Reboot hosts
+  import_playbook: "{{ playbook_dir | realpath }}/reboot.yml"
+  vars:
+    reboot_hosts: fix-hostname
+    reboot_with_bootstrap_user: true
+  when: current_hostname.stdout != inventory_hostname
diff --git a/etc/kayobe/ansible/growroot.yml b/etc/kayobe/ansible/growroot.yml
index 333991aa0..4748ab75b 100644
--- a/etc/kayobe/ansible/growroot.yml
+++ b/etc/kayobe/ansible/growroot.yml
@@ -75,7 +75,7 @@
       vars:
         pv: "{{ pvs.stdout | from_json }}"
         disk_tmp: "{{ pv.report[0].pv[0].pv_name[:-1] }}"
-        disk: "{{ disk_tmp[:-1] if disk_tmp[-1] == 'p' and disk_tmp[:9] == '/dev/nvme' else disk_tmp }}"
+        disk: "{{ disk_tmp[:-1] if pv.report[0].pv[0].pv_name | regex_search('[a-z0-9]+[0-9]+p[0-9]+') else disk_tmp }}"
        part_num: "{{ pv.report[0].pv[0].pv_name[-1] }}"
       become: true
       failed_when: "growpart.rc != 0 and 'NOCHANGE' not in growpart.stdout"
diff --git a/etc/kayobe/ansible/reboot.yml b/etc/kayobe/ansible/reboot.yml
index 92603ade2..545c509df 100644
--- a/etc/kayobe/ansible/reboot.yml
+++ b/etc/kayobe/ansible/reboot.yml
@@ -1,6 +1,6 @@
 ---
 - name: Reboot the host
-  hosts: seed-hypervisor:seed:overcloud:infra-vms
+  hosts: "{{ reboot_hosts | default('seed-hypervisor:seed:overcloud:infra-vms') }}"
   serial: "{{ lookup('env', 'ANSIBLE_SERIAL') | default(1, true) }}"
   gather_facts: false
   vars:
diff --git a/etc/kayobe/ansible/vault-generate-backend-tls.yml b/etc/kayobe/ansible/vault-generate-backend-tls.yml
index bd61f9d9a..5603f1991 100644
--- a/etc/kayobe/ansible/vault-generate-backend-tls.yml
+++ b/etc/kayobe/ansible/vault-generate-backend-tls.yml
@@ -18,7 +18,7 @@
 - name: Generate backend API certificates
   hosts: controllers:network
   vars:
-    vault_api_addr: "https://{{ internal_net_name | net_ip }}:8200"
+    vault_api_addr: "https://{{ internal_net_name | net_ip(groups['controllers'][0]) }}:8200"
     vault_intermediate_ca_name: "OS-TLS-INT"
   tasks:
     - name: Set a fact about the virtualenv on the remote system
diff --git a/etc/kayobe/environments/ci-multinode/hooks/overcloud-host-configure/pre.d/10-fix-hostname.yml b/etc/kayobe/environments/ci-multinode/hooks/overcloud-host-configure/pre.d/10-fix-hostname.yml
new file mode 120000
index 000000000..829fbfd77
--- /dev/null
+++ b/etc/kayobe/environments/ci-multinode/hooks/overcloud-host-configure/pre.d/10-fix-hostname.yml
@@ -0,0 +1 @@
+../../../../../ansible/fix-hostname.yml
\ No newline at end of file
diff --git a/etc/kayobe/environments/ci-multinode/inventory/groups b/etc/kayobe/environments/ci-multinode/inventory/groups
index 08018ca3a..8bc65f541 100644
--- a/etc/kayobe/environments/ci-multinode/inventory/groups
+++ b/etc/kayobe/environments/ci-multinode/inventory/groups
@@ -5,4 +5,4 @@
 controllers
 
 [fix-hostname:children]
-storage
+overcloud
diff --git a/etc/kayobe/kolla/inventory/group_vars/prometheus-blackbox-exporter b/etc/kayobe/kolla/inventory/group_vars/prometheus-blackbox-exporter
index 4e95d64bd..cc4990334 100644
--- a/etc/kayobe/kolla/inventory/group_vars/prometheus-blackbox-exporter
+++ b/etc/kayobe/kolla/inventory/group_vars/prometheus-blackbox-exporter
@@ -6,13 +6,16 @@
 # prometheus_blackbox_exporter_endpoints_kayobe is another set of default
 # endpoints that are templated by Kayobe rather than Kolla Ansible. See
 # kolla/globals.yml for more details.
-prometheus_blackbox_exporter_endpoints: >-
-  {{ (prometheus_blackbox_exporter_endpoints_kayobe | default([]) +
-  prometheus_blackbox_exporter_endpoints_default) |
-  selectattr('enabled', 'true') |
-  map(attribute='endpoints') | flatten |
-  union(prometheus_blackbox_exporter_endpoints_custom) |
-  unique | select | list }}
+prometheus_blackbox_exporter_endpoints: |
+  {% set endpoints = [] %}
+  {% for dict_item in (prometheus_blackbox_exporter_endpoints_kayobe | default([]) + prometheus_blackbox_exporter_endpoints_default) %}
+  {% if dict_item.enabled | bool %}
+  {% for endpoint in dict_item.endpoints %}
+  {% set _ = endpoints.append(endpoint) %}
+  {% endfor %}
+  {% endif %}
+  {% endfor %}
+  {{ (endpoints + prometheus_blackbox_exporter_endpoints_custom) | unique | select | list }}
 
 # A list of custom prometheus Blackbox exporter endpoints. Each element should
 # have the following format:
@@ -125,7 +128,7 @@ prometheus_blackbox_exporter_endpoints_default:
   - endpoints:
       - "prometheus_alertmanager:http_2xx_alertmanager:{{ prometheus_alertmanager_public_endpoint if enable_prometheus_alertmanager_external else prometheus_alertmanager_internal_endpoint }}"
     enabled: "{{ enable_prometheus_alertmanager | bool }}"
-  - endpoints: "{% set rabbitmq_endpoints = [] %}{% for host in groups.get('rabbitmq', []) %}{{ rabbitmq_endpoints.append('rabbitmq_' + host.replace('-', '') + (':tls_connect:' if rabbitmq_enable_tls else ':tcp_connect:') + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['rabbitmq_port'] ) }}{% endfor %}{{ rabbitmq_endpoints }}"
+  - endpoints: "{% set rabbitmq_endpoints = [] %}{% for host in groups.get('rabbitmq', []) %}{{ rabbitmq_endpoints.append('rabbitmq_' + host.replace('-', '') + (':tls_connect:' if rabbitmq_enable_tls | bool else ':tcp_connect:') + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['rabbitmq_port'] ) }}{% endfor %}{{ rabbitmq_endpoints }}"
     enabled: "{{ enable_rabbitmq | bool }}"
   - endpoints: "{% set redis_endpoints = [] %}{% for host in groups.get('redis', []) %}{{ redis_endpoints.append('redis_' + host.replace('-', '') + ':tcp_connect:' + ('api' | kolla_address(host) | put_address_in_context('url')) + ':' + hostvars[host]['redis_port']) }}{% endfor %}{{ redis_endpoints }}"
     enabled: "{{ enable_redis | bool }}"
@@ -146,7 +149,7 @@ heat_cfn_internal_base_endpoint: "{{ heat_cfn_internal_fqdn | kolla_url(internal
 heat_cfn_public_base_endpoint: "{{ heat_cfn_external_fqdn | kolla_url(public_protocol, heat_api_cfn_public_port) }}"
 heat_internal_base_endpoint: "{{ heat_internal_fqdn | kolla_url(internal_protocol, heat_api_port) }}"
 heat_public_base_endpoint: "{{ heat_external_fqdn | kolla_url(public_protocol, heat_api_public_port) }}"
-horizon_public_endpoint: "{{ horizon_external_fqdn | kolla_url(public_protocol, horizon_listen_port) }}"
+horizon_public_endpoint: "{{ horizon_external_fqdn | kolla_url(public_protocol, horizon_tls_port if kolla_enable_tls_external | bool else horizon_port) }}"
 ironic_inspector_internal_endpoint: "{{ ironic_inspector_internal_fqdn | kolla_url(internal_protocol, ironic_inspector_port) }}"
 ironic_inspector_public_endpoint: "{{ ironic_inspector_external_fqdn | kolla_url(public_protocol, ironic_inspector_public_port) }}"
 magnum_internal_base_endpoint: "{{ magnum_internal_fqdn | kolla_url(internal_protocol, magnum_api_port) }}"
diff --git a/releasenotes/notes/fix-cve-2024-44082-122ef225f674d864.yaml b/releasenotes/notes/fix-cve-2024-44082-122ef225f674d864.yaml
new file mode 100644
index 000000000..3066b0758
--- /dev/null
+++ b/releasenotes/notes/fix-cve-2024-44082-122ef225f674d864.yaml
@@ -0,0 +1,12 @@
+---
+security:
+  - |
+    Fixes `CVE-2024-44082
+    <https://nvd.nist.gov/vuln/detail/CVE-2024-44082>`_ with updated
+    container images for Ironic services. Note that Ironic Python Agent images
+    also need to be updated to fully fix this vulnerability. If this is not
+    possible, a new configuration option
+    ``[conductor]conductor_always_validates_images`` is available. See the
+    `OSSA-2024-003 description
+    <https://security.openstack.org/ossa/OSSA-2024-003.html>`_ for more
+    details.
diff --git a/releasenotes/notes/fixes-growroot-for-software-raid-3852bdea5415a0be.yaml b/releasenotes/notes/fixes-growroot-for-software-raid-3852bdea5415a0be.yaml
new file mode 100644
index 000000000..0f66c6934
--- /dev/null
+++ b/releasenotes/notes/fixes-growroot-for-software-raid-3852bdea5415a0be.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fixes a regression when using ``growroot.yml`` and software RAID, where the
+    playbook would fail to identify the correct disk.
diff --git a/tools/kolla-images.py b/tools/kolla-images.py
index 33fb82e25..82ded3f81 100755
--- a/tools/kolla-images.py
+++ b/tools/kolla-images.py
@@ -39,11 +39,20 @@
 
 # Maps a Kolla image to a list of containers that use the image.
 IMAGE_TO_CONTAINERS_EXCEPTIONS: Dict[str, List[str]] = {
+    "dnsmasq": [
+        "ironic_dnsmasq",
+    ],
     "haproxy": [
         "glance_tls_proxy",
+        "haproxy",
         "neutron_tls_proxy",
     ],
-    "neutron-eswitchd": [
+    "mariadb-server": [
+        "mariadb",
+        "mariabackup",
+    ],
+    "neutron-mlnx-agent": [
+        "neutron_eswitchd",
         "neutron_mlnx_agent",
     ],
     "neutron-metadata-agent": [
@@ -54,6 +63,15 @@
         "nova_super_conductor",
         "nova_conductor",
     ],
+    "openvswitch-db-server": [
+        "openvswitch_db",
+    ],
+    "ovn-nb-db-server": [
+        "ovn_nb_db",
+    ],
+    "ovn-sb-db-server": [
+        "ovn_sb_db",
+    ],
     "prometheus-v2-server": [
         "prometheus_server",
     ],
@@ -91,6 +109,9 @@ def parse_args() -> argparse.Namespace:
     parser.add_argument("--base-distros", default=",".join(SUPPORTED_BASE_DISTROS), choices=SUPPORTED_BASE_DISTROS)
     subparsers = parser.add_subparsers(dest="command", required=True)
 
+    subparser = subparsers.add_parser("check-image-map", help="Check image mapping against kolla-ansible")
+    subparser.add_argument("--kolla-ansible-path", required=True, help="Path to kolla-ansible repository checked out to correct branch")
+
     subparser = subparsers.add_parser("check-hierarchy", help="Check tag variable hierarchy against kolla-ansible")
     subparser.add_argument("--kolla-ansible-path", required=True, help="Path to kolla-ansible repostory checked out to correct branch")
 
@@ -109,7 +130,7 @@ def parse_args() -> argparse.Namespace:
     return parser.parse_args()
 
 
-def get_abs_path(relative_path: str) -> str:
+def get_abs_path(relative_path: str) -> pathlib.Path:
     """Return the absolute path of a file in SKC."""
     script_path = pathlib.Path(inspect.getfile(inspect.currentframe()))
     return script_path.parent.parent / relative_path
@@ -272,6 +293,45 @@ def check_tags(base_distros: List[str], kolla_image_tags: KollaImageTags, regist
         sys.exit(1)
 
 
+def check_image_map(kolla_ansible_path: str):
+    """Check the image mapping against Kolla Ansible variables.
+
+    The *_image variables in Kolla Ansible define the mapping between
+    containers and images. Ensure that the mapping defined in this script
+    matches the one in Kolla Ansible.
+    """
+    supported_images = read_images("etc/kayobe/pulp.yml")
+    assert supported_images
+    # Build a map from container to image name.
+    cmd = """git grep -h '^[a-z0-9_]*_image:' ansible/roles/*/defaults/main.yml"""
+    image_map_str = subprocess.check_output(cmd, shell=True, cwd=os.path.realpath(kolla_ansible_path))
+    image_map = yaml.safe_load(image_map_str)
+    image_var_re = re.compile(r"^([a-z0-9_]+)_image$")
+    image_map = {
+        image_var_re.match(image_var).group(1): image.split("/")[-1]
+        for image_var, image in image_map.items()
+    }
+    # Filter out unsupported images.
+    image_map = {
+        container: image
+        for container, image in image_map.items()
+        if image in supported_images
+    }
+    assert image_map
+    errors = []
+    # Check that our mapping is correct.
+    for container, image in image_map.items():
+        containers = get_containers(image)
+        if container not in containers:
+            errors.append((container, image))
+    if errors:
+        print("Errors:")
+        for tag_var, image in errors:
+            print(f"Expected {tag_var} container to use {image} image")
+    if errors:
+        sys.exit(1)
+
+
 def check_hierarchy(kolla_ansible_path: str):
     """Check the tag variable hierarchy against Kolla Ansible variables."""
     cmd = """git grep -h '^[a-z0-9_]*_tag:' ansible/roles/*/defaults/main.yml"""
@@ -347,7 +407,9 @@ def main():
 
     validate(kolla_image_tags)
 
-    if args.command == "check-hierarchy":
+    if args.command == "check-image-map":
+        check_image_map(args.kolla_ansible_path)
+    elif args.command == "check-hierarchy":
         check_hierarchy(args.kolla_ansible_path)
     elif args.command == "check-tags":
         check_tags(base_distros, kolla_image_tags, args.registry, args.namespace)
diff --git a/tools/merge.py b/tools/merge.py
new file mode 100755
index 000000000..216614b55
--- /dev/null
+++ b/tools/merge.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python3
+
+DESCRIPTION = """
+This script merges one release branch of SKC into another.
+
+Example 1: Merge stackhpc/yoga into stackhpc/zed:
+
+    merge.py yoga zed
+
+Example 2: Merge the branch created in example 1 into stackhpc/2023.1:
+
+    merge.py zed 2023.1 zed-yoga-merge
+
+Example 3: Continue after manually resolving merge conflicts seen in example 2:
+
+    merge.py zed 2023.1 zed-yoga-merge --continue
+
+"""
+
+import argparse
+import os
+from subprocess import check_call, check_output
+import sys
+
+
+def command(cmd):
+    print("Running:", cmd)
+    check_call(cmd)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter)
+    #"Merge one branch of SKC into the next")
+    parser.add_argument("previous", type=str, help="The previous version")
+    parser.add_argument("current", type=str, help="The current version")
+    parser.add_argument("previous_branch", type=str, nargs="?", default=None, help="Optional branch to use as the previous release. Allows merging multiple branches in parallel.")
+    parser.add_argument("--continue", dest="cont", action="store_true", help="Continue after merge conflicts have been resolved.")
+    parser.add_argument("--remote", type=str, default="origin", help="Git remote")
+    return parser.parse_args()
+
+
+def fetch(args):
+    command(["git", "fetch", args.remote])
+
+
+def checkout(args):
+    merge_branch = f"{args.current}-{args.previous}-merge"
+    current_branch = f"{args.remote}/stackhpc/{args.current}"
+    command(["git", "checkout", "-B", merge_branch, current_branch])
+
+
+def update_submodules():
+    command(["git", "submodule", "update"])
+
+
+def merge_in_progress():
+    repo_root = check_output(["git", "rev-parse", "--show-toplevel"])
+    repo_root = repo_root.decode().strip()
+    return os.path.isfile(os.path.join(repo_root, ".git", "MERGE_HEAD"))
+
+
+def uncommitted_changes():
+    unstaged = check_output(["git", "diff"])
+    staged = check_output(["git", "diff", "--cached"])
+    return unstaged or staged
+
+
+def continue_merge():
+    if merge_in_progress():
+        command(["git", "merge", "--continue"])
+    else:
+        print("No merge in progress")
+
+
+def merge(args):
+    if args.previous_branch:
+        previous_branch = args.previous_branch
+    else:
+        previous_branch = f"{args.remote}/stackhpc/{args.previous}"
+    commit_message = f"Merge stackhpc/{args.previous} into stackhpc/{args.current}"
+    command(["git", "merge", previous_branch, "-m", commit_message])
+
+
+def show_diff(args):
+    print("Proposed changes:")
+    current_branch = f"{args.remote}/stackhpc/{args.current}"
+    command(["git", "diff", current_branch])
+
+
+def create_pr(args):
+    current_branch = f"stackhpc/{args.current}"
+    pr_title = f"{args.current}: {args.previous} merge"
+    command(["gh", "pr", "create", "-f", "-a", "@me", "-B", current_branch, "-t", pr_title])
+
+
+def main():
+    args = parse_args()
+    if args.cont:
+        continue_merge()
+    else:
+        if merge_in_progress():
+            print("Merge in progress - did you miss the --continue argument?")
+            sys.exit(1)
+        if uncommitted_changes():
+            print("You have uncommitted changes - aborting")
+            sys.exit(1)
+        fetch(args)
+        checkout(args)
+        update_submodules()
+        merge(args)
+        show_diff(args)
+        create_pr(args)
+
+
+if __name__ == "__main__":
+    main()