diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
deleted file mode 100644
index fb942777..00000000
--- a/.github/workflows/integration.yml
+++ /dev/null
@@ -1,96 +0,0 @@
----
-name: Integration Test
-
-permissions:
- pull-requests: read
- contents: read
- statuses: write
-
-# Running testing farm needs TF_API_KEY secret available inside the forked repo.
-# So the pull_request_target trigger has to be used in this case. To protect the
-# secrets this workflow has a PR sender permission checking at first job. Only
-# collaborator with repo write or admin permission can run this workflow.
-
-on:
- pull_request_target:
- types: [opened, synchronize, reopened]
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
- cancel-in-progress: true
-
-env:
- AWS_REGION: us-west-2
-
-jobs:
- pr-info:
- runs-on: ubuntu-latest
- steps:
- - name: Query author repository permissions
- uses: octokit/request-action@v2.x
- id: user_permission
- with:
- route: GET /repos/${{ github.repository }}/collaborators/${{ github.event.sender.login }}/permission
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
- # restrict running of tests to users with admin or write permission for the repository
- # see https://docs.github.com/en/rest/collaborators/collaborators?apiVersion=2022-11-28#get-repository-permissions-for-a-user
- - name: Check if user does have correct permissions
- if: contains('admin write', fromJson(steps.user_permission.outputs.data).permission)
- id: check_user_perm
- run: |
- echo "User '${{ github.event.sender.login }}' has permission '${{ fromJson(steps.user_permission.outputs.data).permission }}' allowed values: 'admin', 'write'"
- echo "allowed_user=true" >> $GITHUB_OUTPUT
-
- - name: Get information for pull request
- uses: octokit/request-action@v2.x
- id: pr-api
- with:
- route: GET /repos/${{ github.repository }}/pulls/${{ github.event.number }}
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
- outputs:
- allowed_user: ${{ steps.check_user_perm.outputs.allowed_user }}
- sha: ${{ fromJson(steps.pr-api.outputs.data).head.sha }}
- ref: ${{ fromJson(steps.pr-api.outputs.data).head.ref }}
- repo_url: ${{ fromJson(steps.pr-api.outputs.data).head.repo.html_url }}
-
- integration:
- needs: pr-info
- if: ${{ needs.pr-info.outputs.allowed_user == 'true' && !contains(github.event.pull_request.labels.*.name, 'control/skip-ci') }}
- continue-on-error: true
- strategy:
- matrix:
- arch: [x86_64, aarch64]
- distro: [rhel-9-5, centos-stream-9, fedora-40]
- exclude:
- - arch: x86_64
- distro: centos-stream-9
- - arch: aarch64
- distro: fedora-40
- runs-on: ubuntu-latest
-
- steps:
- - name: Clone repository
- uses: actions/checkout@v4
- with:
- ref: ${{ needs.pr-info.outputs.sha }}
- fetch-depth: 0
-
- - name: Run the tests
- uses: sclorg/testing-farm-as-github-action@v3
- with:
- compose: Fedora-40
- api_key: ${{ secrets.TF_API_KEY }}
- git_url: ${{ needs.pr-info.outputs.repo_url }}
- git_ref: ${{ needs.pr-info.outputs.ref }}
- arch: ${{ matrix.arch }}
- update_pull_request_status: true
- pull_request_status_name: "bootc-${{ matrix.distro }}-${{ matrix.arch }}"
- tmt_context: "arch=${{ matrix.arch }}"
- tmt_plan_regex: "/install-upgrade/"
- tf_scope: private
- secrets: "QUAY_USERNAME=${{ secrets.QUAY_USERNAME }};QUAY_PASSWORD=${{ secrets.QUAY_PASSWORD }};QUAY_SECRET=${{ secrets.QUAY_SECRET }};RHEL_REGISTRY_URL=${{ secrets.RHEL_REGISTRY_URL }};DOWNLOAD_NODE=${{ secrets.DOWNLOAD_NODE }};AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }};AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }}"
- variables: "TEST_OS=${{ matrix.distro }};ARCH=${{ matrix.arch }};AWS_REGION=${{ env.AWS_REGION }}"
diff --git a/.packit.yaml b/.packit.yaml
index 29c9639f..1794a5d8 100644
--- a/.packit.yaml
+++ b/.packit.yaml
@@ -1,5 +1,55 @@
---
+specfile_path: contrib/packaging/bootc.spec
+
+files_to_sync:
+ - contrib/packaging/bootc.spec
+ - .packit.yaml
+
+upstream_tag_template: v{version}
+
+upstream_package_name: bootc
+downstream_package_name: bootc
+
+srpm_build_deps:
+ - cargo
+ - git
+ - zstd
+ - libzstd-devel
+ - ostree-devel
+ - openssl-devel
+
+actions:
+  # The last step here is required by Packit to return the archive name
+ # https://packit.dev/docs/configuration/actions#create-archive
+ create-archive:
+ - bash -c "cargo install cargo-vendor-filterer"
+ - bash -c "cargo xtask spec"
+ - bash -c "cat target/bootc.spec"
+ - bash -c "cp target/bootc* contrib/packaging/"
+ - bash -c "ls -1 target/bootc*.tar.zstd | grep -v 'vendor'"
+ # Do nothing with spec file. Two steps here are for debugging
+ fix-spec-file:
+ - bash -c "cat contrib/packaging/bootc.spec"
+ - bash -c "ls -al contrib/packaging/"
+
jobs:
+ # Only add CS10 and RHEL-9 RPM build test
+ # But no e2e test on CS10 and RHEL-9
+ - job: copr_build
+ trigger: pull_request
+ targets:
+ - centos-stream-9-x86_64
+ - centos-stream-9-aarch64
+ - centos-stream-9-ppc64le
+ - fedora-40-x86_64
+ - fedora-40-aarch64
+ - fedora-40-ppc64le
+ - fedora-rawhide-x86_64
+ - fedora-rawhide-aarch64
+ - fedora-rawhide-ppc64le
+ - rhel-9-x86_64
+ - rhel-9-aarch64
+
- job: tests
trigger: pull_request
targets:
@@ -8,3 +58,21 @@ jobs:
tmt_plan: /integration-build
skip_build: true
identifier: integration-test
+
+ - job: tests
+ trigger: pull_request
+ targets:
+ - centos-stream-9-x86_64
+ - centos-stream-9-aarch64
+ - fedora-40-x86_64
+ tmt_plan: /to-existing-root
+ identifier: e2e-test-to-existing-root
+
+ - job: tests
+ trigger: pull_request
+ targets:
+ - centos-stream-9-x86_64
+ - centos-stream-9-aarch64
+ - fedora-40-aarch64
+ tmt_plan: /to-disk
+ identifier: e2e-test-to-disk
diff --git a/plans/e2e.fmf b/plans/e2e.fmf
new file mode 100644
index 00000000..587b5a54
--- /dev/null
+++ b/plans/e2e.fmf
@@ -0,0 +1,59 @@
+discover:
+ how: fmf
+ test: e2e
+adjust:
+ - when: arch == x86_64 or arch == aarch64
+ provision:
+ hardware:
+ cpu:
+ processors: ">= 2"
+ memory: ">= 6 GB"
+ virtualization:
+ is-supported: true
+prepare:
+ - how: shell
+ script: |
+ source /etc/os-release
+ if [[ "$ID" == "centos" ]]; then
+ # EPEL for genisoimage
+ dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
+ fi
+ - how: install
+ package:
+ - ansible-core
+ - firewalld
+ - podman
+ - skopeo
+ - jq
+ - openssl
+ - qemu-img
+ - qemu-kvm
+ - libvirt
+ - virt-install
+ - genisoimage
+ - how: shell
+ script: ansible-galaxy collection install https://ansible-collection.s3.amazonaws.com/ansible-posix-1.5.4.tar.gz https://ansible-collection.s3.amazonaws.com/community-general-8.5.0.tar.gz
+execute:
+ how: tmt
+
+/to-existing-root:
+ summary: Run bootc install to-existing-root and bootc switch test locally (nested)
+ environment+:
+ TEST_CASE: to-existing-root
+ discover+:
+ test:
+ - /to-existing-root
+ adjust+:
+ - when: arch == ppc64le
+ enabled: false
+
+/to-disk:
+ summary: Run bootc install to-disk and bootc upgrade test locally (nested)
+ environment+:
+ TEST_CASE: to-disk
+ discover+:
+ test:
+ - /to-disk
+ adjust+:
+ - when: arch == ppc64le
+ enabled: false
diff --git a/plans/install-upgrade.fmf b/plans/install-upgrade.fmf
deleted file mode 100644
index 0de4cf89..00000000
--- a/plans/install-upgrade.fmf
+++ /dev/null
@@ -1,101 +0,0 @@
-discover:
- how: fmf
- test: install-upgrade
-prepare:
- - how: install
- package:
- - ansible-core
- - podman
- - skopeo
- - jq
- - unzip
- - how: shell
- script: ansible-galaxy collection install https://ansible-collection.s3.amazonaws.com/ansible-posix-1.5.4.tar.gz https://ansible-collection.s3.amazonaws.com/community-general-8.5.0.tar.gz
-execute:
- how: tmt
-
-/aws:
- summary: Run bootc install and upgrade test on aws
- environment+:
- PLATFORM: aws
- discover+:
- test:
- - /rpm-build
- - /bootc-install-upgrade
- adjust+:
- - when: arch != x86_64 and arch != aarch64
- enabled: false
- prepare+:
- - how: shell
- script: curl "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" -o "awscliv2.zip" && unzip awscliv2.zip && sudo ./aws/install
-
-/libvirt:
- summary: Run bootc install and upgrade test locally (nested)
- environment+:
- PLATFORM: libvirt
- AIR_GAPPED: 1
- discover+:
- test:
- - /rpm-build
- - /bootc-install-upgrade
- prepare+:
- - how: shell
- script: |
- source /etc/os-release
- if [[ "$ID" == "rhel" ]] || [[ "$ID" == "centos" ]]; then
- # EPEL for genisoimage
- dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
- fi
- - how: install
- package:
- - qemu-kvm
- - libvirt
- - virt-install
- - genisoimage
- adjust+:
- - when: arch == ppc64le
- enabled: false
- - when: arch == x86_64 or arch == aarch64
- provision+:
- hardware:
- cpu:
- processors: ">= 2"
- memory: ">= 6 GB"
- virtualization:
- is-supported: true
-
-/to-disk:
- summary: Use bootc install to-disk to generate raw image and test locally (nested)
- environment+:
- PLATFORM: libvirt
- IMAGE_TYPE: to-disk
- discover+:
- test:
- - /rpm-build
- - /image-install-upgrade
- prepare+:
- - how: shell
- script: |
- source /etc/os-release
- if [[ "$ID" == "rhel" ]] || [[ "$ID" == "centos" ]]; then
- # EPEL for genisoimage
- dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
- fi
- - how: install
- package:
- - qemu-img
- - qemu-kvm
- - libvirt
- - virt-install
- - genisoimage
- adjust+:
- - when: arch == ppc64le
- enabled: false
- - when: arch == x86_64 or arch == aarch64
- provision+:
- hardware:
- cpu:
- processors: ">= 2"
- memory: ">= 6 GB"
- virtualization:
- is-supported: true
diff --git a/tests/e2e/README.md b/tests/e2e/README.md
new file mode 100644
index 00000000..54d2815e
--- /dev/null
+++ b/tests/e2e/README.md
@@ -0,0 +1,11 @@
+## End to end (e2e) Test
+
+### Scenarios
+
+End to end (e2e) test includes `bootc install to-existing-root`, `bootc install to-disk`, `bootc upgrade`, and `bootc switch` tests
+
+* The bootc install/upgrade/switch scenario will install, upgrade, and switch the bootc image and perform some system checks, such as checking mount points/permissions, running podman as root and rootless, checking persistent logs, etc.
+
+### Run end to end Test
+
+Test runs are driven by [Packit](https://packit.dev/) and run on [Testing-farm](https://docs.testing-farm.io/).
diff --git a/tests/e2e/bootc-install.sh b/tests/e2e/bootc-install.sh
new file mode 100755
index 00000000..46ac8c31
--- /dev/null
+++ b/tests/e2e/bootc-install.sh
@@ -0,0 +1,312 @@
+#!/bin/bash
+set -exuo pipefail
+
+source ./shared_lib.sh
+dump_runner
+deploy_libvirt_network
+
+ARCH=$(uname -m)
+
+TEMPDIR=$(mktemp -d)
+trap 'rm -rf -- "$TEMPDIR"' EXIT
+
+# SSH configurations
+SSH_KEY=${TEMPDIR}/id_rsa
+ssh-keygen -f "${SSH_KEY}" -N "" -q -t rsa-sha2-256 -b 2048
+SSH_KEY_PUB="${SSH_KEY}.pub"
+
+INSTALL_CONTAINERFILE=${TEMPDIR}/Containerfile.install
+UPGRADE_CONTAINERFILE=${TEMPDIR}/Containerfile.upgrade
+QUAY_REPO_TAG="${QUAY_REPO_TAG:-$(tr -dc a-z0-9 < /dev/urandom | head -c 4 ; echo '')}"
+INVENTORY_FILE="${TEMPDIR}/inventory"
+# Local registry IP and port
+REGISTRY_IP="192.168.100.1"
+REGISTRY_PORT=5000
+
+# VM firmware
+if [[ "$ARCH" == "x86_64" ]]; then
+ FIRMWARE_LIST=( \
+ "bios" \
+ "uefi" \
+ )
+ RND_LINE=$((RANDOM % 2))
+ FIRMWARE="${FIRMWARE_LIST[$RND_LINE]}"
+else
+ FIRMWARE="uefi"
+fi
+
+# Get OS data.
+source /etc/os-release
+
+case ""${ID}-${VERSION_ID}"" in
+ "centos-9")
+ TEST_OS="centos-stream-9"
+ TIER1_IMAGE_URL="quay.io/centos-bootc/centos-bootc-dev:stream9"
+ SSH_USER="cloud-user"
+ REDHAT_VERSION_ID="9"
+ BOOT_ARGS="uefi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no"
+ ;;
+ "fedora-"*)
+ TEST_OS="fedora-${VERSION_ID}"
+ TIER1_IMAGE_URL="quay.io/fedora/fedora-bootc:${VERSION_ID}"
+ REDHAT_VERSION_ID="${VERSION_ID}"
+ SSH_USER="fedora"
+ BOOT_ARGS="uefi"
+ ;;
+ *)
+ redprint "Variable TEST_OS has to be defined"
+ exit 1
+ ;;
+esac
+
+# Setup local registry
+greenprint "Generate certificate"
+openssl req \
+ -newkey rsa:4096 \
+ -nodes \
+ -sha256 \
+ -keyout "${TEMPDIR}/domain.key" \
+ -addext "subjectAltName = IP:${REGISTRY_IP}" \
+ -x509 \
+ -days 365 \
+ -out "${TEMPDIR}/domain.crt" \
+ -subj "/C=US/ST=Denial/L=Stockholm/O=bootc/OU=bootc-test/CN=bootc-test/emailAddress=bootc-test@bootc-test.org"
+
+greenprint "Update CA Trust"
+sudo cp "${TEMPDIR}/domain.crt" "/etc/pki/ca-trust/source/anchors/${REGISTRY_IP}.crt"
+sudo update-ca-trust
+
+greenprint "Deploy local registry"
+sudo podman run \
+ -d \
+ --name registry \
+ --replace \
+ --network host \
+ -v "${TEMPDIR}":/certs:z \
+ -e REGISTRY_HTTP_ADDR="${REGISTRY_IP}:${REGISTRY_PORT}" \
+ -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
+ -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
+ quay.io/bootc-test/registry:2.8.3
+sudo podman ps -a
+
+# Test image URL
+TEST_IMAGE_NAME="bootc-workflow-test"
+TEST_IMAGE_URL="${REGISTRY_IP}:${REGISTRY_PORT}/${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}"
+
+# Debug PACKIT_COPR_PROJECT and PACKIT_COPR_RPMS
+echo "$PACKIT_COPR_PROJECT and $PACKIT_COPR_RPMS"
+
+# Generate bootc copr repo file
+if [[ "$VERSION_ID" == 41 ]]; then
+ REPLACE_TEST_OS="${ID}-rawhide"
+else
+ REPLACE_TEST_OS="$TEST_OS"
+fi
+sed "s|REPLACE_COPR_PROJECT|${PACKIT_COPR_PROJECT}|; s|REPLACE_TEST_OS|${REPLACE_TEST_OS}|" files/bootc.repo.template | tee "${TEMPDIR}"/bootc.repo > /dev/null
+
+# Configure Containerfile
+greenprint "Create $TEST_OS installation Containerfile"
+tee "$INSTALL_CONTAINERFILE" > /dev/null << EOF
+FROM "$TIER1_IMAGE_URL"
+COPY bootc.repo /etc/yum.repos.d/
+COPY domain.crt /etc/pki/ca-trust/source/anchors/
+RUN dnf -y update bootc && \
+ update-ca-trust
+EOF
+
+case "$TEST_CASE" in
+ "to-existing-root")
+ SSH_USER="root"
+ SSH_KEY_PUB_CONTENT=$(cat "${SSH_KEY_PUB}")
+ tee -a "$INSTALL_CONTAINERFILE" > /dev/null << EOF
+RUN mkdir -p /usr/etc-system/ && \
+ echo 'AuthorizedKeysFile /usr/etc-system/%u.keys' >> /etc/ssh/sshd_config.d/30-auth-system.conf && \
+ echo "$SSH_KEY_PUB_CONTENT" > /usr/etc-system/root.keys && \
+ chmod 0600 /usr/etc-system/root.keys && \
+ dnf -y install qemu-guest-agent && \
+ dnf clean all && \
+ systemctl enable qemu-guest-agent
+EOF
+ ;;
+ "to-disk")
+ tee -a "$INSTALL_CONTAINERFILE" > /dev/null << EOF
+RUN dnf -y install python3 cloud-init && \
+ dnf -y clean all
+EOF
+ ;;
+esac
+
+greenprint "Check $TEST_OS installation Containerfile"
+cat "$INSTALL_CONTAINERFILE"
+
+# Build test bootc image and push to local registry
+greenprint "Build $TEST_OS installation container image"
+sudo podman build --tls-verify=false --retry=5 --retry-delay=10 -t "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" -f "$INSTALL_CONTAINERFILE" "$TEMPDIR"
+
+greenprint "Push $TEST_OS installation container image"
+retry sudo podman push --tls-verify=false --quiet "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL"
+
+# Prepare Ansible inventory file and ansible.cfg
+greenprint "Prepare inventory file"
+tee -a "$INVENTORY_FILE" > /dev/null << EOF
+[cloud]
+localhost
+
+[guest]
+
+[cloud:vars]
+ansible_connection=local
+
+[guest:vars]
+ansible_user="$SSH_USER"
+ansible_private_key_file="$SSH_KEY"
+ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
+
+[all:vars]
+ansible_python_interpreter=/usr/bin/python3
+EOF
+
+greenprint "Prepare ansible.cfg"
+export ANSIBLE_CONFIG="playbooks/ansible.cfg"
+
+# Run bootc install to-disk test
+case "$TEST_CASE" in
+ "to-existing-root")
+ DOWNLOAD_IMAGE="true"
+ AIR_GAPPED_DIR="$TEMPDIR"/virtiofs
+ mkdir "$AIR_GAPPED_DIR"
+ ;;
+ "to-disk")
+ DOWNLOAD_IMAGE="false"
+ AIR_GAPPED_DIR=""
+ greenprint "Configure rootfs randomly"
+ ROOTFS_LIST=( \
+ "ext4" \
+ "xfs" \
+ )
+ RND_LINE=$((RANDOM % 2))
+ ROOTFS="${ROOTFS_LIST[$RND_LINE]}"
+
+ if [[ "$TEST_OS" == "fedora"* ]]; then
+ ROOTFS="btrfs"
+ fi
+
+ greenprint "💾 Create disk.raw"
+ sudo truncate -s 10G disk.raw
+
+ greenprint "bootc install to disk.raw"
+ sudo podman run \
+ --rm \
+ --privileged \
+ --pid=host \
+ --security-opt label=type:unconfined_t \
+ -v /var/lib/containers:/var/lib/containers \
+ -v /dev:/dev \
+ -v .:/output \
+ "$TEST_IMAGE_URL" \
+ bootc install to-disk --filesystem "$ROOTFS" --generic-image --via-loopback /output/disk.raw
+
+ sudo qemu-img convert -f raw ./disk.raw -O qcow2 "/var/lib/libvirt/images/disk.qcow2"
+ rm -f disk.raw
+ ;;
+esac
+
+# Start disk.qcow2 for to-disk test
+# Start a new VM for to-existing-root test
+greenprint "Deploy VM"
+ansible-playbook -v \
+ -i "$INVENTORY_FILE" \
+ -e test_os="$TEST_OS" \
+ -e ssh_user="$SSH_USER" \
+ -e ssh_key_pub="$SSH_KEY_PUB" \
+ -e inventory_file="$INVENTORY_FILE" \
+ -e download_image="$DOWNLOAD_IMAGE" \
+ -e air_gapped_dir="$AIR_GAPPED_DIR" \
+ -e firmware="$FIRMWARE" \
+ -e boot_args="$BOOT_ARGS" \
+ playbooks/deploy-libvirt.yaml
+
+# Run bootc install to-existing-root test
+if [[ "$TEST_CASE" == "to-existing-root" ]]; then
+ greenprint "Install $TEST_OS bootc system"
+ ansible-playbook -v \
+ -i "$INVENTORY_FILE" \
+ -e test_os="$TEST_OS" \
+ -e test_image_url="$TEST_IMAGE_URL" \
+ -e test_case="$TEST_CASE" \
+ playbooks/install.yaml
+fi
+
+# Check bootc system
+greenprint "Run ostree checking test on VM"
+ansible-playbook -v \
+ -i "$INVENTORY_FILE" \
+ -e test_os="$TEST_OS" \
+ -e bootc_image="$TEST_IMAGE_URL" \
+ -e image_label_version_id="$REDHAT_VERSION_ID" \
+ playbooks/check-system.yaml
+
+# Prepare upgrade containerfile
+greenprint "Create upgrade Containerfile"
+tee "$UPGRADE_CONTAINERFILE" > /dev/null << EOF
+FROM "$TEST_IMAGE_URL"
+RUN dnf -y install wget && \
+ dnf -y clean all
+EOF
+
+# Build upgrade container image and push to local registry
+greenprint "Build $TEST_OS upgrade container image"
+sudo podman build --tls-verify=false --retry=5 --retry-delay=10 -t "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" -f "$UPGRADE_CONTAINERFILE" .
+
+greenprint "Push $TEST_OS upgrade container image"
+retry sudo podman push --tls-verify=false --quiet "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL"
+
+# Copy upgrade image to local folder for bootc switch test
+if [[ "$AIR_GAPPED_DIR" != "" ]]; then
+ retry skopeo copy docker://"$TEST_IMAGE_URL" dir://"$AIR_GAPPED_DIR"
+ BOOTC_IMAGE="/mnt"
+else
+ BOOTC_IMAGE="$TEST_IMAGE_URL"
+fi
+
+# bootc upgrade/switch test
+greenprint "Upgrade $TEST_OS system"
+ansible-playbook -v \
+ -i "$INVENTORY_FILE" \
+ -e air_gapped_dir="$AIR_GAPPED_DIR" \
+ playbooks/upgrade.yaml
+
+# Check bootc system after upgrade/switch
+greenprint "Run ostree checking test after upgrade on VM"
+ansible-playbook -v \
+ -i "$INVENTORY_FILE" \
+ -e test_os="$TEST_OS" \
+ -e bootc_image="$BOOTC_IMAGE" \
+ -e image_label_version_id="$REDHAT_VERSION_ID" \
+ -e upgrade="true" \
+ playbooks/check-system.yaml
+
+# bootc rollback test
+greenprint "Rollback $TEST_OS system"
+ansible-playbook -v \
+ -i "$INVENTORY_FILE" \
+ -e air_gapped_dir="$AIR_GAPPED_DIR" \
+ playbooks/rollback.yaml
+
+# Test finished and system clean up
+greenprint "Clean up"
+unset ANSIBLE_CONFIG
+sudo virsh destroy "bootc-${TEST_OS}"
+if [[ "$FIRMWARE" == "uefi" ]]; then
+ sudo virsh undefine "bootc-${TEST_OS}" --nvram
+else
+ sudo virsh undefine "bootc-${TEST_OS}"
+fi
+if [[ "$TEST_CASE" == "to-disk" ]]; then
+ sudo virsh vol-delete --pool images disk.qcow2
+else
+ sudo virsh vol-delete --pool images "bootc-${TEST_OS}.qcow2"
+fi
+
+greenprint "🎉 All tests passed."
+exit 0
diff --git a/tests/e2e/e2e.fmf b/tests/e2e/e2e.fmf
new file mode 100644
index 00000000..06eb9fba
--- /dev/null
+++ b/tests/e2e/e2e.fmf
@@ -0,0 +1,9 @@
+/to-existing-root:
+ summary: bootc install to-existing-root and bootc switch test
+ test: ./bootc-install.sh
+ duration: 90m
+
+/to-disk:
+ summary: bootc install to-disk and bootc upgrade test
+ test: ./bootc-install.sh
+ duration: 90m
diff --git a/tests/e2e/files/bootc.repo.template b/tests/e2e/files/bootc.repo.template
new file mode 100644
index 00000000..c6072a60
--- /dev/null
+++ b/tests/e2e/files/bootc.repo.template
@@ -0,0 +1,6 @@
+[bootc]
+name=bootc
+baseurl=https://download.copr.fedorainfracloud.org/results/REPLACE_COPR_PROJECT/REPLACE_TEST_OS-$basearch/
+enabled=1
+gpgcheck=0
+repo_gpgcheck=0
diff --git a/tests/integration/playbooks/ansible.cfg b/tests/e2e/playbooks/ansible.cfg
similarity index 100%
rename from tests/integration/playbooks/ansible.cfg
rename to tests/e2e/playbooks/ansible.cfg
diff --git a/tests/integration/playbooks/check-system.yaml b/tests/e2e/playbooks/check-system.yaml
similarity index 100%
rename from tests/integration/playbooks/check-system.yaml
rename to tests/e2e/playbooks/check-system.yaml
diff --git a/tests/integration/playbooks/deploy-libvirt.yaml b/tests/e2e/playbooks/deploy-libvirt.yaml
similarity index 66%
rename from tests/integration/playbooks/deploy-libvirt.yaml
rename to tests/e2e/playbooks/deploy-libvirt.yaml
index f90df989..7cda1918 100644
--- a/tests/integration/playbooks/deploy-libvirt.yaml
+++ b/tests/e2e/playbooks/deploy-libvirt.yaml
@@ -3,24 +3,24 @@
become: false
vars:
test_os: ""
- arch: "{{ lookup('env', 'ARCH') | default('x86_64', true) }}"
ssh_key_pub: ""
ssh_user: "cloud-user"
inventory_file: ""
- download_node: "{{ lookup('env', 'DOWNLOAD_NODE') | default('', true) }}"
instance_name: "bootc-{{ test_os }}"
image_path: "/var/lib/libvirt/images"
- bib: "false"
- bib_firmware: ""
+ download_image: "true"
+ air_gapped_dir: ""
+ firmware: ""
boot_args: ""
os_variant:
centos-stream-9: centos-stream9
- rhel-9-5: rhel9-unknown
- rhel-9-4: rhel9-unknown
fedora-40: fedora-unknown
fedora-41: fedora-unknown
tasks:
+ - set_fact:
+ arch: "{{ ansible_facts['architecture'] }}"
+
- name: Get temp folder
command: dirname "{{ inventory_file }}"
register: result_temp_folder
@@ -39,30 +39,10 @@
- set_fact:
download_image_name: "{{ out.stdout }}"
- rhel_guest_image_fname: "{{ instance_name }}.qcow2"
+ guest_image_fname: "{{ instance_name }}.qcow2"
when:
- "'centos' in test_os"
- - bib == "false"
-
- - name: Get rhel-guest-image filename
- block:
- - name: Get version from test_os
- shell: echo {{ test_os }} | sed 's/rhel-//;s/-/\./'
- register: result_os_version
-
- - set_fact:
- test_os_dot_version: "{{ result_os_version.stdout }}"
-
- - name: Get rhel-guest-image filename
- shell: curl -s http://{{ download_node }}/rhel-9/nightly/RHEL-9/latest-RHEL-{{ test_os_dot_version }}.0/compose/BaseOS/{{ arch }}/images/ | grep -oP '(?<=href=")rhel-guest-image-[^"]+.qcow2(?=")'
- register: out
-
- - set_fact:
- download_image_name: "{{ out.stdout }}"
- rhel_guest_image_fname: "{{ instance_name }}.qcow2"
- when:
- - "'rhel' in test_os"
- - bib == "false"
+ - download_image == "true"
- name: Get Fedora-Cloud-Base-Generic 40 image filename
block:
@@ -72,10 +52,10 @@
- set_fact:
download_image_name: "{{ out.stdout }}"
- rhel_guest_image_fname: "{{ instance_name }}.qcow2"
+ guest_image_fname: "{{ instance_name }}.qcow2"
when:
- test_os == "fedora-40"
- - bib == "false"
+ - download_image == "true"
- name: Get Fedora-Cloud-Base-Generic 41 image filename
block:
@@ -85,58 +65,44 @@
- set_fact:
download_image_name: "{{ out.stdout }}"
- rhel_guest_image_fname: "{{ instance_name }}.qcow2"
+ guest_image_fname: "{{ instance_name }}.qcow2"
when:
- test_os == "fedora-41"
- - bib == "false"
+ - download_image == "true"
- name: Download CentOS-Stream-GenericCloud image
get_url:
url: "https://composes.stream.centos.org/production/latest-CentOS-Stream/compose/BaseOS/{{ arch }}/images/{{ download_image_name }}"
- dest: "{{ image_path }}/{{ rhel_guest_image_fname }}"
+ dest: "{{ image_path }}/{{ guest_image_fname }}"
validate_certs: false
become: true
when:
- "'centos' in test_os"
- - bib == "false"
-
- - name: Download rhel-guest-image
- get_url:
- url: "http://{{ download_node }}/rhel-9/nightly/RHEL-9/latest-RHEL-{{ test_os_dot_version }}.0/compose/BaseOS/{{ arch }}/images/{{ download_image_name }}"
- dest: "{{ image_path }}/{{ rhel_guest_image_fname }}"
- validate_certs: false
- become: true
- when:
- - "'rhel' in test_os"
- - bib == "false"
+ - download_image == "true"
- name: Download Fedora-Cloud-Base-Generic 40
get_url:
url: "https://dl.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/{{ arch }}/images/{{ download_image_name }}"
- dest: "{{ image_path }}/{{ rhel_guest_image_fname }}"
+ dest: "{{ image_path }}/{{ guest_image_fname }}"
validate_certs: false
become: true
when:
- test_os == "fedora-40"
- - bib == "false"
+ - download_image == "true"
- name: Download Fedora-Cloud-Base-Generic 41
get_url:
url: "https://dl.fedoraproject.org/pub/fedora/linux/development/rawhide/Cloud/{{ arch }}/images/{{ download_image_name }}"
- dest: "{{ image_path }}/{{ rhel_guest_image_fname }}"
+ dest: "{{ image_path }}/{{ guest_image_fname }}"
validate_certs: false
become: true
when:
- test_os == "fedora-41"
- - bib == "false"
+ - download_image == "true"
- set_fact:
- rhel_guest_image_fname: "disk.qcow2"
- when: bib == "true"
-
- - name: Start libvirtd service
- command: systemctl start libvirtd.service
- become: true
+ guest_image_fname: "disk.qcow2"
+ when: download_image == "false"
- name: Generate user-data and meta-data
template:
@@ -221,19 +187,3 @@
option: guest ansible_host
value: "{{ instance_ip }}"
no_extra_spaces: true
-
- - name: Write random number to inventory file
- community.general.ini_file:
- path: "{{ inventory_file }}"
- section: cloud:vars
- option: rhel_guest_image_fname
- value: "{{ rhel_guest_image_fname }}"
- no_extra_spaces: true
-
- - name: Write instance name to inventory file
- community.general.ini_file:
- path: "{{ inventory_file }}"
- section: cloud:vars
- option: instance_name
- value: "{{ instance_name }}"
- no_extra_spaces: true
diff --git a/tests/integration/playbooks/install.yaml b/tests/e2e/playbooks/install.yaml
similarity index 57%
rename from tests/integration/playbooks/install.yaml
rename to tests/e2e/playbooks/install.yaml
index 77e37322..a3d8ca50 100644
--- a/tests/integration/playbooks/install.yaml
+++ b/tests/e2e/playbooks/install.yaml
@@ -3,8 +3,8 @@
become: false
vars:
test_os: ""
- platform: "{{ lookup('env', 'PLATFORM') | default('aws', true) }}"
test_image_url: ""
+ test_case: ""
tasks:
- name: check bios or uefi
@@ -27,61 +27,25 @@
# installing SELinux-enabled targets from SELinux-disabled hosts
# https://github.com/containers/bootc/issues/419
+  # Only run in the to-existing-root case
- name: disable selinux for libvirt only
command: setenforce 0
become: true
ignore_errors: true
when:
- - platform == "libvirt"
-
- - name: Install podman
- dnf:
- name:
- - podman
- state: present
- become: true
- when: ('rhel' not in test_os and test_os != 'fedora-41') or (platform != 'aws' and test_os != 'fedora-41')
-
- - name: Install podman from internal
- dnf:
- disablerepo: "*"
- enablerepo: "rhel-9y-*"
- name:
- - podman
- state: present
- become: true
- when:
- - "'rhel' in test_os"
- - platform == "aws"
+ - test_case == "to-existing-root"
# ansible dnf5 module needs python3-libdnf5
- - name: Install podman on fedora-41(dnf5)
+ - name: Install podman dnf and dnf5
command: dnf -y install podman
become: true
- when: test_os == 'fedora-41'
-
- - name: Auth for RHEL private image
- command:
- podman login \
- -u "{{ lookup('env', 'QUAY_USERNAME') }}" \
- -p "{{ lookup('env', 'QUAY_PASSWORD') }}" \
- quay.io
- no_log: true
- become: true
-
- - name: Pull image
- command: "podman pull {{ test_image_url }}"
- become: true
- retries: 3
- delay: 10
- register: result
- until: result is successful
- name: Install image
command:
"podman run \
--rm \
--privileged \
+ --tls-verify=false \
--pid=host \
-v /dev:/dev \
-v /:/target \
diff --git a/tests/integration/playbooks/rollback.yaml b/tests/e2e/playbooks/rollback.yaml
similarity index 100%
rename from tests/integration/playbooks/rollback.yaml
rename to tests/e2e/playbooks/rollback.yaml
diff --git a/tests/integration/playbooks/templates/meta-data.j2 b/tests/e2e/playbooks/templates/meta-data.j2
similarity index 100%
rename from tests/integration/playbooks/templates/meta-data.j2
rename to tests/e2e/playbooks/templates/meta-data.j2
diff --git a/tests/e2e/playbooks/templates/user-data.j2 b/tests/e2e/playbooks/templates/user-data.j2
new file mode 100644
index 00000000..ec086235
--- /dev/null
+++ b/tests/e2e/playbooks/templates/user-data.j2
@@ -0,0 +1,19 @@
+#cloud-config
+users:
+ - default
+ - name: {{ ssh_user }}
+ groups: wheel
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ lock_passwd: true
+ ssh_authorized_keys:
+ - {{ lookup('ansible.builtin.file', ssh_key_pub) }}
+
+# virt-install with --cloud-init always shuts down the VM on the first reboot
+# https://github.com/virt-manager/virt-manager/issues/497
+# The workaround is to shut down the VM from cloud-init once cloud-init finishes,
+# then start the VM again
+power_state:
+ delay: now
+ mode: poweroff
+ message: Cloud Init Finalized - Shutting down machine
+ timeout: 30
diff --git a/tests/integration/playbooks/templates/virt-install.bash.j2 b/tests/e2e/playbooks/templates/virt-install.bash.j2
similarity index 64%
rename from tests/integration/playbooks/templates/virt-install.bash.j2
rename to tests/e2e/playbooks/templates/virt-install.bash.j2
index 73a4e87e..bd359b83 100644
--- a/tests/integration/playbooks/templates/virt-install.bash.j2
+++ b/tests/e2e/playbooks/templates/virt-install.bash.j2
@@ -5,16 +5,17 @@ virt-install \
--ram 3072 \
--vcpus 2 \
--os-variant {{ os_variant[test_os] }} \
- --network default \
- --disk size=10,path="{{ image_path }}/{{ rhel_guest_image_fname }}" \
-{% if bib_firmware == 'uefi' %}
+ --network network=integration \
+ --disk size=10,path="{{ image_path }}/{{ guest_image_fname }}" \
+{% if firmware == 'uefi' %}
--boot {{ boot_args }} \
{% endif %}
--cdrom "{{ image_path }}/seed.iso" \
--install no_install=yes \
-{% if bib == 'false' %}
+{% if air_gapped_dir != '' %}
--filesystem={{ air_gapped_dir }},mount_tag,driver.type=virtiofs,accessmode=passthrough \
--memorybacking=source.type=memfd,access.mode=shared \
{% endif %}
+ --console file,source.path="/tmp/{{ test_os }}-{{ firmware }}-console.log" \
--noautoconsole \
--wait
diff --git a/tests/integration/playbooks/upgrade.yaml b/tests/e2e/playbooks/upgrade.yaml
similarity index 100%
rename from tests/integration/playbooks/upgrade.yaml
rename to tests/e2e/playbooks/upgrade.yaml
diff --git a/tests/e2e/shared_lib.sh b/tests/e2e/shared_lib.sh
new file mode 100755
index 00000000..baf9b00e
--- /dev/null
+++ b/tests/e2e/shared_lib.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+# Dumps details about the instance running the CI job.
+function dump_runner {
+ RUNNER_CPUS=$(nproc)
+ RUNNER_MEM=$(free -m | grep -oP '\d+' | head -n 1)
+ RUNNER_DISK=$(df --output=size -h / | sed '1d;s/[^0-9]//g')
+ RUNNER_HOSTNAME=$(uname -n)
+ RUNNER_USER=$(whoami)
+ RUNNER_ARCH=$(uname -m)
+ RUNNER_KERNEL=$(uname -r)
+
+ echo -e "\033[0;36m"
+ cat << EOF
+------------------------------------------------------------------------------
+CI MACHINE SPECS
+------------------------------------------------------------------------------
+ Hostname: ${RUNNER_HOSTNAME}
+ User: ${RUNNER_USER}
+ CPUs: ${RUNNER_CPUS}
+ RAM: ${RUNNER_MEM} MB
+ DISK: ${RUNNER_DISK} GB
+ ARCH: ${RUNNER_ARCH}
+ KERNEL: ${RUNNER_KERNEL}
+------------------------------------------------------------------------------
+EOF
+}
+
+# Colorful timestamped output.
+function greenprint {
+ echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
+}
+
+function redprint {
+ echo -e "\033[1;31m[$(date -Isecond)] ${1}\033[0m"
+}
+
+# Retry container image pull and push
+function retry {
+ n=0
+ until [ "$n" -ge 3 ]
+ do
+ "$@" && break
+ n=$((n+1))
+ sleep 10
+ done
+}
+
+function deploy_libvirt_network {
+ greenprint "Start firewalld"
+ sudo systemctl enable --now firewalld
+
+ greenprint "🚀 Starting libvirt daemon"
+ sudo systemctl start libvirtd
+ sudo virsh list --all > /dev/null
+
+ # Set a customized dnsmasq configuration for libvirt so we always get the
+ # same address on boot up.
+ greenprint "💡 Setup libvirt network"
+ sudo tee /tmp/integration.xml > /dev/null << EOF
+
+integration
+1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+EOF
+ if ! sudo virsh net-info integration > /dev/null 2>&1; then
+ sudo virsh net-define /tmp/integration.xml
+ fi
+ if [[ $(sudo virsh net-info integration | grep 'Active' | awk '{print $2}') == 'no' ]]; then
+ sudo virsh net-start integration
+ fi
+ sudo rm -f /tmp/integration.xml
+}
diff --git a/tests/integration/README.md b/tests/integration/README.md
deleted file mode 100644
index 27079817..00000000
--- a/tests/integration/README.md
+++ /dev/null
@@ -1,68 +0,0 @@
-## Integration Test
-
-### Scenarios
-
-Integration test includes two scenarios, `RPM build` and `bootc install/upgrade`.
-
-1. RPM build scenario will build RPM for RHEL 9, CentOS Stream 9, and Fedora with mock.
-
-2. bootc install/upgrade scenario will install and upgrade bootc image and have some system checking, such as check mount point/permission, run podman with root and rootless, check persistent log.
-
-#### Run RPM Build Test
-
-```shell
- podman run --rm --privileged -v ./:/workdir:z -e TEST_OS=$TEST_OS -e ARCH=$ARCH -e RHEL_REGISTRY_URL=$RHEL_REGISTRY_URL -e DOWNLOAD_NODE=$DOWNLOAD_NODE --workdir /workdir quay.io/fedora/fedora:40 ./tests/integration/mockbuild.sh
-```
-
-#### Run Integration Test
-
-Run on a shared test infrastructure using the [`testing farm`](https://docs.testing-farm.io/Testing%20Farm/0.1/cli.html) tool. For example, running on AWS.
-
-Run `testing-farm` CLI from `quay.io/testing-farm/cli` container. Don't forget export the `TESTING_FARM_API_TOKEN` in your environment. To run RHEL test, `Red Hat Ranch` has to be used.
-
-```shell
- export TESTING_FARM_API_TOKEN=
- testing-farm request \
- --plan "aws" \
- --environment PLATFORM=$PLATFORM \
- --environment ARCH=$ARCH \
- --environment TEST_OS=$TEST_OS \
- --environment AWS_REGION=us-east-1 \
- --secret DOWNLOAD_NODE=$DOWNLOAD_NODE \
- --secret RHEL_REGISTRY_URL=$RHEL_REGISTRY_URL \
- --secret QUAY_USERNAME=$QUAY_USERNAME \
- --secret QUAY_PASSWORD=$QUAY_PASSWORD \
- --secret QUAY_SECRET=$QUAY_SECRET \
- --secret AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
- --secret AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
- --git-url \
- --git-ref \
- --compose "CentOS-Stream-9" \
- --arch $ARCH \
- --context "arch=$ARCH" \
- --timeout "120"
-```
-
-* AWS test needs environment variables `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_REGION=us-east-1` have to be configured.
-
-### Required environment variables
-
- TEST_OS The OS to run the tests in. Currently supported values:
- "rhel-9-4"
- "centos-stream-9"
- ARCH Test architecture
- "x86_64"
- "aarch64"
-
- PLATFORM Run test on:
- "aws"
- QUAY_USERNAME quay.io username
- QUAY_PASSWORD quay.io password
- QUAY_SECRET Save into /etc/ostree/auth.json for authenticated registry
- DOWNLOAD_NODE RHEL nightly compose download URL
- RHEL_REGISTRY_URL RHEL bootc image URL
- AWS_ACCESS_KEY_ID AWS access key id
- AWS_SECRET_ACCESS_KEY AWS secrety key
- AWS_REGION AWS region
- "us-east-1" RHEL AWS EC2 image is only available in this region
- TESTING_FARM_API_TOKEN Required by Testing Farm API
diff --git a/tests/integration/bootc-install-upgrade.sh b/tests/integration/bootc-install-upgrade.sh
deleted file mode 100755
index 8c5888e2..00000000
--- a/tests/integration/bootc-install-upgrade.sh
+++ /dev/null
@@ -1,242 +0,0 @@
-#!/bin/bash
-set -exuo pipefail
-
-source ./shared_lib.sh
-dump_runner
-
-TEMPDIR=$(mktemp -d)
-trap 'rm -rf -- "$TEMPDIR"' EXIT
-
-# SSH configurations
-SSH_KEY=${TEMPDIR}/id_rsa
-ssh-keygen -f "${SSH_KEY}" -N "" -q -t rsa-sha2-256 -b 2048
-SSH_KEY_PUB="${SSH_KEY}.pub"
-
-INSTALL_CONTAINERFILE=${TEMPDIR}/Containerfile.install
-UPGRADE_CONTAINERFILE=${TEMPDIR}/Containerfile.upgrade
-QUAY_REPO_TAG="${QUAY_REPO_TAG:-$(tr -dc a-z0-9 < /dev/urandom | head -c 4 ; echo '')}"
-INVENTORY_FILE="${TEMPDIR}/inventory"
-
-REPLACE_CLOUD_USER=""
-TEST_IMAGE_NAME="bootc-workflow-test"
-TEST_IMAGE_URL="quay.io/redhat_emp1/${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}"
-
-case "$TEST_OS" in
- "rhel-9"*)
- if [[ "$TEST_OS" == "rhel-9-4" ]]; then
- TIER1_IMAGE_URL="${RHEL_REGISTRY_URL}/rhel9-rhel_bootc:rhel-9.4"
- BATCH_COMPOSE="updates/"
- LATEST_COMPOSE_ID="latest-RHEL-9.4.0"
- REDHAT_VERSION_ID="9.4"
- else
- TIER1_IMAGE_URL="${RHEL_REGISTRY_URL}/rhel9-rhel_bootc:rhel-9.5"
- BATCH_COMPOSE=""
- LATEST_COMPOSE_ID="latest-RHEL-9.5.0"
- REDHAT_VERSION_ID="9.5"
- fi
- SSH_USER="cloud-user"
- sed "s/REPLACE_ME/${DOWNLOAD_NODE}/; s|REPLACE_BATCH_COMPOSE|${BATCH_COMPOSE}|; s/REPLACE_COMPOSE_ID/${LATEST_COMPOSE_ID}/" files/rhel-9-y.template | tee rhel-9-y.repo > /dev/null
- ADD_REPO="COPY rhel-9-y.repo /etc/yum.repos.d/rhel-9-y.repo"
- if [[ "$PLATFORM" == "aws" ]]; then
- SSH_USER="ec2-user"
- REPLACE_CLOUD_USER='RUN sed -i "s/name: cloud-user/name: ec2-user/g" /etc/cloud/cloud.cfg'
- fi
- greenprint "Prepare cloud-init file"
- tee -a "playbooks/user-data" > /dev/null << EOF
-#cloud-config
-yum_repos:
- rhel-9y-baseos:
- name: rhel-9y-baseos
- baseurl: http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/BaseOS/\$basearch/os/
- enabled: true
- gpgcheck: false
- rhel-9y-appstream:
- name: rhel-9y-appstream
- baseurl: http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/AppStream/\$basearch/os/
- enabled: true
- gpgcheck: false
-EOF
- ;;
- "centos-stream-9")
- TIER1_IMAGE_URL="quay.io/centos-bootc/centos-bootc-dev:stream9"
- SSH_USER="cloud-user"
- ADD_REPO=""
- if [[ "$PLATFORM" == "aws" ]]; then
- SSH_USER="ec2-user"
- REPLACE_CLOUD_USER='RUN sed -i "s/name: cloud-user/name: ec2-user/g" /etc/cloud/cloud.cfg'
- fi
- REDHAT_VERSION_ID="9"
- ;;
- "fedora"*)
- if [[ "$TEST_OS" == "fedora-40" ]]; then
- TIER1_IMAGE_URL="quay.io/fedora/fedora-bootc:40"
- REDHAT_VERSION_ID="40"
- else
- TIER1_IMAGE_URL="quay.io/fedora/fedora-bootc:41"
- REDHAT_VERSION_ID="41"
- fi
- SSH_USER="fedora"
- ADD_REPO=""
- ;;
- *)
- redprint "Variable TEST_OS has to be defined"
- exit 1
- ;;
-esac
-
-sed "s/REPLACE_ME/${QUAY_SECRET}/g" files/auth.template | tee auth.json > /dev/null
-
-greenprint "Create $TEST_OS installation Containerfile"
-tee "$INSTALL_CONTAINERFILE" > /dev/null << EOF
-FROM "$TIER1_IMAGE_URL"
-$ADD_REPO
-COPY build/bootc-2*.${ARCH}.rpm .
-RUN dnf -y update ./bootc-2*.${ARCH}.rpm && \
- rm -f ./bootc-2*.${ARCH}.rpm
-COPY auth.json /etc/ostree/auth.json
-EOF
-
-case "$PLATFORM" in
- "aws")
- tee -a "$INSTALL_CONTAINERFILE" > /dev/null << EOF
-RUN dnf -y install python3 cloud-init && \
- dnf -y clean all
-$REPLACE_CLOUD_USER
-EOF
- ;;
- "libvirt")
- SSH_USER="root"
- SSH_KEY_PUB_CONTENT=$(cat "${SSH_KEY_PUB}")
- tee -a "$INSTALL_CONTAINERFILE" > /dev/null << EOF
-RUN mkdir -p /usr/etc-system/ && \
- echo 'AuthorizedKeysFile /usr/etc-system/%u.keys' >> /etc/ssh/sshd_config.d/30-auth-system.conf && \
- echo "$SSH_KEY_PUB_CONTENT" > /usr/etc-system/root.keys && \
- chmod 0600 /usr/etc-system/root.keys && \
- dnf -y install qemu-guest-agent && \
- dnf clean all && \
- systemctl enable qemu-guest-agent
-EOF
- ;;
-esac
-
-greenprint "Check $TEST_OS installation Containerfile"
-cat "$INSTALL_CONTAINERFILE"
-
-greenprint "Login quay.io"
-podman login -u "${QUAY_USERNAME}" -p "${QUAY_PASSWORD}" quay.io
-
-greenprint "Build $TEST_OS installation container image"
-podman build --tls-verify=false --retry=5 --retry-delay=10 -t "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" -f "$INSTALL_CONTAINERFILE" .
-
-greenprint "Push $TEST_OS installation container image"
-retry podman push --tls-verify=false --quiet "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL"
-
-greenprint "Prepare inventory file"
-tee -a "$INVENTORY_FILE" > /dev/null << EOF
-[cloud]
-localhost
-
-[guest]
-
-[cloud:vars]
-ansible_connection=local
-
-[guest:vars]
-ansible_user="$SSH_USER"
-ansible_private_key_file="$SSH_KEY"
-ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
-
-[all:vars]
-ansible_python_interpreter=/usr/bin/python3
-EOF
-
-greenprint "Prepare ansible.cfg"
-export ANSIBLE_CONFIG="playbooks/ansible.cfg"
-
-# AIR_GAPPED=1 means add passthough mount to test bootc swtich to local disk
-if [[ ${AIR_GAPPED-} -eq 1 ]];then
- AIR_GAPPED_DIR="$TEMPDIR"/virtiofs
- mkdir "$AIR_GAPPED_DIR"
-else
- AIR_GAPPED=0
- AIR_GAPPED_DIR=""
-fi
-
-greenprint "Deploy $PLATFORM instance"
-ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- -e test_os="$TEST_OS" \
- -e ssh_user="$SSH_USER" \
- -e ssh_key_pub="$SSH_KEY_PUB" \
- -e inventory_file="$INVENTORY_FILE" \
- -e air_gapped_dir="$AIR_GAPPED_DIR" \
- "playbooks/deploy-${PLATFORM}.yaml"
-
-greenprint "Install $TEST_OS bootc system"
-ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- -e test_os="$TEST_OS" \
- -e test_image_url="$TEST_IMAGE_URL" \
- playbooks/install.yaml
-
-greenprint "Run ostree checking test on $PLATFORM instance"
-ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- -e test_os="$TEST_OS" \
- -e bootc_image="$TEST_IMAGE_URL" \
- -e image_label_version_id="$REDHAT_VERSION_ID" \
- playbooks/check-system.yaml
-
-greenprint "Create upgrade Containerfile"
-tee "$UPGRADE_CONTAINERFILE" > /dev/null << EOF
-FROM "$TEST_IMAGE_URL"
-RUN dnf -y install wget && \
- dnf -y clean all
-EOF
-
-greenprint "Build $TEST_OS upgrade container image"
-podman build --tls-verify=false --retry=5 --retry-delay=10 -t "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" -f "$UPGRADE_CONTAINERFILE" .
-
-greenprint "Push $TEST_OS upgrade container image"
-retry podman push --tls-verify=false --quiet "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL"
-
-if [[ ${AIR_GAPPED-} -eq 1 ]]; then
- retry skopeo copy docker://"$TEST_IMAGE_URL" dir://"$AIR_GAPPED_DIR"
- BOOTC_IMAGE="/mnt"
-else
- BOOTC_IMAGE="$TEST_IMAGE_URL"
-fi
-
-greenprint "Upgrade $TEST_OS system"
-ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- -e air_gapped_dir="$AIR_GAPPED_DIR" \
- playbooks/upgrade.yaml
-
-greenprint "Run ostree checking test after upgrade on $PLATFORM instance"
-ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- -e test_os="$TEST_OS" \
- -e bootc_image="$BOOTC_IMAGE" \
- -e image_label_version_id="$REDHAT_VERSION_ID" \
- -e upgrade="true" \
- playbooks/check-system.yaml
-
-greenprint "Rollback $TEST_OS system"
-ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- -e air_gapped_dir="$AIR_GAPPED_DIR" \
- playbooks/rollback.yaml
-
-greenprint "Remove $PLATFORM instance"
-ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- -e platform="$PLATFORM" \
- playbooks/remove.yaml
-
-greenprint "Clean up"
-rm -rf auth.json rhel-9-y.repo
-unset ANSIBLE_CONFIG
-
-greenprint "🎉 All tests passed."
-exit 0
diff --git a/tests/integration/files/auth.template b/tests/integration/files/auth.template
deleted file mode 100644
index ea3e4fbf..00000000
--- a/tests/integration/files/auth.template
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "auths": {
- "quay.io": {
- "auth": "REPLACE_ME"
- }
- }
-}
diff --git a/tests/integration/files/rhel-9-y.template b/tests/integration/files/rhel-9-y.template
deleted file mode 100644
index 0e4c853f..00000000
--- a/tests/integration/files/rhel-9-y.template
+++ /dev/null
@@ -1,10 +0,0 @@
-[rhel-9y-baseos]
-baseurl=http://REPLACE_ME/rhel-9/nightly/REPLACE_BATCH_COMPOSERHEL-9/REPLACE_COMPOSE_ID/compose/BaseOS/$basearch/os/
-enabled=1
-gpgcheck=0
-
-[rhel-9y-appstream]
-baseurl=http://REPLACE_ME/rhel-9/nightly/REPLACE_BATCH_COMPOSERHEL-9/REPLACE_COMPOSE_ID/compose/AppStream/$basearch/os/
-enabled=1
-gpgcheck=0
-
diff --git a/tests/integration/image-install-upgrade.sh b/tests/integration/image-install-upgrade.sh
deleted file mode 100755
index f26fd61f..00000000
--- a/tests/integration/image-install-upgrade.sh
+++ /dev/null
@@ -1,242 +0,0 @@
-#!/bin/bash
-set -exuo pipefail
-
-source ./shared_lib.sh
-dump_runner
-
-TEMPDIR=$(mktemp -d)
-trap 'rm -rf -- "$TEMPDIR"' EXIT
-
-# SSH configurations
-SSH_KEY=${TEMPDIR}/id_rsa
-ssh-keygen -f "${SSH_KEY}" -N "" -q -t rsa-sha2-256 -b 2048
-SSH_KEY_PUB="${SSH_KEY}.pub"
-
-INSTALL_CONTAINERFILE=${TEMPDIR}/Containerfile.install
-UPGRADE_CONTAINERFILE=${TEMPDIR}/Containerfile.upgrade
-QUAY_REPO_TAG="${QUAY_REPO_TAG:-$(tr -dc a-z0-9 < /dev/urandom | head -c 4 ; echo '')}"
-INVENTORY_FILE="${TEMPDIR}/inventory"
-
-TEST_IMAGE_NAME="bootc-workflow-test"
-
-case "$TEST_OS" in
- "rhel-9"*)
- if [[ "$TEST_OS" == "rhel-9-4" ]]; then
- TIER1_IMAGE_URL="${RHEL_REGISTRY_URL}/rhel9-rhel_bootc:rhel-9.4"
- BATCH_COMPOSE="updates/"
- LATEST_COMPOSE_ID="latest-RHEL-9.4.0"
- REDHAT_VERSION_ID="9.4"
- else
- TIER1_IMAGE_URL="${RHEL_REGISTRY_URL}/rhel9-rhel_bootc:rhel-9.5"
- BATCH_COMPOSE=""
- LATEST_COMPOSE_ID="latest-RHEL-9.5.0"
- REDHAT_VERSION_ID="9.5"
- fi
- TEST_IMAGE_URL="quay.io/redhat_emp1/${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}"
- SSH_USER="cloud-user"
- sed "s/REPLACE_ME/${DOWNLOAD_NODE}/; s|REPLACE_BATCH_COMPOSE|${BATCH_COMPOSE}|; s/REPLACE_COMPOSE_ID/${LATEST_COMPOSE_ID}/" files/rhel-9-y.template | tee rhel-9-y.repo > /dev/null
- ADD_REPO="COPY rhel-9-y.repo /etc/yum.repos.d/rhel-9-y.repo"
- greenprint "Prepare cloud-init file"
- tee -a "playbooks/user-data" > /dev/null << EOF
-#cloud-config
-yum_repos:
- rhel-9y-baseos:
- name: rhel-9y-baseos
- baseurl: http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/BaseOS/\$basearch/os/
- enabled: true
- gpgcheck: false
- rhel-9y-appstream:
- name: rhel-9y-appstream
- baseurl: http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/AppStream/\$basearch/os/
- enabled: true
- gpgcheck: false
-EOF
- BOOT_ARGS="uefi"
- ;;
- "centos-stream-9")
- TIER1_IMAGE_URL="quay.io/centos-bootc/centos-bootc-dev:stream9"
- ADD_REPO=""
- SSH_USER="cloud-user"
- REDHAT_VERSION_ID="9"
- TEST_IMAGE_URL="quay.io/bootc-test/${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}"
- BOOT_ARGS="uefi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no"
- ;;
- "fedora"*)
- if [[ "$TEST_OS" == "fedora-40" ]]; then
- TIER1_IMAGE_URL="quay.io/fedora/fedora-bootc:40"
- REDHAT_VERSION_ID="40"
- else
- TIER1_IMAGE_URL="quay.io/fedora/fedora-bootc:41"
- REDHAT_VERSION_ID="41"
- fi
- SSH_USER="fedora"
- ADD_REPO=""
- TEST_IMAGE_URL="quay.io/bootc-test/${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}"
- BOOT_ARGS="uefi"
- ;;
- *)
- redprint "Variable TIER1_IMAGE_URL is not supported"
- exit 1
- ;;
-esac
-
-sed "s/REPLACE_ME/${QUAY_SECRET}/g" files/auth.template | tee auth.json > /dev/null
-greenprint "Create $TEST_OS installation Containerfile"
-tee "$INSTALL_CONTAINERFILE" > /dev/null << EOF
-FROM "$TIER1_IMAGE_URL"
-$ADD_REPO
-COPY build/bootc-2*.${ARCH}.rpm .
-RUN dnf -y update ./bootc-2*.${ARCH}.rpm && \
- rm -f ./bootc-2*.${ARCH}.rpm
-RUN dnf -y install python3 cloud-init && \
- dnf -y clean all
-COPY auth.json /etc/ostree/auth.json
-EOF
-
-greenprint "Check $TEST_OS installation Containerfile"
-cat "$INSTALL_CONTAINERFILE"
-
-greenprint "Login quay.io"
-sudo podman login -u "${QUAY_USERNAME}" -p "${QUAY_PASSWORD}" quay.io
-
-greenprint "Build $TEST_OS installation container image"
-sudo podman build --tls-verify=false --retry=5 --retry-delay=10 -t "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" -f "$INSTALL_CONTAINERFILE" .
-
-greenprint "Push $TEST_OS installation container image"
-sudo podman push --tls-verify=false --quiet "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL"
-
-greenprint "Prepare inventory file"
-tee -a "$INVENTORY_FILE" > /dev/null << EOF
-[cloud]
-localhost
-
-[guest]
-
-[cloud:vars]
-ansible_connection=local
-
-[guest:vars]
-ansible_user="$SSH_USER"
-ansible_private_key_file="$SSH_KEY"
-ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
-
-[all:vars]
-ansible_python_interpreter=/usr/bin/python3
-EOF
-
-greenprint "Prepare ansible.cfg"
-export ANSIBLE_CONFIG="${PWD}/playbooks/ansible.cfg"
-
-case "$IMAGE_TYPE" in
- "to-disk")
- greenprint "Configure rootfs randomly"
- ROOTFS_LIST=( \
- "ext4" \
- "xfs" \
- )
- RND_LINE=$((RANDOM % 2))
- ROOTFS="${ROOTFS_LIST[$RND_LINE]}"
-
- if [[ "$TEST_OS" == "fedora"* ]]; then
- ROOTFS="btrfs"
- fi
-
- greenprint "💾 Create disk.raw"
- sudo truncate -s 10G disk.raw
-
- greenprint "bootc install to disk.raw"
- sudo podman run \
- --rm \
- --privileged \
- --pid=host \
- --security-opt label=type:unconfined_t \
- -v /dev:/dev \
- -v /var/lib/containers:/var/lib/containers \
- -v /dev:/dev \
- -v .:/output \
- "$TEST_IMAGE_URL" \
- bootc install to-disk --filesystem "$ROOTFS" --generic-image --via-loopback /output/disk.raw
-
- sudo qemu-img convert -f raw ./disk.raw -O qcow2 "/var/lib/libvirt/images/disk.qcow2"
- rm -f disk.raw
-
- if [[ "$ARCH" == "x86_64" ]]; then
- BIB_FIRMWARE_LIST=( \
- "bios" \
- "uefi" \
- )
- RND_LINE=$((RANDOM % 2))
- BIB_FIRMWARE="${BIB_FIRMWARE_LIST[$RND_LINE]}"
- else
- BIB_FIRMWARE="uefi"
- fi
-
- greenprint "Deploy $IMAGE_TYPE instance"
- ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- -e test_os="$TEST_OS" \
- -e ssh_key_pub="$SSH_KEY_PUB" \
- -e ssh_user="$SSH_USER" \
- -e inventory_file="$INVENTORY_FILE" \
- -e bib="true" \
- -e boot_args="$BOOT_ARGS" \
- -e bib_firmware="$BIB_FIRMWARE" \
- "playbooks/deploy-libvirt.yaml"
- ;;
- *)
- redprint "Variable IMAGE_TYPE has to be defined"
- exit 1
- ;;
-esac
-
-greenprint "Run ostree checking test on $PLATFORM instance"
-ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- -e test_os="$TEST_OS" \
- -e bootc_image="$TEST_IMAGE_URL" \
- -e image_label_version_id="$REDHAT_VERSION_ID" \
- playbooks/check-system.yaml
-
-greenprint "Create upgrade Containerfile"
-tee "$UPGRADE_CONTAINERFILE" > /dev/null << EOF
-FROM "$TEST_IMAGE_URL"
-RUN dnf -y install wget && \
- dnf -y clean all
-EOF
-
-greenprint "Build $TEST_OS upgrade container image"
-sudo podman build --tls-verify=false --retry=5 --retry-delay=10 -t "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" -f "$UPGRADE_CONTAINERFILE" .
-greenprint "Push $TEST_OS upgrade container image"
-sudo podman push --tls-verify=false --quiet "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL"
-
-greenprint "Upgrade $TEST_OS system"
-ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- playbooks/upgrade.yaml
-
-greenprint "Run ostree checking test after upgrade on $PLATFORM instance"
-ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- -e test_os="$TEST_OS" \
- -e bootc_image="$TEST_IMAGE_URL" \
- -e image_label_version_id="$REDHAT_VERSION_ID" \
- -e upgrade="true" \
- playbooks/check-system.yaml
-
-greenprint "Rollback $TEST_OS system"
-ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- playbooks/rollback.yaml
-
-greenprint "Terminate $PLATFORM instance and deregister AMI"
-ansible-playbook -v \
- -i "$INVENTORY_FILE" \
- -e platform="$PLATFORM" \
- playbooks/remove.yaml
-
-greenprint "Clean up"
-rm -rf auth.json rhel-9-y.repo
-unset ANSIBLE_CONFIG
-
-greenprint "🎉 All tests passed."
-exit 0
diff --git a/tests/integration/install-upgrade.fmf b/tests/integration/install-upgrade.fmf
deleted file mode 100644
index 5ccbc12d..00000000
--- a/tests/integration/install-upgrade.fmf
+++ /dev/null
@@ -1,14 +0,0 @@
-/rpm-build:
- summary: bootc rpm build test
- test: podman run --rm --privileged -v ../../:/workdir:z -e TEST_OS=$TEST_OS -e ARCH=$ARCH -e RHEL_REGISTRY_URL=$RHEL_REGISTRY_URL -e DOWNLOAD_NODE=$DOWNLOAD_NODE --workdir /workdir quay.io/fedora/fedora:40 ./tests/integration/mockbuild.sh
- duration: 40m
-
-/bootc-install-upgrade:
- summary: bootc install and upgrade test
- test: ./bootc-install-upgrade.sh
- duration: 90m
-
-/image-install-upgrade:
- summary: bootc install to-disk and upgrade test
- test: ./image-install-upgrade.sh
- duration: 90m
diff --git a/tests/integration/mockbuild.sh b/tests/integration/mockbuild.sh
deleted file mode 100755
index 5446683d..00000000
--- a/tests/integration/mockbuild.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-set -exuo pipefail
-
-ARCH=$(uname -m)
-
-# Colorful output.
-function greenprint {
- echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
-}
-function redprint {
- echo -e "\033[1;31m[$(date -Isecond)] ${1}\033[0m"
-}
-
-greenprint "📥 Install required packages"
-dnf install -y cargo zstd git libzstd-devel openssl-devel ostree-devel rpm-build mock podman skopeo jq
-cargo install cargo-vendor-filterer
-
-greenprint "⛏ Build archive"
-cargo xtask package-srpm
-
-greenprint "📋 Get target tmp folder path"
-shopt -s extglob
-TARGET_FOLDER=(target/.tmp*)
-
-case "$TEST_OS" in
- "rhel-9"*)
- TEMPLATE="rhel-9.tpl"
- greenprint "📝 update mock rhel-9 template"
- # disable subscription for nightlies
- sed -i "s/config_opts\['redhat_subscription_required'\] = True/config_opts['redhat_subscription_required'] = False/" /etc/mock/templates/"$TEMPLATE"
- # delete default cdn compose and add nightly compose
- sed -i '/user_agent/q' /etc/mock/templates/"$TEMPLATE"
- if [[ "$TEST_OS" == "rhel-9-4" ]]; then
- BATCH_COMPOSE="updates/"
- LATEST_COMPOSE_ID="latest-RHEL-9.4.0"
- else
- BATCH_COMPOSE=""
- LATEST_COMPOSE_ID="latest-RHEL-9.5.0"
- fi
- tee -a /etc/mock/templates/"$TEMPLATE" > /dev/null << EOF
-[BaseOS]
-name=Red Hat Enterprise Linux - BaseOS
-baseurl=http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/BaseOS/\$basearch/os/
-enabled=1
-gpgcheck=0
-
-[AppStream]
-name=Red Hat Enterprise Linux - AppStream
-baseurl=http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/AppStream/\$basearch/os/
-enabled=1
-gpgcheck=0
-
-[CRB]
-name=Red Hat Enterprise Linux - CRB
-baseurl=http://${DOWNLOAD_NODE}/rhel-9/nightly/${BATCH_COMPOSE}RHEL-9/${LATEST_COMPOSE_ID}/compose/CRB/\$basearch/os/
-enabled=1
-gpgcheck=0
-"""
-EOF
- MOCK_CONFIG="rhel-9-${ARCH}"
- ;;
- "centos-stream-9")
- MOCK_CONFIG="centos-stream-9-${ARCH}"
- ;;
- "fedora-40")
- MOCK_CONFIG="fedora-40-${ARCH}"
- ;;
- "fedora-41")
- MOCK_CONFIG="fedora-41-${ARCH}"
- ;;
- *)
- redprint "Variable TEST_OS has to be defined"
- exit 1
- ;;
-esac
-
-greenprint "🧬 Using mock config: ${MOCK_CONFIG}"
-
-greenprint "✏ Adding user to mock group"
-usermod -a -G mock "$(whoami)"
-
-greenprint "🎁 Building SRPM"
-mock -r "$MOCK_CONFIG" --buildsrpm \
- --spec "${TARGET_FOLDER[0]}/bootc.spec" \
- --config-opts=cleanup_on_failure=False \
- --config-opts=cleanup_on_success=True \
- --sources "${TARGET_FOLDER[0]}" \
- --resultdir ./tests/integration/build
-
-greenprint "🎁 Building RPMs"
-mock -r "$MOCK_CONFIG" \
- --config-opts=cleanup_on_failure=False \
- --config-opts=cleanup_on_success=True \
- --resultdir "./tests/integration/build" \
- ./tests/integration/build/*.src.rpm
diff --git a/tests/integration/playbooks/deploy-aws.yaml b/tests/integration/playbooks/deploy-aws.yaml
deleted file mode 100644
index ff1bfbe8..00000000
--- a/tests/integration/playbooks/deploy-aws.yaml
+++ /dev/null
@@ -1,141 +0,0 @@
----
-- hosts: cloud
- gather_facts: false
- become: false
- vars:
- test_os: ""
- arch: "{{ lookup('env', 'ARCH') | default('x86_64', true) }}"
- ssh_key_pub: ""
- inventory_file: ""
- download_node: "{{ lookup('env', 'DOWNLOAD_NODE') | default('', true) }}"
- spot_max_price: "0.1"
- instance_type:
- x86_64:
- "0": t2.medium
- "1": t3.medium
- "2": m6a.large
- aarch64:
- "0": t4g.medium
- "1": c7g.medium
- "2": m6g.medium
-
- tasks:
- - set_fact:
- random_num: "{{ 9999 | random(start=1001) }}"
- - set_fact:
- instance_name: "bootc-aws-{{ test_os }}-{{ random_num }}"
-
- - name: random number for instance type
- set_fact:
- instance_type_index: "{{ 3 | random(start=0) }}"
-
- - name: set random instance type
- set_fact:
- random_instance_type: "{{ lookup('env', 'instance_type') | default(instance_type[arch][instance_type_index], true) }}"
-
- - name: get virtqe subnet
- shell: |
- aws ec2 describe-subnets \
- --output json \
- --filters "Name=tag:Name,Values=virtqe_test_prod_us-west-2_internal-a" | \
- jq -r ".Subnets[0].SubnetId"
- register: ec2_vpc_subnet
-
- - set_fact:
- subnet_id: "{{ ec2_vpc_subnet.stdout }}"
-
- - name: get virtqe security group
- shell: |
- aws ec2 describe-security-groups \
- --filters="Name=tag:Name,Values=bootc-test" \
- --output json | \
- jq -r ".SecurityGroups[0].GroupId"
- register: ec2_security_group
-
- - set_fact:
- group_id: "{{ ec2_security_group.stdout }}"
-
- - name: config ssh keypair used by test
- shell: |
- aws ec2 import-key-pair \
- --key-name "kp-bootc-{{ random_num }}" \
- --public-key-material "fileb://{{ ssh_key_pub }}" \
- --tag-specification 'ResourceType=key-pair,Tags=[{Key=Name,Value=bootc-test}]'
-
- - name: get ami id from aws ssm
- shell: |
- aws ssm get-parameter \
- --name "bootc-{{ test_os }}-{{ arch }}" | jq -r '.Parameter.Value'
- register: result_ami_id
-
- - set_fact:
- ami_id: "{{ result_ami_id.stdout }}"
-
- - name: generate ec2_run_instance script
- template:
- src: ec2_run_instance.j2
- dest: "{{ playbook_dir }}/ec2_run_instance.sh"
- mode: 0755
-
- - name: run ec2 instance with script
- command: "{{ playbook_dir }}/ec2_run_instance.sh"
- register: result_instance
-
- - name: convert run_instance output to json
- set_fact:
- instance_json: "{{ result_instance.stdout | from_json }}"
-
- - name: wait for instance running
- shell: |
- aws ec2 wait instance-running \
- --instance-ids {{ instance_json.Instances[0].InstanceId }}
-
- - name: get instance private ip
- shell: |
- aws ec2 describe-instances \
- --instance-ids {{ instance_json.Instances[0].InstanceId }} \
- --query 'Reservations[*].Instances[*].PrivateIpAddress' \
- --output text
- register: ip_result
-
- - set_fact:
- instance_ip: "{{ ip_result.stdout }}"
-
- - name: waits until instance is reachable
- wait_for:
- host: "{{ instance_ip }}"
- port: 22
- search_regex: OpenSSH
- delay: 10
- retries: 30
- register: result_ssh_check
- until: result_ssh_check is success
-
- - name: add instance ip into host group guest
- add_host:
- name: "{{ instance_ip }}"
- groups: guest
-
- - name: Write instance ip to inventory file
- community.general.ini_file:
- path: "{{ inventory_file }}"
- section: guest
- option: guest ansible_host
- value: "{{ instance_ip }}"
- no_extra_spaces: true
-
- - name: Write random number to inventory file
- community.general.ini_file:
- path: "{{ inventory_file }}"
- section: cloud:vars
- option: random_num
- value: "{{ random_num }}"
- no_extra_spaces: true
-
- - name: write AWS EC2 instance id to inventory file
- community.general.ini_file:
- path: "{{ inventory_file }}"
- section: cloud:vars
- option: instance_id
- value: "{{ instance_json.Instances[0].InstanceId }}"
- no_extra_spaces: true
diff --git a/tests/integration/playbooks/remove.yaml b/tests/integration/playbooks/remove.yaml
deleted file mode 100644
index cedd0f65..00000000
--- a/tests/integration/playbooks/remove.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- hosts: cloud
- gather_facts: false
- become: false
-
- tasks:
- - name: Remove AWS resources
- block:
- - name: terminate instance
- shell: |
- aws ec2 terminate-instances \
- --instance-ids "{{ instance_id }}"
- ignore_errors: true
-
- - name: wait until instance terminated
- shell: |
- aws ec2 wait instance-terminated \
- --instance-ids "{{ instance_id }}"
-
- - name: remove ec2 key
- shell: |
- aws ec2 delete-key-pair \
- --key-name "kp-bootc-{{ random_num }}"
- when: platform == "aws"
-
- - name: Destroy and undefine libvirt vm
- block:
- - name: "Destroy vm"
- command: virsh destroy {{ instance_name }}
- become: true
- ignore_errors: true
- - name: "Undefine vm"
- command: virsh undefine {{ instance_name }}
- become: true
- register: result_undefine
- ignore_errors: true
- - name: "Undefine vm with --nvram"
- command: virsh undefine {{ instance_name }} --nvram
- become: true
- ignore_errors: true
- when: result_undefine is failed
- - name: "Delete disk file"
- command: virsh vol-delete --pool images "{{ rhel_guest_image_fname }}"
- become: true
- ignore_errors: true
- when: platform == "libvirt"
diff --git a/tests/integration/playbooks/templates/ec2_run_instance.j2 b/tests/integration/playbooks/templates/ec2_run_instance.j2
deleted file mode 100644
index ec91f3aa..00000000
--- a/tests/integration/playbooks/templates/ec2_run_instance.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-/usr/local/bin/aws ec2 run-instances \
- --associate-public-ip-address \
- --block-device-mappings DeviceName=/dev/xvda,Ebs=\{DeleteOnTermination=true,VolumeSize=12,VolumeType=gp2,Encrypted=false\} \
-{% if random_instance_type.startswith('t3') or random_instance_type.startswith('t4g') %}
- --credit-specification CpuCredits=standard \
-{% endif %}
-{% if test_os.startswith('rhel') %}
- --user-data file://user-data \
-{% endif %}
- --image-id {{ ami_id }} \
- --instance-market-options MarketType=spot,SpotOptions=\{MaxPrice={{ spot_max_price }},SpotInstanceType=one-time,InstanceInterruptionBehavior=terminate\} \
- --instance-type {{ random_instance_type }} \
- --key-name kp-bootc-{{ random_num }} \
- --security-group-ids {{ group_id }} \
- --subnet-id {{ subnet_id }} \
- --tag-specifications ResourceType=instance,Tags=[\{Key=bootc-test,Value='bootc-test.{{ test_os }}.{{ arch }}.{{ random_num }}'\},\{Key=Name,Value='bootc-test.{{ test_os }}.{{ arch }}.{{ random_num }}'\}] \
-
-return_code=$?
-if [[ $return_code == 0 ]]; then
- exit 0
-fi
-
-# If we had no successful boots, we should exit with a failure.
-exit 1
diff --git a/tests/integration/playbooks/templates/user-data.j2 b/tests/integration/playbooks/templates/user-data.j2
deleted file mode 100644
index 0c51a9f9..00000000
--- a/tests/integration/playbooks/templates/user-data.j2
+++ /dev/null
@@ -1,27 +0,0 @@
-#cloud-config
-users:
- - default
- - name: {{ ssh_user }}
- groups: wheel
- sudo: ALL=(ALL) NOPASSWD:ALL
- lock_passwd: true
- ssh_authorized_keys:
- - {{ lookup('ansible.builtin.file', ssh_key_pub) }}
-{% if bib == 'false' and test_os.startswith('rhel') %}
-yum_repos:
- rhel-9y-baseos:
- name: rhel-9y-baseos
- baseurl: http://{{ download_node }}/rhel-9/nightly/RHEL-9/latest-RHEL-{{ test_os_dot_version }}.0/compose/BaseOS/$basearch/os/
- enabled: true
- gpgcheck: false
- rhel-9y-appstream:
- name: rhel-9y-appstream
- baseurl: http://{{ download_node }}/rhel-9/nightly/RHEL-9/latest-RHEL-{{ test_os_dot_version }}.0/compose/AppStream/$basearch/os/
- enabled: true
- gpgcheck: false
-{% endif %}
-power_state:
- delay: now
- mode: poweroff
- message: Cloud Init Finalized - Shutting down machine
- timeout: 30
diff --git a/tests/integration/shared_lib.sh b/tests/integration/shared_lib.sh
deleted file mode 100755
index 0ef30a3f..00000000
--- a/tests/integration/shared_lib.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-
-# Dumps details about the instance running the CI job.
-function dump_runner {
- RUNNER_CPUS=$(nproc)
- RUNNER_MEM=$(free -m | grep -oP '\d+' | head -n 1)
- RUNNER_DISK=$(df --output=size -h / | sed '1d;s/[^0-9]//g')
- RUNNER_HOSTNAME=$(uname -n)
- RUNNER_USER=$(whoami)
- RUNNER_ARCH=$(uname -m)
- RUNNER_KERNEL=$(uname -r)
-
- echo -e "\033[0;36m"
- cat << EOF
-------------------------------------------------------------------------------
-CI MACHINE SPECS
-------------------------------------------------------------------------------
- Hostname: ${RUNNER_HOSTNAME}
- User: ${RUNNER_USER}
- CPUs: ${RUNNER_CPUS}
- RAM: ${RUNNER_MEM} MB
- DISK: ${RUNNER_DISK} GB
- ARCH: ${RUNNER_ARCH}
- KERNEL: ${RUNNER_KERNEL}
-------------------------------------------------------------------------------
-EOF
-}
-
-# Colorful timestamped output.
-function greenprint {
- echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
-}
-
-function redprint {
- echo -e "\033[1;31m[$(date -Isecond)] ${1}\033[0m"
-}
-
-# Retry container image pull and push
-function retry {
- n=0
- until [ "$n" -ge 3 ]
- do
- "$@" && break
- n=$((n+1))
- sleep 10
- done
-}
diff --git a/xtask/src/xtask.rs b/xtask/src/xtask.rs
index 985724c5..549ece54 100644
--- a/xtask/src/xtask.rs
+++ b/xtask/src/xtask.rs
@@ -22,6 +22,7 @@ const TASKS: &[(&str, fn(&Shell) -> Result<()>)] = &[
("man2markdown", man2markdown),
("package", package),
("package-srpm", package_srpm),
+ ("spec", spec),
("custom-lints", custom_lints),
("test-tmt", test_tmt),
];
@@ -244,6 +245,41 @@ fn package(sh: &Shell) -> Result<()> {
Ok(())
}
+fn update_spec(sh: &Shell) -> Result<Utf8PathBuf> {
+ let p = Utf8Path::new("target");
+ let pkg = impl_package(sh)?;
+ let srcpath = pkg.srcpath.file_name().unwrap();
+ let v = pkg.version;
+ let src_vendorpath = pkg.vendorpath.file_name().unwrap();
+ {
+ let specin = File::open(format!("contrib/packaging/{NAME}.spec"))
+ .map(BufReader::new)
+ .context("Opening spec")?;
+ let mut o = File::create(p.join(format!("{NAME}.spec"))).map(BufWriter::new)?;
+ for line in specin.lines() {
+ let line = line?;
+ if line.starts_with("Version:") {
+ writeln!(o, "# Replaced by cargo xtask spec")?;
+ writeln!(o, "Version: {v}")?;
+ } else if line.starts_with("Source0") {
+ writeln!(o, "Source0: {srcpath}")?;
+ } else if line.starts_with("Source1") {
+ writeln!(o, "Source1: {src_vendorpath}")?;
+ } else {
+ writeln!(o, "{}", line)?;
+ }
+ }
+ }
+ let spec_path = p.join(format!("{NAME}.spec"));
+ Ok(spec_path)
+}
+
+fn spec(sh: &Shell) -> Result<()> {
+ let s = update_spec(sh)?;
+ println!("Generated: {s}");
+ Ok(())
+}
+
fn impl_srpm(sh: &Shell) -> Result<Utf8PathBuf> {
{
let _g = sh.push_dir("target");