diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 48f559d85ee..52e938eefe3 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -25,6 +25,7 @@
 /SPECS-SIGNED/kernel-hci-signed/* @microsoft/cbl-mariner-kernel
 /SPECS-SIGNED/kernel-azure-signed/* @microsoft/cbl-mariner-kernel
 /SPECS-SIGNED/kernel-mstflint-signed/* @microsoft/cbl-mariner-kernel
+/SPECS-SIGNED/kernel-mshv-signed/* @microsoft/cbl-mariner-kata-containers
 
 /SPECS/grub2/* @microsoft/cbl-mariner-bootloader
 /SPECS/grubby/* @microsoft/cbl-mariner-bootloader
@@ -68,6 +69,7 @@
 /SPECS/cloud-hypervisor/* @microsoft/cbl-mariner-virtualization
 
 /SPECS/hvloader/* @microsoft/cbl-mariner-kata-containers
+/SPECS-SIGNED/hvloader-signed/* @microsoft/cbl-mariner-kata-containers
 
 /SPECS/cloud-init/* @microsoft/cbl-mariner-provisioning
 /SPECS/walinuxagent/* @microsoft/cbl-mariner-provisioning
diff --git a/.pipelines/containerSourceData/Dockerfile-Initial b/.pipelines/containerSourceData/Dockerfile-Initial
index cc6adfa9780..3dc9b7e3b9a 100644
--- a/.pipelines/containerSourceData/Dockerfile-Initial
+++ b/.pipelines/containerSourceData/Dockerfile-Initial
@@ -7,7 +7,7 @@ RUN --mount=type=bind,source=./Stage/,target=/dockerStage/ \
     tdnf install -y createrepo; \
     cp -r ${RPMS_PATH} ${LOCAL_REPO_PATH}; \
     cat /dockerStage/marinerLocalRepo.repo >> /etc/yum.repos.d/local.repo; \
-    createrepo --database ${LOCAL_REPO_PATH} --workers 10; tdnf makecache && tdnf makecache; \
+    createrepo --database ${LOCAL_REPO_PATH} --workers 10; tdnf makecache; \
     tdnf autoremove -y createrepo; \
     for rpm in "${RPMS_TO_INSTALL[@]}"; do \
         echo "RPM: $rpm"; \
diff --git a/.pipelines/containerSourceData/azurecli/Dockerfile-AzureCLI b/.pipelines/containerSourceData/azurecli/Dockerfile-AzureCLI
deleted file mode 100644
index 48754ed2a5c..00000000000
--- a/.pipelines/containerSourceData/azurecli/Dockerfile-AzureCLI
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-
-ARG BASE_IMAGE
-
-FROM $BASE_IMAGE
-
-@INCLUDE_MAIN_RUN_INSTRUCTION@
-
-# basic smoke test
-RUN az version
-
-# set default command for the container
-CMD ["bash"]
diff --git a/.pipelines/containerSourceData/azurecli/azurecli.pkg b/.pipelines/containerSourceData/azurecli/azurecli.pkg
deleted file mode 100644
index 1a90f07a0ca..00000000000
--- a/.pipelines/containerSourceData/azurecli/azurecli.pkg
+++ /dev/null
@@ -1 +0,0 @@
-azure-cli
diff --git a/.pipelines/containerSourceData/base/Dockerfile-Base-Nonroot-Template b/.pipelines/containerSourceData/base/Dockerfile-Base-Nonroot-Template
index 1a1987af456..7b46c295cad 100644
--- a/.pipelines/containerSourceData/base/Dockerfile-Base-Nonroot-Template
+++ b/.pipelines/containerSourceData/base/Dockerfile-Base-Nonroot-Template
@@ -5,14 +5,14 @@
 ARG BASE_IMAGE
 FROM $BASE_IMAGE AS BASE
 
-ARG MARINER_VERSION=2.0
+ARG AZL_VERSION=2.0
 ARG USERNAME=nonroot
 ARG USER_UID=65532
 ARG USER_GID=$USER_UID
 ARG SET_USER=$USERNAME
 
 RUN mkdir -p /staging/etc \
-    && tdnf install -y --releasever=$MARINER_VERSION shadow-utils \
+    && tdnf install -y --releasever=$AZL_VERSION shadow-utils \
     && groupadd --gid $USER_GID $USERNAME \
     && useradd --gid $USER_GID -g $USERNAME $USERNAME -u $USER_UID \
     && tdnf clean all \
diff --git a/.pipelines/containerSourceData/busybox/Dockerfile-Busybox-Template b/.pipelines/containerSourceData/base/Dockerfile-Busybox-Template
similarity index 56%
rename from .pipelines/containerSourceData/busybox/Dockerfile-Busybox-Template
rename to .pipelines/containerSourceData/base/Dockerfile-Busybox-Template
index 91a14429418..3103372b2e9 100644
--- a/.pipelines/containerSourceData/busybox/Dockerfile-Busybox-Template
+++ b/.pipelines/containerSourceData/base/Dockerfile-Busybox-Template
@@ -5,12 +5,32 @@
 ARG BASE_IMAGE
 FROM $BASE_IMAGE AS BASE
 
-ARG MARINER_VERSION=2.0
+ARG AZL_VERSION=2.0
+
+ARG RPMS
+ARG LOCAL_REPO_FILE="local.repo"
+ARG LOCAL_REPO_PATH="/localrepo"
+
+COPY ${RPMS} /WORKDIR/RPMS
+COPY ${LOCAL_REPO_FILE} /WORKDIR/REPO/local.repo
+
+# Create local repo if RPMS are provided
+# This will allow the user to install packages from the local repo
+# instead of fetching from PMC
+RUN if [ "${RPMS}" ]; then \
+        mkdir -p $LOCAL_REPO_PATH; \
+        tdnf install -y --releasever=$AZL_VERSION createrepo; \
+        cp -r /WORKDIR/RPMS ${LOCAL_REPO_PATH}; \
+        cp /WORKDIR/REPO/local.repo /etc/yum.repos.d/local.repo; \
+        createrepo --database ${LOCAL_REPO_PATH} --workers 10; \
+        tdnf makecache; \
+        tdnf autoremove -y createrepo; \
+    fi
 
 # Install busybox, glibc, and their dependencies into a staging location.
 # Staging directory is copied into the final scratch image.
 RUN mkdir /staging \
-    && tdnf install -y --releasever=$MARINER_VERSION --installroot /staging \
+    && tdnf install -y --releasever=$AZL_VERSION --installroot /staging \
        busybox glibc \
     && tdnf clean all \
     && pushd /staging \
@@ -37,5 +57,5 @@ FROM scratch
 # Copy dependencies into the scratch image.
 COPY --from=BASE /staging/ .
-
+COPY --from=BASE EULA-Container.txt /
 
 CMD [ "sh" ]
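The local-repo RUN block above only fires when the RPMS build argument is non-empty, and the build context must already contain the RPMs directory and the local.repo file — which is why docker_build_custom (further down) runs the build from $WORK_DIR. A minimal manual invocation could look like the sketch below; the base image name, tag, and paths are made up for illustration:

    # Assumes ./RPMS holds the extracted RPMs and ./local.repo points at /localrepo.
    docker build \
        --build-arg BASE_IMAGE=azl-core-temp \
        --build-arg AZL_VERSION=2.0 \
        --build-arg RPMS=RPMS \
        --build-arg LOCAL_REPO_FILE=local.repo \
        -t busybox-local:test \
        -f Dockerfile-Busybox-Template .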
diff --git a/.pipelines/containerSourceData/distroless/Dockerfile-Distroless-Nonroot-Template b/.pipelines/containerSourceData/base/Dockerfile-Distroless-Nonroot-Template
similarity index 87%
rename from .pipelines/containerSourceData/distroless/Dockerfile-Distroless-Nonroot-Template
rename to .pipelines/containerSourceData/base/Dockerfile-Distroless-Nonroot-Template
index 194ef5fd060..a4ddfdb1490 100644
--- a/.pipelines/containerSourceData/distroless/Dockerfile-Distroless-Nonroot-Template
+++ b/.pipelines/containerSourceData/base/Dockerfile-Distroless-Nonroot-Template
@@ -6,14 +6,14 @@
 ARG FINAL_IMAGE
 FROM $BASE_IMAGE AS BASE
 
-ARG MARINER_VERSION=2.0
+ARG AZL_VERSION=2.0
 ARG USERNAME=nonroot
 ARG USER_UID=65532
 ARG USER_GID=$USER_UID
 ARG SET_USER=$USERNAME
 
 RUN mkdir -p /staging/etc \
-    && tdnf install -y --releasever=$MARINER_VERSION shadow-utils \
+    && tdnf install -y --releasever=$AZL_VERSION shadow-utils \
     && groupadd --gid $USER_GID $USERNAME \
     && useradd --gid $USER_GID -g $USERNAME $USERNAME -u $USER_UID \
     && tdnf clean all \
diff --git a/.pipelines/containerSourceData/distroless/Dockerfile-Distroless-Template b/.pipelines/containerSourceData/base/Dockerfile-Distroless-Template
similarity index 100%
rename from .pipelines/containerSourceData/distroless/Dockerfile-Distroless-Template
rename to .pipelines/containerSourceData/base/Dockerfile-Distroless-Template
diff --git a/.pipelines/containerSourceData/nodejs/distroless/holdback-nodejs18.pkg b/.pipelines/containerSourceData/nodejs/distroless/holdback-nodejs18.pkg
new file mode 100644
index 00000000000..7c44c2d724f
--- /dev/null
+++ b/.pipelines/containerSourceData/nodejs/distroless/holdback-nodejs18.pkg
@@ -0,0 +1,8 @@
+bash
+bzi
+coreutils
+gmp
+grep
+libselinux
+pcre
+pcre-libs
diff --git a/.pipelines/containerSourceData/nodejs/distroless/nodejs18.pkg b/.pipelines/containerSourceData/nodejs/distroless/nodejs18.pkg
new file mode 100644
index 00000000000..4ef3b28e241
--- /dev/null
+++ b/.pipelines/containerSourceData/nodejs/distroless/nodejs18.pkg
@@ -0,0 +1,2 @@
+distroless-packages-base
+nodejs18
diff --git a/.pipelines/containerSourceData/prometheus/distroless/holdback-prometheus.pkg b/.pipelines/containerSourceData/prometheus/distroless/holdback-prometheus.pkg
new file mode 100644
index 00000000000..7c44c2d724f
--- /dev/null
+++ b/.pipelines/containerSourceData/prometheus/distroless/holdback-prometheus.pkg
@@ -0,0 +1,8 @@
+bash
+bzi
+coreutils
+gmp
+grep
+libselinux
+pcre
+pcre-libs
diff --git a/.pipelines/containerSourceData/prometheus/distroless/prometheus.pkg b/.pipelines/containerSourceData/prometheus/distroless/prometheus.pkg
new file mode 100644
index 00000000000..f293a3ce61b
--- /dev/null
+++ b/.pipelines/containerSourceData/prometheus/distroless/prometheus.pkg
@@ -0,0 +1,2 @@
+distroless-packages-base
+prometheus
diff --git a/.pipelines/containerSourceData/prometheusadapter/distroless/holdback-prometheusadapter.pkg b/.pipelines/containerSourceData/prometheusadapter/distroless/holdback-prometheusadapter.pkg
new file mode 100644
index 00000000000..7c44c2d724f
--- /dev/null
+++ b/.pipelines/containerSourceData/prometheusadapter/distroless/holdback-prometheusadapter.pkg
@@ -0,0 +1,8 @@
+bash
+bzi
+coreutils
+gmp
+grep
+libselinux
+pcre
+pcre-libs
diff --git a/.pipelines/containerSourceData/prometheusadapter/distroless/prometheusadapter.pkg b/.pipelines/containerSourceData/prometheusadapter/distroless/prometheusadapter.pkg
new file mode 100644
index 00000000000..e3becaca0cc
--- /dev/null
+++ b/.pipelines/containerSourceData/prometheusadapter/distroless/prometheusadapter.pkg
@@ -0,0 +1,2 @@
+distroless-packages-base
+prometheus-adapter
diff --git a/.pipelines/containerSourceData/python/distroless/holdback-python.pkg b/.pipelines/containerSourceData/python/distroless/holdback-python.pkg
new file mode 100644
index 00000000000..7c44c2d724f
--- /dev/null
+++ b/.pipelines/containerSourceData/python/distroless/holdback-python.pkg
@@ -0,0 +1,8 @@
+bash
+bzi
+coreutils
+gmp
+grep
+libselinux
+pcre
+pcre-libs
diff --git a/.pipelines/containerSourceData/python/distroless/python.pkg b/.pipelines/containerSourceData/python/distroless/python.pkg
new file mode 100644
index 00000000000..c1c777bedcc
--- /dev/null
+++ b/.pipelines/containerSourceData/python/distroless/python.pkg
@@ -0,0 +1,2 @@
+distroless-packages-base
+python3
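These one-package-per-line .pkg lists are consumed later by BuildGoldenContainer.sh (added further down), whose get_packages_to_install flattens them into the single space-separated RPMS_TO_INSTALL build argument. A quick illustration of that flattening, using the nodejs18.pkg contents added above:

    $ paste -s -d' ' < nodejs18.pkg
    distroless-packages-base nodejs18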
diff --git a/.pipelines/containerSourceData/scripts/BuildBaseContainers.sh b/.pipelines/containerSourceData/scripts/BuildBaseContainers.sh
index 13cc2a3f234..03139b26078 100755
--- a/.pipelines/containerSourceData/scripts/BuildBaseContainers.sh
+++ b/.pipelines/containerSourceData/scripts/BuildBaseContainers.sh
@@ -4,31 +4,60 @@
 set -e
 
-if [[ $(uname -p) == "x86_64" ]]; then
-    ARCHITECTURE="amd64"
-else
-    ARCHITECTURE="arm64"
-fi
-
-PUBLISHING_LEVEL="development"
-BRANCH_NAME="main"
-
-# parse script parameters:
-# -m -> folder containing artifacts of CBL-Mariner
-# -n -> name of the container registry
-# -o -> folder where to put artifacts to be published
-# -b -> branch name
-# -p -> publishing level
-# -x -> container source dir from cbl-mariner
-while getopts ":m:n:o:b:p:x:" OPTIONS; do
+# This script is used to build base containers and publish them to ACR.
+# The script takes the following inputs:
+# - ACR: Azure Container Registry name.
+# - CONTAINER_TARBALLS_DIR: Directory containing the tarballs for the base and distroless containers.
+# - RPMS_TARBALL: Tarball containing the RPMs to be used in the containers.
+# - CONTAINER_SRC_DIR: Directory containing the source files for the containers.
+# - OUTPUT_DIR: Directory to save the published container names.
+# - PUBLISHING_LEVEL: The publishing level for the containers. It can be "preview" or "development".
+# - REPO_PREFIX: Prefix for the repository in ACR.
+# - PUBLISH_TO_ACR: Flag to publish the containers to ACR. It can be "true" or "false".
+
+# Assuming you are in your current working directory, below should be the directory structure:
+# │   rpms.tar.gz
+# │   out
+# │   ├──
+# │   containerSourceData
+# │   ├── base
+# │   │   ├── Dockerfile-Base-Template
+# │   │   ├── Dockerfile-Base-Nonroot-Template
+# │   │   ├── Dockerfile-Busybox-Template
+# │   │   ├── Dockerfile-Distroless-Template
+# │   │   ├── Dockerfile-Distroless-Nonroot-Template
+# │   container_tarballs
+# │   ├── container_base
+# │   │   ├── core-2.0.20230607.tar.gz
+# │   ├── distroless_base
+# │   │   ├── distroless-base-2.0.20230607.tar.gz
+# │   ├── distroless_debug
+# │   │   ├── distroless-debug-2.0.20230607.tar.gz
+# │   ├── distroless_minimal
+# │   │   ├── distroless-minimal-2.0.20230607.tar.gz
+# │   marinerLocalRepo.repo
+
+# Example usage:
+# ./BuildBaseContainers.sh \
+#     -a azurelinuxpreview \
+#     -c ./container_tarballs \
+#     -k ./rpms.tar.gz \
+#     -l ./containerSourceData \
+#     -o ./out \
+#     -p development \
+#     -r "" \
+#     -q "false"
+
+while getopts ":a:c:k:l:o:p:r:q:" OPTIONS; do
   case ${OPTIONS} in
-    m ) MARINER_ARTIFACTS_FOLDER=$OPTARG;;
-    n ) CONTAINER_REGISTRY_NAME=$OPTARG
-        CONTAINER_REGISTRY_NAME_FULL="$CONTAINER_REGISTRY_NAME.azurecr.io";;
-    o ) OUTPUT_FOLDER=$OPTARG;;
-    b ) BRANCH_NAME=$OPTARG;;
+    a ) ACR=$OPTARG;;
+    c ) CONTAINER_TARBALLS_DIR=$OPTARG;;
+    k ) RPMS_TARBALL=$OPTARG;;
+    l ) CONTAINER_SRC_DIR=$OPTARG;;
+    o ) OUTPUT_DIR=$OPTARG;;
     p ) PUBLISHING_LEVEL=$OPTARG;;
-    x ) CONTAINER_SRC_DIR=$OPTARG;;
+    r ) REPO_PREFIX=$OPTARG;;
+    q ) PUBLISH_TO_ACR=$OPTARG;;
 
    \? )
       echo "Error - Invalid Option: -$OPTARG" 1>&2
@@ -41,388 +70,265 @@ while getopts ":m:n:o:b:p:x:" OPTIONS; do
   esac
 done
 
-echo "- MARINER_ARTIFACTS_FOLDER -> $MARINER_ARTIFACTS_FOLDER"
-echo "- CONTAINER_REGISTRY_NAME -> $CONTAINER_REGISTRY_NAME"
-echo "- CONTAINER_REGISTRY_NAME_FULL -> $CONTAINER_REGISTRY_NAME_FULL"
-echo "- ARCHITECTURE -> $ARCHITECTURE"
-echo "- BRANCH_NAME -> $BRANCH_NAME"
-echo "- PUBLISHING_LEVEL -> $PUBLISHING_LEVEL"
-echo "- OUTPUT_FOLDER -> $OUTPUT_FOLDER"
-
-ROOT_FOLDER="$(git rev-parse --show-toplevel)"
-
-BASE_IMAGE_TARBALL=$(find "$MARINER_ARTIFACTS_FOLDER" -name "core-[0-9.]*.tar.gz")
-if [[ ! -f $BASE_IMAGE_TARBALL ]]; then
-    echo "Error - No base image tarball in $MARINER_ARTIFACTS_FOLDER"
-    exit 1
-fi
-
-DISTROLESS_IMAGE_TARBALL=$(find "$MARINER_ARTIFACTS_FOLDER" -name "distroless-base-[0-9.]*.tar.gz")
-DISTROLESS_DEBUG_IMAGE_TARBALL=$(find "$MARINER_ARTIFACTS_FOLDER" -name "distroless-debug-[0-9.]*.tar.gz")
-DISTROLESS_MINIMAL_IMAGE_TARBALL=$(find "$MARINER_ARTIFACTS_FOLDER" -name "distroless-minimal-[0-9.]*.tar.gz")
-if [[ (! -f $DISTROLESS_IMAGE_TARBALL) || \
-    (! -f $DISTROLESS_DEBUG_IMAGE_TARBALL) || \
-    (! -f $DISTROLESS_MINIMAL_IMAGE_TARBALL) ]]; then
-    echo "Error - Missing some distroless image tarball(s) in $MARINER_ARTIFACTS_FOLDER"
-    exit 1
-fi
-
-
-echo "+++ create temp folder"
-TEMPDIR=$(mktemp -d)
-
+echo "+++ Create temp folder"
+WORK_DIR=$(mktemp -d)
 function cleanup {
-    echo "+++ remove $TEMPDIR"
-    rm -rf "$TEMPDIR"
+    echo "+++ Remove temp folder: $WORK_DIR"
+    sudo rm -rf "$WORK_DIR"
 }
 trap cleanup EXIT
 
-readonly BASE="base"
-readonly DISTROLESS="distroless"
-readonly BUSYBOX="busybox"
-
-# Use this global variable to store the most recently built base image.
-LAST_BASE_IMAGE=""
-
-# Use this global variable to store the most recently built distroless image.
-LAST_DISTROLESS_IMAGE=""
-
-# Use this global variable to store full container tag from base container image.
-# This variable is set in the create_base_image function.
-FULL_CONTAINER_TAG=""
-
-# these variables are used to create text files listing golden image names.
-readonly file_name_prefix='PublishedContainers'
-readonly file_ext='.txt'
-
-function get_container_info {
-    local container_file
-    local file_name
-    local prefix
-    local registryPrefix # (e.g.: public/cbl-mariner for container that go to MCR)
-    local temp_name
-    local repo_name
-    local __name
-    local __tag
-
-    # $1: container tarball file name
-    # $2: name [out param]
-    # $3: tag [out param]
-    # $4: acr repo
-    # $5: prefix [optional, must be the last param]
-    container_file=$1
-    __name=$2
-    __tag=$3
-    repo_name=$4
-    if [[ -n $5 ]]; then
-        prefix=$5
-    fi
-
-    # remove path and extension
-    file_name=$(basename "$container_file")
-    file_name=${file_name%.tar.gz}
-    # Mariner 2.0 preview hack (remove "-Preview-" and following char(s) from name)
-    file_name=$(echo $file_name | sed "s/-Preview-.*//")
-
-    # get container name and tag
-    oldifs=$IFS
-    IFS='#'
-    read -ra name_parts <<< "$(echo "$file_name" | sed -r 's/-([^-]*)$/#\1/')"
-    IFS=$oldifs
-    temp_name=${name_parts[0]}
-    temp_name=${temp_name//-/\/}
-
-    # build full container name (all base containers are under 'base' in config file)
-    getRegistryPrefix 'base' $PUBLISHING_LEVEL $BRANCH_NAME registryPrefix
-    if [[ -n $registryPrefix ]]; then
-        repo_name=$repo_name/$registryPrefix
-    fi
-    if [[ -n $prefix ]]; then
-        eval "$__name"="$repo_name/$prefix/$temp_name"
-    else
-        eval "$__name"="$repo_name/$temp_name"
-    fi
-    eval "$__tag"="${name_parts[1]}-$ARCHITECTURE"
+function print_inputs {
+    echo "ACR -> $ACR"
+    echo "CONTAINER_TARBALLS_DIR -> $CONTAINER_TARBALLS_DIR"
+    echo "RPMS_TARBALL -> $RPMS_TARBALL"
+    echo "CONTAINER_SRC_DIR -> $CONTAINER_SRC_DIR"
+    echo "REPO_PREFIX -> $REPO_PREFIX"
+    echo "PUBLISHING_LEVEL -> $PUBLISHING_LEVEL"
+    echo "PUBLISH_TO_ACR -> $PUBLISH_TO_ACR"
+    echo "OUTPUT_DIR -> $OUTPUT_DIR"
 }
 
-function create_base_image {
-    local container_name_prefix=$1
-    local container_type=$2
-    local container_tarball=$3
-    local dockerfile=$4
-    local container_name
-    local container_tag
-
-    get_container_info "$container_tarball" container_name container_tag "$CONTAINER_REGISTRY_NAME_FULL" "$container_name_prefix"
-
-    local full_container_name
-    full_container_name="$container_name:$container_tag"
-
-    # FULL_CONTAINER_TAG is used to tag the marinara builder image.
-    FULL_CONTAINER_TAG="$container_tag"
-
-    if [[ $container_type == "$BASE" ]]; then
-        LAST_BASE_IMAGE=$full_container_name
-    elif [[ $container_type == "$DISTROLESS" ]]; then
-        LAST_DISTROLESS_IMAGE=$full_container_name
+function validate_inputs {
+    if [[ -z "$ACR" ]]; then
+        echo "Error - ACR name cannot be empty."
+        exit 1
     fi
 
-    echo
-    echo "container_name_prefix: -> $container_name_prefix"
-    echo "container_type: -> $container_type"
-    echo "container_tarball: -> $container_tarball"
-    echo "LAST_BASE_IMAGE: -> $LAST_BASE_IMAGE"
-    echo "LAST_DISTROLESS_IMAGE: -> $LAST_DISTROLESS_IMAGE"
-    echo "full_container_name: -> $full_container_name"
-    echo "dockerfile -> $dockerfile"
-    echo
-
-    echo "----------------------------------------------------------------------"
-    echo "+++ create container $full_container_name"
-    echo "    from $(basename "$container_tarball")"
-    echo
-
-    cat "$container_tarball" | docker import - "$full_container_name"
-
-    echo "$full_container_name" >> "$TEMPDIR/$file_name_prefix-$container_type$file_ext"
-    echo "----------------------------------------------------------------------"
-
-    local containerBuildDir="$TEMPDIR/ContainerBuildDir"
-    mkdir -p "$containerBuildDir"
+    if [[ -z "$CONTAINER_TARBALLS_DIR" ]]; then
+        echo "Error - Container tarballs directory cannot be empty."
+        exit 1
+    fi
 
-    cp "$ROOT_FOLDER/pipelines/publish-containers/common/data/EULA-Container.txt" "$containerBuildDir"/
-    cp "$CONTAINER_SRC_DIR/$container_type/$dockerfile" "$containerBuildDir/Dockerfile"
+    BASE_TARBALL=$(find "$CONTAINER_TARBALLS_DIR" -name "core-[0-9.]*.tar.gz")
+    DISTROLESS_BASE_TARBALL=$(find "$CONTAINER_TARBALLS_DIR" -name "distroless-base-[0-9.]*.tar.gz")
+    DISTROLESS_DEBUG_TARBALL=$(find "$CONTAINER_TARBALLS_DIR" -name "distroless-debug-[0-9.]*.tar.gz")
+    DISTROLESS_MINIMAL_TARBALL=$(find "$CONTAINER_TARBALLS_DIR" -name "distroless-minimal-[0-9.]*.tar.gz")
+    if [[ (! -f $BASE_TARBALL) || \
+        (! -f $DISTROLESS_BASE_TARBALL) || \
+        (! -f $DISTROLESS_DEBUG_TARBALL) || \
+        (! -f $DISTROLESS_MINIMAL_TARBALL) ]]; then
+        echo "Error - Missing some tarball(s) in $CONTAINER_TARBALLS_DIR"
+        exit 1
+    fi
 
-    pushd "$containerBuildDir" > /dev/null
+    if [[ ! -f $RPMS_TARBALL ]]; then
+        echo "Error - No RPMs tarball found."
+        # exit 1
+    fi
 
-    # Build image
-    docker build . \
-        --build-arg EULA="EULA-Container.txt" \
-        --build-arg BASE_IMAGE="$full_container_name" \
-        -t "$full_container_name" \
-        --no-cache \
-        --progress=plain
+    if [ ! -d "$CONTAINER_SRC_DIR" ]; then
+        echo "Error - Container source directory does not exist."
+        exit 1
+    fi
 
-    popd > /dev/null
+    if [[ -z "$PUBLISHING_LEVEL" ]]; then
+        echo "Error - Publishing level cannot be empty."
+        exit 1
+    fi
 
-    # Clean up temp folder
-    sudo rm -rf "$containerBuildDir"
+    if [ ! -d "$OUTPUT_DIR" ]; then
+        echo "Create output directory: $OUTPUT_DIR"
+        mkdir -p "$OUTPUT_DIR"
+    fi
 }
 
-function create_base_nonroot_image {
-    local mariner_version
-    local mariner_build_arch
-
-    local base_container_full_name="$LAST_BASE_IMAGE"
-    local base_container_name=${base_container_full_name%:*}
-    local base_container_tag=${base_container_full_name#*:}
-    mariner_version=$(awk -F '.' '{print $1"."$2}' <<< "$base_container_tag") # 2.0.20220426-amd64 -> 2.0
-
-    mariner_build_arch=$(awk -F '.' '{print $3}' <<< "$base_container_tag") # 2.0.20220426-amd64 -> 20220426-amd64
-    local full_new_tag=$mariner_version-nonroot.$mariner_build_arch # 2.0-nonroot.20220426-amd64
-    local full_container_name="$base_container_name:$full_new_tag"
-    local dockerfile="Dockerfile-Base-Nonroot-Template"
-
-    echo
-    echo "base_container_full_name: -> $base_container_full_name"
-    echo "base_container_name: -> $base_container_name"
-    echo "base_container_tag: -> $base_container_tag"
-    echo "mariner_version: -> $mariner_version"
-    echo "full_container_name: -> $full_container_name"
-    echo "dockerfile -> $dockerfile"
-    echo
+function initialization {
+    echo "+++ Initialization"
 
-    echo "----------------------------------------------------------------------"
-    echo "+++ create container $full_container_name"
-    echo "    from $base_container_full_name"
-    echo
+    echo "+++ Extracting RPMs into $WORK_DIR"
+    tar -xf "$RPMS_TARBALL" -C "$WORK_DIR"
+    RPMS_DIR="RPMS"
 
-    echo "$full_container_name" >> "$TEMPDIR/$file_name_prefix-$BASE$file_ext"
-    echo "----------------------------------------------------------------------"
+    echo "+++ Copy local repo files to $WORK_DIR"
+    LOCAL_REPO_FILE="local.repo"
+    cp "$CONTAINER_SRC_DIR/marinerLocalRepo.repo" "$WORK_DIR/$LOCAL_REPO_FILE"
 
-    local containerBuildDir="$TEMPDIR/ContainerBuildDir"
-    mkdir -p "$containerBuildDir"
-
-    cp "$CONTAINER_SRC_DIR/base/$dockerfile" "$containerBuildDir/Dockerfile"
-
-    pushd "$containerBuildDir" > /dev/null
+    if [ "$PUBLISHING_LEVEL" = "preview" ]; then
+        ACR_NAME_FULL=${ACR}.azurecr.io/${REPO_PREFIX}
+    elif [ "$PUBLISHING_LEVEL" = "development" ]; then
+        ACR_NAME_FULL=${ACR}.azurecr.io
+    fi
 
-    # Build image
-    docker build . \
-        --build-arg BASE_IMAGE="$base_container_full_name" \
-        --build-arg MARINER_VERSION="$mariner_version" \
-        -t "$full_container_name" \
-        --no-cache \
-        --progress=plain
+    echo "ACR Name Full -> $ACR_NAME_FULL"
 
-    popd > /dev/null
+    if [[ $(uname -p) == "x86_64" ]]; then
+        ARCHITECTURE="amd64"
+    else
+        ARCHITECTURE="arm64"
+    fi
 
-    # Clean up temp folder
-    sudo rm -rf "$containerBuildDir"
+    echo "ARCHITECTURE -> $ARCHITECTURE"
+
+    EULA_FILE_NAME="EULA-Container.txt"
+
+    # Image types
+    BASE="base"
+    DISTROLESS="distroless"
+    BUSYBOX="busybox"
+    MARINARA="marinara"
+
+    base_tarball_file_name=$(basename "$BASE_TARBALL") # core-2.0.20230607.tar.gz
+    base_tag_tar_gz=${base_tarball_file_name##*-} # 2.0.20230607.tar.gz
+    BASE_IMAGE_TAG=${base_tag_tar_gz%.tar.gz} # 2.0.20230607
+    echo "BASE_IMAGE_TAG -> $BASE_IMAGE_TAG"
+    AZL_VERSION=${BASE_IMAGE_TAG%.*} # 2.0
+    echo "AZL_VERSION -> $AZL_VERSION"
+    BUILD_ID=${BASE_IMAGE_TAG##*.} # 20230607
+    echo "BUILD_ID -> $BUILD_ID"
+
+    IMAGE_TAG=$BASE_IMAGE_TAG-$ARCHITECTURE
+    NONROOT_IMAGE_TAG=$AZL_VERSION-nonroot.$BUILD_ID-$ARCHITECTURE
+
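The tag computation above is plain bash parameter expansion; a stand-alone sketch using the sample tarball name from the inline comments:

    base_tarball_file_name="core-2.0.20230607.tar.gz"
    base_tag_tar_gz=${base_tarball_file_name##*-}   # strip through the last '-': 2.0.20230607.tar.gz
    BASE_IMAGE_TAG=${base_tag_tar_gz%.tar.gz}       # drop the extension: 2.0.20230607
    AZL_VERSION=${BASE_IMAGE_TAG%.*}                # drop the build id: 2.0
    BUILD_ID=${BASE_IMAGE_TAG##*.}                  # keep the build id: 20230607
    # On an amd64 host this yields IMAGE_TAG=2.0.20230607-amd64
    # and NONROOT_IMAGE_TAG=2.0-nonroot.20230607-amd64.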
+    # Set various image names.
+    BASE_IMAGE_NAME="$ACR_NAME_FULL/base/core:$IMAGE_TAG"
+    BASE_NONROOT_IMAGE_NAME="$ACR_NAME_FULL/base/core:$NONROOT_IMAGE_TAG"
+    DISTROLESS_BASE_IMAGE_NAME="$ACR_NAME_FULL/distroless/base:$IMAGE_TAG"
+    DISTROLESS_BASE_NONROOT_IMAGE_NAME="$ACR_NAME_FULL/distroless/base:$NONROOT_IMAGE_TAG"
+    DISTROLESS_MINIMAL_IMAGE_NAME="$ACR_NAME_FULL/distroless/minimal:$IMAGE_TAG"
+    DISTROLESS_MINIMAL_NONROOT_IMAGE_NAME="$ACR_NAME_FULL/distroless/minimal:$NONROOT_IMAGE_TAG"
+    DISTROLESS_DEBUG_NONROOT_IMAGE_NAME="$ACR_NAME_FULL/distroless/debug:$NONROOT_IMAGE_TAG"
+    DISTROLESS_DEBUG_IMAGE_NAME="$ACR_NAME_FULL/distroless/debug:$IMAGE_TAG"
+
+    BUSYBOX_IMAGE_NAME="$ACR_NAME_FULL/busybox:$IMAGE_TAG"
+    MARINARA_IMAGE_NAME="$ACR_NAME_FULL/marinara:$IMAGE_TAG"
+
+    echo "BASE_IMAGE_NAME -> $BASE_IMAGE_NAME"
+    echo "BASE_NONROOT_IMAGE_NAME -> $BASE_NONROOT_IMAGE_NAME"
+    echo "DISTROLESS_BASE_IMAGE_NAME -> $DISTROLESS_BASE_IMAGE_NAME"
+    echo "DISTROLESS_BASE_NONROOT_IMAGE_NAME -> $DISTROLESS_BASE_NONROOT_IMAGE_NAME"
+    echo "DISTROLESS_MINIMAL_IMAGE_NAME -> $DISTROLESS_MINIMAL_IMAGE_NAME"
+    echo "DISTROLESS_MINIMAL_NONROOT_IMAGE_NAME -> $DISTROLESS_MINIMAL_NONROOT_IMAGE_NAME"
+    echo "DISTROLESS_DEBUG_IMAGE_NAME -> $DISTROLESS_DEBUG_IMAGE_NAME"
+    echo "DISTROLESS_DEBUG_NONROOT_IMAGE_NAME -> $DISTROLESS_DEBUG_NONROOT_IMAGE_NAME"
+    echo "BUSYBOX_IMAGE_NAME -> $BUSYBOX_IMAGE_NAME"
+    echo "MARINARA_IMAGE_NAME -> $MARINARA_IMAGE_NAME"
 }
 
-function create_busybox_image {
-    local mariner_version
-    local registryPrefix # (e.g.: public/cbl-mariner for container that go to MCR)
+function docker_build {
+    local image_type=$1
+    local image_full_name=$2
+    local image_tarball=$3
+    local dockerfile=$4
 
-    mariner_version=$(awk -F '.' '{print $1"."$2}' <<< "$FULL_CONTAINER_TAG") # 2.0.20220426-amd64 -> 2.0
+    echo "+++ Importing container image: $image_full_name"
+    local temp_image=${image_full_name}_temp
+    docker import - "$temp_image" < "$image_tarball"
 
-    # Get registry prefix for busybox container. Use the same registry destination as the base container.
-    getRegistryPrefix 'base' $PUBLISHING_LEVEL $BRANCH_NAME registryPrefix
+    local build_dir="$WORK_DIR/container_build_dir"
+    mkdir -p "$build_dir"
 
-    if [[ -n $registryPrefix ]]; then
-        full_busybox_container_name="$CONTAINER_REGISTRY_NAME_FULL/$registryPrefix/$BUSYBOX:$FULL_CONTAINER_TAG"
-    else
-        full_busybox_container_name="$CONTAINER_REGISTRY_NAME_FULL/$BUSYBOX:$FULL_CONTAINER_TAG"
+    ROOT_FOLDER="$(git rev-parse --show-toplevel)"
+    EULA_FILE_PATH="$ROOT_FOLDER/.pipelines/container_artifacts/data"
+    if [ -d "$EULA_FILE_PATH" ]; then
+        cp "$EULA_FILE_PATH/$EULA_FILE_NAME" "$build_dir"/
     fi
 
-    echo "----------------------------------------------------------------------"
-    echo "+++ create container $full_busybox_container_name"
-    echo
-    echo "$full_busybox_container_name" >> "$TEMPDIR/$file_name_prefix-$BUSYBOX$file_ext"
-    echo "----------------------------------------------------------------------"
-
-    local containerBuildDir="$TEMPDIR/ContainerBuildDir"
-    mkdir -p "$containerBuildDir"
-
-    cp "$CONTAINER_SRC_DIR/busybox/Dockerfile-Busybox-Template" "$containerBuildDir/Dockerfile"
+    cp "$CONTAINER_SRC_DIR/base/$dockerfile" "$build_dir/dockerfile"
 
-    pushd "$containerBuildDir" > /dev/null
+    pushd "$build_dir" > /dev/null
 
+    echo "+++ Build image: $image_full_name"
     docker build . \
-        --build-arg BASE_IMAGE="$LAST_BASE_IMAGE" \
-        --build-arg MARINER_VERSION="$mariner_version" \
-        -t "$full_busybox_container_name" \
-        --no-cache \
-        --progress=plain
+        --build-arg EULA="$EULA_FILE_NAME" \
+        --build-arg BASE_IMAGE="$temp_image" \
+        -t "$image_full_name" \
+        --no-cache
+    docker rmi "$temp_image"
 
     popd > /dev/null
+    sudo rm -rf "$build_dir"
 
-    # Clean up temp folder
-    sudo rm -rf "$containerBuildDir"
+    publish_to_acr "$image_full_name"
+    save_container_image "$image_type" "$image_full_name"
 }
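For context, docker_build's import step is what turns each pipeline rootfs tarball into a buildable single-layer image. The equivalent manual commands, with a tarball name borrowed from the directory-layout sketch above and a throwaway image name chosen for illustration:

    docker import - azl-core-temp < core-2.0.20230607.tar.gz
    docker run --rm --entrypoint cat azl-core-temp /etc/os-release   # quick sanity check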
 
-function create_marinara_image {
-    local mariner_version
-    local registryPrefix # (e.g.: public/cbl-mariner for container that go to MCR)
-
-    mariner_version=$(awk -F '.' '{print $1"."$2}' <<< "$FULL_CONTAINER_TAG") # 2.0.20220426-amd64 -> 2.0
-    marinara="marinara"
-
-    # get registry prefix for marinara container (note that marinara is under 'base' in config file)
-    getRegistryPrefix 'base' $PUBLISHING_LEVEL $BRANCH_NAME registryPrefix
-    if [[ -n $registryPrefix ]]; then
-        full_marinara_container_name="$CONTAINER_REGISTRY_NAME_FULL/$registryPrefix/$marinara:$FULL_CONTAINER_TAG"
-    else
-        full_marinara_container_name="$CONTAINER_REGISTRY_NAME_FULL/$marinara:$FULL_CONTAINER_TAG"
-    fi
-
-    marinaraSrcDir="$TEMPDIR/$marinara-src"
-    git clone "https://github.com/microsoft/$marinara.git" "$marinaraSrcDir"
-    pushd "$marinaraSrcDir"
-
-    echo "----------------------------------------------------------------------"
-    echo "+++ create container $full_marinara_container_name"
-    echo
-    echo "$full_marinara_container_name" >> "$TEMPDIR/$file_name_prefix-$marinara$file_ext"
-    echo "----------------------------------------------------------------------"
+function docker_build_custom {
+    local image_type=$1
+    local image_full_name=$2
+    local final_image_to_use=$3
+    local dockerfile=$4
 
-    # Update dockerfile-marinara to use the current base container
-    sed -E "s|^FROM mcr\..*installer$|FROM $LAST_BASE_IMAGE as installer|g" -i "dockerfile-$marinara"
+    # $WORK_DIR has $RPMS_DIR directory and $LOCAL_REPO_FILE file.
+    pushd "$WORK_DIR" > /dev/null
 
+    echo "+++ Build image: $image_full_name"
     docker build . \
+        --build-arg BASE_IMAGE="$BASE_IMAGE_NAME" \
+        --build-arg FINAL_IMAGE="$final_image_to_use" \
+        --build-arg AZL_VERSION="$AZL_VERSION" \
+        --build-arg RPMS="$RPMS_DIR" \
+        --build-arg LOCAL_REPO_FILE="$LOCAL_REPO_FILE" \
+        -t "$image_full_name" \
+        -f "$CONTAINER_SRC_DIR/base/$dockerfile" \
+        --no-cache
 
     popd > /dev/null
-    echo "+++ remove $marinaraSrcDir"
-    sudo rm -rf "$marinaraSrcDir"
-}
+    publish_to_acr "$image_full_name"
+    save_container_image "$image_type" "$image_full_name"
++}
 
-function create_distroless_nonroot_image {
-    local mariner_version
-    local mariner_build_arch
-
-    local base_container_full_name="$LAST_BASE_IMAGE"
-    local distroless_container_full_name="$LAST_DISTROLESS_IMAGE"
-    local distroless_container_name=${distroless_container_full_name%:*}
-    local distroless_container_tag=${distroless_container_full_name#*:}
-    mariner_version=$(awk -F '.' '{print $1"."$2}' <<< "$distroless_container_tag") # 2.0.20220426-amd64 -> 2.0
-
-    mariner_build_arch=$(awk -F '.' '{print $3}' <<< "$distroless_container_tag") # 2.0.20220426-amd64 -> 20220426-amd64
-    local full_new_tag=$mariner_version-nonroot.$mariner_build_arch # 2.0-nonroot.20220426-amd64
-    local full_container_name="$distroless_container_name:$full_new_tag"
-    local dockerfile="Dockerfile-Distroless-Nonroot-Template"
-
-    echo
-    echo "base_container_full_name: -> $base_container_full_name"
-    echo "distroless_container_full_name: -> $distroless_container_full_name"
-    echo "distroless_container_name: -> $distroless_container_name"
-    echo "distroless_container_tag: -> $distroless_container_tag"
-    echo "mariner_version: -> $mariner_version"
-    echo "full_container_name: -> $full_container_name"
-    echo "dockerfile -> $dockerfile"
-    echo
-    echo "----------------------------------------------------------------------"
-    echo "+++ create container $full_container_name"
-    echo "    from $distroless_container_full_name"
-    echo
-
-    echo "$full_container_name" >> "$TEMPDIR/$file_name_prefix-$DISTROLESS$file_ext"
-    echo "----------------------------------------------------------------------"
-
-    local containerBuildDir="$TEMPDIR/ContainerBuildDir"
-    mkdir -p "$containerBuildDir"
-
-    cp "$CONTAINER_SRC_DIR/distroless/$dockerfile" "$containerBuildDir/Dockerfile"
+function docker_build_marinara {
+    echo "+++ Build Marinara image: $MARINARA_IMAGE_NAME"
+    local build_dir="$WORK_DIR/marinara_build_dir"
+    mkdir -p "$build_dir"
+    git clone "https://github.com/microsoft/$MARINARA.git" "$build_dir"
+    pushd "$build_dir"
+    sed -E "s|^FROM mcr\..*installer$|FROM $BASE_IMAGE_NAME as installer|g" -i "dockerfile-$MARINARA"
 
-    pushd "$containerBuildDir" > /dev/null
+    docker build . \
-    # Build image
-    docker build . \
-        --build-arg BASE_IMAGE="$base_container_full_name" \
-        --build-arg FINAL_IMAGE="$distroless_container_full_name" \
-        --build-arg MARINER_VERSION="$mariner_version" \
-        -t "$full_container_name" \
+        -t "$MARINARA_IMAGE_NAME" \
+        -f dockerfile-$MARINARA \
+        --build-arg AZL_VERSION="$AZL_VERSION" \
         --no-cache \
         --progress=plain
 
     popd > /dev/null
+    sudo rm -rf "$build_dir"
 
-    # Clean up temp folder
-    sudo rm -rf "$containerBuildDir"
+    publish_to_acr "$MARINARA_IMAGE_NAME"
+    save_container_image "$MARINARA" "$MARINARA_IMAGE_NAME"
 }
 
-function start_building_containers {
-    echo
-    echo "====================================================================="
-    echo "Create Base and Distroless Mariner Containers"
-    echo "====================================================================="
-    echo
+function publish_to_acr {
+    local image=$1
"$PUBLISH_TO_ACR" =~ [Tt]rue ]]; then + echo "+++ Skip publishing to ACR" + return + fi + echo "+++ Publish container $image" + echo "login into ACR: $ACR" + az acr login --name "$ACR" + docker image push "$image" +} + +function save_container_image { + local image_type=$1 + local image_name=$2 + echo "+++ Save image name to file PublishedContainers-$image_type.txt" + echo "$image_name" >> "$OUTPUT_DIR/PublishedContainers-$image_type.txt" +} - create_base_image $BASE $BASE "$BASE_IMAGE_TARBALL" "Dockerfile-Base-Template" - create_base_nonroot_image +function build_images { + echo "+++ Build images" - create_busybox_image - create_marinara_image + docker_build $BASE "$BASE_IMAGE_NAME" "$BASE_TARBALL" "Dockerfile-Base-Template" + docker_build $DISTROLESS "$DISTROLESS_BASE_IMAGE_NAME" "$DISTROLESS_BASE_TARBALL" "Dockerfile-Distroless-Template" + docker_build $DISTROLESS "$DISTROLESS_MINIMAL_IMAGE_NAME" "$DISTROLESS_MINIMAL_TARBALL" "Dockerfile-Distroless-Template" + docker_build $DISTROLESS "$DISTROLESS_DEBUG_IMAGE_NAME" "$DISTROLESS_DEBUG_TARBALL" "Dockerfile-Distroless-Template" - create_base_image "" $DISTROLESS "$DISTROLESS_IMAGE_TARBALL" "Dockerfile-Distroless-Template" - create_distroless_nonroot_image + docker_build_custom $BASE "$BASE_NONROOT_IMAGE_NAME" "" "Dockerfile-Base-Nonroot-Template" + docker_build_custom $DISTROLESS "$DISTROLESS_BASE_NONROOT_IMAGE_NAME" "$DISTROLESS_BASE_IMAGE_NAME" "Dockerfile-Distroless-Nonroot-Template" + docker_build_custom $DISTROLESS "$DISTROLESS_MINIMAL_NONROOT_IMAGE_NAME" "$DISTROLESS_MINIMAL_IMAGE_NAME" "Dockerfile-Distroless-Nonroot-Template" + docker_build_custom $DISTROLESS "$DISTROLESS_DEBUG_NONROOT_IMAGE_NAME" "$DISTROLESS_DEBUG_IMAGE_NAME" "Dockerfile-Distroless-Nonroot-Template" - create_base_image "" $DISTROLESS "$DISTROLESS_DEBUG_IMAGE_TARBALL" "Dockerfile-Distroless-Template" - create_distroless_nonroot_image + docker_build_custom $BUSYBOX "$BUSYBOX_IMAGE_NAME" "" "Dockerfile-Busybox-Template" - create_base_image "" $DISTROLESS "$DISTROLESS_MINIMAL_IMAGE_TARBALL" "Dockerfile-Distroless-Template" - create_distroless_nonroot_image + docker_build_marinara } -# source the CommonFunctions script to get the following function: -# - save_container_list -# - getRegistryPrefix -source $CONTAINER_SRC_DIR/scripts/CommonFunctions.sh - -start_building_containers -save_container_list +print_inputs +validate_inputs +initialization +build_images diff --git a/.pipelines/containerSourceData/scripts/BuildCdiContainers.sh b/.pipelines/containerSourceData/scripts/BuildCdiContainers.sh deleted file mode 100755 index 66cc834497a..00000000000 --- a/.pipelines/containerSourceData/scripts/BuildCdiContainers.sh +++ /dev/null @@ -1,207 +0,0 @@ -#!/bin/bash -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
diff --git a/.pipelines/containerSourceData/scripts/BuildCdiContainers.sh b/.pipelines/containerSourceData/scripts/BuildCdiContainers.sh
deleted file mode 100755
index 66cc834497a..00000000000
--- a/.pipelines/containerSourceData/scripts/BuildCdiContainers.sh
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/bin/bash
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-
-function create_cdi_container_image_base {
-    local componentName
-    local baseContainerName
-    local baseContainerTag
-    local initialDockerfile
-    local containerBuildDir
-    local binaryPath
-    local containerUser
-    local packagesToInstall
-
-    # $1: sub-component name
-    # $2: container type
-    # $3: base container name
-    # $4: base container tag
-    # $5: packages to install
-    # $6: initial Dockerfile
-    # $7: binary path
-    # $8: container user
-    componentName=$1
-    containerType=$2
-    baseContainerName=$3
-    baseContainerTag=$4
-    packagesToInstall=$5
-    initialDockerfile=$6
-    binaryPath=$7
-    containerUser=$8
-
-    echo "------ Display Arguments ------"
-    echo "Component Name: -> $componentName"
-    echo "Container Type: -> $containerType"
-    echo "Base Container Name: -> $baseContainerName"
-    echo "Base Container Tag: -> $baseContainerTag"
-    echo "Packages to Install: -> $packagesToInstall"
-    echo "Initial Dockerfile: -> $initialDockerfile"
-    echo "Binary Path: -> $binaryPath"
-    echo "Container User: -> $containerUser"
-
-    # compose the container name. E.g. for branch-main this will look like
-    # cblmarinermain.azurecr.io/kubevirt/cdi-apiserver:1.51.0-1-cm2.0.20220811-amd64
-    # cblmarinermain.azurecr.io -> repo
-    # kubevirt -> CDI_FOLDER_PREFIX
-    # cdi-apiserver -> $containerType (sub component)
-    # 1.51.0-1-cm2.0.20220811-amd64 -> version for cdi v1.51.0 rpms with base version details
-
-    local originalContainerName="$CONTAINER_REGISTRY_NAME_FULL/base/$CDI_FOLDER_PREFIX/$containerType"
-
-    echo
-    echo "----------------------------------------------------------------------"
-    echo "+++ create container $originalContainerName"
-
-    containerBuildDir="$TEMPDIR/ContainerBuildDir"
-    hostMountedDir="$TEMPDIR/ContainerBuildDir/Stage"
-    newDockerStorageLocation="$TEMPDIR/storage"
-
-    mkdir -p "$containerBuildDir"
-    mkdir -p "$hostMountedDir"
-    mkdir -p "$newDockerStorageLocation"
-
-    # Copy files into docker context directory
-    tar -xf "$MARINER_RPMS_TARBALL" -C "$hostMountedDir"/
-    cp "$CONTAINER_SRC_DIR/marinerLocalRepo.repo" "$hostMountedDir"/
-    cp "$CONTAINER_SRC_DIR/Dockerfile-Initial" "$containerBuildDir/Dockerfile-Initial"
-    cp $initialDockerfile $containerBuildDir/Dockerfile
-
-    # Workaround till proper binaries are built as part of the cdi rpm & renames are removed
-    # https://github.com/microsoft/CBL-Mariner/pull/5708/files#
-    cp "$CONTAINER_SRC_DIR/$CDI_BASE_COMPONENT/configuration-files"/* "$containerBuildDir"
-    pushd $containerBuildDir > /dev/null
-
-    # set Dockerfile
-    echo "+++ Updating Dockerfile"
-    mainRunInstruction=$(cat Dockerfile-Initial)
-    sed -E "s|@INCLUDE_MAIN_RUN_INSTRUCTION@|$mainRunInstruction|g" -i Dockerfile
-
-    SetDockerDefaultStorageLocation "$newDockerStorageLocation"
-
-    # Build image
-    docker buildx build \
-        --build-arg BASE_IMAGE="$baseContainerName/core:$baseContainerTag" \
-        --build-arg RPMS_TO_INSTALL="$packagesToInstall" \
-        --build-arg BINARY_NAME="$(basename $binaryPath)" \
-        --build-arg USER="$containerUser" \
-        -t "$originalContainerName" --no-cache --progress=plain .
-
-    # Get the installed package's version
-    echo "+++ Get version of the installed package in the container"
-    local containerId=$(docker run --entrypoint /bin/bash -dt "$originalContainerName")
-    local installedPackage=$(docker exec "$containerId" rpm -qa | grep ^"$componentName") # nodejs-16.16.0-1.cm2.x86_64
-    echo "Full Installed Package: -> $installedPackage"
-    local componentVersion=$(echo "$installedPackage" | awk '{n=split($0,a,"-")};{split(a[n],b,".")}; {print a[n-1]"-"b[1]}') # 16.16.0-1
-    echo "Component Version -> $componentVersion"
-    docker rm -f "$containerId"
-
-    # Rename the image to include package version
-    local containerName="$originalContainerName:$componentVersion.$baseContainerTag"
-    local baseRegistryPrefix=""
-    local goldenRegistryPrefix=""
-    getRegistryPrefix 'base' $PUBLISHING_LEVEL $BRANCH_NAME baseRegistryPrefix
-    getRegistryPrefix $GOLDEN_CONTAINER_IMAGE $PUBLISHING_LEVEL $BRANCH_NAME goldenRegistryPrefix
-    if [[ -n $goldenRegistryPrefix ]]; then
-        if [[ -n $baseRegistryPrefix && \
-            $containerName == *"$baseRegistryPrefix"* ]]; then
-            # replace base container registry prefix by golden container registry prefix
-            echo "replace $baseRegistryPrefix with $goldenRegistryPrefix in $containerName"
-            containerName=${containerName/"$baseRegistryPrefix"/"$goldenRegistryPrefix"}
-        else
-            # add golden container registry prefix
-            echo "add $goldenRegistryPrefix prefix to $containerName"
-            containerName=${containerName/"$CONTAINER_REGISTRY_NAME_FULL"/"$CONTAINER_REGISTRY_NAME_FULL/$goldenRegistryPrefix"}
-        fi
-    fi
-
-    docker image tag "$originalContainerName" "$containerName"
-    docker rmi -f "$originalContainerName"
-    echo "Container Name: -> $containerName"
-
-    local containerNameSanitized=$(echo "$containerName" | tr '/' '-' | tr ':' '_')
-    publish_container "$containerName"
-
-    # Call generate_container_sbom function to generate SBOM
-    generate_container_sbom \
-        "$componentName" \
-        "$baseContainerName" \
-        "$baseContainerTag" \
-        "$containerName" \
-        "$componentVersion" \
-        "$containerNameSanitized"
-
-    local containerTypeNoDash=${containerType//-/} # Removes dash from containerType. Ex: azure-cli -> azurecli
-    echo "$containerName" >> $TEMPDIR/$file_name_prefix-$containerTypeNoDash$file_ext
-
-    ResetDockerDefaultStorageLocation "$newDockerStorageLocation"
-
-    # Clean up docker storage folder
-    sudo rm -rf "$newDockerStorageLocation"
-
-
-    # clean up temp folder
-    popd > /dev/null
-    sudo rm -rf $containerBuildDir
-
-    echo "----------------------------------------------------------------------"
-}
-
-# Create containers for cdi-apiserver, cdi-cloner, cdi-controller, cdi-importer,
-# cdi-operator, cdi-uploadproxy, cdi-uploadserver for CDI_BASE_COMPONENT
-function create_cdi_subcomp_containers {
-    declare -A cdi_container_components
-    declare -A cdi_binary_path
-    declare -A cdi_container_user
-
-    local sub_components
-    local CDI_PACKAGE_BASE="containerized-data-importer"
-
-    sub_components=('api' 'cloner' 'controller' 'importer' 'operator' 'uploadproxy' 'uploadserver')
-
-    # populate the cdi container names
-    for comp in ${sub_components[@]}
-    do
-        cdi_container_components[$comp]=$comp
-
-        # replace 'api with 'apiserver'
-        [ "$comp" = "api" ] && cdi_container_components[$comp]='apiserver'
-
-        cdi_binary_path[$comp]="/usr/bin/cdi-${cdi_container_components[$comp]}"
-
-        # Setting the active user in the container based on upstream images
-        # By default set the user to be a non-root user (who is in the root group)
-        cdi_container_user[$comp]=1001
-    done
-
-    mkdir -p $OUTPUT_FOLDER/CONTAINER_LISTS_FOLDER
-
-    for comp in ${sub_components[@]}
-    do
-        # To build for specific versions - include it here with the name
-        dependency_component=$CDI_PACKAGE_BASE-$comp
-        echo "+++ CDI component name for $comp set at ${cdi_container_components[$comp]}"
-        cdi_comp=$CDI_BASE_COMPONENT-${cdi_container_components[$comp]}
-
-        local pkgsFileName="$comp.pkg"
-        local packagesToInstall=()
-        getPkgsFromFile $CDI_BASE_COMPONENT $pkgsFileName packagesToInstall
-        local packages="${packagesToInstall[*]}"
-
-        echo "+++ CDI binary path for $comp ==> ${cdi_binary_path[$comp]}"
-        echo "+++ create container based on $base_container_name:$base_container_tag for $dependency_component"
-        create_cdi_container_image_base \
-            "$dependency_component" \
-            "$cdi_comp" \
-            "$base_container_name"\
-            "$base_container_tag" \
-            "$packages" \
-            "$CONTAINER_SRC_DIR/$CDI_BASE_COMPONENT/Dockerfile-$cdi_comp" \
-            ${cdi_binary_path[$comp]} \
-            ${cdi_container_user[$comp]}
-
-        # Save text files generated in TEMPDIR
-        echo "+++ publish container list into pipeline artifacts"
-        cp $TEMPDIR/$file_name_prefix-*$file_ext $OUTPUT_FOLDER/CONTAINER_LISTS_FOLDER
-
-    done
-}
diff --git a/.pipelines/containerSourceData/scripts/BuildCertManagerContainers.sh b/.pipelines/containerSourceData/scripts/BuildCertManagerContainers.sh
deleted file mode 100755
index df650ab4d79..00000000000
--- a/.pipelines/containerSourceData/scripts/BuildCertManagerContainers.sh
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/bin/bash
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-
-function create_cert_manager_container_image_base {
-    local componentName
-    local baseContainerName
-    local baseContainerTag
-    local originalContainerName
-    local initialDockerfile
-    local binaryPath
-    local packagesToInstall
-
-    # $1: component name
-    # $2: container name
-    # $3: container tag
-    # $4: packages to install
-    # $5: initial Dockerfile
-    # $6: the path of the binary file to use as the container entrypoint
-    componentName=$1
-    baseContainerName=$2
-    baseContainerTag=$3
-    packagesToInstall=$4
-    initialDockerfile=$5
-    binaryPath=$6
-
-    originalContainerName="$CONTAINER_REGISTRY_NAME_FULL/base/$componentName"
-
-    echo
-    echo "----------------------------------------------------------------------"
-    echo "+++ create container $originalContainerName"
-
-    local containerBuildDir="$TEMPDIR/ContainerBuildDir"
-    hostMountedDir="$TEMPDIR/ContainerBuildDir/Stage"
-    newDockerStorageLocation="$TEMPDIR/storage"
-
-    mkdir -p "$containerBuildDir"
-    mkdir -p "$hostMountedDir"
-    mkdir -p "$newDockerStorageLocation"
-
-    # Copy files into docker context directory
-    tar -xf "$MARINER_RPMS_TARBALL" -C "$hostMountedDir"/
-    cp "$CONTAINER_SRC_DIR/marinerLocalRepo.repo" "$hostMountedDir"/
-    cp "$CONTAINER_SRC_DIR/Dockerfile-Initial" "$containerBuildDir/Dockerfile-Initial"
-    cp $initialDockerfile $containerBuildDir/Dockerfile
-
-    pushd $containerBuildDir > /dev/null
-
-    # set Dockerfile
-    echo "+++ Updating Dockerfile"
-    mainRunInstruction=$(cat Dockerfile-Initial)
-    sed -E "s|@INCLUDE_MAIN_RUN_INSTRUCTION@|$mainRunInstruction|g" -i Dockerfile
-    sed -i -E "s|@BINARY_PATH@|\"$binaryPath\"|" "$containerBuildDir/Dockerfile"
-    SetDockerDefaultStorageLocation "$newDockerStorageLocation"
-
-    # Build image
-    docker buildx build \
-        --build-arg BASE_IMAGE="$baseContainerName/core:$baseContainerTag" \
-        --build-arg RPMS_TO_INSTALL="$packagesToInstall" \
-        -t "$originalContainerName" --no-cache --progress=plain .
-
-    # Get the installed package's version
-    echo "+++ Get version of the installed package in the container"
-
-    local containerId=$(docker run --entrypoint /bin/bash -dt "$originalContainerName")
-    local installedPackage=$(docker exec "$containerId" rpm -qa | grep ^"$componentName") # nodejs-16.16.0-1.cm2.x86_64
-    echo "Full Installed Package: -> $installedPackage"
-    local componentVersion=$(echo "$installedPackage" | awk '{n=split($0,a,"-")};{split(a[n],b,".")}; {print a[n-1]"-"b[1]}') # 16.16.0-1
-    echo "Component Version -> $componentVersion"
-    docker rm -f "$containerId"
-
-    # Rename the image to include package version
-    local containerName="$originalContainerName:$componentVersion.$baseContainerTag"
-    # replace base container registry prefix by golden container registry prefix (if any)
-    local baseRegistryPrefix=""
-    local goldenRegistryPrefix=""
-    getRegistryPrefix 'base' $PUBLISHING_LEVEL $BRANCH_NAME baseRegistryPrefix
-    getRegistryPrefix $GOLDEN_CONTAINER_IMAGE $PUBLISHING_LEVEL $BRANCH_NAME goldenRegistryPrefix
-    if [[ -n $goldenRegistryPrefix ]]; then
-        if [[ -n $baseRegistryPrefix && \
-            $containerName == *"$baseRegistryPrefix"* ]]; then
-            # replace base container registry prefix by golden container registry prefix
-            echo "replace $baseRegistryPrefix with $goldenRegistryPrefix in $containerName"
-            containerName=${containerName/"$baseRegistryPrefix"/"$goldenRegistryPrefix"}
-        else
-            # add golden container registry prefix
-            echo "add $goldenRegistryPrefix prefix to $containerName"
-            containerName=${containerName/"$CONTAINER_REGISTRY_NAME_FULL"/"$CONTAINER_REGISTRY_NAME_FULL/$goldenRegistryPrefix"}
-        fi
-    fi
-
-    docker image tag "$originalContainerName" "$containerName"
-    docker rmi -f "$originalContainerName"
-    echo "Container Name: -> $containerName"
-
-    local containerNameSanitized=$(echo "$containerName" | tr '/' '-' | tr ':' '_')
-
-    publish_container "$containerName"
-
-    # Call generate_container_sbom function to generate SBOM
-    generate_container_sbom \
-        "$componentName" \
-        "$baseContainerName" \
-        "$baseContainerTag" \
-        "$containerName" \
-        "$componentVersion" \
-        "$containerNameSanitized"
-
-    echo "$containerName" >> $TEMPDIR/$file_name_prefix-$componentName$file_ext
-
-    ResetDockerDefaultStorageLocation "$newDockerStorageLocation"
-
-    # Clean up docker storage folder
-    sudo rm -rf "$newDockerStorageLocation"
-
-    # clean up temp folder
-    popd > /dev/null
-    sudo rm -rf $containerBuildDir
-
-    echo "----------------------------------------------------------------------"
-}
-
-function create_cert_manager_subcomp_containers {
-    local sub_components
-    local dependency_component
-    local binary_path
-
-    sub_components=('acmesolver' 'cainjector' 'controller' 'cmctl' 'webhook')
-
-    mkdir -p $OUTPUT_FOLDER/CONTAINER_LISTS_FOLDER
-
-    for subcomp in ${sub_components[@]}
-    do
-        dependency_component=$CERT_MANAGER-$subcomp
-        binary_path=/usr/bin/$subcomp
-        local pkgsFileName="$subcomp.pkg"
-        local packagesToInstall=()
-        getPkgsFromFile $CERT_MANAGER_NO_DASH $pkgsFileName packagesToInstall
-        local packages="${packagesToInstall[*]}"
-
-        echo "+++ create container based on $base_container_name:$base_container_tag for $dependency_component"
-        create_cert_manager_container_image_base \
-            "$dependency_component" \
-            "$base_container_name" \
-            "$base_container_tag" \
-            "$packages" \
-            "$CONTAINER_SRC_DIR/$CERT_MANAGER_NO_DASH/Dockerfile-cert-manager" \
-            "$binary_path"
-
-        # Save text files generated in TEMPDIR
-        echo "+++ publish container list into pipeline artifacts"
-        cp $TEMPDIR/$file_name_prefix-*$file_ext $OUTPUT_FOLDER/CONTAINER_LISTS_FOLDER
-
-    done
-}
diff --git a/.pipelines/containerSourceData/scripts/BuildGoldenContainer.sh b/.pipelines/containerSourceData/scripts/BuildGoldenContainer.sh
new file mode 100755
index 00000000000..0e9a85836f3
--- /dev/null
+++ b/.pipelines/containerSourceData/scripts/BuildGoldenContainer.sh
@@ -0,0 +1,379 @@
+#!/bin/bash
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+set -e
+
+# This script is used to build a golden container image for a given component.
+# The script takes the following inputs:
+# - a) Base container image name (e.g. mcr.microsoft.com/cbl-mariner/base/core:2.0)
+# - b) ACR name (e.g. azurelinuxpreview, acrafoimages, etc.)
+# - c) Container repository name (e.g. base/nodejs, base/postgres, base/kubevirt/cdi-apiserver, etc.)
+# - d) Image name (e.g. nodejs, postgres, cdi, etc.)
+# - e) Component name (e.g. nodejs18, postgresql, containerized-data-importer-api, etc.)
+# - f) Package file name (e.g. nodejs18.pkg, postgres.pkg, api.pkg, etc.)
+# - g) Dockerfile name (e.g. Dockerfile-nodejs, Dockerfile-Postgres, Dockerfile-cdi-apiserver, etc.)
+# - h) Docker build arguments (e.g. '--build-arg BINARY_NAME="cdi-apiserver" --build-arg USER=1001')
+# - i) Dockerfile text replacement (e.g. '@BINARY_PATH@ \"/usr/bin/acmesolver\"')
+# - j) Output directory for container artifacts.
+# - k) RPMS tarball file path (e.g. ./rpms.tar.gz)
+# - l) Container source directory (e.g. ~/workspace/CBL-Mariner/.pipelines/containerSourceData)
+# - m) Is HCI image (e.g. true, false. HCI images have different naming convention)
+# - n) Use rpm -qa command (e.g. true, false. Some images use rpm -qa command to get installed package)
+# - o) Repo prefix (e.g. public/cbl-mariner, unlisted/cbl-mariner, etc.)
+# - p) Publishing level (e.g. preview, development)
+# - q) Publish to ACR (e.g. true, false. If true, the script will push the container to ACR)
+# - r) Create SBOM (e.g. true, false. If true, the script will create SBOM for the container)
+# - s) SBOM tool path.
+# - t) Script to create SBOM for the container image.
+# - u) Create Distroless container (e.g. true, false. If true, the script will also create a distroless container)
+
+# Assuming you are in your current working directory, below should be the directory structure:
+# │   rpms.tar.gz
+# │   OUTPUT
+# │   ├──
+
+# Assuming CBL-Mariner repo is cloned in your home directory, below should be the directory structure:
+# ~/CBL-Mariner/.pipelines/containerSourceData
+# ├── nodejs
+# │   ├── distroless
+# │   │   ├── holdback-nodejs18.pkg
+# │   │   ├── nodejs18.pkg
+# │   ├── Dockerfile-Nodejs
+# │   ├── nodejs18.pkg
+# ├── configuration
+# │   ├── acrRepoV2.json
+# ├── scripts
+# │   ├── BuildGoldenContainer.sh
+# ├── Dockerfile-Initial
+# ├── marinerLocalRepo.repo
+
+# Example usage:
+# /bin/bash ~/CBL-Mariner/.pipelines/containerSourceData/scripts/BuildGoldenContainer.sh \
+#     -a "mcr.microsoft.com/cbl-mariner/base/core:2.0" -b azurelinuxlocal \
+#     -c "base/nodejs" -d "nodejs" -e "nodejs18" -f nodejs18.pkg -g Dockerfile-Nodejs \
+#     -j OUTPUT -k ./rpms.tar.gz -l ~/CBL-Mariner/.pipelines/containerSourceData \
+#     -m "false" -n "false" -p development -q "false" -u "true"
+
+while getopts ":a:b:c:d:e:f:g:h:i:j:k:l:m:n:o:p:q:r:s:t:u:" OPTIONS; do
+    case ${OPTIONS} in
+        a ) BASE_IMAGE_NAME_FULL=$OPTARG;;
+        b ) ACR=$OPTARG;;
+        c ) REPOSITORY=$OPTARG;;
+        d ) IMAGE=$OPTARG;;
+        e ) COMPONENT=$OPTARG;;
+        f ) PACKAGE_FILE=$OPTARG;;
+        g ) DOCKERFILE=$OPTARG;;
+        h ) DOCKER_BUILD_ARGS=$OPTARG;;
+        i ) DOCKERFILE_TEXT_REPLACEMENT=$OPTARG;;
+        j ) OUTPUT_DIR=$OPTARG;;
+        k ) RPMS_TARBALL=$OPTARG;;
+        l ) CONTAINER_SRC_DIR=$OPTARG;;
+        m ) IS_HCI_IMAGE=$OPTARG;;
+        n ) USE_RPM_QA_CMD=$OPTARG;;
+        o ) REPO_PREFIX=$OPTARG;;
+        p ) PUBLISHING_LEVEL=$OPTARG;;
+        q ) PUBLISH_TO_ACR=$OPTARG;;
+        r ) CREATE_SBOM=$OPTARG;;
+        s ) SBOM_TOOL_PATH=$OPTARG;;
+        t ) SBOM_SCRIPT=$OPTARG;;
+        u ) DISTROLESS=$OPTARG;;
+
+        \? )
+            echo "Error - Invalid Option: -$OPTARG" 1>&2
+            exit 1
+            ;;
+        : )
+            echo "Error - Invalid Option: -$OPTARG requires an argument" 1>&2
+            exit 1
+            ;;
+    esac
+done
+
+echo "+++ Create temp folder"
+WORK_DIR=$(mktemp -d)
+function cleanup {
+    echo "+++ Remove temp folder: $WORK_DIR"
+    sudo rm -rf "$WORK_DIR"
+}
+trap cleanup EXIT
+
+function print_inputs {
+    echo "BASE_IMAGE_NAME_FULL -> $BASE_IMAGE_NAME_FULL"
+    echo "ACR -> $ACR"
+    echo "REPOSITORY -> $REPOSITORY"
+    echo "IMAGE -> $IMAGE"
+    echo "COMPONENT -> $COMPONENT"
+    echo "PACKAGE_FILE -> $PACKAGE_FILE"
+    echo "DOCKERFILE -> $DOCKERFILE"
+    echo "DOCKER_BUILD_ARGS -> $DOCKER_BUILD_ARGS"
+    echo "DOCKERFILE_TEXT_REPLACEMENT -> $DOCKERFILE_TEXT_REPLACEMENT"
+    echo "OUTPUT_DIR -> $OUTPUT_DIR"
+    echo "RPMS_TARBALL -> $RPMS_TARBALL"
+    echo "CONTAINER_SRC_DIR -> $CONTAINER_SRC_DIR"
+    echo "IS_HCI_IMAGE -> $IS_HCI_IMAGE"
+    echo "USE_RPM_QA_CMD -> $USE_RPM_QA_CMD"
+    echo "REPO_PREFIX -> $REPO_PREFIX"
+    echo "PUBLISHING_LEVEL -> $PUBLISHING_LEVEL"
+    echo "PUBLISH_TO_ACR -> $PUBLISH_TO_ACR"
+    echo "CREATE_SBOM -> $CREATE_SBOM"
+    echo "SBOM_TOOL_PATH -> $SBOM_TOOL_PATH"
+    echo "SBOM_SCRIPT -> $SBOM_SCRIPT"
+    echo "DISTROLESS -> $DISTROLESS"
+}
+
+function validate_inputs {
+    if [[ -z "$BASE_IMAGE_NAME_FULL" ]]; then
+        echo "Error - Base container image name cannot be empty."
+        exit 1
+    fi
+
+    if [[ -z "$ACR" ]]; then
+        echo "Error - ACR name cannot be empty."
+        exit 1
+    fi
+
+    if [[ -z "$REPOSITORY" ]]; then
+        echo "Error - Container repository name cannot be empty."
+        exit 1
+    fi
+
+    if [[ -z "$IMAGE" ]]; then
+        echo "Error - Image name cannot be empty."
+        exit 1
+    fi
+
+    if [[ -z "$PACKAGE_FILE" ]]; then
+        echo "Error - Package file name cannot be empty."
+        exit 1
+    fi
+
+    if [[ -z "$DOCKERFILE" ]]; then
+        echo "Error - Dockerfile name cannot be empty."
+        exit 1
+    fi
+
+    if [ ! -d "$OUTPUT_DIR" ]; then
+        echo "Create output directory: $OUTPUT_DIR"
+        mkdir -p "$OUTPUT_DIR"
+    fi
+
+    if [[ ! -f $RPMS_TARBALL ]]; then
+        echo "Error - No RPMs tarball found."
+        exit 1
+    fi
+
+    if [ ! -d "$CONTAINER_SRC_DIR" ]; then
+        echo "Error - Container source directory does not exist."
+        exit 1
+    fi
+
+    if [[ -z "$PUBLISHING_LEVEL" ]]; then
+        echo "Error - Publishing level cannot be empty."
+        exit 1
+    fi
+
+    if [[ "$CREATE_SBOM" =~ [Tt]rue ]]; then
+        if [[ -z "$SBOM_TOOL_PATH" ]] ; then
+            echo "Error - SBOM tool path cannot be empty."
+            exit 1
+        fi
+        if [[ ! -f "$SBOM_SCRIPT" ]]; then
+            echo "Error - SBOM script does not exist."
+            exit 1
+        fi
+    fi
+}
+
+function initialization {
+    echo "+++ Initialization"
+    if [ "$PUBLISHING_LEVEL" = "preview" ]; then
+        GOLDEN_IMAGE_NAME=${ACR}.azurecr.io/${REPO_PREFIX}/${REPOSITORY}
+    elif [ "$PUBLISHING_LEVEL" = "development" ]; then
+        GOLDEN_IMAGE_NAME=${ACR}.azurecr.io/${REPOSITORY}
+    fi
+
+    BASE_IMAGE_NAME=${BASE_IMAGE_NAME_FULL%:*} # mcr.microsoft.com/cbl-mariner/base/core
+    BASE_IMAGE_TAG=${BASE_IMAGE_NAME_FULL#*:} # 2.0
+    AZURE_LINUX_VERSION=${BASE_IMAGE_TAG%.*} # 2.0
+
+    # For Azure Linux 2.0, we have shipped the container images with
+    # the below value of DISTRO_IDENTIFIER in the image tag.
+    # TODO: We may need to update this value for Azure Linux 3.0.
+    DISTRO_IDENTIFIER="cm"
+
+    echo "Golden Image Name -> $GOLDEN_IMAGE_NAME"
+    echo "Base ACR Container Name -> $BASE_IMAGE_NAME"
+    echo "Base ACR Container Tag -> $BASE_IMAGE_TAG"
+    echo "Azure Linux Version -> $AZURE_LINUX_VERSION"
+    echo "Distro Identifier -> $DISTRO_IDENTIFIER"
+}
+
+function prepare_dockerfile {
+    echo "+++ Prepare dockerfile"
+    # Copy original dockerfile from CBL-Mariner repo.
+    cp "$CONTAINER_SRC_DIR/$IMAGE/$DOCKERFILE" "$WORK_DIR/dockerfile"
+
+    # Update the copied dockerfile for later use in container build.
+    mainRunInstruction=$(cat "$CONTAINER_SRC_DIR/Dockerfile-Initial")
+    sed -E "s|@INCLUDE_MAIN_RUN_INSTRUCTION@|$mainRunInstruction|g" -i "$WORK_DIR/dockerfile"
+
+    if [ -n "$DOCKERFILE_TEXT_REPLACEMENT" ]; then
+        TEXT_REPLACEMENT_ARRAY=($DOCKERFILE_TEXT_REPLACEMENT)
+        sed -E "s|${TEXT_REPLACEMENT_ARRAY[0]}|${TEXT_REPLACEMENT_ARRAY[1]}|g" -i "$WORK_DIR/dockerfile"
+    fi
+
+    echo "    Output content of final dockerfile"
+    echo "------------------------------------"
+    cat "$WORK_DIR/dockerfile"
+    echo ""
+}
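prepare_dockerfile splices the shared Dockerfile-Initial body into the per-image dockerfile wherever the @INCLUDE_MAIN_RUN_INSTRUCTION@ token appears. Passing a multi-line replacement to sed would normally fail; this appears to work only because every line of Dockerfile-Initial ends in a backslash continuation, so each embedded newline reaches sed already escaped. A stand-alone sketch of the same substitution, with illustrative file names:

    mainRunInstruction=$(cat Dockerfile-Initial)   # every line ends with '\'
    sed -E "s|@INCLUDE_MAIN_RUN_INSTRUCTION@|$mainRunInstruction|g" -i dockerfile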
+ + echo "" + docker buildx build $DOCKER_BUILD_ARGS \ + --build-arg BASE_IMAGE="$BASE_IMAGE_NAME_FULL" \ + --build-arg RPMS_TO_INSTALL="$PACKAGES_TO_INSTALL" \ + -t "$GOLDEN_IMAGE_NAME" --no-cache --progress=plain \ + -f "$WORK_DIR/Dockerfile" . + popd > /dev/null +} + +function set_image_tag { + echo "+++ Get version of the installed package in the container." + local containerId + local installedPackage + + containerId=$(docker run --entrypoint /bin/bash -dt "$GOLDEN_IMAGE_NAME") + + echo "Container ID -> $containerId" + + if [[ $USE_RPM_QA_CMD =~ [Tt]rue ]] ; then + echo "Using rpm -qa command to get installed package." + installedPackage=$(docker exec "$containerId" rpm -qa | grep ^"$COMPONENT") + else + echo "Using tdnf repoquery command to get installed package." + # exec as root as the default user for some containers is non-root + installedPackage=$(docker exec -u 0 "$containerId" tdnf repoquery --installed "$COMPONENT" | grep ^"$COMPONENT") + fi + + echo "Full Installed Package: -> $installedPackage" + COMPONENT_VERSION=$(echo "$installedPackage" | awk '{n=split($0,a,"-")};{split(a[n],b,".")}; {print a[n-1]"-"b[1]}') # 16.16.0-1 + echo "Component Version -> $COMPONENT_VERSION" + docker rm -f "$containerId" + + # Rename the image to include package version + # For HCI Images, do not include "-$DISTRO_IDENTIFIER" in the image tag; Instead use a "." + if [ "$IS_HCI_IMAGE" = true ]; then + # Example: acrafoimages.azurecr.io/base/kubevirt/virt-operator:0.59.0-2.2.0.20230607-amd64 + GOLDEN_IMAGE_NAME_FINAL="$GOLDEN_IMAGE_NAME:$COMPONENT_VERSION.$BASE_IMAGE_TAG" + else + # Example: azurelinuxpreview.azurecr.io/base/nodejs:16.19.1-2-$DISTRO_IDENTIFIER2.0.20230607-amd64 + GOLDEN_IMAGE_NAME_FINAL="$GOLDEN_IMAGE_NAME:$COMPONENT_VERSION-$DISTRO_IDENTIFIER$BASE_IMAGE_TAG" + fi +} + +function finalize { + echo "+++ Finalize" + docker image tag "$GOLDEN_IMAGE_NAME" "$GOLDEN_IMAGE_NAME_FINAL" + docker rmi -f "$GOLDEN_IMAGE_NAME" + echo "+++ Save container image name to file PublishedContainers-$IMAGE.txt" + echo "$GOLDEN_IMAGE_NAME_FINAL" >> "$OUTPUT_DIR/PublishedContainers-$IMAGE.txt" +} + +function publish_to_acr { + CONTAINER_IMAGE=$1 + if [[ ! "$PUBLISH_TO_ACR" =~ [Tt]rue ]]; then + echo "+++ Skip publishing to ACR" + return + fi + echo "+++ Publish container $CONTAINER_IMAGE" + echo "login into ACR: $ACR" + az acr login --name "$ACR" + docker image push "$CONTAINER_IMAGE" +} + +function generate_image_sbom { + if [[ ! "$CREATE_SBOM" =~ [Tt]rue ]]; then + echo "+++ Skip creating SBOM" + return + fi + + echo "+++ Generate SBOM for the container image" + echo "Sanitized image name has '/' replaced with '-' and ':' replaced with '_'." + GOLDEN_IMAGE_NAME_SANITIZED=$(echo "$GOLDEN_IMAGE_NAME_FINAL" | tr '/' '-' | tr ':' '_') + echo "GOLDEN_IMAGE_NAME_SANITIZED -> $GOLDEN_IMAGE_NAME_SANITIZED" + + DOCKER_BUILD_DIR=$(mktemp -d) + # SBOM script will create the SBOM at the following path. + IMAGE_SBOM_MANIFEST_PATH="$DOCKER_BUILD_DIR/_manifest/spdx_2.2/manifest.spdx.json" + /bin/bash "$SBOM_SCRIPT" \ + "$DOCKER_BUILD_DIR" \ + "$GOLDEN_IMAGE_NAME_FINAL" \ + "$SBOM_TOOL_PATH" \ + "$BASE_IMAGE_NAME-$COMPONENT" \ + "$COMPONENT_VERSION-$DISTRO_IDENTIFIER$BASE_IMAGE_TAG" + + SBOM_IMAGES_DIR="$OUTPUT_DIR/SBOM_IMAGES" + mkdir -p "$SBOM_IMAGES_DIR" + cp -v "$IMAGE_SBOM_MANIFEST_PATH" "$SBOM_IMAGES_DIR/$GOLDEN_IMAGE_NAME_SANITIZED.spdx.json" + echo "Generated SBOM:'$SBOM_IMAGES_DIR/$GOLDEN_IMAGE_NAME_SANITIZED.spdx.json'" + sudo rm -rf "$DOCKER_BUILD_DIR" +} + +function distroless_container { + if [[ ! 
"$DISTROLESS" =~ [Tt]rue ]]; then + echo "+++ Skip creating distroless container" + return + fi + + # shellcheck source=/dev/null + source "$CONTAINER_SRC_DIR/scripts/BuildGoldenDistrolessContainer.sh" + create_distroless_container +} + +print_inputs +validate_inputs +initialization +prepare_dockerfile +get_packages_to_install +prepare_docker_directory +docker_build +set_image_tag +finalize +publish_to_acr "$GOLDEN_IMAGE_NAME_FINAL" +generate_image_sbom +distroless_container diff --git a/.pipelines/containerSourceData/scripts/BuildGoldenContainers.sh b/.pipelines/containerSourceData/scripts/BuildGoldenContainers.sh deleted file mode 100755 index 050b322277b..00000000000 --- a/.pipelines/containerSourceData/scripts/BuildGoldenContainers.sh +++ /dev/null @@ -1,1045 +0,0 @@ -#!/bin/bash -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -set -e - -# parse script parameters: -# -i -> published base container file -# -m -> folder containing artifacts of CBL-Mariner -# -n -> name of the container registry -# -g -> golden container image -# -o -> folder where to put artifacts to be published -# -s -> manifest tool directory path -# -b -> branch name -# -p -> publishing level -# -while getopts ":i:m:n:g:o:s:b:p:x:" OPTIONS; do - case ${OPTIONS} in - i ) BASE_IMAGE_FOLDER=$OPTARG;; - m ) MARINER_ARTIFACTS_FOLDER=$OPTARG;; - n ) CONTAINER_REGISTRY_NAME=$OPTARG - CONTAINER_REGISTRY_NAME_FULL="$CONTAINER_REGISTRY_NAME.azurecr.io";; - g ) GOLDEN_CONTAINER_IMAGE=$OPTARG;; - o ) OUTPUT_FOLDER=$OPTARG;; - s ) MANIFEST_TOOL_DIR=$OPTARG;; - b ) BRANCH_NAME=$OPTARG;; - p ) PUBLISHING_LEVEL=$OPTARG;; - x ) CONTAINER_SRC_DIR=$OPTARG;; - - \? ) - echo "Error - Invalid Option: -$OPTARG" 1>&2 - exit 1 - ;; - : ) - echo "Error - Invalid Option: -$OPTARG requires an argument" 1>&2 - exit 1 - ;; - esac -done - -MANIFEST_TOOL_DIR="$(cd "$MANIFEST_TOOL_DIR"; pwd)" -OUTPUT_FOLDER="$(cd "$OUTPUT_FOLDER"; pwd)" - -echo "- BASE IMAGE_FOLDER -> $BASE_IMAGE_FOLDER" -echo "- MARINER_ARTIFACTS_FOLDER -> $MARINER_ARTIFACTS_FOLDER" -echo "- CONTAINER_REGISTRY_NAME -> $CONTAINER_REGISTRY_NAME" -echo "- CONTAINER_REGISTRY_NAME_FULL -> $CONTAINER_REGISTRY_NAME_FULL" -echo "- GOLDEN_CONTAINER_IMAGE -> $GOLDEN_CONTAINER_IMAGE" -echo "- BRANCH_NAME -> $BRANCH_NAME" -echo "- PUBLISHING_LEVEL -> $PUBLISHING_LEVEL" -echo "- MANIFEST_TOOL_DIR -> $MANIFEST_TOOL_DIR" -echo "- OUTPUT_FOLDER -> $OUTPUT_FOLDER" - -readonly SCRIPT_FOLDER="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. 
&& pwd )" -readonly ROOT_FOLDER="$(git rev-parse --show-toplevel)" - -# define golden images dependency components -readonly AZURECLI="azure-cli" -readonly AZURECLI_NO_DASH="azurecli" -readonly CDI_BASE_COMPONENT="cdi" -readonly CERT_MANAGER='cert-manager' -readonly CERT_MANAGER_NO_DASH='certmanager' -readonly INFLUX_DB="influxdb" -readonly KUBEVIRT_BASE_COMPONENT="kubevirt" -readonly MEMCACHED="memcached" -readonly MULTUS="multus" -readonly NGINX="nginx" -readonly NODEJS="nodejs" -readonly OPENMPI="openmpi" -readonly PHP="php" -readonly POSTGRES="postgres" -readonly PROMETHEUS="prometheus" -readonly PROMETHEUS_ADAPTER="prometheus-adapter" -readonly PROMETHEUS_ADAPTER_NO_DASH="prometheusadapter" -readonly PYTHON="python" -readonly PYTORCH="pytorch" -readonly RABBITMQSERVER="rabbitmq-server" -readonly RABBITMQSERVER_NO_DASH="rabbitmqserver" -readonly REDIS="redis" -readonly RUBY="ruby" -readonly RUST="rust" -readonly SRIOV_NETWORK_DEVICE_PLUGIN='sriov-network-device-plugin' -readonly SRIOV_NETWORK_DEVICE_PLUGIN_NO_DASH='sriovnetworkdeviceplugin' -readonly TELEGRAF="telegraf" -readonly TENSORFLOW="tensorflow" - -# The RPMS of CDI have containerized-data-importer as its prefix whereas the -# containers must have cdi as its prefix. Hence, the BASE component -# is set to cdi. The folder prefix is same as kubevirt. -readonly CDI_FOLDER_PREFIX=$KUBEVIRT_BASE_COMPONENT -readonly KUBEVIRT_FOLDER_PREFIX=$KUBEVIRT_BASE_COMPONENT - -echo "+++ create temp folder" -TEMPDIR=$ROOT_FOLDER/TEMPDIR_CONTAINER -mkdir -pv "$OUTPUT_FOLDER/SBOM_IMAGES" - -function cleanup { - echo "+++ remove $TEMPDIR" - sudo rm -rf "$TEMPDIR" -} -trap cleanup EXIT - -declare -A COMPONENT_VERSIONS -declare -A BUILDER_IMAGES - -# these variables are used to create text files listing golden image names. -readonly file_name_prefix='PublishedContainers' -readonly file_ext='.txt' - -# Validates the input such as base images exist and the Mariner RPMs tarball exists. -function input_validation { - BASE_IMAGE_FILE=$(find "$BASE_IMAGE_FOLDER" -name "PublishedContainers-base.txt") - if [[ ! -f $BASE_IMAGE_FILE ]]; then - echo "Error - No base image file in $BASE_IMAGE_FOLDER" - exit 1 - fi - - MARINER_RPMS_TARBALL=$(find "$MARINER_ARTIFACTS_FOLDER" -name "rpms.tar.gz" -maxdepth 1) - if [[ ! -f $MARINER_RPMS_TARBALL ]]; then - echo "Error - No Mariner RPMs tarball in $MARINER_ARTIFACTS_FOLDER" - exit 1 - fi -} - -# Reads base container names from the passed in text files -function read_base_container_name { - baseImageName="none" - - while read image; do - if [[ $baseImageName == "none" ]]; then - baseImageName=$image - fi - done < "$BASE_IMAGE_FILE" - - echo "- Full base ACR image name: $baseImageName" - base_container_acr=${baseImageName%%.*} - base_container_name_with_core=${baseImageName%:*} - base_container_name=${base_container_name_with_core%/*} - base_container_tag=${baseImageName#*:} - - echo "Base ACR Name -> $base_container_acr" - echo "Base ACR Container Name -> $base_container_name" - echo "Base ACR Container Tag -> $base_container_tag" -} - -# Builds, Tests, and Publishes Golden Container Image. -# The first argument is the main package name i.e., component name (e.g., nodejs, azure-cli, postgresql, etc) -# The second argument is the image name i.e., container type (e.g., nodejs, azure-cli, postgres, etc) -# The third argument is the base container name -# The fourth argument is the base container tag -# The fifth argument is the set of packages to be installed in the image. 
-# The sixth argument is the path to the corresponding dockerfile. -# The seventh argument is the runTest flag (0/1) -# The eighth argument is the passed in full containerName -function CreateGoldenContainer { - local componentName=$1 - local containerType=$2 - local baseContainerName=$3 - local baseContainerTag=$4 - local packagesToInstall=$5 - local goldenImageDockerfile=$6 - local runTest=$7 - local originalContainerName=$8 - local containerTypeNoDash - - echo "------ Display Arguments ------" - echo "Component Name: -> $componentName" - echo "Container Type: -> $containerType" - echo "Base Container Name: -> $baseContainerName" - echo "Base Container Tag: -> $baseContainerTag" - echo "Packages to Install: -> $packagesToInstall" - echo "Dockerfile: -> $goldenImageDockerfile" - echo "Test Container: -> $runTest" - echo "Container Name: -> $originalContainerName" - - echo "+++ create container based on $baseContainerName/core:$baseContainerTag for $componentName" - containerTypeNoDash=${containerType//-/} # Removes dash from containerType. Ex: azure-cli -> azurecli - - echo - echo "----------------------------------------------------------------------" - echo "+++ create container $originalContainerName" - - local containerBuildDir="$TEMPDIR/ContainerBuildDir" - hostMountedDir="$TEMPDIR/ContainerBuildDir/Stage" - newDockerStorageLocation="$TEMPDIR/storage" - - mkdir -p "$containerBuildDir" - mkdir -p "$hostMountedDir" - mkdir -p "$newDockerStorageLocation" - - # Copy files into docker context directory - tar -xf "$MARINER_RPMS_TARBALL" -C "$hostMountedDir"/ - cp "$CONTAINER_SRC_DIR/marinerLocalRepo.repo" "$hostMountedDir"/ - cp "$CONTAINER_SRC_DIR/Dockerfile-Initial" "$containerBuildDir/Dockerfile-Initial" - cp "$CONTAINER_SRC_DIR/$containerTypeNoDash/$goldenImageDockerfile" "$containerBuildDir/Dockerfile" - - # Ensure that the path exists before copying files. - if [ -d "$CONTAINER_SRC_DIR/$containerTypeNoDash/configuration-files" ]; then - cp "$CONTAINER_SRC_DIR/$containerTypeNoDash/configuration-files"/* "$containerBuildDir" - fi - - pushd "$containerBuildDir" - - # set Dockerfile - echo "+++ Updating Dockerfile" - mainRunInstruction=$(cat Dockerfile-Initial) - sed -E "s|@INCLUDE_MAIN_RUN_INSTRUCTION@|$mainRunInstruction|g" -i Dockerfile - - cat Dockerfile - - if [ "$DISABLE_DOCKER_REDIRECTION" != "true" ]; then - SetDockerDefaultStorageLocation "$newDockerStorageLocation" - fi - - # Build image - docker buildx build \ - --build-arg BASE_IMAGE="$baseContainerName/core:$baseContainerTag" \ - --build-arg RPMS_TO_INSTALL="$packagesToInstall" \ - -t "$originalContainerName" --no-cache --progress=plain \ - -f $containerBuildDir/Dockerfile . - - # Get the installed package's version - echo "+++ Get version of the installed package in the container" - - local containerId - local installedPackage - local componentVersion - - containerId=$(docker run --entrypoint /bin/bash -dt "$originalContainerName") - # exec as root as the default user for some containers is non-root - # componentName e.g. 
nodejs-16.16.0-1.cm2.x86_64 - installedPackage=$(docker exec -u 0 "$containerId" tdnf repoquery --installed "$componentName" | grep ^"$componentName") - echo "Full Installed Package: -> $installedPackage" - componentVersion=$(echo "$installedPackage" | awk '{n=split($0,a,"-")};{split(a[n],b,".")}; {print a[n-1]"-"b[1]}') # 16.16.0-1 - echo "Component Version -> $componentVersion" - COMPONENT_VERSIONS[$containerType]=$componentVersion - docker rm -f "$containerId" - - # Rename the image to include package version - # For HCI Images, do not include "-cm" in the image tag; Instead use a "." - if $IS_HCI_IMAGE; then - # Example: acrafoimages.azurecr.io/base/kubevirt/virt-operator:0.59.0-2.2.0.20230607-amd64 - local containerName="$originalContainerName:$componentVersion.$baseContainerTag" - else - # Example: cblmarinermain.azurecr.io/base/nodejs:16.19.1-2-cm2.0.20230607-amd64 - local containerName="$originalContainerName:$componentVersion-cm$baseContainerTag" - fi - - # replace base container registry prefix by golden container registry prefix (if any) - local baseRegistryPrefix="" - local goldenRegistryPrefix="" - getRegistryPrefix 'base' $PUBLISHING_LEVEL $BRANCH_NAME baseRegistryPrefix - getRegistryPrefix $GOLDEN_CONTAINER_IMAGE $PUBLISHING_LEVEL $BRANCH_NAME goldenRegistryPrefix - if [[ -n $goldenRegistryPrefix ]]; then - if [[ -n $baseRegistryPrefix && \ - $containerName == *"$baseRegistryPrefix"* ]]; then - # replace base container registry prefix by golden container registry prefix - echo "replace $baseRegistryPrefix with $goldenRegistryPrefix in $containerName" - containerName=${containerName/"$baseRegistryPrefix"/"$goldenRegistryPrefix"} - else - # add golden container registry prefix - echo "add $goldenRegistryPrefix prefix to $containerName" - containerName=${containerName/"$CONTAINER_REGISTRY_NAME_FULL"/"$CONTAINER_REGISTRY_NAME_FULL/$goldenRegistryPrefix"} - fi - fi - - docker image tag "$originalContainerName" "$containerName" - BUILDER_IMAGES[$componentName]=$containerName - docker rmi -f "$originalContainerName" - echo "Container Name: -> $containerName" - - # Test image - if [ $runTest -ne 0 ]; then - test_golden_container "$containerTypeNoDash" "$containerName" - fi - - # Publish image - publish_container "$containerName" - - local containerNameSanitized - containerNameSanitized=$(echo "$containerName" | tr '/' '-' | tr ':' '_') - - if [[ "$DISABLE_SBOM_GENERATION" != "true" ]]; then - # Call generate_container_sbom function to generate SBOM - generate_container_sbom \ - "$componentName" \ - "$baseContainerName" \ - "$baseContainerTag" \ - "$containerName" \ - "$componentVersion" \ - "$containerNameSanitized" - fi - popd - - if [ "$DISABLE_DOCKER_REDIRECTION" != "true" ]; then - ResetDockerDefaultStorageLocation "$newDockerStorageLocation" - fi - - sudo rm -rf "$newDockerStorageLocation" - - # Clean up temp folder - sudo rm -rf "$containerBuildDir" - - # Save container name - echo "$containerName" >> "$TEMPDIR/$file_name_prefix-$containerTypeNoDash$file_ext" - echo "----------------------------------------------------------------------" - - save_container_list -} - -function DockerBuild { - local containerName=$1 - local marinerVersion=$2 - local imageType=$3 - local packagesToInstall=$4 - local packagesToHoldback=$5 - local installNonrootUser=$6 - local user=root - local userUid=0 - - if $installNonrootUser; then - user="nonroot" - userUid=65532 - fi - - # Create container - docker build . 
\ - -t "$containerName" \ - -f dockerfiles/dockerfile-new-image \ - --build-arg MARINER_VERSION="$marinerVersion" \ - --build-arg IMAGE_TYPE="$imageType" \ - --build-arg PACKAGES_TO_INSTALL="$packagesToInstall" \ - --build-arg PACKAGES_TO_HOLDBACK="$packagesToHoldback" \ - --build-arg USER="$user" \ - --build-arg USER_UID=$userUid \ - --no-cache \ - --progress=plain -} - -# Builds, Tests, and Publishes Distroless Golden Container Image. -# The first argument is the main package name i.e., component name (e.g., nodejs, azure-cli, postgresql, etc). -# The second argument is the image name i.e., container type (e.g., nodejs, azure-cli, postgres, etc). -# The third argument is the base container tag. -# The fourth argument is the set of packages to be installed in the image. -# The fifth argument is the set of packages to holdback from getting installed. -# The sixth argument is component version. -# The seventh argument is the passed in full containerName. -# The eighth argument is builder image to use in distroless test. -# The ninth argument is the flag to indicate whether to run the test or not. -function CreateDistrolessGoldenContainers { - local componentName=$1 - local containerType=$2 - local baseContainerTag=$3 - local packagesToInstall=$4 - local packagesToHoldback=$5 - local componentVersion=$6 - local containerName=$7 - local builderImage=$8 - local runTest=$9 - local containerTypeNoDash - - echo "------ Display Arguments ------" - echo "Component Name: -> $componentName" - echo "Container Type: -> $containerType" - echo "Base Container Tag: -> $baseContainerTag" - echo "Packages to Install: -> $packagesToInstall" - echo "Packages to Holdback: -> $packagesToHoldback" - echo "Component Version: -> $componentVersion" - echo "Container Name: -> $containerName" - echo "Run Test: -> $runTest" - - echo "+++ create distroless container for $componentName" - containerTypeNoDash=${containerType//-/} # Removes dash from containerType. 
Ex: azure-cli -> azurecli - - echo - echo "----------------------------------------------------------------------" - echo "+++ create container $containerName" - - local baseRegistryPrefix="" - local goldenRegistryPrefix="" - getRegistryPrefix 'base' $PUBLISHING_LEVEL $BRANCH_NAME baseRegistryPrefix - getRegistryPrefix $GOLDEN_CONTAINER_IMAGE $PUBLISHING_LEVEL $BRANCH_NAME goldenRegistryPrefix - if [[ -n $goldenRegistryPrefix ]]; then - if [[ -n $baseRegistryPrefix && \ - $containerName == *"$baseRegistryPrefix"* ]]; then - # replace base container registry prefix by golden container registry prefix - echo "replace $baseRegistryPrefix with $goldenRegistryPrefix in $containerName" - containerName=${containerName/"$baseRegistryPrefix"/"$goldenRegistryPrefix"} - else - # add golden container registry prefix - echo "add $goldenRegistryPrefix prefix to $containerName" - containerName=${containerName/"$CONTAINER_REGISTRY_NAME_FULL"/"$CONTAINER_REGISTRY_NAME_FULL/$goldenRegistryPrefix"} - fi - echo " -> Modified Container Name: $containerName" - fi - - standardContainerName="$containerName:$componentVersion-cm$base_container_tag" - debugContainerName="$containerName:$componentVersion-debug-cm$base_container_tag" - nonrootContainerName="$containerName:$componentVersion-nonroot-cm$base_container_tag" - debugNonrootContainerName="$containerName:$componentVersion-debug-nonroot-cm$base_container_tag" - - marinara="marinara" - marinaraSrcDir="$TEMPDIR/$marinara-src" - git clone "https://github.com/microsoft/$marinara.git" "$marinaraSrcDir" - pushd "$marinaraSrcDir" - - # replace base container registry prefix by golden container registry prefix (if any) - if [[ -n $baseRegistryPrefix ]]; then - # add base container registry prefix to MARINARA - MARINARA_IMAGE=$CONTAINER_REGISTRY_NAME_FULL/$baseRegistryPrefix/$marinara:$baseContainerTag - else - MARINARA_IMAGE=$CONTAINER_REGISTRY_NAME_FULL/$marinara:$baseContainerTag - fi - echo "MARINARA_IMAGE -> $MARINARA_IMAGE" - - # Get Mariner version from base container tag - OLDIFS=$IFS - IFS='.' 
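-    # With a hypothetical baseContainerTag such as "2.0.20230607-amd64", the
-    # IFS='.' read below produces tag_parts=("2" "0" "20230607-amd64"), so
-    # mariner_version becomes "2.0".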
- read -ra tag_parts <<< "$baseContainerTag" - IFS=$OLDIFS - - mariner_version="${tag_parts[0]}.0" - - # Update dockerfile-marinara to use the current base container - sed -E "s|^FROM .*builder$|FROM $MARINARA_IMAGE as builder|g" -i "dockerfiles/dockerfile-new-image" - - # Create standard container - DockerBuild "$standardContainerName" "$mariner_version" "custom" "$packagesToInstall" "$packagesToHoldback" false - - # Create debug container - DockerBuild "$debugContainerName" "$mariner_version" "custom-debug" "$packagesToInstall" "$packagesToHoldback" false - - # Create nonroot container - DockerBuild "$nonrootContainerName" "$mariner_version" "custom-nonroot" "$packagesToInstall" "$packagesToHoldback" true - - # Create debug nonroot container - DockerBuild "$debugNonrootContainerName" "$mariner_version" "custom-debug-nonroot" "$packagesToInstall" "$packagesToHoldback" true - - popd > /dev/null - - echo "+++ remove $marinaraSrcDir" - sudo rm -rf "$marinaraSrcDir" - - # Test image - if [ $runTest -ne 0 ]; then - test_distroless_container "$containerTypeNoDash-distroless" "$builderImage" "$standardContainerName" - test_distroless_container "$containerTypeNoDash-distroless" "$builderImage" "$debugContainerName" - test_distroless_container "$containerTypeNoDash-distroless" "$builderImage" "$nonrootContainerName" - test_distroless_container "$containerTypeNoDash-distroless" "$builderImage" "$debugNonrootContainerName" - fi - - # Publish containers - publish_container "$standardContainerName" - publish_container "$debugContainerName" - publish_container "$nonrootContainerName" - publish_container "$debugNonrootContainerName" - - # Save containers names - { - echo "$standardContainerName"; - echo "$debugContainerName"; - echo "$nonrootContainerName"; - echo "$debugNonrootContainerName"; - } >> "$TEMPDIR/$file_name_prefix-$containerTypeNoDash$file_ext" - echo "----------------------------------------------------------------------" - - save_container_list -} - -function getPkgsFromFile() { - local folderName=$1 - local fileName=$2 - local -n array=$3 - while read -r pkg; do - array+=("$pkg") - done < "$CONTAINER_SRC_DIR/$folderName/$fileName" -} - -# Creates azurecli container -function create_azurecli_container { - local pkgsFileName="$AZURECLI_NO_DASH.pkg" - local packagesToInstall=() - getPkgsFromFile $AZURECLI_NO_DASH $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$AZURECLI" \ - "$AZURECLI" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-AzureCLI" \ - 1 \ - "$base_container_name/$AZURECLI" -} - -# Creates memcached container -function create_memcached_container { - local pkgsFileName="$MEMCACHED.pkg" - local packagesToInstall=() - getPkgsFromFile $MEMCACHED $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$MEMCACHED" \ - "$MEMCACHED" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Memcached" \ - 1 \ - "$base_container_name/$MEMCACHED" -} - -# Creates nginx container -function create_nginx_container { - local pkgsFileName="$NGINX.pkg" - local packagesToInstall=() - getPkgsFromFile $NGINX $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$NGINX" \ - "$NGINX" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Nginx" \ - 1 \ - "$base_container_name/$NGINX" -} - -# Creates nodejs container -function create_nodejs_container { - 
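-    # getPkgsFromFile above fills the caller's array through a bash nameref
-    # (local -n). A minimal standalone sketch of the same pattern:
-    #
-    #   fill_array() {
-    #       local -n out_ref=$1              # out_ref aliases the caller's array
-    #       while read -r pkg; do out_ref+=("$pkg"); done < "$2"
-    #   }
-    #   pkgs=()
-    #   fill_array pkgs nodejs.pkg           # pkgs now holds one element per line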
local nodejsPkgsFileName="$NODEJS.pkg" - local packagesToInstall=() - getPkgsFromFile $NODEJS $nodejsPkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$NODEJS" \ - "$NODEJS" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Nodejs" \ - 1 \ - "$base_container_name/$NODEJS" - - local packagesToInstallInDistrolessNodejs=('distroless-packages-base' 'nodejs') - local packagesInDistrolessNodejs="${packagesToInstallInDistrolessNodejs[*]}" - - local packagesToHoldbackInDistrolessNodejs=('bash' 'bzi' 'coreutils' 'gmp' 'grep' 'libselinux' 'pcre' 'pcre-libs') - local holdbackInDistroless="${packagesToHoldbackInDistrolessNodejs[*]}" - - componentVersion=${COMPONENT_VERSIONS[$NODEJS]} - builderImage=${BUILDER_IMAGES[$NODEJS]} - CreateDistrolessGoldenContainers \ - "$NODEJS" \ - "$NODEJS" \ - "$base_container_tag" \ - "$packagesInDistrolessNodejs" \ - "$holdbackInDistroless" \ - "$componentVersion" \ - "$CONTAINER_REGISTRY_NAME_FULL/distroless/$NODEJS" \ - "$builderImage" \ - 1 - - local nodejs18PkgsFileName="${NODEJS}18.pkg" - local packagesToInstall18=() - getPkgsFromFile $NODEJS $nodejs18PkgsFileName packagesToInstall18 - local packages18="${packagesToInstall18[*]}" - CreateGoldenContainer \ - "${NODEJS}18" \ - "$NODEJS" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages18" \ - "Dockerfile-Nodejs" \ - 1 \ - "$base_container_name/$NODEJS" - - - local packagesToInstallInDistrolessNodejs18=('distroless-packages-base' 'nodejs18') - local packagesInDistrolessNodejs18="${packagesToInstallInDistrolessNodejs18[*]}" - componentVersion=${COMPONENT_VERSIONS[$NODEJS]} - builderImage=${BUILDER_IMAGES[${NODEJS}18]} - CreateDistrolessGoldenContainers \ - "${NODEJS}18" \ - "$NODEJS" \ - "$base_container_tag" \ - "$packagesInDistrolessNodejs18" \ - "$holdbackInDistroless" \ - "$componentVersion" \ - "$CONTAINER_REGISTRY_NAME_FULL/distroless/$NODEJS" \ - "$builderImage" \ - 1 -} - -# Creates php container -function create_php_container { - local pkgsFileName="$PHP.pkg" - local packagesToInstall=() - getPkgsFromFile $PHP $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$PHP" \ - "$PHP" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-PHP" \ - 1 \ - "$base_container_name/$PHP" -} - -# Creates python container -function create_python_container { - local pkgsFileName="$PYTHON.pkg" - local packagesToInstall=() - getPkgsFromFile $PYTHON $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$PYTHON" \ - "$PYTHON" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Python" \ - 1 \ - "$base_container_name/$PYTHON" - - local packagesToInstallInDistroless=('distroless-packages-base' 'python3') - local packagesInDistroless="${packagesToInstallInDistroless[*]}" - - local packagesToHoldbackInDistroless=('bash' 'grep' 'coreutils' 'gmp' 'libselinux' 'pcre' 'pcre-libs') - local holdbackInDistroless="${packagesToHoldbackInDistroless[*]}" - - componentVersion=${COMPONENT_VERSIONS[$PYTHON]} - builderImage=${BUILDER_IMAGES[$PYTHON]} - CreateDistrolessGoldenContainers \ - "$PYTHON" \ - "$PYTHON" \ - "$base_container_tag" \ - "$packagesInDistroless" \ - "$holdbackInDistroless" \ - "$componentVersion" \ - "$CONTAINER_REGISTRY_NAME_FULL/distroless/$PYTHON" \ - "$builderImage" \ - 0 -} - -# Creates pytorch container -function create_pytorch_container { - local 
pkgsFileName="$PYTORCH.pkg" - local packagesToInstall=() - getPkgsFromFile $PYTORCH $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "${PYTHON}3-$PYTORCH" \ - "$PYTORCH" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Pytorch" \ - 1 \ - "$base_container_name/$PYTORCH" -} - -# Creates rabbitmq-server container -function create_rabbitmqserver_container { - local pkgsFileName="$RABBITMQSERVER_NO_DASH.pkg" - local packagesToInstall=() - getPkgsFromFile $RABBITMQSERVER_NO_DASH $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$RABBITMQSERVER" \ - "$RABBITMQSERVER" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-rabbitmq-server" \ - 0 \ - "$base_container_name/$RABBITMQSERVER" -} - -# Creates ruby container -function create_ruby_container { - # Packages already installed in base mariner -> readline, zlib, bzip2. - # Replacement ruby runtime dependency: - # musl -> glibc, kernel-headers, binutils; no musl rpm in PMC. - local pkgsFileName="$RUBY.pkg" - local packagesToInstall=() - getPkgsFromFile $RUBY $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$RUBY" \ - "$RUBY" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Ruby" \ - 1 \ - "$base_container_name/$RUBY" -} - -# Creates rust container -function create_rust_container { - local pkgsFileName="$RUST.pkg" - local packagesToInstall=() - getPkgsFromFile $RUST $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$RUST" \ - "$RUST" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Rust" \ - 0 \ - "$base_container_name/$RUST" -} - -# Creates postgres container -function create_postgres_container { - local pkgsFileName="$POSTGRES.pkg" - local packagesToInstall=() - getPkgsFromFile $POSTGRES $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "${POSTGRES}ql" \ - "$POSTGRES" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Postgres" \ - 1 \ - "$base_container_name/$POSTGRES" -} - -# Creates InfluxDB container -function create_influxdb_container { - local pkgsFileName="$INFLUX_DB.pkg" - local packagesToInstall=() - getPkgsFromFile $INFLUX_DB $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$INFLUX_DB" \ - "$INFLUX_DB" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Influxdb" \ - 1 \ - "$base_container_name/$INFLUX_DB" -} - -# Creates prometheus container -function create_prometheus_container { - local pkgsFileName="$PROMETHEUS.pkg" - local packagesToInstall=() - getPkgsFromFile $PROMETHEUS $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$PROMETHEUS" \ - "$PROMETHEUS" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Prometheus" \ - 1 \ - "$base_container_name/$PROMETHEUS" - - local packagesToInstallInDistroless=('distroless-packages-base' 'prometheus') - local packagesInDistroless="${packagesToInstallInDistroless[*]}" - - # Potentially extraneous, can be investigated more. 
- local packagesToHoldbackInDistroless=('bash' 'grep' 'coreutils' 'gmp' 'libselinux' 'pcre' 'pcre-libs') - local holdbackInDistroless="${packagesToHoldbackInDistroless[*]}" - - componentVersion=${COMPONENT_VERSIONS["$PROMETHEUS"]} - builderImage=${BUILDER_IMAGES[$PROMETHEUS]} - CreateDistrolessGoldenContainers \ - "$PROMETHEUS" \ - "$PROMETHEUS" \ - "$base_container_tag" \ - "$packagesInDistroless" \ - "$holdbackInDistroless" \ - "$componentVersion" \ - "$CONTAINER_REGISTRY_NAME_FULL/distroless/$PROMETHEUS" \ - "$builderImage" \ - 0 -} - -function create_redis_container { - local pkgsFileName="$REDIS.pkg" - local packagesToInstall=() - getPkgsFromFile $REDIS $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$REDIS" \ - "$REDIS" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Redis" \ - 1 \ - "$base_container_name/$REDIS" -} - -# Creates prometheus-adapter container -function create_prometheus_adapter_container { - local pkgsFileName="$PROMETHEUS_ADAPTER_NO_DASH.pkg" - local packagesToInstall=() - getPkgsFromFile $PROMETHEUS_ADAPTER_NO_DASH $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$PROMETHEUS_ADAPTER" \ - "$PROMETHEUS_ADAPTER" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Prometheus-Adapter" \ - 0 \ - "$base_container_name/$PROMETHEUS_ADAPTER" - - local packagesToInstallInDistroless=('distroless-packages-base' 'prometheus-adapter') - local packagesInDistroless="${packagesToInstallInDistroless[*]}" - - # Potentially extraneous, can be investigated more. - local packagesToHoldbackInDistroless=('bash' 'grep' 'coreutils' 'gmp' 'libselinux' 'pcre' 'pcre-libs') - local holdbackInDistroless="${packagesToHoldbackInDistroless[*]}" - - componentVersion=${COMPONENT_VERSIONS["$PROMETHEUS_ADAPTER"]} - builderImage=${BUILDER_IMAGES[$PROMETHEUS_ADAPTER]} - CreateDistrolessGoldenContainers \ - "$PROMETHEUS_ADAPTER" \ - "$PROMETHEUS_ADAPTER" \ - "$base_container_tag" \ - "$packagesInDistroless" \ - "$holdbackInDistroless" \ - "$componentVersion" \ - "$CONTAINER_REGISTRY_NAME_FULL/distroless/$PROMETHEUS_ADAPTER" \ - "$builderImage" \ - 0 -} - -# Creates telegraf container -function create_telegraf_container { - local pkgsFileName="$TELEGRAF.pkg" - local packagesToInstall=() - getPkgsFromFile $TELEGRAF $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$TELEGRAF" \ - "$TELEGRAF" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Telegraf" \ - 1 \ - "$base_container_name/$TELEGRAF" -} - -# Creates tensorflow container -function create_tensorflow_container { - local pkgsFileName="$TENSORFLOW.pkg" - local packagesToInstall=() - getPkgsFromFile $TENSORFLOW $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "python3-${TENSORFLOW}" \ - "$TENSORFLOW" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Tensorflow" \ - 1 \ - "$base_container_name/$TENSORFLOW" -} - -# Creates openmpi container -function create_openmpi_container { - local pkgsFileName="$OPENMPI.pkg" - local packagesToInstall=() - getPkgsFromFile $OPENMPI $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - CreateGoldenContainer \ - "$OPENMPI" \ - "$OPENMPI" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-Openmpi" \ - 1 \ - 
"$base_container_name/$OPENMPI" -} - -# ---- Mariner HCI IMAGES ---- - -# Creates Cdi containers -function create_cdi_containers { - source $CONTAINER_SRC_DIR/scripts/BuildCdiContainers.sh - create_cdi_subcomp_containers -} - -# Creates Cert-Manager containers -function create_cert_manager_containers { - source $CONTAINER_SRC_DIR/scripts/BuildCertManagerContainers.sh - create_cert_manager_subcomp_containers -} - -# Create containers for each of the kubevirt sub components - -# virt-operator, virt-api, virt-handler, virt-launcher, virt-controller -function create_kubevirt_containers { - source $CONTAINER_SRC_DIR/scripts/BuildKubevirtContainers.sh - create_kubevirt_subcomp_containers -} - -# Create Multus container -function create_multus_container_helper { - source $CONTAINER_SRC_DIR/scripts/BuildMultusContainer.sh - create_multus_container -} - -# Create Sriov network device plugin container -function create_sriov_dp_containers { - source $CONTAINER_SRC_DIR/scripts/BuildSriovDpContainer.sh - create_sriov_dp_container -} - -function start_building_containers { - case $GOLDEN_CONTAINER_IMAGE in - - "$AZURECLI_NO_DASH") - create_azurecli_container - ;; - - "$MEMCACHED") - create_memcached_container - ;; - - "$NGINX") - create_nginx_container - ;; - - "$NODEJS") - create_nodejs_container - ;; - - "$PHP") - create_php_container - ;; - - "$PYTHON") - create_python_container - ;; - - "$RABBITMQSERVER_NO_DASH") - create_rabbitmqserver_container - ;; - - "$REDIS") - create_redis_container - ;; - - "$RUBY") - create_ruby_container - ;; - - "$RUST") - create_rust_container - ;; - - "$POSTGRES") - create_postgres_container - ;; - - "$INFLUX_DB") - create_influxdb_container - ;; - - "$PROMETHEUS") - create_prometheus_container - ;; - - "$PROMETHEUS_ADAPTER_NO_DASH") - create_prometheus_adapter_container - ;; - - "$PYTORCH") - create_pytorch_container - ;; - - "$TELEGRAF") - create_telegraf_container - ;; - - "$TENSORFLOW") - create_tensorflow_container - ;; - - "$OPENMPI") - create_openmpi_container - ;; - - "$CDI_BASE_COMPONENT") - create_cdi_containers - ;; - - "$CERT_MANAGER_NO_DASH") - create_cert_manager_containers - ;; - - "$KUBEVIRT_BASE_COMPONENT") - create_kubevirt_containers - ;; - - "$MULTUS") - create_multus_container_helper - ;; - - "$SRIOV_NETWORK_DEVICE_PLUGIN_NO_DASH") - create_sriov_dp_containers - ;; - esac -} - -# source the CommonFunctions script to get the following functions: -# - azure_login -# - generate_container_sbom -# - SetDockerDefaultStorageLocation -# - ResetDockerDefaultStorageLocation -# - save_container_list -# - test_golden_container -# - publish_container -# - getRegistryPrefix -source $CONTAINER_SRC_DIR/scripts/CommonFunctions.sh - -input_validation -read_base_container_name -azure_login "$base_container_acr" - -# Create a variable to store the value of whether GOLDEN_CONTAINER_IMAGE is an HCI image -export IS_HCI_IMAGE=false -checkIfHciImage IS_HCI_IMAGE -echo "Is this an HCI Image: $IS_HCI_IMAGE" - -start_building_containers diff --git a/.pipelines/containerSourceData/scripts/BuildGoldenDistrolessContainer.sh b/.pipelines/containerSourceData/scripts/BuildGoldenDistrolessContainer.sh new file mode 100644 index 00000000000..0c287ea493a --- /dev/null +++ b/.pipelines/containerSourceData/scripts/BuildGoldenDistrolessContainer.sh @@ -0,0 +1,129 @@ +#!/bin/bash +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +set -e + +function DockerBuild { + local containerName=$1 + local azureLinuxVersion=$2 + local imageType=$3 + local packagesToInstall=$4 + local packagesToHoldback=$5 + local installNonrootUser=$6 + local rpmsDir=$7 + local user=root + local userUid=0 + + if $installNonrootUser; then + user="nonroot" + userUid=65532 + fi + + # Create container + echo "+++ Create container $containerName" + docker build . \ + -t "$containerName" \ + -f "$marinaraSrcDir/dockerfiles/dockerfile-new-image" \ + --build-arg AZL_VERSION="$azureLinuxVersion" \ + --build-arg IMAGE_TYPE="$imageType" \ + --build-arg PACKAGES_TO_INSTALL="$packagesToInstall" \ + --build-arg PACKAGES_TO_HOLDBACK="$packagesToHoldback" \ + --build-arg USER="$user" \ + --build-arg USER_UID=$userUid \ + --build-arg RPMS="$rpmsDir" \ + --build-arg LOCAL_REPO_FILE="$marinaraSrcDir/local.repo" \ + --no-cache \ + --progress=plain +} + +function create_distroless_container { + echo "+++ Create distroless container" + + distrolessPkgsFile="$CONTAINER_SRC_DIR/$IMAGE/distroless/$PACKAGE_FILE" + DISTROLESS_PACKAGES_TO_INSTALL=$(paste -s -d' ' < "$distrolessPkgsFile") + distrolessPkgsHoldbackFile="$CONTAINER_SRC_DIR/$IMAGE/distroless/holdback-$PACKAGE_FILE" + DISTROLESS_PACKAGES_TO_HOLD_BACK=$(paste -s -d' ' < "$distrolessPkgsHoldbackFile") + echo "Distroless Packages to install -> $DISTROLESS_PACKAGES_TO_INSTALL" + echo "Distroless Packages to hold back -> $DISTROLESS_PACKAGES_TO_HOLD_BACK" + + DISTROLESS_GOLDEN_IMAGE_NAME=${GOLDEN_IMAGE_NAME//base/distroless} + standardContainerName="$DISTROLESS_GOLDEN_IMAGE_NAME:$COMPONENT_VERSION-$DISTRO_IDENTIFIER$BASE_IMAGE_TAG" + debugContainerName="$DISTROLESS_GOLDEN_IMAGE_NAME:$COMPONENT_VERSION-debug-$DISTRO_IDENTIFIER$BASE_IMAGE_TAG" + nonrootContainerName="$DISTROLESS_GOLDEN_IMAGE_NAME:$COMPONENT_VERSION-nonroot-$DISTRO_IDENTIFIER$BASE_IMAGE_TAG" + debugNonrootContainerName="$DISTROLESS_GOLDEN_IMAGE_NAME:$COMPONENT_VERSION-debug-nonroot-$DISTRO_IDENTIFIER$BASE_IMAGE_TAG" + + marinara="marinara" + marinaraSrcDir="$marinara-src" + + echo "+++ Clone marinara repo" + git clone "https://github.com/microsoft/$marinara.git" "$WORK_DIR/$marinaraSrcDir" + + # It is important to operate from the $WORK_DIR to ensure that docker can access the files. + pushd "$WORK_DIR" > /dev/null + + MARINARA_IMAGE=${BASE_IMAGE_NAME_FULL/base\/core/$marinara} + echo "MARINARA_IMAGE -> $MARINARA_IMAGE" + + sed -E "s|^FROM .*builder$|FROM $MARINARA_IMAGE as builder|g" -i "$marinaraSrcDir/dockerfiles/dockerfile-new-image" + + # WORK_DIR has a directory named 'Stage' which created inside prepare_docker_directory function + # This directory has a directory named RPMS which contains the RPMs to be installed in the container. 
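+    # (These paths resolve inside the docker build context, i.e. under
+    # $WORK_DIR, because DockerBuild runs "docker build ." after the pushd
+    # above.)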
+ # The path to rpms is /Stage/RPMS + rpmsPath="/Stage/RPMS" + + # Create standard container + DockerBuild \ + "$standardContainerName" \ + "$AZURE_LINUX_VERSION" \ + "custom" \ + "$DISTROLESS_PACKAGES_TO_INSTALL" \ + "$DISTROLESS_PACKAGES_TO_HOLD_BACK" \ + false \ + "$rpmsPath" + + # Create debug container + DockerBuild \ + "$debugContainerName" \ + "$AZURE_LINUX_VERSION" \ + "custom-debug" \ + "$DISTROLESS_PACKAGES_TO_INSTALL" \ + "$DISTROLESS_PACKAGES_TO_HOLD_BACK" \ + false \ + "$rpmsPath" + + # Create nonroot container + DockerBuild \ + "$nonrootContainerName" \ + "$AZURE_LINUX_VERSION" \ + "custom-nonroot" \ + "$DISTROLESS_PACKAGES_TO_INSTALL" \ + "$DISTROLESS_PACKAGES_TO_HOLD_BACK" \ + true \ + "$rpmsPath" + + # Create debug nonroot container + DockerBuild \ + "$debugNonrootContainerName" \ + "$AZURE_LINUX_VERSION" \ + "custom-debug-nonroot" \ + "$DISTROLESS_PACKAGES_TO_INSTALL" \ + "$DISTROLESS_PACKAGES_TO_HOLD_BACK" \ + true \ + "$rpmsPath" + + popd > /dev/null + + echo "+++ Save distroless container images to file PublishedContainers-$IMAGE.txt" + { + echo "$standardContainerName"; + echo "$debugContainerName"; + echo "$nonrootContainerName"; + echo "$debugNonrootContainerName"; + } >> "$OUTPUT_DIR/PublishedContainers-$IMAGE.txt" + + publish_to_acr "$standardContainerName" + publish_to_acr "$debugContainerName" + publish_to_acr "$nonrootContainerName" + publish_to_acr "$debugNonrootContainerName" +} diff --git a/.pipelines/containerSourceData/scripts/BuildKubevirtContainers.sh b/.pipelines/containerSourceData/scripts/BuildKubevirtContainers.sh deleted file mode 100755 index 47b57091281..00000000000 --- a/.pipelines/containerSourceData/scripts/BuildKubevirtContainers.sh +++ /dev/null @@ -1,180 +0,0 @@ -#!/bin/bash -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -function create_kubevirt_container_image_base { - local componentName=$1 - local containerType=$2 - local baseContainerName=$3 - local baseContainerTag=$4 - local packagesToInstall=$5 - local goldenImageDockerfile=$6 - local originalContainerName=$7 - local containerTypeNoDash - - echo "------ Display Arguments ------" - echo "Component Name: -> $componentName" - echo "Container Type: -> $containerType" - echo "Base Container Name: -> $baseContainerName" - echo "Base Container Tag: -> $baseContainerTag" - echo "Packages to Install: -> $packagesToInstall" - echo "Dockerfile: -> $goldenImageDockerfile" - echo "Container Name: -> $originalContainerName" - - echo "+++ create container based on $baseContainerName/core:$baseContainerTag for $componentName" - containerTypeNoDash=${containerType//-/} - - echo - echo "----------------------------------------------------------------------" - echo "+++ create container $originalContainerName" - - local containerBuildDir="$TEMPDIR/ContainerBuildDir" - hostMountedDir="$TEMPDIR/ContainerBuildDir/Stage" - newDockerStorageLocation="$TEMPDIR/storage" - - mkdir -p "$containerBuildDir" - mkdir -p "$hostMountedDir" - mkdir -p "$newDockerStorageLocation" - - # Copy files into docker context directory - tar -xf "$MARINER_RPMS_TARBALL" -C "$hostMountedDir"/ - cp "$CONTAINER_SRC_DIR/marinerLocalRepo.repo" "$hostMountedDir"/ - cp "$CONTAINER_SRC_DIR/Dockerfile-Initial" "$containerBuildDir/Dockerfile-Initial" - cp "$CONTAINER_SRC_DIR/$KUBEVIRT_BASE_COMPONENT/$goldenImageDockerfile" "$containerBuildDir/Dockerfile" - - # Ensure that the path exists before copying files. 
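-    # (The existence check matters because the sourcing script runs under
-    # set -e, so an unguarded cp from a missing directory would abort the
-    # whole build.)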
- if [ -d "$CONTAINER_SRC_DIR/$KUBEVIRT_BASE_COMPONENT/configuration-files" ]; then - cp "$CONTAINER_SRC_DIR/$KUBEVIRT_BASE_COMPONENT/configuration-files"/* "$containerBuildDir" - fi - - pushd "$containerBuildDir" - - # set Dockerfile - echo "+++ Updating Dockerfile" - mainRunInstruction=$(cat Dockerfile-Initial) - sed -E "s|@INCLUDE_MAIN_RUN_INSTRUCTION@|$mainRunInstruction|g" -i Dockerfile - - cat Dockerfile - - if [ "$DISABLE_DOCKER_REDIRECTION" != "true" ]; then - SetDockerDefaultStorageLocation "$newDockerStorageLocation" - fi - - # Build image - docker buildx build \ - --build-arg BASE_IMAGE="$baseContainerName/core:$baseContainerTag" \ - --build-arg RPMS_TO_INSTALL="$packagesToInstall" \ - -t "$originalContainerName" --no-cache --progress=plain \ - -f $containerBuildDir/Dockerfile . - - # Get the installed package's version - echo "+++ Get version of the installed package in the container" - - local containerId - local installedPackage - local componentVersion - - containerId=$(docker run --entrypoint /bin/bash -dt "$originalContainerName") - # exec as root as the default user for some containers is non-root - # componentName e.g. nodejs-16.16.0-1.cm2.x86_64 - installedPackage=$(docker exec -u 0 "$containerId" tdnf repoquery --installed "$componentName" | grep ^"$componentName") - echo "Full Installed Package: -> $installedPackage" - componentVersion=$(echo "$installedPackage" | awk '{n=split($0,a,"-")};{split(a[n],b,".")}; {print a[n-1]"-"b[1]}') # 16.16.0-1 - echo "Component Version -> $componentVersion" - docker rm -f "$containerId" - - # Rename the image to include package version - # For HCI Images, do not include "-cm" in the image tag; Instead use a "." - if $IS_HCI_IMAGE; then - # Example: acrafoimages.azurecr.io/base/kubevirt/virt-operator:0.59.0-2.2.0.20230607-amd64 - local containerName="$originalContainerName:$componentVersion.$baseContainerTag" - else - # Example: cblmarinermain.azurecr.io/base/nodejs:16.19.1-2-cm2.0.20230607-amd64 - local containerName="$originalContainerName:$componentVersion-cm$baseContainerTag" - fi - - # replace base container registry prefix by golden container registry prefix (if any) - local baseRegistryPrefix="" - local goldenRegistryPrefix="" - getRegistryPrefix 'base' $PUBLISHING_LEVEL $BRANCH_NAME baseRegistryPrefix - getRegistryPrefix $GOLDEN_CONTAINER_IMAGE $PUBLISHING_LEVEL $BRANCH_NAME goldenRegistryPrefix - if [[ -n $goldenRegistryPrefix ]]; then - if [[ -n $baseRegistryPrefix && \ - $containerName == *"$baseRegistryPrefix"* ]]; then - # replace base container registry prefix by golden container registry prefix - echo "replace $baseRegistryPrefix with $goldenRegistryPrefix in $containerName" - containerName=${containerName/"$baseRegistryPrefix"/"$goldenRegistryPrefix"} - else - # add golden container registry prefix - echo "add $goldenRegistryPrefix prefix to $containerName" - containerName=${containerName/"$CONTAINER_REGISTRY_NAME_FULL"/"$CONTAINER_REGISTRY_NAME_FULL/$goldenRegistryPrefix"} - fi - fi - - docker image tag "$originalContainerName" "$containerName" - docker rmi -f "$originalContainerName" - echo "Container Name: -> $containerName" - - # Publish image - publish_container "$containerName" - - local containerNameSanitized - containerNameSanitized=$(echo "$containerName" | tr '/' '-' | tr ':' '_') - - if [[ "$DISABLE_SBOM_GENERATION" != "true" ]]; then - # Call generate_container_sbom function to generate SBOM - generate_container_sbom \ - "$componentName" \ - "$baseContainerName" \ - "$baseContainerTag" \ - "$containerName" 
\ - "$componentVersion" \ - "$containerNameSanitized" - fi - popd - - if [ "$DISABLE_DOCKER_REDIRECTION" != "true" ]; then - ResetDockerDefaultStorageLocation "$newDockerStorageLocation" - fi - - sudo rm -rf "$newDockerStorageLocation" - - # Clean up temp folder - sudo rm -rf "$containerBuildDir" - - # Save container name - echo "$containerName" >> "$TEMPDIR/$file_name_prefix-$containerTypeNoDash$file_ext" - echo "----------------------------------------------------------------------" - - save_container_list -} - -function create_kubevirt_subcomp_containers { - # NOTE: qemu and edk2 are architecture specific packages. - # Include this if when edk2 is availble for ARM as well - # if [[ $CONTAINER_ARCHITECTURE == "*AMD64*" ]]; then - # else add virtlauncher_rpmsToInstall+=('qemu-system-aarch64') - - mkdir -p $OUTPUT_FOLDER/CONTAINER_LISTS_FOLDER - local sub_components - sub_components=('virt-operator' 'virt-api' 'virt-controller' 'virt-handler' 'virt-launcher') - - for comp in ${sub_components[@]} - do - # To build for specific versions - include it here with the name - dependency_component=$KUBEVIRT_BASE_COMPONENT-$comp - local pkgsFileName="$comp.pkg" - local packagesToInstall=() - getPkgsFromFile $KUBEVIRT_BASE_COMPONENT $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - echo "packages to install " $packages - create_kubevirt_container_image_base \ - "$dependency_component" \ - "$comp" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "Dockerfile-$dependency_component" \ - "$CONTAINER_REGISTRY_NAME_FULL/base/$KUBEVIRT_FOLDER_PREFIX/$comp" - done -} diff --git a/.pipelines/containerSourceData/scripts/BuildMultusContainer.sh b/.pipelines/containerSourceData/scripts/BuildMultusContainer.sh deleted file mode 100644 index 8ac9670b71b..00000000000 --- a/.pipelines/containerSourceData/scripts/BuildMultusContainer.sh +++ /dev/null @@ -1,140 +0,0 @@ -#!/bin/bash -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -function create_multus_container_image_base { - local componentName - local baseContainerName - local baseContainerTag - local containerBuildDir - local initialDockerfile - local packagesToInstall - - # $1: component name - # $2: container name - # $3: container tag - # $4: packages to install - # $5: initial Dockerfile - componentName=$1 - baseContainerName=$2 - baseContainerTag=$3 - packagesToInstall=$4 - initialDockerfile=$5 - - local originalContainerName="$CONTAINER_REGISTRY_NAME_FULL/base/$componentName" - - echo - echo "----------------------------------------------------------------------" - echo "+++ create container $containerName" - - containerBuildDir="$TEMPDIR/ContainerBuildDir" - hostMountedDir="$TEMPDIR/ContainerBuildDir/Stage" - newDockerStorageLocation="$TEMPDIR/storage" - - mkdir -p "$containerBuildDir" - mkdir -p "$hostMountedDir" - mkdir -p "$newDockerStorageLocation" - - # Copy files into docker context directory - tar -xf "$MARINER_RPMS_TARBALL" -C "$hostMountedDir"/ - cp "$CONTAINER_SRC_DIR/marinerLocalRepo.repo" "$hostMountedDir"/ - cp "$CONTAINER_SRC_DIR/Dockerfile-Initial" "$containerBuildDir/Dockerfile-Initial" - cp $initialDockerfile $containerBuildDir/Dockerfile - - pushd $containerBuildDir > /dev/null - - # set Dockerfile - echo "+++ Updating Dockerfile" - mainRunInstruction=$(cat Dockerfile-Initial) - sed -E "s|@INCLUDE_MAIN_RUN_INSTRUCTION@|$mainRunInstruction|g" -i Dockerfile - - SetDockerDefaultStorageLocation "$newDockerStorageLocation" - - # Build image - docker buildx build \ - --build-arg BASE_IMAGE="$baseContainerName/core:$baseContainerTag" \ - --build-arg RPMS_TO_INSTALL="$packagesToInstall" \ - -t "$originalContainerName" --no-cache --progress=plain . - - # Get the installed package's version - echo "+++ Get version of the installed package in the container" - - local containerId=$(docker run --entrypoint /bin/bash -dt "$originalContainerName") - local installedPackage=$(docker exec "$containerId" rpm -qa | grep ^"$componentName") # nodejs-16.16.0-1.cm2.x86_64 - echo "Full Installed Package: -> $installedPackage" - local componentVersion=$(echo "$installedPackage" | awk '{n=split($0,a,"-")};{split(a[n],b,".")}; {print a[n-1]"-"b[1]}') # 16.16.0-1 - echo "Component Version -> $componentVersion" - docker rm -f "$containerId" - - # Rename the image to include package version - local containerName="$originalContainerName:$componentVersion.$baseContainerTag" - # replace base container registry prefix by golden container registry prefix (if any) - local baseRegistryPrefix="" - local goldenRegistryPrefix="" - getRegistryPrefix 'base' $PUBLISHING_LEVEL $BRANCH_NAME baseRegistryPrefix - getRegistryPrefix $GOLDEN_CONTAINER_IMAGE $PUBLISHING_LEVEL $BRANCH_NAME goldenRegistryPrefix - if [[ -n $goldenRegistryPrefix ]]; then - if [[ -n $baseRegistryPrefix && \ - $containerName == *"$baseRegistryPrefix"* ]]; then - # replace base container registry prefix by golden container registry prefix - echo "replace $baseRegistryPrefix with $goldenRegistryPrefix in $containerName" - containerName=${containerName/"$baseRegistryPrefix"/"$goldenRegistryPrefix"} - else - # add golden container registry prefix - echo "add $goldenRegistryPrefix prefix to $containerName" - containerName=${containerName/"$CONTAINER_REGISTRY_NAME_FULL"/"$CONTAINER_REGISTRY_NAME_FULL/$goldenRegistryPrefix"} - fi - fi - - docker image tag "$originalContainerName" "$containerName" - docker rmi -f "$originalContainerName" - echo "Container Name: -> $containerName" - - local 
containerNameSanitized=$(echo "$containerName" | tr '/' '-' | tr ':' '_') - - publish_container "$containerName" - - # Call generate_container_sbom function to generate SBOM - generate_container_sbom \ - "$componentName" \ - "$baseContainerName" \ - "$baseContainerTag" \ - "$containerName" \ - "$componentVersion" \ - "$containerNameSanitized" - - echo "$containerName" >> $TEMPDIR/$file_name_prefix-$componentName$file_ext - - ResetDockerDefaultStorageLocation "$newDockerStorageLocation" - - # Clean up docker storage folder - sudo rm -rf "$newDockerStorageLocation" - - # clean up temp folder - popd > /dev/null - sudo rm -rf $containerBuildDir - - echo "----------------------------------------------------------------------" -} - -function create_multus_container { - mkdir -p $OUTPUT_FOLDER/CONTAINER_LISTS_FOLDER - local dependency_component=$MULTUS - local pkgsFileName="$MULTUS.pkg" - local packagesToInstall=() - getPkgsFromFile $MULTUS $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - - echo "+++ create container based on $base_container_name:$base_container_tag for $dependency_component" - create_multus_container_image_base \ - "$dependency_component" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "$CONTAINER_SRC_DIR/$MULTUS/Dockerfile-Multus" - - # Save text files generated in TEMPDIR - echo "+++ publish container list into pipeline artifacts" - cp $TEMPDIR/$file_name_prefix-*$file_ext $OUTPUT_FOLDER/CONTAINER_LISTS_FOLDER - -} \ No newline at end of file diff --git a/.pipelines/containerSourceData/scripts/BuildSriovDpContainer.sh b/.pipelines/containerSourceData/scripts/BuildSriovDpContainer.sh deleted file mode 100644 index 83ef625e87f..00000000000 --- a/.pipelines/containerSourceData/scripts/BuildSriovDpContainer.sh +++ /dev/null @@ -1,139 +0,0 @@ -#!/bin/bash -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
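-
-# Like the multus helper, this script is sourced by BuildGoldenContainers.sh,
-# which then calls create_sriov_dp_container. The tr pipeline used below turns
-# an image reference into a file-name-safe string, e.g. (illustrative):
-#   "myacr.azurecr.io/base/x:1.0-cm2.0" -> "myacr.azurecr.io-base-x_1.0-cm2.0"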
- -function create_sriov_dp_container_image_base { - local componentName - local baseContainerName - local baseContainerTag - local containerBuildDir - local initialDockerfile - local packagesToInstall - - # $1: component name - # $2: container name - # $3: container tag - # $4: packages to install - # $5: initial Dockerfile - componentName=$1 - baseContainerName=$2 - baseContainerTag=$3 - packagesToInstall=$4 - initialDockerfile=$5 - - local originalContainerName="$CONTAINER_REGISTRY_NAME_FULL/base/$componentName" - - echo - echo "----------------------------------------------------------------------" - echo "+++ create container $containerName" - - containerBuildDir="$TEMPDIR/ContainerBuildDir" - hostMountedDir="$TEMPDIR/ContainerBuildDir/Stage" - newDockerStorageLocation="$TEMPDIR/storage" - - mkdir -p "$containerBuildDir" - mkdir -p "$hostMountedDir" - mkdir -p "$newDockerStorageLocation" - - # Copy files into docker context directory - tar -xf "$MARINER_RPMS_TARBALL" -C "$hostMountedDir"/ - cp "$CONTAINER_SRC_DIR/marinerLocalRepo.repo" "$hostMountedDir"/ - cp "$CONTAINER_SRC_DIR/Dockerfile-Initial" "$containerBuildDir/Dockerfile-Initial" - cp $initialDockerfile $containerBuildDir/Dockerfile - - pushd $containerBuildDir > /dev/null - - # set Dockerfile - echo "+++ Updating Dockerfile" - mainRunInstruction=$(cat Dockerfile-Initial) - sed -E "s|@INCLUDE_MAIN_RUN_INSTRUCTION@|$mainRunInstruction|g" -i Dockerfile - - SetDockerDefaultStorageLocation "$newDockerStorageLocation" - - # Build image - docker buildx build \ - --build-arg BASE_IMAGE="$baseContainerName/core:$baseContainerTag" \ - --build-arg RPMS_TO_INSTALL="$packagesToInstall" \ - -t "$originalContainerName" --no-cache --progress=plain . - - # Get the installed package's version - echo "+++ Get version of the installed package in the container" - - local containerId=$(docker run --entrypoint /bin/bash -dt "$originalContainerName") - local installedPackage=$(docker exec "$containerId" rpm -qa | grep ^"$componentName") # nodejs-16.16.0-1.cm2.x86_64 - echo "Full Installed Package: -> $installedPackage" - local componentVersion=$(echo "$installedPackage" | awk '{n=split($0,a,"-")};{split(a[n],b,".")}; {print a[n-1]"-"b[1]}') # 16.16.0-1 - echo "Component Version -> $componentVersion" - docker rm -f "$containerId" - - # Rename the image to include package version - local containerName="$originalContainerName:$componentVersion.$baseContainerTag" - # replace base container registry prefix by golden container registry prefix (if any) - local baseRegistryPrefix="" - local goldenRegistryPrefix="" - getRegistryPrefix 'base' $PUBLISHING_LEVEL $BRANCH_NAME baseRegistryPrefix - getRegistryPrefix $GOLDEN_CONTAINER_IMAGE $PUBLISHING_LEVEL $BRANCH_NAME goldenRegistryPrefix - if [[ -n $goldenRegistryPrefix ]]; then - if [[ -n $baseRegistryPrefix && \ - $containerName == *"$baseRegistryPrefix"* ]]; then - # replace base container registry prefix by golden container registry prefix - echo "replace $baseRegistryPrefix with $goldenRegistryPrefix in $containerName" - containerName=${containerName/"$baseRegistryPrefix"/"$goldenRegistryPrefix"} - else - # add golden container registry prefix - echo "add $goldenRegistryPrefix prefix to $containerName" - containerName=${containerName/"$CONTAINER_REGISTRY_NAME_FULL"/"$CONTAINER_REGISTRY_NAME_FULL/$goldenRegistryPrefix"} - fi - fi - - docker image tag "$originalContainerName" "$containerName" - docker rmi -f "$originalContainerName" - echo "Container Name: -> $containerName" - - local 
containerNameSanitized=$(echo "$containerName" | tr '/' '-' | tr ':' '_') - - publish_container "$containerName" - - # Call generate_container_sbom function to generate SBOM - generate_container_sbom \ - "$componentName" \ - "$baseContainerName" \ - "$baseContainerTag" \ - "$containerName" \ - "$componentVersion" \ - "$containerNameSanitized" - - echo "$containerName" >> $TEMPDIR/$file_name_prefix-$componentName$file_ext - - ResetDockerDefaultStorageLocation "$newDockerStorageLocation" - - # Clean up docker storage folder - sudo rm -rf "$newDockerStorageLocation" - - # clean up temp folder - popd > /dev/null - sudo rm -rf $containerBuildDir - - echo "----------------------------------------------------------------------" -} - -function create_sriov_dp_container { - mkdir -p $OUTPUT_FOLDER/CONTAINER_LISTS_FOLDER - local dependency_component=$SRIOV_NETWORK_DEVICE_PLUGIN - local pkgsFileName="$SRIOV_NETWORK_DEVICE_PLUGIN_NO_DASH.pkg" - local packagesToInstall=() - getPkgsFromFile $SRIOV_NETWORK_DEVICE_PLUGIN_NO_DASH $pkgsFileName packagesToInstall - local packages="${packagesToInstall[*]}" - - echo "+++ create container based on $base_container_name:$base_container_tag for $dependency_component" - create_sriov_dp_container_image_base \ - "$dependency_component" \ - "$base_container_name" \ - "$base_container_tag" \ - "$packages" \ - "$CONTAINER_SRC_DIR/$SRIOV_NETWORK_DEVICE_PLUGIN_NO_DASH/Dockerfile-sriov-network-device-plugin" - - # Save text files generated in TEMPDIR - echo "+++ publish container list into pipeline artifacts" - cp $TEMPDIR/$file_name_prefix-*$file_ext $OUTPUT_FOLDER/CONTAINER_LISTS_FOLDER -} \ No newline at end of file diff --git a/.pipelines/containerSourceData/scripts/CommonFunctions.sh b/.pipelines/containerSourceData/scripts/CommonFunctions.sh deleted file mode 100755 index 0b06d2990bb..00000000000 --- a/.pipelines/containerSourceData/scripts/CommonFunctions.sh +++ /dev/null @@ -1,243 +0,0 @@ -#!/bin/bash -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
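The CommonFunctions.sh helpers below include SetDockerDefaultStorageLocation and ResetDockerDefaultStorageLocation, which relocate docker's data-root by rewriting /etc/docker/daemon.json. A minimal standalone sketch of that round trip, assuming a hypothetical scratch path with more disk space:

    # Stop docker before touching its configuration.
    sudo systemctl stop docker.service docker.socket
    # Point data-root at the larger disk (hypothetical path), then restart.
    echo '{ "data-root": "/mnt/scratch/docker" }' | sudo tee /etc/docker/daemon.json
    mkdir -p /mnt/scratch/docker
    sudo systemctl daemon-reload && sudo systemctl start docker
    docker info | grep "Docker Root Dir"   # should now report /mnt/scratch/docker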
- -# Logs into Azure -function azure_login { - # Note this script assumes that az login has already been done - echo " -> login into ACR $1" - az acr login --name "$1" -} - -# Builds SBOM -function generate_container_sbom { - local component_name=$1 - local base_container_name=$2 - local base_container_tag=$3 - local container_name=$4 - local component_version_revision=$5 - local container_name_sanitized=$6 - - echo - echo "=====================================================================" - echo "Generate SBOM for container $container_name" - echo "=====================================================================" - echo - - DOCKER_BUILD_DIR="$(pwd)" - - # generate-container-sbom.sh will create the SBOM at the following path - IMAGE_SBOM_MANIFEST_PATH="$DOCKER_BUILD_DIR/_manifest/spdx_2.2/manifest.spdx.json" - "$ROOT_FOLDER"/.pipelines/generate-container-sbom.sh \ - "$DOCKER_BUILD_DIR" \ - "$container_name" \ - "$MANIFEST_TOOL_DIR" \ - "$base_container_name-$component_name" \ - "$component_version_revision-cm$base_container_tag" - - cp -v "$IMAGE_SBOM_MANIFEST_PATH" "$OUTPUT_FOLDER/SBOM_IMAGES/$container_name_sanitized.spdx.json" - echo "Generated SBOM:'$OUTPUT_FOLDER/SBOM_IMAGES/$container_name_sanitized.spdx.json'" -} - -readonly DOCKER_DAEMON_JSON_FILE="/etc/docker/daemon.json" -readonly DOCKER_DAEMON_JSON_BACKUP_FILE="/etc/docker/daemon.json.cfbackup" - -# Sets the docker storage location to a user-provided path which has larger disk space -function SetDockerDefaultStorageLocation { - local newLocation=$1 - echo "Change docker default storage location" - echo "Default docker storage location" - sudo systemctl start docker - docker info | grep "Docker Root Dir" - - echo "Stop docker" - sudo systemctl stop docker.service - sudo systemctl stop docker.socket - - ls -lR /etc/docker - - # Do not clobber an existing backup, to avoid accidentally overwriting a valid one - if [ !
-f $DOCKER_DAEMON_JSON_BACKUP_FILE ] && [ -f $DOCKER_DAEMON_JSON_FILE ]; then - echo "Backup daemon.json" - sudo cp $DOCKER_DAEMON_JSON_FILE $DOCKER_DAEMON_JSON_BACKUP_FILE - fi - - echo "Write data-root property to daemon.json" - echo "{ \"data-root\": \"${newLocation}\" }" > daemon.json - - echo "Display daemon.json" - sudo cat daemon.json - - echo "Copy daemon.json to docker" - sudo cp daemon.json $DOCKER_DAEMON_JSON_FILE - - mkdir -p "${newLocation}" - - echo "Restart docker" - sudo systemctl daemon-reload - sudo systemctl start docker - - echo "New docker storage location" - docker info | grep "Docker Root Dir" - - echo "--------------------------------------------" -} - -# Resets the docker storage location from backup -function ResetDockerDefaultStorageLocation { - local currentLocation=$1 - echo "Reset docker default storage location" - echo "Stop docker" - sudo systemctl stop docker.service - sudo systemctl stop docker.socket - - echo "Recovering daemon.json from backup" - if [ -f $DOCKER_DAEMON_JSON_BACKUP_FILE ]; then - sudo mv $DOCKER_DAEMON_JSON_BACKUP_FILE $DOCKER_DAEMON_JSON_FILE - else - sudo rm $DOCKER_DAEMON_JSON_FILE - fi - - echo "Restart docker" - sudo systemctl daemon-reload - sudo systemctl start docker - - echo "New docker storage location" - docker info | grep "Docker Root Dir" -} - -# Saves the container list in a folder named CONTAINER_LISTS_FOLDER -function save_container_list { - # Save text files generated in TEMPDIR - echo - echo "=====================================================================" - echo "Publish container list into pipeline artifacts" - echo "=====================================================================" - echo - - mkdir -pv "$OUTPUT_FOLDER/CONTAINER_LISTS_FOLDER" - cp "$TEMPDIR"/$file_name_prefix-*$file_ext "$OUTPUT_FOLDER"/CONTAINER_LISTS_FOLDER -} - -# Tests golden container -function test_golden_container { - local container_type=$1 - local container_image_name=$2 - - echo - echo "=====================================================================" - echo "Test container $container_image_name" - echo "=====================================================================" - echo - - "$ROOT_FOLDER/pipelines/test-golden-image-pipeline/test-source-artifacts/$container_type/TestRunner.sh" \ - -n "$container_image_name" \ - -o "$PWD" -} - -function test_distroless_container { - local test_dir_name=$1 - local builder_image=$2 - local container_image_name=$3 - - echo - echo "=====================================================================" - echo "Test container $container_image_name" - echo "=====================================================================" - echo - - "$ROOT_FOLDER/pipelines/test-golden-image-pipeline/test-source-artifacts/$test_dir_name/TestRunner.sh" \ - -b "$builder_image" \ - -n "$container_image_name" \ - -o "$PWD" -} - -# Publishes the given golden container to Azure Container Registry -function publish_container { - local container_name=$1 - echo - echo "=====================================================================" - echo "Publish container $container_name" - echo "=====================================================================" - echo - - previous_login="none" - OLDIFS=$IFS - IFS='.'
- read -ra name_parts <<< "$container_name" - IFS=$OLDIFS - container_registry="${name_parts[0]}" - - if [[ "$previous_login" != "$container_registry" ]]; then - echo " -> login into ACR $container_registry" - az acr login --name "$container_registry" - previous_login=$container_registry - fi - - docker image push "$container_name" - echo -} - -# Checks if $GOLDEN_CONTAINER_IMAGE is an HCI image by looking at the config file. -# Assigns a boolean to the out variable. -# The caller must define ROOT_FOLDER and GOLDEN_CONTAINER_IMAGE. -function checkIfHciImage { - local __containerImageName=$1 # [out parameter] - local isHciImage=false - ACR_MAPPING_CONFIG_FILE="$ROOT_FOLDER/pipelines/publish-containers/common/configuration/acrRepoMapping.json" - marinerHciGoldenImagesArray=$(jq ".MarinerHciGoldenImages[]" "$ACR_MAPPING_CONFIG_FILE" | tr -d \") - for marinerHciGoldenImage in $marinerHciGoldenImagesArray; do - if [[ $marinerHciGoldenImage == "$GOLDEN_CONTAINER_IMAGE" ]]; then - isHciImage=true - break - fi - done - eval $__containerImageName=$isHciImage -} - -# Gets the registry prefix (if any) -# Assigns a string to the out variable. -# The caller must define ROOT_FOLDER -function getRegistryPrefix { - local container_name=$1 - local publishingLevel=$2 - local gitBranch=$3 - local __registryPrefix=$4 # [out parameter] - local prefix="" - - local git_branch_json="" - local acr_repo_mapping_json="" - local image_json="" - - ACR_MAPPING_CONFIG_FILE="$ROOT_FOLDER/pipelines/publish-containers/common/configuration/acrRepoMapping.json" - eval $__registryPrefix=$prefix - - git_branch_json=$(jq ".gitBranches[]|select(.gitBranch == \"$gitBranch\")" "$ACR_MAPPING_CONFIG_FILE") - if [[ -z $git_branch_json ]]; then - echo "No branch tag '$gitBranch' in json ($ACR_MAPPING_CONFIG_FILE)" - return - fi - - acr_repo_mapping_json=$(echo $git_branch_json | jq ".acrRepoMapping[]|select(.publishingLevel == \"$publishingLevel\")") - if [[ -z $acr_repo_mapping_json ]]; then - echo "No publishing level '$publishingLevel' for branch '$gitBranch' in json ($ACR_MAPPING_CONFIG_FILE)" - return - fi - - image_json=$(echo $acr_repo_mapping_json | jq ".images[]|select(.name == \"$container_name\")") - if [[ -z $image_json ]]; then - echo "No container named '$container_name' for publishing level '$publishingLevel' for branch '$gitBranch' in json ($ACR_MAPPING_CONFIG_FILE)" - return - fi - - prefix=$(echo $image_json | jq .repoPrefix | tr -d \") - # reset registry prefix to "" if it is not defined in json (jq returns 'null') - if [[ $prefix == "null" ]]; then - prefix="" - echo "No registry prefix for '$container_name' branch '$gitBranch' publishing level '$publishingLevel'" - else - echo "Registry prefix '$prefix' for '$container_name' branch '$gitBranch' publishing level '$publishingLevel'" - fi - - eval $__registryPrefix=$prefix -} \ No newline at end of file diff --git a/.pipelines/containerSourceData/scripts/PublishContainers.sh b/.pipelines/containerSourceData/scripts/PublishContainers.sh new file mode 100755 index 00000000000..ce6c560515a --- /dev/null +++ b/.pipelines/containerSourceData/scripts/PublishContainers.sh @@ -0,0 +1,412 @@ +#!/bin/bash +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +set -e + +# This script is used to publish the multi-arch tags for the container images. +# Note that this script assumes that 'az login' has already been done. + +# CONTAINER_SRC_DIR is expected to contain the acrRepoParser.py script and the configuration file acrRepoV2.json.
+# The acrRepoParser.py script is used to parse the ACR repository details from the acrRepoV2.json configuration file. +# The directory (assuming it is called container_artifacts) is expected to have the following structure: +# container_artifacts +# ├── configuration +# │ └── acrRepoV2.json +# └── scripts +# └── acrRepoParser.py + +# parse script parameters: +# -c -> Container pipelines' configuration directory (e.g. $(Build.SourcesDirectory)/.pipelines/container_artifacts) +# -d -> Directory containing the containers list +# -e -> Containers file name prefix +# -f -> Containers file name suffix +# -g -> GitHub branch +# -p -> Publishing level +# -o -> Output folder +while getopts ":c:d:e:f:g:p:o:" OPTIONS; do + case ${OPTIONS} in + c ) CONTAINER_SRC_DIR=$OPTARG;; + d ) PUBLISHED_CONTAINERS_DIR=$OPTARG;; + e ) PUBLISHED_CONTAINER_FILE_PREFIX=$OPTARG;; + f ) PUBLISHED_CONTAINER_FILE_SUFFIX=$OPTARG;; + g ) GITHUB_BRANCH=$OPTARG;; + p ) PUBLISHING_LEVEL=$OPTARG;; + o ) OUTPUT_FOLDER=$OPTARG;; + + \? ) + echo "Error - Invalid Option: -$OPTARG" 1>&2 + exit 1 + ;; + : ) + echo "Error - Invalid Option: -$OPTARG requires an argument" 1>&2 + exit 1 + ;; + esac +done + +FILE_SEARCH_PATTERN="$PUBLISHED_CONTAINER_FILE_PREFIX*$PUBLISHED_CONTAINER_FILE_SUFFIX" +echo "CONTAINER_SRC_DIR -> $CONTAINER_SRC_DIR" +echo "FILE_SEARCH_PATTERN -> $FILE_SEARCH_PATTERN" +echo "GITHUB_BRANCH -> $GITHUB_BRANCH" +echo "PUBLISHING_LEVEL -> $PUBLISHING_LEVEL" +echo "OUTPUT_FOLDER -> $OUTPUT_FOLDER" + +PUBLISHED_CONTAINER_FILES=$(find "$PUBLISHED_CONTAINERS_DIR" -name "$FILE_SEARCH_PATTERN") +if [[ -z $PUBLISHED_CONTAINER_FILES ]]; then + echo "Error - No published container lists in $PUBLISHED_CONTAINERS_DIR" + exit 1 +fi + +function cleanup { + echo "+++ logout from Azure Container Registry" + docker logout + docker system prune -f +} +trap cleanup EXIT + +CONTAINER_TAGS_DIR="$OUTPUT_FOLDER/CONTAINER_TAGS_FOLDER" +mkdir -p "$CONTAINER_TAGS_DIR" +FILE_NAME_PREFIX='PublishedTags' +FILE_EXT='.txt' + +# For Azure Linux 2.0, we have shipped the container images with +# the below value in the os-version field in the image manifest. +# TODO: We may need to update this value for Azure Linux 3.0. +OS_VERSION_PREFIX="cbl-mariner-" +DISTRO_IDENTIFIER="cm" + +function create_multi_arch_tags { + # $1: original container (without '-amd64' or '-arm64' extension in tag) + # $2: multi-arch name + # $3: multi-arch tag + # $4: azure linux version + # $5: architecture to build + local original_container=$1 + local multiarch_name=$2 + local multiarch_tag=$3 + local azure_linux_version=$4 + local architecture_build=$5 + + echo "-------------------------------------------------------" + echo "original_container -> $original_container" + echo "multiarch_name -> $multiarch_name" + echo "multiarch_tag -> $multiarch_tag" + echo "azure_linux_version -> $azure_linux_version" + echo "-------------------------------------------------------" + + full_multiarch_tag="$multiarch_name:$multiarch_tag" + + # First check if the already published tag is on the next Azure Linux version. + # If it is on the next version, then do not overwrite it. + set +e + manifest_json=$(docker manifest inspect "$full_multiarch_tag") + set -e + + if [[ -n $manifest_json ]]; then + echo "docker manifest found for container $full_multiarch_tag" + + # Parse the manifest json and look for the azure linux version in the key "os.version". + # Loop through the .manifests array and look for the os.version key. 
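+# (For illustration only: a platform entry in the inspected manifest might look like '{"architecture": "amd64", "os": "linux", "os.version": "cbl-mariner-2.0"}', since the tags created by this script are annotated with "--os-version" below.)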
+ # If the os.version key is found, then look for the version in its value starting with $OS_VERSION_PREFIX. + published_tag_os_version_key="null" + manifests=$(echo "$manifest_json" | jq .manifests) + manifest_array=$(echo "$manifests" | jq -c '.[]' | jq -r '.platform | with_entries(select(.key | contains("os.version")))' | jq -c '.[]') + for key in ${manifest_array[*]}; do + if [[ $key == *"$OS_VERSION_PREFIX"* ]]; then + # Remove the quotes from the value. + key=$(echo "$key" | tr -d \") + published_tag_os_version_key=$key + break + fi + done + + echo "published_tag_os_version_key -> $published_tag_os_version_key" + + if [[ $published_tag_os_version_key == "null" ]]; then + echo "OS Version key not found in the manifest file." + else + # OS version found. Look for the version in its value starting with $OS_VERSION_PREFIX. + published_tag_os_version=$(echo "$published_tag_os_version_key" | tr -d \" | tr -d $OS_VERSION_PREFIX) + + echo "published_tag_os_version -> $published_tag_os_version" + + # Check if the published tag has a greater Azure Linux version than the current tag's Azure Linux version. + # 1.0 > 2.0 => 0 (false) + # 2.0 > 1.0 => 1 (true) + # 2.0 > 2.0 => 0 (false) + is_published_tag_os_version_strictly_greater=$(echo "$published_tag_os_version>$azure_linux_version" | bc) + + # If the published tag is on the next Azure Linux version, then do not proceed. + if [ "$is_published_tag_os_version_strictly_greater" -eq 1 ]; then + echo "Published tag is already on the next Azure Linux version i.e., $published_tag_os_version." + echo "Do not overwrite it with $azure_linux_version." + return + fi + + echo "Published tag is on Azure Linux version $published_tag_os_version." + echo "Proceed with overwriting it with Azure Linux version $azure_linux_version." + echo "+++ update $full_multiarch_tag tag" + fi + else + echo "Manifest does not exist. Proceed with creating new tag." + echo "+++ create $full_multiarch_tag tag" + fi + + # create, annotate, and push manifest + docker manifest create "$full_multiarch_tag" --amend "$original_container-amd64" + docker manifest annotate "$full_multiarch_tag" "$original_container-amd64" \ + --os-version "$OS_VERSION_PREFIX$azure_linux_version" + + if [[ $architecture_build == *"ARM64"* ]]; then + docker manifest create "$full_multiarch_tag" --amend "$original_container-arm64" + docker manifest annotate "$full_multiarch_tag" "$original_container-arm64" \ + --os-version "$OS_VERSION_PREFIX$azure_linux_version" \ + --variant "v8" + fi + + echo "+++ push $full_multiarch_tag tag" + docker manifest push "$full_multiarch_tag" + echo "+++ $full_multiarch_tag tag pushed successfully" + + # Save the multi-arch tag to a file. + image_basename=${multiarch_name#*/} + dash_removed_name=${image_basename//-/} + final_name=${dash_removed_name////_} + + output_file="$CONTAINER_TAGS_DIR/$FILE_NAME_PREFIX-$final_name$FILE_EXT" + echo "Save the multi-arch tag to a file: $output_file" + + echo "$original_container-amd64" >> "$output_file" + if [[ $architecture_build == *"ARM64"* ]]; then + echo "$original_container-arm64" >> "$output_file" + fi + echo "$full_multiarch_tag" >> "$output_file" +} + +for PUBLISHED_CONTAINER_FILE in $PUBLISHED_CONTAINER_FILES +do + file_basename=$(basename "$PUBLISHED_CONTAINER_FILE") + container_type=$(echo "$file_basename" | sed -e "s/$PUBLISHED_CONTAINER_FILE_PREFIX-//" -e "s/$PUBLISHED_CONTAINER_FILE_SUFFIX//") + + # Rename core images to base to get the ACR Repo details. 
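+# (e.g. a published list file named "<prefix>-busybox<suffix>" yields container_type "busybox"; busybox, distroless, and marinara images all share the "base" repo details, hence the rename below.)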
+ if [[ "$container_type" =~ ^(distroless|busybox|marinara)$ ]]; then + container_type="base" + fi + echo "Container Type -> $container_type" + + TEMP_FILE=$(mktemp) + + python3 "$CONTAINER_SRC_DIR"/scripts/acrRepoParser.py \ + --config-file-path "$CONTAINER_SRC_DIR"/configuration/acrRepoV2.json \ + --image-name "$container_type" \ + --git-branch "$GITHUB_BRANCH" \ + --publishing-level "$PUBLISHING_LEVEL" \ + --output-file-path "$TEMP_FILE" + + IS_CORE_IMAGE=$(jq -r '.data_is_core_image' "$TEMP_FILE") + IS_GOLDEN_IMAGE=$(jq -r '.data_is_golden_image' "$TEMP_FILE") + IS_HCI_GOLDEN_IMAGE=$(jq -r '.data_is_hci_golden_image' "$TEMP_FILE") + ARCHITECTURE_TO_BUILD=$(jq -r '.data_architecture_to_build' "$TEMP_FILE") + TARGET_ACR=$(jq -r '.data_target_acr' "$TEMP_FILE") + + if [[ -z $TARGET_ACR ]]; then + echo "##vso[task.logissue type=warning]Target ACR not found for image $container_type" + continue + fi + + # Remove the temp file. + [ -f "$TEMP_FILE" ] && rm "$TEMP_FILE" + + echo "Container Type -> $container_type" + echo "IS_CORE_IMAGE -> $IS_CORE_IMAGE" + echo "IS_GOLDEN_IMAGE -> $IS_GOLDEN_IMAGE" + echo "IS_HCI_GOLDEN_IMAGE -> $IS_HCI_GOLDEN_IMAGE" + echo "ARCHITECTURE_TO_BUILD -> $ARCHITECTURE_TO_BUILD" + echo "TARGET_ACR -> $TARGET_ACR" + + while IFS= read -r image_name + do + echo + echo "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" + echo "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" + echo "Image name: $image_name" + echo + container_registry="${image_name%%.*}" + echo "+++ login into Azure ACR $container_registry" + az acr login --name "$container_registry" + + amd64_image=${image_name%-*}-amd64 + docker pull "$amd64_image" + + # Some container images are only built for AMD64 architecture. 
+ if [[ $ARCHITECTURE_TO_BUILD == *"ARM64"* ]]; then + arm64_image=${image_name%-*}-arm64 + docker pull "$arm64_image" + fi + + if [[ $container_registry != "$TARGET_ACR" ]]; then + echo "+++ login into Azure ACR $TARGET_ACR" + az acr login --name "$TARGET_ACR" + + echo "Retagging the images to $TARGET_ACR" + # E.g., If container_registry is azurelinuxdevpreview and TARGET_ACR is azurelinuxpreview, then + # azurelinuxdevpreview.azurecr.io/base/core:2.0 -> azurelinuxpreview.azurecr.io/base/core:2.0 + + amd64_retagged_image_name=${amd64_image/"$container_registry"/"$TARGET_ACR"} + echo "Retagged amd64 image: $amd64_retagged_image_name" + docker image tag "$amd64_image" "$amd64_retagged_image_name" + docker rmi "$amd64_image" + docker image push "$amd64_retagged_image_name" + + if [[ $ARCHITECTURE_TO_BUILD == *"ARM64"* ]]; then + arm64_retagged_image_name=${arm64_image/"$container_registry"/"$TARGET_ACR"} + echo "Retagged arm64 image: $arm64_retagged_image_name" + docker image tag "$arm64_image" "$arm64_retagged_image_name" + docker rmi "$arm64_image" + docker image push "$arm64_retagged_image_name" + fi + + image_name=$amd64_retagged_image_name + fi + + # image_name has the following format [registry name].azurecr.io/[name]:tag-[amd64 or arm64] + # e.g.: azurelinuxpreview.azurecr.io/base/core:1.0.20210628-amd64 + # container name is [registry name].azurecr.io/[name] + # container tag is tag (without -[amd64 or arm64]) + image_name_with_noarch=${image_name%-*} + container_name=${image_name_with_noarch%:*} + container_tag=${image_name_with_noarch#*:} + + echo "Image Name: ------------------>" "$image_name" + echo "Image Name w/o Arch: ------------------>" "$image_name_with_noarch" + echo "Container Name: ------------------>" "$container_name" + echo "Container Tag: ------------------>" "$container_tag" + + if "$IS_CORE_IMAGE"; then + # For core images, we need to create multi-arch tags for + # the major version and the full version. + echo "Create multi-arch tags for core image: $container_type" + OLDIFS=$IFS + IFS='.' + read -ra tag_parts <<< "$container_tag" + IFS=$OLDIFS + + major_version="${tag_parts[0]}.${tag_parts[1]}" + azure_linux_version="${tag_parts[0]}.0" + + # create multi-arch tag full version (e.g.: 2.0.20210127) + create_multi_arch_tags \ + "$image_name_with_noarch" \ + "$container_name" \ + "$container_tag" \ + "$azure_linux_version" \ + "$ARCHITECTURE_TO_BUILD" + + # create major version tag (e.g.: 2.0) + create_multi_arch_tags \ + "$image_name_with_noarch" \ + "$container_name" \ + "$major_version" \ + "$azure_linux_version" \ + "$ARCHITECTURE_TO_BUILD" + elif "$IS_GOLDEN_IMAGE"; then + # For golden images, we need to create multi-arch tags for + # the major version, the major and minor version, and the full version. 
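+# (A sketch of the parameter expansions below: with package_version="16.14.0", "${package_version%%.*}" expands to the major version "16" and "${package_version%.*}" to "16.14".)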
+ echo "Create multi-arch tags for golden image: $container_type" + package_version=${container_tag%-*} # 16.14.0 + package_version_major=${package_version%%.*} # 16 + package_version_major_minor=${package_version%.*} # 16.14 + + if [[ $package_version == *"-debug-nonroot" ]]; then + package_version_major=$package_version_major"-debug-nonroot" + package_version_major_minor=$package_version_major_minor"-debug-nonroot" + elif [[ $package_version == *"-nonroot" ]]; then + package_version_major=$package_version_major"-nonroot" + package_version_major_minor=$package_version_major_minor"-nonroot" + elif [[ $package_version == *"-debug" ]]; then + package_version_major=$package_version_major"-debug" + package_version_major_minor=$package_version_major_minor"-debug" + fi + + echo "Package Version: ------------------>" "$package_version" + echo "Package Version Major: ------------------>" "$package_version_major" + echo "Package Version Minor: ------------------>" "$package_version_major_minor" + + if $IS_HCI_GOLDEN_IMAGE; then + azure_linux_version=$(awk -F '-' '{print $2}' <<< "$container_tag") # 0.59.0-2.2.0.20230607 -> 2.2.0.20230607 + azure_linux_version=$(awk -F '.' '{print $2"."$3}' <<< "$azure_linux_version") # [2].[2].[0].[20230607] -> 2.0 + # ^ ^ + else + azure_linux_version=$(awk -F $DISTRO_IDENTIFIER '{print $2}' <<< "$container_tag") # 16.19.1-2-cm2.0.20230607 -> 2.0.20230607 + azure_linux_version=$(awk -F '.' '{print $1"."$2}' <<< "$azure_linux_version") # [2].[0].[20230607] -> 2.0 + # ^ ^ + fi + + # create multi-arch tag full version + # e.g. azurelinuxpreview.azurecr.io/base/nodejs:16.14.0-1-cm2.0.20220412 + create_multi_arch_tags \ + "$image_name_with_noarch" \ + "$container_name" \ + "$container_tag" \ + "$azure_linux_version" \ + "$ARCHITECTURE_TO_BUILD" + + # create multi-arch tag with major version + # e.g. azurelinuxpreview.azurecr.io/base/nodejs:16 + create_multi_arch_tags \ + "$image_name_with_noarch" \ + "$container_name" \ + "$package_version_major" \ + "$azure_linux_version" \ + "$ARCHITECTURE_TO_BUILD" + + # create multi-arch tag with major version and azure linux version + # e.g. azurelinuxpreview.azurecr.io/base/nodejs:16-cm2.0 + create_multi_arch_tags \ + "$image_name_with_noarch" \ + "$container_name" \ + "$package_version_major-$DISTRO_IDENTIFIER$azure_linux_version" \ + "$azure_linux_version" \ + "$ARCHITECTURE_TO_BUILD" + + # create multi-arch tag with major and minor version + # e.g. azurelinuxpreview.azurecr.io/base/nodejs:16.14 + create_multi_arch_tags \ + "$image_name_with_noarch" \ + "$container_name" \ + "$package_version_major_minor" \ + "$azure_linux_version" \ + "$ARCHITECTURE_TO_BUILD" + + # create multi-arch tag with major and minor version and azure linux version + # e.g. azurelinuxpreview.azurecr.io/base/nodejs:16.14-cm2.0 + create_multi_arch_tags \ + "$image_name_with_noarch" \ + "$container_name" \ + "$package_version_major_minor-$DISTRO_IDENTIFIER$azure_linux_version" \ + "$azure_linux_version" \ + "$ARCHITECTURE_TO_BUILD" + + if $IS_HCI_GOLDEN_IMAGE; then + # create multi-arch tag with major, minor, and patch version + # e.g. azurelinuxpreview.azurecr.io/base/nodejs:16.14 + create_multi_arch_tags \ + "$image_name_with_noarch" \ + "$container_name" \ + "$package_version" \ + "$azure_linux_version" \ + "$ARCHITECTURE_TO_BUILD" + + # create multi-arch tag with major, minor, and patch version and azure linux version + # e.g. 
azurelinuxpreview.azurecr.io/base/nodejs:16.14-cm2.0 + create_multi_arch_tags \ + "$image_name_with_noarch" \ + "$container_name" \ + "$package_version-$DISTRO_IDENTIFIER$azure_linux_version" \ + "$azure_linux_version" \ + "$ARCHITECTURE_TO_BUILD" + fi + fi + done < "$PUBLISHED_CONTAINER_FILE" +done diff --git a/SPECS-EXTENDED/libgit2-glib/libgit2-glib.spec b/SPECS-EXTENDED/libgit2-glib/libgit2-glib.spec index 1424aef55aa..b946f9f15cf 100644 --- a/SPECS-EXTENDED/libgit2-glib/libgit2-glib.spec +++ b/SPECS-EXTENDED/libgit2-glib/libgit2-glib.spec @@ -5,7 +5,7 @@ Distribution: Mariner Name: libgit2-glib Version: 0.99.0.1 -Release: 5%{?dist} +Release: 6%{?dist} Summary: Git library for GLib License: LGPLv2+ @@ -70,6 +70,9 @@ developing applications that use %{name}. %{_datadir}/vala/ %changelog +* Wed Feb 21 2024 Sam Meluch - 0.99.0.1-6 +- Dash roll to rebuild with new libgit2 + * Mon Mar 21 2022 Pawel Winogrodzki - 0.99.0.1-5 - Adding BR on '%%{_bindir}/xsltproc'. - Disabled gtk doc generation to remove network dependency during build-time. diff --git a/SPECS-SIGNED/grub2-efi-binary-signed/grub2-efi-binary-signed.spec b/SPECS-SIGNED/grub2-efi-binary-signed/grub2-efi-binary-signed.spec index c9741a1702b..b65b6d4b351 100644 --- a/SPECS-SIGNED/grub2-efi-binary-signed/grub2-efi-binary-signed.spec +++ b/SPECS-SIGNED/grub2-efi-binary-signed/grub2-efi-binary-signed.spec @@ -12,7 +12,7 @@ Summary: Signed GRand Unified Bootloader for %{buildarch} systems Name: grub2-efi-binary-signed-%{buildarch} Version: 2.06 -Release: 12%{?dist} +Release: 13%{?dist} License: GPLv3+ Vendor: Microsoft Corporation Distribution: Mariner @@ -77,6 +77,9 @@ cp %{SOURCE3} %{buildroot}/boot/efi/EFI/BOOT/%{grubpxeefiname} /boot/efi/EFI/BOOT/%{grubpxeefiname} %changelog +* Thu Feb 15 2024 Dan Streetman - 2.06-13 +- match grub2 version + * Wed Oct 18 2023 Gary Swalling - 2.06-12 - Bump release number to match grub release number diff --git a/SPECS-SIGNED/hvloader-signed/hvloader-signed.spec b/SPECS-SIGNED/hvloader-signed/hvloader-signed.spec new file mode 100644 index 00000000000..ef70c9dd266 --- /dev/null +++ b/SPECS-SIGNED/hvloader-signed/hvloader-signed.spec @@ -0,0 +1,74 @@ +%global debug_package %{nil} +%define name_github HvLoader +%ifarch x86_64 +%global buildarch x86_64 +%endif +Summary: Signed HvLoader.efi for %{buildarch} systems +Name: hvloader-signed-%{buildarch} +Version: 1.0.1 +Release: 1%{?dist} +License: MIT +Vendor: Microsoft Corporation +Distribution: Mariner +URL: https://github.com/microsoft/HvLoader +# This package's "version" and "release" must reflect the unsigned version that +# was signed. +# An important consequence is that when making a change to this package, the +# unsigned version/release must be increased to keep the two versions consistent. +# Ideally though, this spec will not change much or at all, so the version will +# just track the unsigned package's version/release. +# +# To populate these sources: +# 1. Build the unsigned packages as normal +# 2. Sign the desired binary +# 3. Place the unsigned package and signed binary in this spec's folder +# 4. Build this spec +Source0: hvloader-%{version}-%{release}.%{buildarch}.rpm +Source1: HvLoader.efi +ExclusiveArch: x86_64 + +%description +This package contains the HvLoader EFI binary signed for secure boot. The package is +specifically created for installing on %{buildarch} systems + +%package -n hvloader +Summary: HvLoader.efi is an EFI application for loading an external hypervisor loader. 
+Group: Applications/System + +%description -n hvloader +HvLoader.efi is an EFI application for loading an external hypervisor loader. + +HvLoader.efi loads a given hypervisor loader binary (DLL, EFI, etc.), and +calls its entry point, passing HvLoader.efi's ImageHandle. This way the +hypervisor loader binary has access to HvLoader.efi's command line options, +and can use those as configuration parameters. The first HvLoader.efi command line +option is the path to the hypervisor loader binary. + +%prep + +%build +mkdir rpm_contents +pushd rpm_contents + +# This spec's whole purpose is to inject the signed HvLoader binary +rpm2cpio %{SOURCE0} | cpio -idmv +cp %{SOURCE1} ./boot/efi/HvLoader.efi + +popd + +%install +pushd rpm_contents + +# Don't use * wildcard. It does not copy over hidden files in the root folder... +cp -rp ./. %{buildroot}/ + +popd + +%files -n hvloader +%license MdeModulePkg/Application/%{name_github}-%{version}/LICENSE +/boot/efi/HvLoader.efi + +%changelog +* Thu Jan 04 2024 Cameron Baird - 1.0.1-1 +- Original version for CBL-Mariner. +- License verified diff --git a/SPECS-SIGNED/kernel-mshv-signed/kernel-mshv-signed.spec b/SPECS-SIGNED/kernel-mshv-signed/kernel-mshv-signed.spec new file mode 100644 index 00000000000..425beecc4c3 --- /dev/null +++ b/SPECS-SIGNED/kernel-mshv-signed/kernel-mshv-signed.spec @@ -0,0 +1,153 @@ +%global debug_package %{nil} +%global sha512hmac bash %{_sourcedir}/sha512hmac-openssl.sh +%ifarch x86_64 +%global buildarch x86_64 +%endif +%define uname_r %{version}-%{release} +Summary: Signed MSHV-enabled Linux Kernel for %{buildarch} systems +Name: kernel-mshv-signed-%{buildarch} +Version: 5.15.126.mshv9 +Release: 2%{?dist} +License: GPLv2 +Vendor: Microsoft Corporation +Distribution: Mariner +Group: System Environment/Kernel +URL: https://github.com/microsoft/CBL-Mariner-Linux-Kernel +# This spec's purpose is to take an input kernel rpm and input secure-boot-signed +# kernel binary from the same build and generate a new "kernel" rpm with the +# signed kernel binary + all of the other original kernel files, triggers, +# scriptlets, requires, provides, etc. +# +# We need to ensure the kernel modules and kernel binary used are from the exact +# same build because at build time the kernel modules are signed with an +# ephemeral key that the kernel enrolls in its keyring. We enforce kernel +# module signature checking when we enable security features like kernel +# lockdown so our kernel can only load those specific kernel modules at runtime. +# +# Additionally, to complete the UEFI Secure Boot chain, we must PE-sign the +# kernel binary. Ideally we would enable secure-boot signing tools like pesign +# or sbsign to be callable from inside the rpmbuild environment; that way we can +# secure-boot sign the kernel binary during the kernel's rpmbuild. It is best +# practice to sign as soon as possible. However there are issues getting that +# secure boot signing infrastructure in place today. Hence we sign the +# resulting kernel binary and "repackage" the kernel RPM (something rpm itself +# actively tries to make sure you never do...generally for good reasons). +# +# To achieve this repackaging, this spec creates a new subpackage named +# "kernel-mshv". To retain all of the initial kernel-mshv package behaviors, we make sure +# the subpackage has the same requires, provides, triggers, post steps, and +# files as the original kernel package.
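+# +# (Illustration with hypothetical file names: an unsigned build might produce kernel-mshv-5.15.126.mshv9-2.cm2.x86_64.rpm, whose extracted /boot/vmlinuz-5.15.126.mshv9-2.cm2 is PE-signed out of band and then placed next to this spec as vmlinuz-5.15.126.mshv9-2.cm2, i.e. Source1.)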
+# +# This specific repackaging implementation leaves room for us to enable the +# more ideal secure-boot signing flow in the future without introducing any +# sort of breaking change or new packaging. Users still install a "kernel-mshv" +# package like they normally would. +# +# Maintenance Notes: +# - This spec's "version" and "release" must reflect the unsigned version that +# was signed. An important consequence is that when making a change to this +# spec or the normal kernel spec, the other spec's version/release must +# be increased to keep the two versions consistent. +# +# - Make sure the kernel subpackage's Requires, Provides, triggers, post/postun +# scriptlets, and files match the normal kernel-mshv spec's. The kernel subpackage +# should contain the same content as the input kernel package but replace the +# kernel binary with our signed kernel binary. Since all the requires, provides, +# etc. are the same, this new kernel package can be a direct replacement for the +# normal kernel package and RPM will resolve packages with kernel dependencies +# correctly. +# +# To populate the input sources: +# 1. Build the unsigned packages as normal +# 2. Sign the desired binary +# 3. Place the unsigned package and signed binary in this spec's folder +# 4. Build this spec +Source0: kernel-mshv-%{version}-%{release}.%{buildarch}.rpm +Source1: vmlinuz-%{uname_r} +Source2: sha512hmac-openssl.sh +BuildRequires: cpio +BuildRequires: openssl +BuildRequires: sed + +%description +This package contains the MSHV-enabled Linux kernel package, with the kernel binary signed with the production key + +%package -n kernel-mshv +Summary: MSHV-enabled Linux Kernel +Group: System Environment/Kernel +Requires: filesystem +Requires: kmod +Requires(post): coreutils +Requires(postun): coreutils +%{?grub2_configuration_requires} +ExclusiveArch: x86_64 + +%description -n kernel-mshv +The kernel package contains the signed MSHV-enabled Linux kernel. + +%prep + +%build +mkdir rpm_contents +pushd rpm_contents + +# This spec's whole purpose is to inject the signed kernel binary +rpm2cpio %{SOURCE0} | cpio -idmv +cp %{SOURCE1} ./boot/vmlinuz-%{uname_r} + +popd + +%install +pushd rpm_contents + +# Don't use * wildcard. It does not copy over hidden files in the root folder... +cp -rp ./. %{buildroot}/ + +popd + +%triggerin -n kernel-mshv -- initramfs +mkdir -p %{_localstatedir}/lib/rpm-state/initramfs/pending +touch %{_localstatedir}/lib/rpm-state/initramfs/pending/%{uname_r} +echo "initrd generation of kernel %{uname_r} will be triggered later" >&2 + +%triggerun -n kernel-mshv -- initramfs +rm -rf %{_localstatedir}/lib/rpm-state/initramfs/pending/%{uname_r} +rm -rf /boot/efi/initrd.img-%{uname_r} +echo "initrd of kernel %{uname_r} removed" >&2 + +%postun -n kernel-mshv +if [ ! -e /boot/mariner-mshv.cfg ] +then + ls /boot/linux-*.cfg 1> /dev/null 2>&1 + if [ $?
-eq 0 ] + then + list=`ls -tu /boot/linux-*.cfg | head -n1` + test -n "$list" && ln -sf "$list" /boot/mariner-mshv.cfg + fi +fi +%grub2_postun + +%post -n kernel-mshv +/sbin/depmod -a %{uname_r} +ln -sf linux-%{uname_r}.cfg /boot/mariner-mshv.cfg +%grub2_post + +%files -n kernel-mshv +%defattr(-,root,root) +%license COPYING +%exclude %dir /usr/lib/debug +/boot/System.map-%{uname_r} +/boot/config-%{uname_r} +/boot/vmlinuz-%{uname_r} +/boot/efi/vmlinuz-%{uname_r} +%config(noreplace) /boot/linux-%{uname_r}.cfg +%config(noreplace) %{_sysconfdir}/default/grub.d/50_mariner_mshv.cfg +%config %{_localstatedir}/lib/initramfs/kernel/%{uname_r} +%defattr(0644,root,root) +/lib/modules/%{uname_r}/* +%exclude /lib/modules/%{uname_r}/build + +%changelog +* Thu Jan 04 2024 Cameron Baird - 5.15.126.mshv9-2 +- Original version for CBL-Mariner. +- License verified diff --git a/SPECS-SIGNED/kernel-mshv-signed/sha512hmac-openssl.sh b/SPECS-SIGNED/kernel-mshv-signed/sha512hmac-openssl.sh new file mode 100644 index 00000000000..af67fa7b8f4 --- /dev/null +++ b/SPECS-SIGNED/kernel-mshv-signed/sha512hmac-openssl.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +# Mocks sha512hmac using the openssl tool. +# Only for use during RPM build. + +openssl sha512 -hmac FIPS-FTW-RHT2009 -hex "$1" | cut -f 2 -d ' ' | echo "$(cat -) $1" \ No newline at end of file diff --git a/SPECS/LICENSES-AND-NOTICES/LICENSES-MAP.md b/SPECS/LICENSES-AND-NOTICES/LICENSES-MAP.md index 505933a6be7..18ff2e5a08d 100644 --- a/SPECS/LICENSES-AND-NOTICES/LICENSES-MAP.md +++ b/SPECS/LICENSES-AND-NOTICES/LICENSES-MAP.md @@ -9,7 +9,7 @@ The CBL-Mariner SPEC files originated from a variety of sources with varying lic | Fedora (Copyright Remi Collet) | [CC-BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/legalcode) | libmemcached-awesome
librabbitmq | | Fedora (ISC) | [ISC License](https://github.com/sarugaku/resolvelib/blob/main/LICENSE) | python-resolvelib | | Magnus Edenhill Open Source | [Magnus Edenhill Open Source BSD License](https://github.com/jemalloc/jemalloc/blob/dev/COPYING) | librdkafka | -| Microsoft | [Microsoft MIT License](/LICENSES-AND-NOTICES/LICENSE.md) | application-gateway-kubernetes-ingress
asc
azcopy
azure-iot-sdk-c
azure-storage-cpp
bazel
blobfuse
blobfuse2
bmon
bpftrace
ccache
cert-manager
cf-cli
check-restart
clamav
cloud-hypervisor
cmake-fedora
coredns
csi-driver-lvm
dcos-cli
debugedit
dejavu-fonts
distroless-packages
doxygen
dtc
elixir
espeak-ng
espeakup
flannel
fluent-bit
freefont
gflags
gh
go-md2man
grpc
grub2-efi-binary-signed
GSL
gtk-update-icon-cache
helm
hvloader
installkernel
intel-pf-bb-config
ivykis
jsonbuilder
jx
kata-containers-cc
kata-packages-uvm
keda
keras
kernel-azure-signed
kernel-hci-signed
kernel-mos-signed
kernel-signed
KeysInUse-OpenSSL
kpatch
kube-vip-cloud-provider
kubernetes
libacvp
libconfini
libconfuse
libgdiplus
libmaxminddb
libmetalink
libsafec
libuv
libxml++
livepatch-5.15.102.1-1.cm2
livepatch-5.15.102.1-3.cm2
livepatch-5.15.107.1-1.cm2
livepatch-5.15.110.1-1.cm2
livepatch-5.15.111.1-1.cm2
livepatch-5.15.112.1-1.cm2
livepatch-5.15.112.1-2.cm2
livepatch-5.15.116.1-1.cm2
livepatch-5.15.116.1-2.cm2
livepatch-5.15.122.1-2.cm2
livepatch-5.15.125.1-1.cm2
livepatch-5.15.125.1-2.cm2
livepatch-5.15.126.1-1.cm2
livepatch-5.15.131.1-1.cm2
livepatch-5.15.131.1-3.cm2
livepatch-5.15.94.1-1.cm2
livepatch-5.15.94.1-1.cm2-signed
livepatch-5.15.95.1-1.cm2
livepatch-5.15.98.1-1.cm2
livepatching
lld
lld16
local-path-provisioner
lsb-release
ltp
lttng-consume
mariner-release
mariner-repos
mariner-rpm-macros
maven3
mm-common
moby-buildx
moby-cli
moby-compose
moby-containerd
moby-containerd-cc
moby-engine
moby-runc
msgpack
ncompress
networkd-dispatcher
nlohmann-json
nmap
nmi
node-problem-detector
ntopng
opentelemetry-cpp
packer
pcaudiolib
pcre2
perl-Test-Warnings
perl-Text-Template
pigz
prebuilt-ca-certificates
prebuilt-ca-certificates-base
prometheus-adapter
python-cachetools
python-cherrypy
python-cstruct
python-execnet
python-google-pasta
python-libclang
python-logutils
python-nocasedict
python-opt-einsum
python-pecan
python-pyrpm
python-remoto
python-repoze-lru
python-routes
python-rsa
python-sphinxcontrib-websupport
python-tensorboard
python-tensorboard-plugin-wit
python-tensorflow-estimator
python-yamlloader
R
rabbitmq-server
reaper
rocksdb
rubygem-addressable
rubygem-asciidoctor
rubygem-async
rubygem-async-http
rubygem-async-io
rubygem-async-pool
rubygem-aws-eventstream
rubygem-aws-partitions
rubygem-aws-sdk-core
rubygem-aws-sdk-kms
rubygem-aws-sdk-s3
rubygem-aws-sdk-sqs
rubygem-aws-sigv4
rubygem-bigdecimal
rubygem-bindata
rubygem-concurrent-ruby
rubygem-connection_pool
rubygem-console
rubygem-cool.io
rubygem-deep_merge
rubygem-digest-crc
rubygem-elastic-transport
rubygem-elasticsearch
rubygem-elasticsearch-api
rubygem-eventmachine
rubygem-excon
rubygem-faraday
rubygem-faraday-em_http
rubygem-faraday-em_synchrony
rubygem-faraday-excon
rubygem-faraday-httpclient
rubygem-faraday-multipart
rubygem-faraday-net_http
rubygem-faraday-net_http_persistent
rubygem-faraday-patron
rubygem-faraday-rack
rubygem-faraday-retry
rubygem-ffi
rubygem-fiber-local
rubygem-fluent-config-regexp-type
rubygem-fluent-logger
rubygem-fluent-plugin-elasticsearch
rubygem-fluent-plugin-kafka
rubygem-fluent-plugin-prometheus
rubygem-fluent-plugin-prometheus_pushgateway
rubygem-fluent-plugin-record-modifier
rubygem-fluent-plugin-rewrite-tag-filter
rubygem-fluent-plugin-s3
rubygem-fluent-plugin-systemd
rubygem-fluent-plugin-td
rubygem-fluent-plugin-webhdfs
rubygem-fluent-plugin-windows-exporter
rubygem-fluentd
rubygem-hirb
rubygem-hocon
rubygem-hoe
rubygem-http_parser.rb
rubygem-httpclient
rubygem-io-event
rubygem-jmespath
rubygem-ltsv
rubygem-mini_portile2
rubygem-minitest
rubygem-mocha
rubygem-msgpack
rubygem-multi_json
rubygem-multipart-post
rubygem-net-http-persistent
rubygem-nio4r
rubygem-nokogiri
rubygem-oj
rubygem-parallel
rubygem-power_assert
rubygem-prometheus-client
rubygem-protocol-hpack
rubygem-protocol-http
rubygem-protocol-http1
rubygem-protocol-http2
rubygem-public_suffix
rubygem-puppet-resource_api
rubygem-rdiscount
rubygem-rdkafka
rubygem-rexml
rubygem-ruby-kafka
rubygem-ruby-progressbar
rubygem-rubyzip
rubygem-semantic_puppet
rubygem-serverengine
rubygem-sigdump
rubygem-strptime
rubygem-systemd-journal
rubygem-td
rubygem-td-client
rubygem-td-logger
rubygem-test-unit
rubygem-thor
rubygem-timers
rubygem-tzinfo
rubygem-tzinfo-data
rubygem-webhdfs
rubygem-webrick
rubygem-yajl-ruby
rubygem-zip-zip
sdbus-cpp
sgx-backwards-compatability
shim
shim-unsigned
shim-unsigned-aarch64
shim-unsigned-x64
skopeo
span-lite
sriov-network-device-plugin
swupdate
SymCrypt
SymCrypt-OpenSSL
tensorflow
terraform
tinyxml2
toml11
tracelogging
umoci
usrsctp
vala
verity-read-only-root
vnstat
zstd | +| Microsoft | [Microsoft MIT License](/LICENSES-AND-NOTICES/LICENSE.md) | application-gateway-kubernetes-ingress
asc
azcopy
azure-iot-sdk-c
azure-storage-cpp
bazel
blobfuse
blobfuse2
bmon
bpftrace
ccache
cert-manager
cf-cli
check-restart
clamav
cloud-hypervisor
cmake-fedora
coredns
csi-driver-lvm
dcos-cli
debugedit
dejavu-fonts
distroless-packages
doxygen
dtc
elixir
espeak-ng
espeakup
flannel
fluent-bit
freefont
gflags
gh
go-md2man
grpc
grub2-efi-binary-signed
GSL
gtk-update-icon-cache
helm
hvloader
hvloader-signed
installkernel
intel-pf-bb-config
ivykis
jsonbuilder
jx
kata-containers-cc
kata-packages-uvm
keda
keras
kernel-azure-signed
kernel-hci-signed
kernel-mos-signed
kernel-mshv-signed
kernel-signed
KeysInUse-OpenSSL
kpatch
kube-vip-cloud-provider
kubernetes
libacvp
libconfini
libconfuse
libgdiplus
libmaxminddb
libmetalink
libsafec
libuv
libxml++
livepatch-5.15.102.1-1.cm2
livepatch-5.15.102.1-3.cm2
livepatch-5.15.107.1-1.cm2
livepatch-5.15.110.1-1.cm2
livepatch-5.15.111.1-1.cm2
livepatch-5.15.112.1-1.cm2
livepatch-5.15.112.1-2.cm2
livepatch-5.15.116.1-1.cm2
livepatch-5.15.116.1-2.cm2
livepatch-5.15.122.1-2.cm2
livepatch-5.15.125.1-1.cm2
livepatch-5.15.125.1-2.cm2
livepatch-5.15.126.1-1.cm2
livepatch-5.15.131.1-1.cm2
livepatch-5.15.131.1-3.cm2
livepatch-5.15.94.1-1.cm2
livepatch-5.15.94.1-1.cm2-signed
livepatch-5.15.95.1-1.cm2
livepatch-5.15.98.1-1.cm2
livepatching
lld
lld16
local-path-provisioner
lsb-release
ltp
lttng-consume
mariner-release
mariner-repos
mariner-rpm-macros
maven3
mm-common
moby-buildx
moby-cli
moby-compose
moby-containerd
moby-containerd-cc
moby-engine
moby-runc
msgpack
ncompress
networkd-dispatcher
nlohmann-json
nmap
nmi
node-problem-detector
ntopng
opentelemetry-cpp
osslsigncode
packer
pcaudiolib
pcre2
perl-Test-Warnings
perl-Text-Template
pigz
prebuilt-ca-certificates
prebuilt-ca-certificates-base
prometheus-adapter
python-cachetools
python-cherrypy
python-cstruct
python-execnet
python-google-pasta
python-libclang
python-logutils
python-nocasedict
python-opt-einsum
python-pecan
python-pyrpm
python-remoto
python-repoze-lru
python-routes
python-rsa
python-sphinxcontrib-websupport
python-tensorboard
python-tensorboard-plugin-wit
python-tensorflow-estimator
python-yamlloader
R
rabbitmq-server
reaper
rocksdb
rubygem-addressable
rubygem-asciidoctor
rubygem-async
rubygem-async-http
rubygem-async-io
rubygem-async-pool
rubygem-aws-eventstream
rubygem-aws-partitions
rubygem-aws-sdk-core
rubygem-aws-sdk-kms
rubygem-aws-sdk-s3
rubygem-aws-sdk-sqs
rubygem-aws-sigv4
rubygem-bigdecimal
rubygem-bindata
rubygem-concurrent-ruby
rubygem-connection_pool
rubygem-console
rubygem-cool.io
rubygem-deep_merge
rubygem-digest-crc
rubygem-elastic-transport
rubygem-elasticsearch
rubygem-elasticsearch-api
rubygem-eventmachine
rubygem-excon
rubygem-faraday
rubygem-faraday-em_http
rubygem-faraday-em_synchrony
rubygem-faraday-excon
rubygem-faraday-httpclient
rubygem-faraday-multipart
rubygem-faraday-net_http
rubygem-faraday-net_http_persistent
rubygem-faraday-patron
rubygem-faraday-rack
rubygem-faraday-retry
rubygem-ffi
rubygem-fiber-local
rubygem-fluent-config-regexp-type
rubygem-fluent-logger
rubygem-fluent-plugin-elasticsearch
rubygem-fluent-plugin-kafka
rubygem-fluent-plugin-prometheus
rubygem-fluent-plugin-prometheus_pushgateway
rubygem-fluent-plugin-record-modifier
rubygem-fluent-plugin-rewrite-tag-filter
rubygem-fluent-plugin-s3
rubygem-fluent-plugin-systemd
rubygem-fluent-plugin-td
rubygem-fluent-plugin-webhdfs
rubygem-fluent-plugin-windows-exporter
rubygem-fluentd
rubygem-hirb
rubygem-hocon
rubygem-hoe
rubygem-http_parser.rb
rubygem-httpclient
rubygem-io-event
rubygem-jmespath
rubygem-ltsv
rubygem-mini_portile2
rubygem-minitest
rubygem-mocha
rubygem-msgpack
rubygem-multi_json
rubygem-multipart-post
rubygem-net-http-persistent
rubygem-nio4r
rubygem-nokogiri
rubygem-oj
rubygem-parallel
rubygem-power_assert
rubygem-prometheus-client
rubygem-protocol-hpack
rubygem-protocol-http
rubygem-protocol-http1
rubygem-protocol-http2
rubygem-public_suffix
rubygem-puppet-resource_api
rubygem-rdiscount
rubygem-rdkafka
rubygem-rexml
rubygem-ruby-kafka
rubygem-ruby-progressbar
rubygem-rubyzip
rubygem-semantic_puppet
rubygem-serverengine
rubygem-sigdump
rubygem-strptime
rubygem-systemd-journal
rubygem-td
rubygem-td-client
rubygem-td-logger
rubygem-test-unit
rubygem-thor
rubygem-timers
rubygem-tzinfo
rubygem-tzinfo-data
rubygem-webhdfs
rubygem-webrick
rubygem-yajl-ruby
rubygem-zip-zip
sdbus-cpp
sgx-backwards-compatability
shim
shim-unsigned
shim-unsigned-aarch64
shim-unsigned-x64
skopeo
span-lite
sriov-network-device-plugin
swupdate
SymCrypt
SymCrypt-OpenSSL
tensorflow
terraform
tinyxml2
toml11
tracelogging
umoci
usrsctp
vala
verity-read-only-root
vnstat
zstd | | Netplan source | [GPLv3](https://github.com/canonical/netplan/blob/main/COPYING) | netplan | | Numad source | [LGPLv2 License](https://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt) | numad | | NVIDIA | [ASL 2.0 License and spec specific licenses](http://www.apache.org/licenses/LICENSE-2.0) | knem
libnvidia-container
mlnx-ofa_kernel
mlnx-tools
mlx-bootctl
nvidia-container-runtime
nvidia-container-toolkit
nvidia-docker2
ofed-scripts
perftest | diff --git a/SPECS/LICENSES-AND-NOTICES/data/licenses.json b/SPECS/LICENSES-AND-NOTICES/data/licenses.json index 32717e2e353..59be0e84aa0 100644 --- a/SPECS/LICENSES-AND-NOTICES/data/licenses.json +++ b/SPECS/LICENSES-AND-NOTICES/data/licenses.json @@ -2186,6 +2186,7 @@ "gtk-update-icon-cache", "helm", "hvloader", + "hvloader-signed", "installkernel", "intel-pf-bb-config", "ivykis", @@ -2198,6 +2199,7 @@ "kernel-azure-signed", "kernel-hci-signed", "kernel-mos-signed", + "kernel-mshv-signed", "kernel-signed", "KeysInUse-OpenSSL", "kpatch", @@ -2259,6 +2261,7 @@ "node-problem-detector", "ntopng", "opentelemetry-cpp", + "osslsigncode", "packer", "pcaudiolib", "pcre2", diff --git a/SPECS/bind/bind.signatures.json b/SPECS/bind/bind.signatures.json index f91b7d2dec4..f6261881922 100644 --- a/SPECS/bind/bind.signatures.json +++ b/SPECS/bind/bind.signatures.json @@ -14,6 +14,6 @@ "named.sysconfig": "8f8eff846667b7811358e289e9fe594de17d0e47f2b8cebf7840ad8db7f34816", "setup-named-chroot.sh": "786fbc88c7929fadf217cf2286f2eb03b6fba14843e5da40ad43c0022dd71c3a", "setup-named-softhsm.sh": "3b243d9e48577acb95a08ae5dd7288c5eec4830bc02bd29b1f1724c497d12864", - "bind-9.16.44.tar.xz": "cfaa953c36d5ca42d9584fcf9653d07c85527b59687e7c4d4cb8071272db6754" + "bind-9.16.48.tar.xz": "8d3814582348f90dead1ad410b1019094cd399d3d83930abebb2b3b1eb0b2bbb" } } \ No newline at end of file diff --git a/SPECS/bind/bind.spec b/SPECS/bind/bind.spec index 0a2c6f939ab..44d31175ab9 100644 --- a/SPECS/bind/bind.spec +++ b/SPECS/bind/bind.spec @@ -9,7 +9,7 @@ Summary: Domain Name System software Name: bind -Version: 9.16.44 +Version: 9.16.48 Release: 1%{?dist} License: ISC Vendor: Microsoft Corporation @@ -613,6 +613,9 @@ fi; %{_mandir}/man8/named-nzd2nzf.8* %changelog +* Wed Feb 28 2024 CBL-Mariner Servicing Account - 9.16.48-1 +- Auto-upgrade to 9.16.48 - Fix CVE-2023-50387 + * Wed Sep 27 2023 CBL-Mariner Servicing Account - 9.16.44-1 - Auto-upgrade to 9.16.44 - Fix CVE-2023-3341 diff --git a/SPECS/dnsmasq/CVE-2023-28450.patch b/SPECS/dnsmasq/CVE-2023-28450.patch deleted file mode 100644 index d912f42bdd6..00000000000 --- a/SPECS/dnsmasq/CVE-2023-28450.patch +++ /dev/null @@ -1,44 +0,0 @@ -From 1e5db66df5728e22b1a42dbe2654d9d52d3d97cd Mon Sep 17 00:00:00 2001 -From: Simon Kelley -Date: Wed, 8 Mar 2023 03:37:46 +0530 -Subject: [PATCH] Set the default maximum DNS UDP packet size to 1232 - -Backported by @rohitrawat from upstream on 2023-03-23 -Applies on v2.89 cleanly - -Signed-off-by: Rohit Rawat ---- - man/dnsmasq.8 | 3 ++- - src/config.h | 2 +- - 2 files changed, 3 insertions(+), 2 deletions(-) - -diff --git a/man/dnsmasq.8 b/man/dnsmasq.8 -index 3d1d96a..e21bd09 100644 ---- a/man/dnsmasq.8 -+++ b/man/dnsmasq.8 -@@ -183,7 +183,8 @@ to zero completely disables DNS function, leaving only DHCP and/or TFTP. - .TP - .B \-P, --edns-packet-max= - Specify the largest EDNS.0 UDP packet which is supported by the DNS --forwarder. Defaults to 4096, which is the RFC5625-recommended size. -+forwarder. Defaults to 1232, which is the recommended size following the -+DNS flag day in 2020. Only increase if you know what you are doing. 
- .TP - .B \-Q, --query-port= - Send outbound DNS queries from, and listen for their replies on, the -diff --git a/src/config.h b/src/config.h -index 1e7b30f..37b374e 100644 ---- a/src/config.h -+++ b/src/config.h -@@ -19,7 +19,7 @@ - #define CHILD_LIFETIME 150 /* secs 'till terminated (RFC1035 suggests > 120s) */ - #define TCP_MAX_QUERIES 100 /* Maximum number of queries per incoming TCP connection */ - #define TCP_BACKLOG 32 /* kernel backlog limit for TCP connections */ --#define EDNS_PKTSZ 4096 /* default max EDNS.0 UDP packet from RFC5625 */ -+#define EDNS_PKTSZ 1232 /* default max EDNS.0 UDP packet from from /dnsflagday.net/2020 */ - #define SAFE_PKTSZ 1232 /* "go anywhere" UDP packet size, see https://dnsflagday.net/2020/ */ - #define KEYBLOCK_LEN 40 /* choose to minimise fragmentation when storing DNSSEC keys */ - #define DNSSEC_WORK 50 /* Max number of queries to validate one question */ --- -2.17.1 - diff --git a/SPECS/dnsmasq/dnsmasq.signatures.json b/SPECS/dnsmasq/dnsmasq.signatures.json index 625e56ff115..a9b1befc936 100644 --- a/SPECS/dnsmasq/dnsmasq.signatures.json +++ b/SPECS/dnsmasq/dnsmasq.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "dnsmasq-2.89.tar.xz": "02bd230346cf0b9d5909f5e151df168b2707103785eb616b56685855adebb609" + "dnsmasq-2.90.tar.xz": "8e50309bd837bfec9649a812e066c09b6988b73d749b7d293c06c57d46a109e4" } } \ No newline at end of file diff --git a/SPECS/dnsmasq/dnsmasq.spec b/SPECS/dnsmasq/dnsmasq.spec index 2d2a8f6f434..9a634b21b0a 100644 --- a/SPECS/dnsmasq/dnsmasq.spec +++ b/SPECS/dnsmasq/dnsmasq.spec @@ -1,7 +1,7 @@ Summary: DNS proxy with integrated DHCP server Name: dnsmasq -Version: 2.89 -Release: 2%{?dist} +Version: 2.90 +Release: 1%{?dist} License: GPLv2 or GPLv3 Group: System Environment/Daemons URL: https://www.thekelleys.org.uk/dnsmasq/ @@ -9,7 +9,6 @@ Source0: https://www.thekelleys.org.uk/%{name}/%{name}-%{version}.tar.xz Vendor: Microsoft Corporation Distribution: Mariner Patch0: fix-missing-ioctl-SIOCGSTAMP-add-sockios-header-linux-5.2.patch -Patch1: CVE-2023-28450.patch BuildRequires: kernel-headers @@ -67,6 +66,9 @@ EOF %config /usr/share/dnsmasq/trust-anchors.conf %changelog +* Wed Feb 28 2024 CBL-Mariner Servicing Account - 2.90-1 +- Auto-upgrade to 2.90 - Fix CVE-2023-50387 + * Thu Mar 23 2023 Rohit Rawat - 2.89-2 - Patch CVE-2023-28450 diff --git a/SPECS/grub2/grub2.signatures.json b/SPECS/grub2/grub2.signatures.json index 388131cf790..ea51ec599fc 100644 --- a/SPECS/grub2/grub2.signatures.json +++ b/SPECS/grub2/grub2.signatures.json @@ -3,6 +3,6 @@ "gnulib-d271f868a8df9bbec29049d01e056481b7a1a263.tar.gz": "4e23415ae2977ffca15e07419ceff3e9334d0369eafc9e7ae2578f8dd9a4839c", "grub-2.06.tar.gz": "660eaa2355a4045d8d0cdb5765169d1cad9912ec07873b86c9c6d55dbaa9dfca", "macros.grub2": "b03f6f713601214406971de53538dfc25136bf836f09a663eaffc4332a72c38b", - "sbat.csv.in": "11106562bccc09244f573be85ba2fe07cd1c830ef5bf3cc5e83a194c717880a5" + "sbat.csv.in": "e024eeb72dea5016d3494931d236e1b17f7db34f8d07676c942b6af0fda6fb8d" } -} \ No newline at end of file +} diff --git a/SPECS/grub2/grub2.spec b/SPECS/grub2/grub2.spec index 4cb76549e09..11747396252 100644 --- a/SPECS/grub2/grub2.spec +++ b/SPECS/grub2/grub2.spec @@ -6,7 +6,7 @@ Summary: GRand Unified Bootloader Name: grub2 Version: 2.06 -Release: 12%{?dist} +Release: 13%{?dist} License: GPLv3+ Vendor: Microsoft Corporation Distribution: Mariner @@ -85,6 +85,24 @@ Patch0199: 0199-fs-f2fs-Do-not-copy-file-names-that-are-too-long.patch Patch0200: 
0200-fs-btrfs-Fix-several-fuzz-issues-with-invalid-dir-it.patch Patch0201: 0201-fs-btrfs-Fix-more-ASAN-and-SEGV-issues-found-with-fu.patch Patch0202: 0202-fs-btrfs-Fix-more-fuzz-issues-related-to-chunks.patch +# Required to reach SBAT 3 +Patch: sbat-3-0001-font-Reject-glyphs-exceeds-font-max_glyph_width-or-f.patch +Patch: sbat-3-0004-font-Remove-grub_font_dup_glyph.patch +Patch: sbat-3-0005-font-Fix-integer-overflow-in-ensure_comb_space.patch +Patch: sbat-3-0006-font-Fix-integer-overflow-in-BMP-index.patch +Patch: sbat-3-0007-font-Fix-integer-underflow-in-binary-search-of-char-.patch +Patch: sbat-3-0008-kern-efi-sb-Enforce-verification-of-font-files.patch +Patch: sbat-3-0009-fbutil-Fix-integer-overflow.patch +Patch: sbat-3-0011-font-Harden-grub_font_blit_glyph-and-grub_font_blit_.patch +Patch: sbat-3-0012-font-Assign-null_font-to-glyphs-in-ascii_font_glyph.patch +Patch: sbat-3-0013-normal-charset-Fix-an-integer-overflow-in-grub_unico.patch +# Required to reach SBAT 4 +Patch: sbat-4-0001-fs-ntfs-Fix-an-OOB-write-when-parsing-the-ATTRIBUTE_.patch +Patch: sbat-4-0002-fs-ntfs-Fix-an-OOB-read-when-reading-data-from-the-r.patch +Patch: sbat-4-0003-fs-ntfs-Fix-an-OOB-read-when-parsing-directory-entri.patch +Patch: sbat-4-0004-fs-ntfs-Fix-an-OOB-read-when-parsing-bitmaps-for-ind.patch +Patch: sbat-4-0005-fs-ntfs-Fix-an-OOB-read-when-parsing-a-volume-label.patch +Patch: sbat-4-0006-fs-ntfs-Make-code-more-readable.patch BuildRequires: autoconf BuildRequires: device-mapper-devel BuildRequires: python3 @@ -387,6 +405,9 @@ cp $GRUB_PXE_MODULE_SOURCE $EFI_BOOT_DIR/$GRUB_PXE_MODULE_NAME %{_sysconfdir}/default/grub.d %changelog +* Thu Feb 15 2024 Dan Streetman - 2.06-13 +- update grub to sbat 4 + * Wed Oct 18 2023 Gary Swalling - 2.06-12 - CVE-2021-3695 CVE-2021-3696 CVE-2021-3697 CVE-2022-28733 CVE-2022-28734 CVE-2022-28735 CVE-2022-28736 and increment SBAT level to 2 diff --git a/SPECS/grub2/sbat-3-0001-font-Reject-glyphs-exceeds-font-max_glyph_width-or-f.patch b/SPECS/grub2/sbat-3-0001-font-Reject-glyphs-exceeds-font-max_glyph_width-or-f.patch new file mode 100644 index 00000000000..477a7b1b226 --- /dev/null +++ b/SPECS/grub2/sbat-3-0001-font-Reject-glyphs-exceeds-font-max_glyph_width-or-f.patch @@ -0,0 +1,33 @@ +From f6b6236077f059e64ee315f2d7acb8fa4eda87c5 Mon Sep 17 00:00:00 2001 +From: Zhang Boyang +Date: Wed, 3 Aug 2022 19:45:33 +0800 +Subject: [PATCH 01/13] font: Reject glyphs exceeds font->max_glyph_width or + font->max_glyph_height + +Check glyph's width and height against limits specified in font's +metadata. Reject the glyph (and font) if such limits are exceeded. 
+ +Signed-off-by: Zhang Boyang +Reviewed-by: Daniel Kiper +--- + grub-core/font/font.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/grub-core/font/font.c b/grub-core/font/font.c +index 42189c325..756ca0abf 100644 +--- a/grub-core/font/font.c ++++ b/grub-core/font/font.c +@@ -760,7 +760,9 @@ grub_font_get_glyph_internal (grub_font_t font, grub_uint32_t code) + || read_be_uint16 (font->file, &height) != 0 + || read_be_int16 (font->file, &xoff) != 0 + || read_be_int16 (font->file, &yoff) != 0 +- || read_be_int16 (font->file, &dwidth) != 0) ++ || read_be_int16 (font->file, &dwidth) != 0 ++ || width > font->max_char_width ++ || height > font->max_char_height) + { + remove_font (font); + return 0; +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-3-0004-font-Remove-grub_font_dup_glyph.patch b/SPECS/grub2/sbat-3-0004-font-Remove-grub_font_dup_glyph.patch new file mode 100644 index 00000000000..dc68acedc4d --- /dev/null +++ b/SPECS/grub2/sbat-3-0004-font-Remove-grub_font_dup_glyph.patch @@ -0,0 +1,42 @@ +From c51292274ded3259eb04c2f1c8d253ffbdb5216a Mon Sep 17 00:00:00 2001 +From: Zhang Boyang +Date: Fri, 5 Aug 2022 02:13:29 +0800 +Subject: [PATCH 04/13] font: Remove grub_font_dup_glyph() + +Remove grub_font_dup_glyph() since nobody is using it since 2013, and +I'm too lazy to fix the integer overflow problem in it. + +Signed-off-by: Zhang Boyang +Reviewed-by: Daniel Kiper +--- + grub-core/font/font.c | 14 -------------- + 1 file changed, 14 deletions(-) + +diff --git a/grub-core/font/font.c b/grub-core/font/font.c +index e6548892f..a8576ffec 100644 +--- a/grub-core/font/font.c ++++ b/grub-core/font/font.c +@@ -1055,20 +1055,6 @@ grub_font_get_glyph_with_fallback (grub_font_t font, grub_uint32_t code) + return best_glyph; + } + +-#if 0 +-static struct grub_font_glyph * +-grub_font_dup_glyph (struct grub_font_glyph *glyph) +-{ +- static struct grub_font_glyph *ret; +- ret = grub_malloc (sizeof (*ret) + (glyph->width * glyph->height + 7) / 8); +- if (!ret) +- return NULL; +- grub_memcpy (ret, glyph, sizeof (*ret) +- + (glyph->width * glyph->height + 7) / 8); +- return ret; +-} +-#endif +- + /* FIXME: suboptimal. */ + static void + grub_font_blit_glyph (struct grub_font_glyph *target, +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-3-0005-font-Fix-integer-overflow-in-ensure_comb_space.patch b/SPECS/grub2/sbat-3-0005-font-Fix-integer-overflow-in-ensure_comb_space.patch new file mode 100644 index 00000000000..2ba00bb5988 --- /dev/null +++ b/SPECS/grub2/sbat-3-0005-font-Fix-integer-overflow-in-ensure_comb_space.patch @@ -0,0 +1,48 @@ +From 23843fe8947e4da955a05ad3d1858725bfcb56c8 Mon Sep 17 00:00:00 2001 +From: Zhang Boyang +Date: Fri, 5 Aug 2022 02:27:05 +0800 +Subject: [PATCH 05/13] font: Fix integer overflow in ensure_comb_space() + +In fact it can't overflow at all because glyph_id->ncomb is only 8-bit +wide. But let's keep safe if somebody changes the width of glyph_id->ncomb +in the future. This patch also fixes the inconsistency between +render_max_comb_glyphs and render_combining_glyphs when grub_malloc() +returns NULL. 
+ +Signed-off-by: Zhang Boyang +Reviewed-by: Daniel Kiper +--- + grub-core/font/font.c | 14 +++++++++----- + 1 file changed, 9 insertions(+), 5 deletions(-) + +diff --git a/grub-core/font/font.c b/grub-core/font/font.c +index a8576ffec..9e3e0a94e 100644 +--- a/grub-core/font/font.c ++++ b/grub-core/font/font.c +@@ -1468,14 +1468,18 @@ ensure_comb_space (const struct grub_unicode_glyph *glyph_id) + if (glyph_id->ncomb <= render_max_comb_glyphs) + return; + +- render_max_comb_glyphs = 2 * glyph_id->ncomb; +- if (render_max_comb_glyphs < 8) ++ if (grub_mul (glyph_id->ncomb, 2, &render_max_comb_glyphs)) ++ render_max_comb_glyphs = 0; ++ if (render_max_comb_glyphs > 0 && render_max_comb_glyphs < 8) + render_max_comb_glyphs = 8; + grub_free (render_combining_glyphs); +- render_combining_glyphs = grub_malloc (render_max_comb_glyphs +- * sizeof (render_combining_glyphs[0])); ++ render_combining_glyphs = (render_max_comb_glyphs > 0) ? ++ grub_calloc (render_max_comb_glyphs, sizeof (render_combining_glyphs[0])) : NULL; + if (!render_combining_glyphs) +- grub_errno = 0; ++ { ++ render_max_comb_glyphs = 0; ++ grub_errno = GRUB_ERR_NONE; ++ } + } + + int +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-3-0006-font-Fix-integer-overflow-in-BMP-index.patch b/SPECS/grub2/sbat-3-0006-font-Fix-integer-overflow-in-BMP-index.patch new file mode 100644 index 00000000000..07011ad2d21 --- /dev/null +++ b/SPECS/grub2/sbat-3-0006-font-Fix-integer-overflow-in-BMP-index.patch @@ -0,0 +1,65 @@ +From b9396daf1c2e3cdc0a1e69b056852e0769fb24de Mon Sep 17 00:00:00 2001 +From: Zhang Boyang +Date: Mon, 15 Aug 2022 02:04:58 +0800 +Subject: [PATCH 06/13] font: Fix integer overflow in BMP index + +The BMP index (font->bmp_idx) is designed as a reverse lookup table of +char entries (font->char_index), in order to speed up lookups for BMP +chars (i.e. code < 0x10000). The values in BMP index are the subscripts +of the corresponding char entries, stored in grub_uint16_t, while 0xffff +means not found. + +This patch fixes the problem of large subscript truncated to grub_uint16_t, +leading BMP index to return wrong char entry or report false miss. The +code now checks for bounds and uses BMP index as a hint, and fallbacks +to binary-search if necessary. + +On the occasion add a comment about BMP index is initialized to 0xffff. + +Signed-off-by: Zhang Boyang +Reviewed-by: Daniel Kiper +--- + grub-core/font/font.c | 13 +++++++++---- + 1 file changed, 9 insertions(+), 4 deletions(-) + +diff --git a/grub-core/font/font.c b/grub-core/font/font.c +index 9e3e0a94e..e4cb0d867 100644 +--- a/grub-core/font/font.c ++++ b/grub-core/font/font.c +@@ -300,6 +300,8 @@ load_font_index (grub_file_t file, grub_uint32_t sect_length, struct + font->bmp_idx = grub_malloc (0x10000 * sizeof (grub_uint16_t)); + if (!font->bmp_idx) + return 1; ++ ++ /* Init the BMP index array to 0xffff. */ + grub_memset (font->bmp_idx, 0xff, 0x10000 * sizeof (grub_uint16_t)); + + +@@ -328,7 +330,7 @@ load_font_index (grub_file_t file, grub_uint32_t sect_length, struct + return 1; + } + +- if (entry->code < 0x10000) ++ if (entry->code < 0x10000 && i < 0xffff) + font->bmp_idx[entry->code] = i; + + last_code = entry->code; +@@ -696,9 +698,12 @@ find_glyph (const grub_font_t font, grub_uint32_t code) + /* Use BMP index if possible. 
*/ + if (code < 0x10000 && font->bmp_idx) + { +- if (font->bmp_idx[code] == 0xffff) +- return 0; +- return &table[font->bmp_idx[code]]; ++ if (font->bmp_idx[code] < 0xffff) ++ return &table[font->bmp_idx[code]]; ++ /* ++ * When we are here then lookup in BMP index result in miss, ++ * fallthough to binary-search. ++ */ + } + + /* Do a binary search in `char_index', which is ordered by code point. */ +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-3-0007-font-Fix-integer-underflow-in-binary-search-of-char-.patch b/SPECS/grub2/sbat-3-0007-font-Fix-integer-underflow-in-binary-search-of-char-.patch new file mode 100644 index 00000000000..8b5da8908ec --- /dev/null +++ b/SPECS/grub2/sbat-3-0007-font-Fix-integer-underflow-in-binary-search-of-char-.patch @@ -0,0 +1,86 @@ +From 1d2015598cc7a9fca4b39186273e3519a88e80c7 Mon Sep 17 00:00:00 2001 +From: Zhang Boyang +Date: Sun, 14 Aug 2022 18:09:38 +0800 +Subject: [PATCH 07/13] font: Fix integer underflow in binary search of char + index + +If search target is less than all entries in font->index then "hi" +variable is set to -1, which translates to SIZE_MAX and leads to errors. + +This patch fixes the problem by replacing the entire binary search code +with the libstdc++'s std::lower_bound() implementation. + +Signed-off-by: Zhang Boyang +Reviewed-by: Daniel Kiper +--- + grub-core/font/font.c | 40 ++++++++++++++++++++++------------------ + 1 file changed, 22 insertions(+), 18 deletions(-) + +diff --git a/grub-core/font/font.c b/grub-core/font/font.c +index e4cb0d867..abd412a5e 100644 +--- a/grub-core/font/font.c ++++ b/grub-core/font/font.c +@@ -688,12 +688,12 @@ read_be_int16 (grub_file_t file, grub_int16_t * value) + static inline struct char_index_entry * + find_glyph (const grub_font_t font, grub_uint32_t code) + { +- struct char_index_entry *table; +- grub_size_t lo; +- grub_size_t hi; +- grub_size_t mid; ++ struct char_index_entry *table, *first, *end; ++ grub_size_t len; + + table = font->char_index; ++ if (table == NULL) ++ return NULL; + + /* Use BMP index if possible. */ + if (code < 0x10000 && font->bmp_idx) +@@ -706,25 +706,29 @@ find_glyph (const grub_font_t font, grub_uint32_t code) + */ + } + +- /* Do a binary search in `char_index', which is ordered by code point. */ +- lo = 0; +- hi = font->num_chars - 1; +- +- if (!table) +- return 0; ++ /* ++ * Do a binary search in char_index which is ordered by code point. ++ * The code below is the same as libstdc++'s std::lower_bound(). ++ */ ++ first = table; ++ len = font->num_chars; ++ end = first + len; + +- while (lo <= hi) ++ while (len > 0) + { +- mid = lo + (hi - lo) / 2; +- if (code < table[mid].code) +- hi = mid - 1; +- else if (code > table[mid].code) +- lo = mid + 1; ++ grub_size_t half = len >> 1; ++ struct char_index_entry *middle = first + half; ++ ++ if (middle->code < code) ++ { ++ first = middle + 1; ++ len = len - half - 1; ++ } + else +- return &table[mid]; ++ len = half; + } + +- return 0; ++ return (first < end && first->code == code) ? first : NULL; + } + + /* Get a glyph for the Unicode character CODE in FONT. 
The glyph is loaded +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-3-0008-kern-efi-sb-Enforce-verification-of-font-files.patch b/SPECS/grub2/sbat-3-0008-kern-efi-sb-Enforce-verification-of-font-files.patch new file mode 100644 index 00000000000..784f6c36505 --- /dev/null +++ b/SPECS/grub2/sbat-3-0008-kern-efi-sb-Enforce-verification-of-font-files.patch @@ -0,0 +1,54 @@ +From 93a786a00163e50c29f0394df198518617e1c9a5 Mon Sep 17 00:00:00 2001 +From: Zhang Boyang +Date: Sun, 14 Aug 2022 15:51:54 +0800 +Subject: [PATCH 08/13] kern/efi/sb: Enforce verification of font files + +As a mitigation and hardening measure enforce verification of font +files. Then only trusted font files can be load. This will reduce the +attack surface at cost of losing the ability of end-users to customize +fonts if e.g. UEFI Secure Boot is enabled. Vendors can always customize +fonts because they have ability to pack fonts into their GRUB bundles. + +This goal is achieved by: + + * Removing GRUB_FILE_TYPE_FONT from shim lock verifier's + skip-verification list. + + * Adding GRUB_FILE_TYPE_FONT to lockdown verifier's defer-auth list, + so font files must be verified by a verifier before they can be loaded. + +Suggested-by: Daniel Kiper +Signed-off-by: Zhang Boyang +Reviewed-by: Daniel Kiper +--- + grub-core/kern/efi/sb.c | 1 - + grub-core/kern/lockdown.c | 1 + + 2 files changed, 1 insertion(+), 1 deletion(-) + +diff --git a/grub-core/kern/efi/sb.c b/grub-core/kern/efi/sb.c +index 89c4bb3fd..db42c2539 100644 +--- a/grub-core/kern/efi/sb.c ++++ b/grub-core/kern/efi/sb.c +@@ -145,7 +145,6 @@ shim_lock_verifier_init (grub_file_t io __attribute__ ((unused)), + case GRUB_FILE_TYPE_PRINT_BLOCKLIST: + case GRUB_FILE_TYPE_TESTLOAD: + case GRUB_FILE_TYPE_GET_SIZE: +- case GRUB_FILE_TYPE_FONT: + case GRUB_FILE_TYPE_ZFS_ENCRYPTION_KEY: + case GRUB_FILE_TYPE_CAT: + case GRUB_FILE_TYPE_HEXCAT: +diff --git a/grub-core/kern/lockdown.c b/grub-core/kern/lockdown.c +index 0bc70fd42..af6d493cd 100644 +--- a/grub-core/kern/lockdown.c ++++ b/grub-core/kern/lockdown.c +@@ -51,6 +51,7 @@ lockdown_verifier_init (grub_file_t io __attribute__ ((unused)), + case GRUB_FILE_TYPE_EFI_CHAINLOADED_IMAGE: + case GRUB_FILE_TYPE_ACPI_TABLE: + case GRUB_FILE_TYPE_DEVICE_TREE_IMAGE: ++ case GRUB_FILE_TYPE_FONT: + *flags = GRUB_VERIFY_FLAGS_DEFER_AUTH; + + /* Fall through. */ +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-3-0009-fbutil-Fix-integer-overflow.patch b/SPECS/grub2/sbat-3-0009-fbutil-Fix-integer-overflow.patch new file mode 100644 index 00000000000..86ebb790d0d --- /dev/null +++ b/SPECS/grub2/sbat-3-0009-fbutil-Fix-integer-overflow.patch @@ -0,0 +1,85 @@ +From 1eac01c147b4d85d2ec4a7e5671fa4345f2e8549 Mon Sep 17 00:00:00 2001 +From: Zhang Boyang +Date: Tue, 6 Sep 2022 03:03:21 +0800 +Subject: [PATCH 09/13] fbutil: Fix integer overflow + +Expressions like u64 = u32 * u32 are unsafe because their products are +truncated to u32 even if left hand side is u64. This patch fixes all +problems like that one in fbutil. + +To get right result not only left hand side have to be u64 but it's also +necessary to cast at least one of the operands of all leaf operators of +right hand side to u64, e.g. u64 = u32 * u32 + u32 * u32 should be +u64 = (u64)u32 * u32 + (u64)u32 * u32. + +For 1-bit bitmaps grub_uint64_t have to be used. It's safe because any +combination of values in (grub_uint64_t)u32 * u32 + u32 expression will +not overflow grub_uint64_t. + +Other expressions like ptr + u32 * u32 + u32 * u32 are also vulnerable. 
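The truncation is easy to reproduce in isolation. In the compilable sketch below (the values are illustrative), the unchecked product wraps modulo 2^32 before the widening assignment takes place:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t w = 70000, h = 70000;       /* the product needs 33 bits */
    uint64_t bad  = w * h;               /* 32-bit multiply, then widened */
    uint64_t good = (uint64_t) w * h;    /* full 64-bit multiply */
    printf("bad=%llu good=%llu\n",
           (unsigned long long) bad,     /* 605032704  (wrapped) */
           (unsigned long long) good);   /* 4900000000 (correct) */
    return 0;
}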
+They should be ptr + (grub_addr_t)u32 * u32 + (grub_addr_t)u32 * u32. + +This patch also adds a comment to grub_video_fb_get_video_ptr() which +says it's arguments must be valid and no sanity check is performed +(like its siblings in grub-core/video/fb/fbutil.c). + +Signed-off-by: Zhang Boyang +Reviewed-by: Daniel Kiper +--- + grub-core/video/fb/fbutil.c | 4 ++-- + include/grub/fbutil.h | 13 +++++++++---- + 2 files changed, 11 insertions(+), 6 deletions(-) + +diff --git a/grub-core/video/fb/fbutil.c b/grub-core/video/fb/fbutil.c +index b98bb51fe..25ef39f47 100644 +--- a/grub-core/video/fb/fbutil.c ++++ b/grub-core/video/fb/fbutil.c +@@ -67,7 +67,7 @@ get_pixel (struct grub_video_fbblit_info *source, + case 1: + if (source->mode_info->blit_format == GRUB_VIDEO_BLIT_FORMAT_1BIT_PACKED) + { +- int bit_index = y * source->mode_info->width + x; ++ grub_uint64_t bit_index = (grub_uint64_t) y * source->mode_info->width + x; + grub_uint8_t *ptr = source->data + bit_index / 8; + int bit_pos = 7 - bit_index % 8; + color = (*ptr >> bit_pos) & 0x01; +@@ -138,7 +138,7 @@ set_pixel (struct grub_video_fbblit_info *source, + case 1: + if (source->mode_info->blit_format == GRUB_VIDEO_BLIT_FORMAT_1BIT_PACKED) + { +- int bit_index = y * source->mode_info->width + x; ++ grub_uint64_t bit_index = (grub_uint64_t) y * source->mode_info->width + x; + grub_uint8_t *ptr = source->data + bit_index / 8; + int bit_pos = 7 - bit_index % 8; + *ptr = (*ptr & ~(1 << bit_pos)) | ((color & 0x01) << bit_pos); +diff --git a/include/grub/fbutil.h b/include/grub/fbutil.h +index 4205eb917..78a1ab3b4 100644 +--- a/include/grub/fbutil.h ++++ b/include/grub/fbutil.h +@@ -31,14 +31,19 @@ struct grub_video_fbblit_info + grub_uint8_t *data; + }; + +-/* Don't use for 1-bit bitmaps, addressing needs to be done at the bit level +- and it doesn't make sense, in general, to ask for a pointer +- to a particular pixel's data. */ ++/* ++ * Don't use for 1-bit bitmaps, addressing needs to be done at the bit level ++ * and it doesn't make sense, in general, to ask for a pointer ++ * to a particular pixel's data. ++ * ++ * This function assumes that bounds checking has been done in previous phase ++ * and they are opted out in here. ++ */ + static inline void * + grub_video_fb_get_video_ptr (struct grub_video_fbblit_info *source, + unsigned int x, unsigned int y) + { +- return source->data + y * source->mode_info->pitch + x * source->mode_info->bytes_per_pixel; ++ return source->data + (grub_addr_t) y * source->mode_info->pitch + (grub_addr_t) x * source->mode_info->bytes_per_pixel; + } + + /* Advance pointer by VAL bytes. If there is no unaligned access available, +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-3-0011-font-Harden-grub_font_blit_glyph-and-grub_font_blit_.patch b/SPECS/grub2/sbat-3-0011-font-Harden-grub_font_blit_glyph-and-grub_font_blit_.patch new file mode 100644 index 00000000000..df290027128 --- /dev/null +++ b/SPECS/grub2/sbat-3-0011-font-Harden-grub_font_blit_glyph-and-grub_font_blit_.patch @@ -0,0 +1,75 @@ +From 9d81f71c6b8f55cf20cd56f5fe29c759df9b48cc Mon Sep 17 00:00:00 2001 +From: Zhang Boyang +Date: Mon, 24 Oct 2022 07:15:41 +0800 +Subject: [PATCH 11/13] font: Harden grub_font_blit_glyph() and + grub_font_blit_glyph_mirror() + +As a mitigation and hardening measure add sanity checks to +grub_font_blit_glyph() and grub_font_blit_glyph_mirror(). This patch +makes these two functions do nothing if target blitting area isn't fully +contained in target bitmap. 
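Expressed as a standalone predicate, the containment check looks roughly like the sketch below, with the GCC/Clang __builtin_add_overflow() builtin standing in for grub_add() and all names illustrative:

#include <stdint.h>
#include <stdbool.h>

static bool blit_fits(uint16_t tgt_w, uint16_t tgt_h,
                      uint16_t src_w, uint16_t src_h,
                      unsigned int dx, unsigned int dy)
{
    uint16_t max_x, max_y;

    /* Reject if dx + src_w wraps uint16_t or exceeds the target width. */
    if (__builtin_add_overflow(dx, src_w, &max_x) || max_x > tgt_w)
        return false;
    /* Same containment test for the vertical extent. */
    if (__builtin_add_overflow(dy, src_h, &max_y) || max_y > tgt_h)
        return false;
    return true;
}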
Therefore, if complex calculations in caller +overflows and malicious coordinates are given, we are still safe because +any coordinates which result in out-of-bound-write are rejected. However, +this patch only checks for invalid coordinates, and doesn't provide any +protection against invalid source glyph or destination glyph, e.g. +mismatch between glyph size and buffer size. + +This hardening measure is designed to mitigate possible overflows in +blit_comb(). If overflow occurs, it may return invalid bounding box +during dry run and call grub_font_blit_glyph() with malicious +coordinates during actual blitting. However, we are still safe because +the scratch glyph itself is valid, although its size makes no sense, and +any invalid coordinates are rejected. + +It would be better to call grub_fatal() if illegal parameter is detected. +However, doing this may end up in a dangerous recursion because grub_fatal() +would print messages to the screen and we are in the progress of drawing +characters on the screen. + +Reported-by: Daniel Axtens +Signed-off-by: Zhang Boyang +Reviewed-by: Daniel Kiper +--- + grub-core/font/font.c | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/grub-core/font/font.c b/grub-core/font/font.c +index 3d3d803e8..cf15dc2f9 100644 +--- a/grub-core/font/font.c ++++ b/grub-core/font/font.c +@@ -1069,8 +1069,15 @@ static void + grub_font_blit_glyph (struct grub_font_glyph *target, + struct grub_font_glyph *src, unsigned dx, unsigned dy) + { ++ grub_uint16_t max_x, max_y; + unsigned src_bit, tgt_bit, src_byte, tgt_byte; + unsigned i, j; ++ ++ /* Harden against out-of-bound writes. */ ++ if ((grub_add (dx, src->width, &max_x) || max_x > target->width) || ++ (grub_add (dy, src->height, &max_y) || max_y > target->height)) ++ return; ++ + for (i = 0; i < src->height; i++) + { + src_bit = (src->width * i) % 8; +@@ -1102,9 +1109,16 @@ grub_font_blit_glyph_mirror (struct grub_font_glyph *target, + struct grub_font_glyph *src, + unsigned dx, unsigned dy) + { ++ grub_uint16_t max_x, max_y; + unsigned tgt_bit, src_byte, tgt_byte; + signed src_bit; + unsigned i, j; ++ ++ /* Harden against out-of-bound writes. */ ++ if ((grub_add (dx, src->width, &max_x) || max_x > target->width) || ++ (grub_add (dy, src->height, &max_y) || max_y > target->height)) ++ return; ++ + for (i = 0; i < src->height; i++) + { + src_bit = (src->width * i + src->width - 1) % 8; +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-3-0012-font-Assign-null_font-to-glyphs-in-ascii_font_glyph.patch b/SPECS/grub2/sbat-3-0012-font-Assign-null_font-to-glyphs-in-ascii_font_glyph.patch new file mode 100644 index 00000000000..ac7c5f82cb0 --- /dev/null +++ b/SPECS/grub2/sbat-3-0012-font-Assign-null_font-to-glyphs-in-ascii_font_glyph.patch @@ -0,0 +1,36 @@ +From 22b77b87e10a3a6c9bb9885415bc9a9c678378e6 Mon Sep 17 00:00:00 2001 +From: Zhang Boyang +Date: Fri, 28 Oct 2022 17:29:16 +0800 +Subject: [PATCH 12/13] font: Assign null_font to glyphs in ascii_font_glyph[] + +The calculations in blit_comb() need information from glyph's font, e.g. +grub_font_get_xheight(main_glyph->font). However, main_glyph->font is +NULL if main_glyph comes from ascii_font_glyph[]. Therefore +grub_font_get_*() crashes because of NULL pointer. + +There is already a solution, the null_font. So, assign it to those glyphs +in ascii_font_glyph[]. 
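This is an instance of the null-object pattern: rather than teaching every grub_font_get_*() accessor to tolerate a NULL pointer, glyphs with no backing font point at a static, zero-initialized sentinel whose fields are harmless defaults. A minimal sketch of the idea, with illustrative names:

struct font {
    int ascent, descent, xheight;
};

static struct font null_font;   /* zero-initialized sentinel object */

static int font_xheight(const struct font *f)
{
    return f->xheight;           /* safe even when f == &null_font */
}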
+ +Reported-by: Daniel Axtens +Signed-off-by: Zhang Boyang +Reviewed-by: Daniel Kiper +--- + grub-core/font/font.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/grub-core/font/font.c b/grub-core/font/font.c +index cf15dc2f9..3821937e6 100644 +--- a/grub-core/font/font.c ++++ b/grub-core/font/font.c +@@ -137,7 +137,7 @@ ascii_glyph_lookup (grub_uint32_t code) + ascii_font_glyph[current]->offset_x = 0; + ascii_font_glyph[current]->offset_y = -2; + ascii_font_glyph[current]->device_width = 8; +- ascii_font_glyph[current]->font = NULL; ++ ascii_font_glyph[current]->font = &null_font; + + grub_memcpy (ascii_font_glyph[current]->bitmap, + &ascii_bitmaps[current * ASCII_BITMAP_SIZE], +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-3-0013-normal-charset-Fix-an-integer-overflow-in-grub_unico.patch b/SPECS/grub2/sbat-3-0013-normal-charset-Fix-an-integer-overflow-in-grub_unico.patch new file mode 100644 index 00000000000..f871b1eb372 --- /dev/null +++ b/SPECS/grub2/sbat-3-0013-normal-charset-Fix-an-integer-overflow-in-grub_unico.patch @@ -0,0 +1,55 @@ +From 1514678888595ef41a968a0c69b7ff769edd1e9c Mon Sep 17 00:00:00 2001 +From: Zhang Boyang +Date: Fri, 28 Oct 2022 21:31:39 +0800 +Subject: [PATCH 13/13] normal/charset: Fix an integer overflow in + grub_unicode_aglomerate_comb() + +The out->ncomb is a bit-field of 8 bits. So, the max possible value is 255. +However, code in grub_unicode_aglomerate_comb() doesn't check for an +overflow when incrementing out->ncomb. If out->ncomb is already 255, +after incrementing it will get 0 instead of 256, and cause illegal +memory access in subsequent processing. + +This patch introduces GRUB_UNICODE_NCOMB_MAX to represent the max +acceptable value of ncomb. The code now checks for this limit and +ignores additional combining characters when limit is reached. + +Reported-by: Daniel Axtens +Signed-off-by: Zhang Boyang +Reviewed-by: Daniel Kiper +--- + grub-core/normal/charset.c | 3 +++ + include/grub/unicode.h | 2 ++ + 2 files changed, 5 insertions(+) + +diff --git a/grub-core/normal/charset.c b/grub-core/normal/charset.c +index 000e687bd..4f6647116 100644 +--- a/grub-core/normal/charset.c ++++ b/grub-core/normal/charset.c +@@ -472,6 +472,9 @@ grub_unicode_aglomerate_comb (const grub_uint32_t *in, grub_size_t inlen, + if (!haveout) + continue; + ++ if (out->ncomb == GRUB_UNICODE_NCOMB_MAX) ++ continue; ++ + if (comb_type == GRUB_UNICODE_COMB_MC + || comb_type == GRUB_UNICODE_COMB_ME + || comb_type == GRUB_UNICODE_COMB_MN) +diff --git a/include/grub/unicode.h b/include/grub/unicode.h +index 71a4d1a54..9360b0b97 100644 +--- a/include/grub/unicode.h ++++ b/include/grub/unicode.h +@@ -147,7 +147,9 @@ struct grub_unicode_glyph + grub_uint8_t bidi_level:6; /* minimum: 6 */ + enum grub_bidi_type bidi_type:5; /* minimum: :5 */ + ++#define GRUB_UNICODE_NCOMB_MAX ((1 << 8) - 1) + unsigned ncomb:8; ++ + /* Hint by unicode subsystem how wide this character usually is. + Real width is determined by font. Set only in UTF-8 stream. 
*/ + int estimated_width:8; +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-4-0001-fs-ntfs-Fix-an-OOB-write-when-parsing-the-ATTRIBUTE_.patch b/SPECS/grub2/sbat-4-0001-fs-ntfs-Fix-an-OOB-write-when-parsing-the-ATTRIBUTE_.patch new file mode 100644 index 00000000000..f0b92488c61 --- /dev/null +++ b/SPECS/grub2/sbat-4-0001-fs-ntfs-Fix-an-OOB-write-when-parsing-the-ATTRIBUTE_.patch @@ -0,0 +1,93 @@ +From 43651027d24e62a7a463254165e1e46e42aecdea Mon Sep 17 00:00:00 2001 +From: Maxim Suhanov +Date: Mon, 28 Aug 2023 16:31:57 +0300 +Subject: [PATCH 1/6] fs/ntfs: Fix an OOB write when parsing the + $ATTRIBUTE_LIST attribute for the $MFT file + +When parsing an extremely fragmented $MFT file, i.e., the file described +using the $ATTRIBUTE_LIST attribute, current NTFS code will reuse a buffer +containing bytes read from the underlying drive to store sector numbers, +which are consumed later to read data from these sectors into another buffer. + +These sectors numbers, two 32-bit integers, are always stored at predefined +offsets, 0x10 and 0x14, relative to first byte of the selected entry within +the $ATTRIBUTE_LIST attribute. Usually, this won't cause any problem. + +However, when parsing a specially-crafted file system image, this may cause +the NTFS code to write these integers beyond the buffer boundary, likely +causing the GRUB memory allocator to misbehave or fail. These integers contain +values which are controlled by on-disk structures of the NTFS file system. + +Such modification and resulting misbehavior may touch a memory range not +assigned to the GRUB and owned by firmware or another EFI application/driver. + +This fix introduces checks to ensure that these sector numbers are never +written beyond the boundary. + +Fixes: CVE-2023-4692 + +Reported-by: Maxim Suhanov +Signed-off-by: Maxim Suhanov +Reviewed-by: Daniel Kiper +--- + grub-core/fs/ntfs.c | 18 +++++++++++++++++- + 1 file changed, 17 insertions(+), 1 deletion(-) + +diff --git a/grub-core/fs/ntfs.c b/grub-core/fs/ntfs.c +index bbdbe24ad..c3c4db117 100644 +--- a/grub-core/fs/ntfs.c ++++ b/grub-core/fs/ntfs.c +@@ -184,7 +184,7 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr) + } + if (at->attr_end) + { +- grub_uint8_t *pa; ++ grub_uint8_t *pa, *pa_end; + + at->emft_buf = grub_malloc (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR); + if (at->emft_buf == NULL) +@@ -209,11 +209,13 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr) + } + at->attr_nxt = at->edat_buf; + at->attr_end = at->edat_buf + u32at (pa, 0x30); ++ pa_end = at->edat_buf + n; + } + else + { + at->attr_nxt = at->attr_end + u16at (pa, 0x14); + at->attr_end = at->attr_end + u32at (pa, 4); ++ pa_end = at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR); + } + at->flags |= GRUB_NTFS_AF_ALST; + while (at->attr_nxt < at->attr_end) +@@ -230,6 +232,13 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr) + at->flags |= GRUB_NTFS_AF_GPOS; + at->attr_cur = at->attr_nxt; + pa = at->attr_cur; ++ ++ if ((pa >= pa_end) || (pa_end - pa < 0x18)) ++ { ++ grub_error (GRUB_ERR_BAD_FS, "can\'t parse attribute list"); ++ return NULL; ++ } ++ + grub_set_unaligned32 ((char *) pa + 0x10, + grub_cpu_to_le32 (at->mft->data->mft_start)); + grub_set_unaligned32 ((char *) pa + 0x14, +@@ -240,6 +249,13 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr) + { + if (*pa != attr) + break; ++ ++ if ((pa >= pa_end) || (pa_end - pa < 0x18)) ++ { ++ grub_error (GRUB_ERR_BAD_FS, "can\'t parse attribute list"); ++ return NULL; ++ } ++ + if (read_attr + (at, pa + 0x10, + 
u32at (pa, 0x10) * (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR), +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-4-0002-fs-ntfs-Fix-an-OOB-read-when-reading-data-from-the-r.patch b/SPECS/grub2/sbat-4-0002-fs-ntfs-Fix-an-OOB-read-when-reading-data-from-the-r.patch new file mode 100644 index 00000000000..beae88ee410 --- /dev/null +++ b/SPECS/grub2/sbat-4-0002-fs-ntfs-Fix-an-OOB-read-when-reading-data-from-the-r.patch @@ -0,0 +1,58 @@ +From 0ed2458cc4eff6d9a9199527e2a0b6d445802f94 Mon Sep 17 00:00:00 2001 +From: Maxim Suhanov +Date: Mon, 28 Aug 2023 16:32:33 +0300 +Subject: [PATCH 2/6] fs/ntfs: Fix an OOB read when reading data from the + resident $DATA attribute + +When reading a file containing resident data, i.e., the file data is stored in +the $DATA attribute within the NTFS file record, not in external clusters, +there are no checks that this resident data actually fits the corresponding +file record segment. + +When parsing a specially-crafted file system image, the current NTFS code will +read the file data from an arbitrary, attacker-chosen memory offset and of +arbitrary, attacker-chosen length. + +This allows an attacker to display arbitrary chunks of memory, which could +contain sensitive information like password hashes or even plain-text, +obfuscated passwords from BS EFI variables. + +This fix implements a check to ensure that resident data is read from the +corresponding file record segment only. + +Fixes: CVE-2023-4693 + +Reported-by: Maxim Suhanov +Signed-off-by: Maxim Suhanov +Reviewed-by: Daniel Kiper +--- + grub-core/fs/ntfs.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +diff --git a/grub-core/fs/ntfs.c b/grub-core/fs/ntfs.c +index c3c4db117..a68e173d8 100644 +--- a/grub-core/fs/ntfs.c ++++ b/grub-core/fs/ntfs.c +@@ -401,7 +401,18 @@ read_data (struct grub_ntfs_attr *at, grub_uint8_t *pa, grub_uint8_t *dest, + { + if (ofs + len > u32at (pa, 0x10)) + return grub_error (GRUB_ERR_BAD_FS, "read out of range"); +- grub_memcpy (dest, pa + u32at (pa, 0x14) + ofs, len); ++ ++ if (u32at (pa, 0x10) > (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR)) ++ return grub_error (GRUB_ERR_BAD_FS, "resident attribute too large"); ++ ++ if (pa >= at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR)) ++ return grub_error (GRUB_ERR_BAD_FS, "resident attribute out of range"); ++ ++ if (u16at (pa, 0x14) + u32at (pa, 0x10) > ++ (grub_addr_t) at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR) - (grub_addr_t) pa) ++ return grub_error (GRUB_ERR_BAD_FS, "resident attribute out of range"); ++ ++ grub_memcpy (dest, pa + u16at (pa, 0x14) + ofs, len); + return 0; + } + +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-4-0003-fs-ntfs-Fix-an-OOB-read-when-parsing-directory-entri.patch b/SPECS/grub2/sbat-4-0003-fs-ntfs-Fix-an-OOB-read-when-parsing-directory-entri.patch new file mode 100644 index 00000000000..6785d1f0b35 --- /dev/null +++ b/SPECS/grub2/sbat-4-0003-fs-ntfs-Fix-an-OOB-read-when-parsing-directory-entri.patch @@ -0,0 +1,73 @@ +From 7e5f031a6a6a3decc2360a7b0c71abbe598e7354 Mon Sep 17 00:00:00 2001 +From: Maxim Suhanov +Date: Mon, 28 Aug 2023 16:33:17 +0300 +Subject: [PATCH 3/6] fs/ntfs: Fix an OOB read when parsing directory entries + from resident and non-resident index attributes + +This fix introduces checks to ensure that index entries are never read +beyond the corresponding directory index. + +The lack of this check is a minor issue, likely not exploitable in any way. 
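The shape of this fix is the usual bounded-iteration pattern for variable-length records in an untrusted buffer: verify that the fixed-size part fits before reading it, then verify that the variable-size part fits before consuming it. A simplified sketch follows; the 0x52-byte fixed part mirrors the patch, but the record layout and advance rule are otherwise illustrative:

#include <stdint.h>
#include <stddef.h>

#define REC_FIXED 0x52                        /* fixed bytes per entry */

static void walk_entries(const uint8_t *pos, const uint8_t *end)
{
    while (pos < end && (size_t) (end - pos) >= REC_FIXED)
    {
        /* Name length in UTF-16 code units, stored in the fixed part. */
        size_t name_bytes = 2 * (size_t) pos[0x50];

        if (name_bytes > (size_t) (end - pos) - REC_FIXED)
            break;                            /* name would run past end */
        /* ... process one directory entry ... */
        pos += REC_FIXED + name_bytes;        /* advance to the next one */
    }
}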
+ +Reported-by: Maxim Suhanov +Signed-off-by: Maxim Suhanov +Reviewed-by: Daniel Kiper +--- + grub-core/fs/ntfs.c | 13 +++++++++++-- + 1 file changed, 11 insertions(+), 2 deletions(-) + +diff --git a/grub-core/fs/ntfs.c b/grub-core/fs/ntfs.c +index a68e173d8..2d78b96e1 100644 +--- a/grub-core/fs/ntfs.c ++++ b/grub-core/fs/ntfs.c +@@ -599,7 +599,7 @@ get_utf8 (grub_uint8_t *in, grub_size_t len) + } + + static int +-list_file (struct grub_ntfs_file *diro, grub_uint8_t *pos, ++list_file (struct grub_ntfs_file *diro, grub_uint8_t *pos, grub_uint8_t *end_pos, + grub_fshelp_iterate_dir_hook_t hook, void *hook_data) + { + grub_uint8_t *np; +@@ -610,6 +610,9 @@ list_file (struct grub_ntfs_file *diro, grub_uint8_t *pos, + grub_uint8_t namespace; + char *ustr; + ++ if ((pos >= end_pos) || (end_pos - pos < 0x52)) ++ break; ++ + if (pos[0xC] & 2) /* end signature */ + break; + +@@ -617,6 +620,9 @@ list_file (struct grub_ntfs_file *diro, grub_uint8_t *pos, + ns = *(np++); + namespace = *(np++); + ++ if (2 * ns > end_pos - pos - 0x52) ++ break; ++ + /* + * Ignore files in DOS namespace, as they will reappear as Win32 + * names. +@@ -806,7 +812,9 @@ grub_ntfs_iterate_dir (grub_fshelp_node_t dir, + } + + cur_pos += 0x10; /* Skip index root */ +- ret = list_file (mft, cur_pos + u16at (cur_pos, 0), hook, hook_data); ++ ret = list_file (mft, cur_pos + u16at (cur_pos, 0), ++ at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR), ++ hook, hook_data); + if (ret) + goto done; + +@@ -893,6 +901,7 @@ grub_ntfs_iterate_dir (grub_fshelp_node_t dir, + (const grub_uint8_t *) "INDX"))) + goto done; + ret = list_file (mft, &indx[0x18 + u16at (indx, 0x18)], ++ indx + (mft->data->idx_size << GRUB_NTFS_BLK_SHR), + hook, hook_data); + if (ret) + goto done; +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-4-0004-fs-ntfs-Fix-an-OOB-read-when-parsing-bitmaps-for-ind.patch b/SPECS/grub2/sbat-4-0004-fs-ntfs-Fix-an-OOB-read-when-parsing-bitmaps-for-ind.patch new file mode 100644 index 00000000000..65718a6f2b8 --- /dev/null +++ b/SPECS/grub2/sbat-4-0004-fs-ntfs-Fix-an-OOB-read-when-parsing-bitmaps-for-ind.patch @@ -0,0 +1,51 @@ +From 7a5a116739fa6d8a625da7d6b9272c9a2462f967 Mon Sep 17 00:00:00 2001 +From: Maxim Suhanov +Date: Mon, 28 Aug 2023 16:33:44 +0300 +Subject: [PATCH 4/6] fs/ntfs: Fix an OOB read when parsing bitmaps for index + attributes + +This fix introduces checks to ensure that bitmaps for directory indices +are never read beyond their actual sizes. + +The lack of this check is a minor issue, likely not exploitable in any way. 
+ +Reported-by: Maxim Suhanov +Signed-off-by: Maxim Suhanov +Reviewed-by: Daniel Kiper +--- + grub-core/fs/ntfs.c | 19 +++++++++++++++++++ + 1 file changed, 19 insertions(+) + +diff --git a/grub-core/fs/ntfs.c b/grub-core/fs/ntfs.c +index 2d78b96e1..bb70c89fb 100644 +--- a/grub-core/fs/ntfs.c ++++ b/grub-core/fs/ntfs.c +@@ -843,6 +843,25 @@ grub_ntfs_iterate_dir (grub_fshelp_node_t dir, + + if (is_resident) + { ++ if (bitmap_len > (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR)) ++ { ++ grub_error (GRUB_ERR_BAD_FS, "resident bitmap too large"); ++ goto done; ++ } ++ ++ if (cur_pos >= at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR)) ++ { ++ grub_error (GRUB_ERR_BAD_FS, "resident bitmap out of range"); ++ goto done; ++ } ++ ++ if (u16at (cur_pos, 0x14) + u32at (cur_pos, 0x10) > ++ (grub_addr_t) at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR) - (grub_addr_t) cur_pos) ++ { ++ grub_error (GRUB_ERR_BAD_FS, "resident bitmap out of range"); ++ goto done; ++ } ++ + grub_memcpy (bmp, cur_pos + u16at (cur_pos, 0x14), + bitmap_len); + } +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-4-0005-fs-ntfs-Fix-an-OOB-read-when-parsing-a-volume-label.patch b/SPECS/grub2/sbat-4-0005-fs-ntfs-Fix-an-OOB-read-when-parsing-a-volume-label.patch new file mode 100644 index 00000000000..5e40dac8764 --- /dev/null +++ b/SPECS/grub2/sbat-4-0005-fs-ntfs-Fix-an-OOB-read-when-parsing-a-volume-label.patch @@ -0,0 +1,61 @@ +From 1fe82c41e070385e273d7bb1cfb482627a3c28e8 Mon Sep 17 00:00:00 2001 +From: Maxim Suhanov +Date: Mon, 28 Aug 2023 16:38:19 +0300 +Subject: [PATCH 5/6] fs/ntfs: Fix an OOB read when parsing a volume label + +This fix introduces checks to ensure that an NTFS volume label is always +read from the corresponding file record segment. + +The current NTFS code allows the volume label string to be read from an +arbitrary, attacker-chosen memory location. However, the bytes read are +always treated as UTF-16LE. So, the final string displayed is mostly +unreadable and it can't be easily converted back to raw bytes. + +The lack of this check is a minor issue, likely not causing a significant +data leak. 
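These resident-attribute checks all reduce to the same primitive: validating an untrusted (offset, length) pair against a buffer without ever computing offset + length, since that sum can itself overflow. As a standalone helper (an illustrative sketch, not GRUB's API):

#include <stddef.h>
#include <stdbool.h>

static bool range_in_buf(size_t buf_len, size_t off, size_t len)
{
    if (off > buf_len)
        return false;
    return len <= buf_len - off;   /* no off + len, so no overflow */
}

A caller would copy the volume label only when, for example, range_in_buf(record_len, label_off, 2 * label_chars) holds.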
+ +Reported-by: Maxim Suhanov +Signed-off-by: Maxim Suhanov +Reviewed-by: Daniel Kiper +--- + grub-core/fs/ntfs.c | 18 +++++++++++++++++- + 1 file changed, 17 insertions(+), 1 deletion(-) + +diff --git a/grub-core/fs/ntfs.c b/grub-core/fs/ntfs.c +index bb70c89fb..ff5e3740f 100644 +--- a/grub-core/fs/ntfs.c ++++ b/grub-core/fs/ntfs.c +@@ -1213,13 +1213,29 @@ grub_ntfs_label (grub_device_t device, char **label) + + init_attr (&mft->attr, mft); + pa = find_attr (&mft->attr, GRUB_NTFS_AT_VOLUME_NAME); ++ ++ if (pa >= mft->buf + (mft->data->mft_size << GRUB_NTFS_BLK_SHR)) ++ { ++ grub_error (GRUB_ERR_BAD_FS, "can\'t parse volume label"); ++ goto fail; ++ } ++ ++ if (mft->buf + (mft->data->mft_size << GRUB_NTFS_BLK_SHR) - pa < 0x16) ++ { ++ grub_error (GRUB_ERR_BAD_FS, "can\'t parse volume label"); ++ goto fail; ++ } ++ + if ((pa) && (pa[8] == 0) && (u32at (pa, 0x10))) + { + int len; + + len = u32at (pa, 0x10) / 2; + pa += u16at (pa, 0x14); +- *label = get_utf8 (pa, len); ++ if (mft->buf + (mft->data->mft_size << GRUB_NTFS_BLK_SHR) - pa >= 2 * len) ++ *label = get_utf8 (pa, len); ++ else ++ grub_error (GRUB_ERR_BAD_FS, "can\'t parse volume label"); + } + + fail: +-- +2.34.1 + diff --git a/SPECS/grub2/sbat-4-0006-fs-ntfs-Make-code-more-readable.patch b/SPECS/grub2/sbat-4-0006-fs-ntfs-Make-code-more-readable.patch new file mode 100644 index 00000000000..c30fa5b6cdd --- /dev/null +++ b/SPECS/grub2/sbat-4-0006-fs-ntfs-Make-code-more-readable.patch @@ -0,0 +1,159 @@ +From e58b870ff926415e23fc386af41ff81b2f588763 Mon Sep 17 00:00:00 2001 +From: Maxim Suhanov +Date: Mon, 28 Aug 2023 16:40:07 +0300 +Subject: [PATCH 6/6] fs/ntfs: Make code more readable + +Move some calls used to access NTFS attribute header fields into +functions with human-readable names. + +Suggested-by: Daniel Kiper +Signed-off-by: Maxim Suhanov +Reviewed-by: Daniel Kiper +--- + grub-core/fs/ntfs.c | 48 +++++++++++++++++++++++++++++++-------------- + 1 file changed, 33 insertions(+), 15 deletions(-) + +diff --git a/grub-core/fs/ntfs.c b/grub-core/fs/ntfs.c +index ff5e3740f..de435aa14 100644 +--- a/grub-core/fs/ntfs.c ++++ b/grub-core/fs/ntfs.c +@@ -52,6 +52,24 @@ u64at (void *ptr, grub_size_t ofs) + return grub_le_to_cpu64 (grub_get_unaligned64 ((char *) ptr + ofs)); + } + ++static grub_uint16_t ++first_attr_off (void *mft_buf_ptr) ++{ ++ return u16at (mft_buf_ptr, 0x14); ++} ++ ++static grub_uint16_t ++res_attr_data_off (void *res_attr_ptr) ++{ ++ return u16at (res_attr_ptr, 0x14); ++} ++ ++static grub_uint32_t ++res_attr_data_len (void *res_attr_ptr) ++{ ++ return u32at (res_attr_ptr, 0x10); ++} ++ + grub_ntfscomp_func_t grub_ntfscomp_func; + + static grub_err_t +@@ -106,7 +124,7 @@ init_attr (struct grub_ntfs_attr *at, struct grub_ntfs_file *mft) + { + at->mft = mft; + at->flags = (mft == &mft->data->mmft) ? 
GRUB_NTFS_AF_MMFT : 0; +- at->attr_nxt = mft->buf + u16at (mft->buf, 0x14); ++ at->attr_nxt = mft->buf + first_attr_off (mft->buf); + at->attr_end = at->emft_buf = at->edat_buf = at->sbuf = NULL; + } + +@@ -154,7 +172,7 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr) + return NULL; + } + +- new_pos = &at->emft_buf[u16at (at->emft_buf, 0x14)]; ++ new_pos = &at->emft_buf[first_attr_off (at->emft_buf)]; + while (*new_pos != 0xFF) + { + if ((*new_pos == *at->attr_cur) +@@ -213,7 +231,7 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr) + } + else + { +- at->attr_nxt = at->attr_end + u16at (pa, 0x14); ++ at->attr_nxt = at->attr_end + res_attr_data_off (pa); + at->attr_end = at->attr_end + u32at (pa, 4); + pa_end = at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR); + } +@@ -399,20 +417,20 @@ read_data (struct grub_ntfs_attr *at, grub_uint8_t *pa, grub_uint8_t *dest, + + if (pa[8] == 0) + { +- if (ofs + len > u32at (pa, 0x10)) ++ if (ofs + len > res_attr_data_len (pa)) + return grub_error (GRUB_ERR_BAD_FS, "read out of range"); + +- if (u32at (pa, 0x10) > (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR)) ++ if (res_attr_data_len (pa) > (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR)) + return grub_error (GRUB_ERR_BAD_FS, "resident attribute too large"); + + if (pa >= at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR)) + return grub_error (GRUB_ERR_BAD_FS, "resident attribute out of range"); + +- if (u16at (pa, 0x14) + u32at (pa, 0x10) > ++ if (res_attr_data_off (pa) + res_attr_data_len (pa) > + (grub_addr_t) at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR) - (grub_addr_t) pa) + return grub_error (GRUB_ERR_BAD_FS, "resident attribute out of range"); + +- grub_memcpy (dest, pa + u16at (pa, 0x14) + ofs, len); ++ grub_memcpy (dest, pa + res_attr_data_off (pa) + ofs, len); + return 0; + } + +@@ -556,7 +574,7 @@ init_file (struct grub_ntfs_file *mft, grub_uint64_t mftno) + (unsigned long long) mftno); + + if (!pa[8]) +- mft->size = u32at (pa, 0x10); ++ mft->size = res_attr_data_len (pa); + else + mft->size = u64at (pa, 0x30); + +@@ -805,7 +823,7 @@ grub_ntfs_iterate_dir (grub_fshelp_node_t dir, + (u32at (cur_pos, 0x18) != 0x490024) || + (u32at (cur_pos, 0x1C) != 0x300033)) + continue; +- cur_pos += u16at (cur_pos, 0x14); ++ cur_pos += res_attr_data_off (cur_pos); + if (*cur_pos != 0x30) /* Not filename index */ + continue; + break; +@@ -834,7 +852,7 @@ grub_ntfs_iterate_dir (grub_fshelp_node_t dir, + { + int is_resident = (cur_pos[8] == 0); + +- bitmap_len = ((is_resident) ? u32at (cur_pos, 0x10) : ++ bitmap_len = ((is_resident) ? 
res_attr_data_len (cur_pos) : + u32at (cur_pos, 0x28)); + + bmp = grub_malloc (bitmap_len); +@@ -855,14 +873,14 @@ grub_ntfs_iterate_dir (grub_fshelp_node_t dir, + goto done; + } + +- if (u16at (cur_pos, 0x14) + u32at (cur_pos, 0x10) > ++ if (res_attr_data_off (cur_pos) + res_attr_data_len (cur_pos) > + (grub_addr_t) at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR) - (grub_addr_t) cur_pos) + { + grub_error (GRUB_ERR_BAD_FS, "resident bitmap out of range"); + goto done; + } + +- grub_memcpy (bmp, cur_pos + u16at (cur_pos, 0x14), ++ grub_memcpy (bmp, cur_pos + res_attr_data_off (cur_pos), + bitmap_len); + } + else +@@ -1226,12 +1244,12 @@ grub_ntfs_label (grub_device_t device, char **label) + goto fail; + } + +- if ((pa) && (pa[8] == 0) && (u32at (pa, 0x10))) ++ if ((pa) && (pa[8] == 0) && (res_attr_data_len (pa))) + { + int len; + +- len = u32at (pa, 0x10) / 2; +- pa += u16at (pa, 0x14); ++ len = res_attr_data_len (pa) / 2; ++ pa += res_attr_data_off (pa); + if (mft->buf + (mft->data->mft_size << GRUB_NTFS_BLK_SHR) - pa >= 2 * len) + *label = get_utf8 (pa, len); + else +-- +2.34.1 + diff --git a/SPECS/grub2/sbat.csv.in b/SPECS/grub2/sbat.csv.in index a9979261ba7..1d4129606bc 100644 --- a/SPECS/grub2/sbat.csv.in +++ b/SPECS/grub2/sbat.csv.in @@ -1,3 +1,3 @@ sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md -grub,2,Free Software Foundation,grub,@@VERSION@@,https://www.gnu.org/software/grub/ -grub.mariner,2,Microsoft,grub2,@@VERSION_RELEASE@@,https://github.com/microsoft/CBL-Mariner +grub,4,Free Software Foundation,grub,@@VERSION@@,https://www.gnu.org/software/grub/ +grub.mariner,3,Microsoft,grub2,@@VERSION_RELEASE@@,https://github.com/microsoft/CBL-Mariner diff --git a/SPECS/kata-containers-cc/kata-containers-cc.signatures.json b/SPECS/kata-containers-cc/kata-containers-cc.signatures.json index 19e953e1141..26cb1e1781e 100644 --- a/SPECS/kata-containers-cc/kata-containers-cc.signatures.json +++ b/SPECS/kata-containers-cc/kata-containers-cc.signatures.json @@ -1,7 +1,7 @@ { "Signatures": { "mariner-coco-build-uvm.sh": "4f2be6965d8c4d7919fd201a68160fc8ab02a1be50a336abbfea13f16a6ffb89", - "kata-containers-cc-0.6.3-cargo.tar.gz": "7ff6c5f7f7aa31a99ea5d837876291d886b16c32f21b6d65d044fd398abff1e6", - "kata-containers-cc-0.6.3.tar.gz": "1f366ce70bf83a239a7ec99334506adb28c3199157b4370840c3685378a34268" + "kata-containers-cc-3.2.0.azl0-cargo.tar.gz": "7ff6c5f7f7aa31a99ea5d837876291d886b16c32f21b6d65d044fd398abff1e6", + "kata-containers-cc-3.2.0.azl0.tar.gz": "78f3749c848c77f0d54aa16a4f29209a07f3d4af30664c0d9212300ac364aaec" } } diff --git a/SPECS/kata-containers-cc/kata-containers-cc.spec b/SPECS/kata-containers-cc/kata-containers-cc.spec index 90688a18430..1fb257de6be 100644 --- a/SPECS/kata-containers-cc/kata-containers-cc.spec +++ b/SPECS/kata-containers-cc/kata-containers-cc.spec @@ -1,6 +1,6 @@ %global runtime_make_vars DEFMEMSZ=256 \\\ - DEFSHAREDFS_CLH_SNP_VIRTIOFS=none \\\ DEFSTATICSANDBOXWORKLOADMEM=1792 \\\ + DEFSNPGUEST=true \\\ SKIP_GO_VERSION_CHECK=1 %global agent_make_vars LIBC=gnu \\\ @@ -9,16 +9,15 @@ %global debug_package %{nil} Name: kata-containers-cc -Version: 0.6.3 -Release: 4%{?dist} +Version: 3.2.0.azl0 +Release: 1%{?dist} Summary: Kata Confidential Containers package developed for Confidential Containers on AKS License: ASL 2.0 Vendor: Microsoft Corporation URL: https://github.com/microsoft/kata-containers -Source0: https://github.com/microsoft/kata-containers/archive/refs/tags/cc-%{version}.tar.gz#/%{name}-%{version}.tar.gz -Source1: 
https://github.com/microsoft/kata-containers/archive/refs/tags/%{name}-%{version}.tar.gz -Source2: %{name}-%{version}-cargo.tar.gz -Source3: mariner-coco-build-uvm.sh +Source0: https://github.com/microsoft/kata-containers/archive/refs/tags/%{version}.tar.gz#/%{name}-%{version}.tar.gz +Source1: %{name}-%{version}-cargo.tar.gz +Source2: mariner-coco-build-uvm.sh ExclusiveArch: x86_64 @@ -70,7 +69,7 @@ This package contains the the tooling and files required to build the UVM %prep %autosetup -p1 -n %{name}-%{version} pushd %{_builddir}/%{name}-%{version} -tar -xf %{SOURCE2} +tar -xf %{SOURCE1} popd %build @@ -138,7 +137,7 @@ pushd %{_builddir}/%{name}-%{version} rm tools/osbuilder/.gitignore rm tools/osbuilder/rootfs-builder/.gitignore -install -D -m 0755 %{SOURCE3} %{buildroot}%{osbuilder}/mariner-coco-build-uvm.sh +install -D -m 0755 %{SOURCE2} %{buildroot}%{osbuilder}/mariner-coco-build-uvm.sh install -D -m 0644 VERSION %{buildroot}%{osbuilder}/VERSION install -D -m 0644 ci/install_yq.sh %{buildroot}%{osbuilder}/ci/install_yq.sh install -D -m 0644 versions.yaml %{buildroot}%{osbuilder}/versions.yaml @@ -185,14 +184,27 @@ install -D -m 0755 kata-monitor %{buildroot}%{coco_bin}/kata-monitor install -D -m 0755 kata-runtime %{buildroot}%{coco_bin}/kata-runtime install -D -m 0755 data/kata-collect-data.sh %{buildroot}%{coco_bin}/kata-collect-data.sh -# Note: we deploy two configurations - the additional one is for policy/snapshotter testing w/o SEV SNP or IGVM -install -D -m 0644 config/configuration-clh.toml %{buildroot}/%{defaults_kata}/configuration-clh.toml +# We deploy 3 configurations: +# configuration-clh-snp: production Kata-CC - IGVM & image, confidential_guest=true, sev_snp_guest=true +# configuration-clh-snp-debug: debug Kata-CC - kernel & image, confidential_guest=true, sev_snp_guest=false +# configuration-clh (symlinked to by configuration.toml): vanilla Kata - kernel & initrd, confidential_guest=false, sev_snp_guest=false install -D -m 0644 config/configuration-clh-snp.toml %{buildroot}/%{defaults_kata}/configuration-clh-snp.toml +install -D -m 0644 config/configuration-clh.toml %{buildroot}/%{defaults_kata}/configuration-clh-snp-debug.toml +install -D -m 0644 config/configuration-clh.toml %{buildroot}/%{defaults_kata}/configuration-clh.toml -# adapt upstream config files -# change paths with locations specific to our distribution -sed -i 's|/usr|/opt/confidential-containers|g' %{buildroot}/%{defaults_kata}/configuration-clh.toml -sed -i 's|/usr|/opt/confidential-containers|g' %{buildroot}/%{defaults_kata}/configuration-clh-snp.toml +# Adapt configuration files: +# - Change paths with locations specific to our distribution. +sed --follow-symlinks -i 's|/usr|/opt/confidential-containers|g' %{buildroot}/%{defaults_kata}/configuration-clh*.toml +# - Set up configuration-clh-snp-debug. Note that kernel and image are already +# set through configuration-clh.toml.in. +sed -i 's|-igvm.img|-igvm-debug.img|g' %{buildroot}/%{defaults_kata}/configuration-clh-snp-debug.toml +sed -i '/^#confidential_guest =/s|^#||g' %{buildroot}/%{defaults_kata}/configuration-clh-snp-debug.toml +sed -i '/^#enable_debug =/s|^#||g' %{buildroot}/%{defaults_kata}/configuration-clh-snp-debug.toml +sed -i '/^#debug_console_enabled =/s|^#||g' %{buildroot}/%{defaults_kata}/configuration-clh-snp-debug.toml +sed -i 's|shared_fs = "virtio-fs"|shared_fs = "none"|g' %{buildroot}/%{defaults_kata}/configuration-clh-snp-debug.toml +# - Set up configuration-clh. 
+sed -i '/^#initrd =/s|^#||g' %{buildroot}/%{defaults_kata}/configuration-clh.toml +sed -i '/^image =/s|^|#|g' %{buildroot}/%{defaults_kata}/configuration-clh.toml popd # tardev-snapshotter @@ -275,8 +287,11 @@ install -D -m 0755 %{_builddir}/%{name}-%{version}/tools/osbuilder/image-builder %exclude %{osbuilder}/tools/osbuilder/rootfs-builder/ubuntu %changelog -* Fri Feb 02 2024 CBL-Mariner Servicing Account - 0.6.3-4 -- Bump release to rebuild with go 1.21.6 +* Mon Feb 12 2024 Aurelien Bombo - 3.2.0.azl0-1 +- Use Microsoft sources based on upstream Kata version 3.2.0. + +* Fri Feb 02 2024 CBL-Mariner Servicing Account - 0.6.3-4 +- Bump release to rebuild with go 1.21.6 * Tue Jan 30 2024 Archana Choudhary - 0.6.3-3 - Remove kernel-uvm-cvm(-devel) dependency diff --git a/SPECS/kata-containers/0001-Append-systemd-kernel-cmdline-params-for-initrd.patch b/SPECS/kata-containers/0001-Append-systemd-kernel-cmdline-params-for-initrd.patch deleted file mode 100644 index 8744c2c5e96..00000000000 --- a/SPECS/kata-containers/0001-Append-systemd-kernel-cmdline-params-for-initrd.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 0503cd61a56ed09de60981fedecc226df3845860 Mon Sep 17 00:00:00 2001 -From: dallasd1 -Date: Wed, 26 Jul 2023 08:40:44 -0700 -Subject: [PATCH] Append systemd kernel cmdline params for initrd - ---- - src/runtime/pkg/katautils/create.go | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/runtime/pkg/katautils/create.go b/src/runtime/pkg/katautils/create.go -index 67ea03dcf..2c829a691 100644 ---- a/src/runtime/pkg/katautils/create.go -+++ b/src/runtime/pkg/katautils/create.go -@@ -57,7 +57,7 @@ func getKernelParams(needSystemd, trace bool) []vc.Param { - } - - func needSystemd(config vc.HypervisorConfig) bool { -- return config.ImagePath != "" -+ return config.ImagePath != "" || config.InitrdPath != "" - } - - // HandleFactory set the factory --- -2.17.1 - diff --git a/SPECS/kata-containers/0001-Merged-PR-9607-Allow-10-seconds-for-VM-creation-star.patch b/SPECS/kata-containers/0001-Merged-PR-9607-Allow-10-seconds-for-VM-creation-star.patch deleted file mode 100644 index 58c3ef06405..00000000000 --- a/SPECS/kata-containers/0001-Merged-PR-9607-Allow-10-seconds-for-VM-creation-star.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 590604dca0f6a0636933be21fc6a490c0f17af34 Mon Sep 17 00:00:00 2001 -From: Daniel Mihai -Date: Tue, 16 Aug 2022 17:01:12 +0000 -Subject: [PATCH 2/3] Merged PR 9607: Allow 10 seconds for VM creation + start - -Allow 10 seconds for VM creation + start ---- - src/runtime/virtcontainers/clh.go | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/src/runtime/virtcontainers/clh.go b/src/runtime/virtcontainers/clh.go -index 71bd931..444d9de 100644 ---- a/src/runtime/virtcontainers/clh.go -+++ b/src/runtime/virtcontainers/clh.go -@@ -688,7 +688,9 @@ func (clh *cloudHypervisor) StartVM(ctx context.Context, timeout int) error { - } - clh.state.PID = pid - -- ctx, cancel := context.WithTimeout(ctx, clh.getClhAPITimeout()*time.Second) -+ // FIXME - for now allow more than one second to create and start the VM. 
-+ //ctx, cancel := context.WithTimeout(ctx, clh.getClhAPITimeout()*time.Second) -+ ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - if err := clh.bootVM(ctx); err != nil { --- -2.25.1 - diff --git a/SPECS/kata-containers/0001-osbuilder-Add-support-for-CBL-Mariner.patch b/SPECS/kata-containers/0001-osbuilder-Add-support-for-CBL-Mariner.patch deleted file mode 100644 index d7d8b128c83..00000000000 --- a/SPECS/kata-containers/0001-osbuilder-Add-support-for-CBL-Mariner.patch +++ /dev/null @@ -1,122 +0,0 @@ -From 36198274dcb4332f1acd445d2a80854232b1d236 Mon Sep 17 00:00:00 2001 -From: Dallas Delaney -Date: Thu, 26 Jan 2023 14:58:55 -0800 -Subject: [PATCH] osbuilder: Add support for CBL-Mariner - -Add osbuilder support to build a rootfs and image -based on the CBL-Mariner Linux distro - -Fixes: #6462 - -Signed-off-by: Dallas Delaney ---- - tools/osbuilder/README.md | 14 +++++----- - .../rootfs-builder/cbl-mariner/Dockerfile.in | 15 +++++++++++ - .../rootfs-builder/cbl-mariner/config.sh | 10 +++++++ - .../rootfs-builder/cbl-mariner/rootfs_lib.sh | 26 +++++++++++++++++++ - 4 files changed, 58 insertions(+), 7 deletions(-) - create mode 100644 tools/osbuilder/rootfs-builder/cbl-mariner/Dockerfile.in - create mode 100644 tools/osbuilder/rootfs-builder/cbl-mariner/config.sh - create mode 100644 tools/osbuilder/rootfs-builder/cbl-mariner/rootfs_lib.sh - -diff --git a/tools/osbuilder/README.md b/tools/osbuilder/README.md -index 343d2bf60..9415de74e 100644 ---- a/tools/osbuilder/README.md -+++ b/tools/osbuilder/README.md -@@ -80,7 +80,7 @@ filesystem components to generate an initrd. - 3. When generating an image, the initrd is extracted to obtain the base rootfs for - the image. - --Ubuntu is the default distro for building the rootfs, to use a different one, you can set `DISTRO=alpine|clearlinux|debian|ubuntu`. -+Ubuntu is the default distro for building the rootfs, to use a different one, you can set `DISTRO=alpine|clearlinux|debian|ubuntu|cbl-mariner`. - For example `make USE_DOCKER=true DISTRO=alpine rootfs` will make an Alpine rootfs using Docker. - - ### Rootfs creation -@@ -209,9 +209,9 @@ of the the osbuilder distributions. - > Note: this table is not relevant for the dracut build method, since it supports - any Linux distribution and architecture where dracut is available. 
- --| |Alpine |CentOS Stream |Clear Linux |Debian/Ubuntu | --|-- |-- |-- |-- |-- | --|**ARM64** |:heavy_check_mark:|:heavy_check_mark:| | | --|**PPC64le**| |:heavy_check_mark:| |:heavy_check_mark:| --|**s390x** | |:heavy_check_mark:| |:heavy_check_mark:| --|**x86_64** |:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:| -+| |Alpine |CentOS Stream |Clear Linux |Debian/Ubuntu |CBL-Mariner | -+|-- |-- |-- |-- |-- |-- | -+|**ARM64** |:heavy_check_mark:|:heavy_check_mark:| | | | -+|**PPC64le**| |:heavy_check_mark:| |:heavy_check_mark:| | -+|**s390x** | |:heavy_check_mark:| |:heavy_check_mark:| | -+|**x86_64** |:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:| -diff --git a/tools/osbuilder/rootfs-builder/cbl-mariner/Dockerfile.in b/tools/osbuilder/rootfs-builder/cbl-mariner/Dockerfile.in -new file mode 100644 -index 000000000..6fa29807d ---- /dev/null -+++ b/tools/osbuilder/rootfs-builder/cbl-mariner/Dockerfile.in -@@ -0,0 +1,15 @@ -+# Copyright (c) 2023 Microsoft Corporation -+# -+# SPDX-License-Identifier: Apache-2.0 -+ -+ARG IMAGE_REGISTRY=mcr.microsoft.com -+FROM ${IMAGE_REGISTRY}/cbl-mariner/base/core:@OS_VERSION@ -+ -+RUN tdnf -y install \ -+ ca-certificates \ -+ build-essential \ -+ dnf \ -+ git \ -+ tar -+ -+@INSTALL_RUST@ -diff --git a/tools/osbuilder/rootfs-builder/cbl-mariner/config.sh b/tools/osbuilder/rootfs-builder/cbl-mariner/config.sh -new file mode 100644 -index 000000000..694124acd ---- /dev/null -+++ b/tools/osbuilder/rootfs-builder/cbl-mariner/config.sh -@@ -0,0 +1,10 @@ -+# Copyright (c) 2023 Microsoft Corporation -+# -+# SPDX-License-Identifier: Apache-2.0 -+ -+OS_NAME=cbl-mariner -+OS_VERSION=${OS_VERSION:-2.0} -+LIBC="gnu" -+PACKAGES="core-packages-base-image ca-certificates" -+[ "$AGENT_INIT" = no ] && PACKAGES+=" systemd" -+[ "$SECCOMP" = yes ] && PACKAGES+=" libseccomp" -diff --git a/tools/osbuilder/rootfs-builder/cbl-mariner/rootfs_lib.sh b/tools/osbuilder/rootfs-builder/cbl-mariner/rootfs_lib.sh -new file mode 100644 -index 000000000..0288d4d77 ---- /dev/null -+++ b/tools/osbuilder/rootfs-builder/cbl-mariner/rootfs_lib.sh -@@ -0,0 +1,26 @@ -+# Copyright (c) 2023 Microsoft Corporation -+# -+# SPDX-License-Identifier: Apache-2.0 -+ -+build_rootfs() -+{ -+ # Mandatory -+ local ROOTFS_DIR="$1" -+ -+ [ -z "$ROOTFS_DIR" ] && die "need rootfs" -+ -+ # In case of support EXTRA packages, use it to allow -+ # users add more packages to the base rootfs -+ local EXTRA_PKGS=${EXTRA_PKGS:-""} -+ -+ check_root -+ mkdir -p "${ROOTFS_DIR}" -+ PKG_MANAGER="tdnf" -+ -+ DNF="${PKG_MANAGER} -y --installroot=${ROOTFS_DIR} --noplugins --releasever=${OS_VERSION}" -+ -+ info "install packages for rootfs" -+ $DNF install ${EXTRA_PKGS} ${PACKAGES} -+ -+ rm -rf ${ROOTFS_DIR}/usr/share/{bash-completion,cracklib,doc,info,locale,man,misc,pixmaps,terminfo,zoneinfo,zsh} -+} --- -2.33.8 - diff --git a/SPECS/kata-containers/0002-Merged-PR-9671-Wait-for-a-possibly-slow-Guest.patch b/SPECS/kata-containers/0002-Merged-PR-9671-Wait-for-a-possibly-slow-Guest.patch deleted file mode 100644 index aeac808d058..00000000000 --- a/SPECS/kata-containers/0002-Merged-PR-9671-Wait-for-a-possibly-slow-Guest.patch +++ /dev/null @@ -1,29 +0,0 @@ -From ec322fec7e9c132c4caa0a93175320cb0d8fba73 Mon Sep 17 00:00:00 2001 -From: Daniel Mihai -Date: Mon, 22 Aug 2022 22:02:31 +0000 -Subject: [PATCH 3/3] Merged PR 9671: Wait for a possibly slow Guest - -Wait for a possibly slow Guest - -On some Host VMs it takes longer than 30 seconds to connect to -the 
Agent - e.g., if enable_debug is enabled for [hypervisor.clh]. ---- - src/runtime/config/configuration-clh.toml.in | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/runtime/config/configuration-clh.toml.in b/src/runtime/config/configuration-clh.toml.in -index f09c095f..0ce7a98d 100644 ---- a/src/runtime/config/configuration-clh.toml.in -+++ b/src/runtime/config/configuration-clh.toml.in -@@ -289,7 +289,7 @@ block_device_driver = "virtio-blk" - - # Agent connection dialing timeout value in seconds - # (default: 30) --#dial_timeout = 30 -+dial_timeout = 60 - - [runtime] - # If enabled, the runtime will log additional debug messages to the --- -2.17.1 - diff --git a/SPECS/kata-containers/0003-Merged-PR-9805-Add-support-for-MSHV.patch b/SPECS/kata-containers/0003-Merged-PR-9805-Add-support-for-MSHV.patch deleted file mode 100644 index b682c2b8d1f..00000000000 --- a/SPECS/kata-containers/0003-Merged-PR-9805-Add-support-for-MSHV.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 67e4b4ceaefea83a1e5c77a7760fa1f9b37589f4 Mon Sep 17 00:00:00 2001 -From: Daniel Mihai -Date: Thu, 1 Sep 2022 15:07:16 +0000 -Subject: [PATCH 09/10] Merged PR 9805: Add support for MSHV - -Cloud Hypervisor is able to use either /dev/mshv or /dev/kvm. ---- - src/runtime/pkg/resourcecontrol/cgroups.go | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/src/runtime/pkg/resourcecontrol/cgroups.go b/src/runtime/pkg/resourcecontrol/cgroups.go -index 4210392d..d4608458 100644 ---- a/src/runtime/pkg/resourcecontrol/cgroups.go -+++ b/src/runtime/pkg/resourcecontrol/cgroups.go -@@ -64,7 +64,8 @@ func sandboxDevices() []specs.LinuxDeviceCgroup { - // In order to run Virtual Machines and create virtqueues, hypervisors - // need access to certain character devices in the host, like kvm and vhost-net. 
- hypervisorDevices := []string{ -- "/dev/kvm", // To run virtual machines -+ "/dev/kvm", // To run virtual machines using KVM -+ "/dev/mshv", // To run virtual machines using MSHV - "/dev/vhost-net", // To create virtqueues - "/dev/vfio/vfio", // To access VFIO devices - "/dev/vhost-vsock", // To interact with vsock if --- -2.17.1 - diff --git a/SPECS/kata-containers/0004-Merged-PR-9806-Fix-enable_debug-for-hypervisor.clh.patch b/SPECS/kata-containers/0004-Merged-PR-9806-Fix-enable_debug-for-hypervisor.clh.patch deleted file mode 100644 index 6843b704163..00000000000 --- a/SPECS/kata-containers/0004-Merged-PR-9806-Fix-enable_debug-for-hypervisor.clh.patch +++ /dev/null @@ -1,28 +0,0 @@ -From c844e8011f0726e2a371115c209d4c3d63273b3b Mon Sep 17 00:00:00 2001 -From: Daniel Mihai -Date: Thu, 1 Sep 2022 15:54:16 +0000 -Subject: [PATCH 10/10] Merged PR 9806: Fix enable_debug for [hypervisor.clh] - -Fix error when using enable_debug = true in configuration.toml: - -level=error msg="Error create pseudo tty" -error="open /dev/ptmx: operation not permitted" ---- - src/runtime/pkg/resourcecontrol/cgroups.go | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/src/runtime/pkg/resourcecontrol/cgroups.go b/src/runtime/pkg/resourcecontrol/cgroups.go -index d4608458..f674e97a 100644 ---- a/src/runtime/pkg/resourcecontrol/cgroups.go -+++ b/src/runtime/pkg/resourcecontrol/cgroups.go -@@ -57,6 +57,7 @@ func sandboxDevices() []specs.LinuxDeviceCgroup { - "/dev/zero", - "/dev/urandom", - "/dev/console", -+ "/dev/ptmx", - } - - // Processes running in a device-cgroup are constrained, they have acccess --- -2.17.1 - diff --git a/SPECS/kata-containers/0005-Merged-PR-9956-shim-avoid-memory-hotplug-timeout.patch b/SPECS/kata-containers/0005-Merged-PR-9956-shim-avoid-memory-hotplug-timeout.patch deleted file mode 100644 index de9230c9e7d..00000000000 --- a/SPECS/kata-containers/0005-Merged-PR-9956-shim-avoid-memory-hotplug-timeout.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 7fab743a43e4f2063d560161753f2b6390c7add6 Mon Sep 17 00:00:00 2001 -From: Dan Mihai -Date: Thu, 15 Sep 2022 20:50:12 +0000 -Subject: [PATCH] Merged PR 9956: shim: avoid memory hotplug timeout - -Wait up to 10 seconds for cloud-hypervisor memory hotplug. ---- - src/runtime/virtcontainers/clh.go | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/src/runtime/virtcontainers/clh.go b/src/runtime/virtcontainers/clh.go -index 118e1b4d..f18b6c6f 100644 ---- a/src/runtime/virtcontainers/clh.go -+++ b/src/runtime/virtcontainers/clh.go -@@ -918,7 +918,9 @@ func (clh *cloudHypervisor) ResizeMemory(ctx context.Context, reqMemMB uint32, m - } - - cl := clh.client() -- ctx, cancelResize := context.WithTimeout(ctx, clh.getClhAPITimeout()*time.Second) -+ // FIXME: memory hotplug sometimes takes longer than 1 second. 
-+ // ctx, cancelResize := context.WithTimeout(ctx, clh.getClhAPITimeout()*time.Second) -+ ctx, cancelResize := context.WithTimeout(ctx, 10*time.Second) - defer cancelResize() - - resize := *chclient.NewVmResize() --- -2.17.1 - diff --git a/SPECS/kata-containers/drop-mut-for-variables-that-are-not-mutated.patch b/SPECS/kata-containers/drop-mut-for-variables-that-are-not-mutated.patch deleted file mode 100644 index 6eddcfdc68f..00000000000 --- a/SPECS/kata-containers/drop-mut-for-variables-that-are-not-mutated.patch +++ /dev/null @@ -1,54 +0,0 @@ -From 19a8a137b1c5fd9248896bd5f63638acfc9aff8c Mon Sep 17 00:00:00 2001 -From: Muhammad Falak R Wani -Date: Thu, 14 Sep 2023 14:56:17 +0530 -Subject: [PATCH 1/2] kata-types: drop mut for variables that are not mutated - -Signed-off-by: Muhammad Falak R Wani ---- - src/libs/kata-types/src/annotations/mod.rs | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/src/libs/kata-types/src/annotations/mod.rs b/src/libs/kata-types/src/annotations/mod.rs -index c8d6312..d6c51c1 100644 ---- a/src/libs/kata-types/src/annotations/mod.rs -+++ b/src/libs/kata-types/src/annotations/mod.rs -@@ -462,8 +462,8 @@ impl Annotation { - let u32_err = io::Error::new(io::ErrorKind::InvalidData, "parse u32 error".to_string()); - let u64_err = io::Error::new(io::ErrorKind::InvalidData, "parse u64 error".to_string()); - let i32_err = io::Error::new(io::ErrorKind::InvalidData, "parse i32 error".to_string()); -- let mut hv = config.hypervisor.get_mut(hypervisor_name).unwrap(); -- let mut ag = config.agent.get_mut(agent_name).unwrap(); -+ let hv = config.hypervisor.get_mut(hypervisor_name).unwrap(); -+ let ag = config.agent.get_mut(agent_name).unwrap(); - for (key, value) in &self.annotations { - if hv.security_info.is_annotation_enabled(key) { - match key.as_str() { --- -2.40.1 - -From 7ec3b121c3891f4e4de643bcbef3287d7f564d7f Mon Sep 17 00:00:00 2001 -From: Muhammad Falak R Wani -Date: Thu, 14 Sep 2023 15:31:16 +0530 -Subject: [PATCH 2/2] agent: drop mut from variable which is not mutated - -Signed-off-by: Muhammad Falak R Wani ---- - src/agent/src/signal.rs | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/agent/src/signal.rs b/src/agent/src/signal.rs -index 79dea3b..8ec6556 100644 ---- a/src/agent/src/signal.rs -+++ b/src/agent/src/signal.rs -@@ -57,7 +57,7 @@ async fn handle_sigchild(logger: Logger, sandbox: Arc>) -> Result - continue; - } - -- let mut p = process.unwrap(); -+ let p = process.unwrap(); - - let ret: i32 = match wait_status { - WaitStatus::Exited(_, c) => c, --- -2.40.1 - diff --git a/SPECS/kata-containers/kata-containers.signatures.json b/SPECS/kata-containers/kata-containers.signatures.json index 9c1da048474..edaeb204e20 100644 --- a/SPECS/kata-containers/kata-containers.signatures.json +++ b/SPECS/kata-containers/kata-containers.signatures.json @@ -1,8 +1,8 @@ { "Signatures": { "50-kata": "fb108c6337b3d3bf80b43ab04f2bf9a3bdecd29075ebd16320aefe8f81c502a7", - "kata-containers-3.1.0-vendor.tar.gz": "d14032fc30e0f8e1bd9afc57264ed703df6cdf48ad2b1845b02e046763ac3352", - "kata-containers-3.1.0.tar.gz": "9785078a2250a784c30692f156de4a1a2cfa754a38b48b755ece7517902ffed3", + "kata-containers-3.2.0.azl0-cargo.tar.gz": "7ff6c5f7f7aa31a99ea5d837876291d886b16c32f21b6d65d044fd398abff1e6", + "kata-containers-3.2.0.azl0.tar.gz": "6b3cb0067ccc36f4ff80cfc88cb006b30994564a43c23bc54770edeb382bcf72", "mariner-build-uvm.sh": "a0fbee4def82ee492eab64a8b5a948c2fef125fa1ca5686aafa0a80c64144068" } } diff --git 
a/SPECS/kata-containers/kata-containers.spec b/SPECS/kata-containers/kata-containers.spec index 8ac721cfc0f..92da03ab364 100644 --- a/SPECS/kata-containers/kata-containers.spec +++ b/SPECS/kata-containers/kata-containers.spec @@ -21,46 +21,32 @@ %global kataclhdir /usr/share/cloud-hypervisor %global katainitrddir /var/cache/kata-containers/osbuilder-images/kernel-uvm -%global runtime_make_vars QEMUPATH=%{qemupath} \\\ - KERNELTYPE="compressed" \\\ +# DEFAULT_HYPERVISOR: makes configuration.toml link to configuration-clh.toml. +%global runtime_make_vars KERNELTYPE="compressed" \\\ KERNELPARAMS="systemd.legacy_systemd_cgroup_controller=yes systemd.unified_cgroup_hierarchy=0" \\\ - DEFSHAREDFS="virtio-fs" \\\ - DEFVIRTIOFSDAEMON=%{_libexecdir}/"virtiofsd" \\\ - DEFVIRTIOFSCACHESIZE=0 \\\ - DEFSANDBOXCGROUPONLY=false \\\ + DEFVIRTIOFSDAEMON=%{_libexecdir}/"virtiofsd-rs" \\\ + DEFSTATICRESOURCEMGMT_CLH=true \\\ DEFSTATICSANDBOXWORKLOADMEM=1792 \\\ DEFMEMSZ=256 \\\ SKIP_GO_VERSION_CHECK=y \\\ - MACHINETYPE=%{machinetype} \\\ DESTDIR=%{buildroot} \\\ PREFIX=/usr \\\ - FEATURE_SELINUX="yes" \\\ - DEFENABLEANNOTATIONS=['\\\".*\\\"'] \\\ DEFAULT_HYPERVISOR=cloud-hypervisor %global agent_make_vars LIBC=gnu \\\ DESTDIR=%{buildroot}%{kataagentdir} -Summary: Kata Containers version 2.x repository +Summary: Kata Containers Name: kata-containers -Version: 3.1.0 -Release: 11%{?dist} +Version: 3.2.0.azl0 +Release: 1%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation -URL: https://github.com/%{name}/%{name} -Source0: https://github.com/%{name}/%{name}/archive/refs/tags/%{version}.tar.gz#/%{name}-%{version}.tar.gz -Source1: https://github.com/%{name}/%{name}/releases/download/%{version}/%{name}-%{version}-vendor.tar.gz +URL: https://github.com/microsoft/kata-containers +Source0: https://github.com/microsoft/kata-containers/archive/refs/tags/%{version}.tar.gz#/%{name}-%{version}.tar.gz +Source1: %{name}-%{version}-cargo.tar.gz Source2: 50-kata Source3: mariner-build-uvm.sh -Patch0: 0001-Merged-PR-9607-Allow-10-seconds-for-VM-creation-star.patch -Patch1: 0002-Merged-PR-9671-Wait-for-a-possibly-slow-Guest.patch -Patch2: 0003-Merged-PR-9805-Add-support-for-MSHV.patch -Patch3: 0004-Merged-PR-9806-Fix-enable_debug-for-hypervisor.clh.patch -Patch4: 0005-Merged-PR-9956-shim-avoid-memory-hotplug-timeout.patch -Patch5: runtime-reduce-uvm-high-mem-footprint.patch -Patch6: drop-mut-for-variables-that-are-not-mutated.patch -Patch7: 0001-osbuilder-Add-support-for-CBL-Mariner.patch -Patch8: 0001-Append-systemd-kernel-cmdline-params-for-initrd.patch BuildRequires: golang BuildRequires: git-core @@ -76,23 +62,21 @@ BuildRequires: kernel BuildRequires: busybox BuildRequires: cargo BuildRequires: rust +BuildRequires: device-mapper-devel +BuildRequires: clang Requires: busybox Requires: kernel Requires: libseccomp -Requires: qemu-virtiofsd - -Conflicts: kata-agent -Conflicts: kata-ksm-throttler -Conflicts: kata-proxy -Conflicts: kata-runtime -Conflicts: kata-shim +# Must match the version specified by the `assets.virtiofsd.version` field in +# %{SOURCE0}/versions.yaml. +Requires: virtiofsd = 1.8.0 %description -Kata Containers version 2.x repository. Kata Containers is an open source -project and community working to build a standard implementation of lightweight -Virtual Machines (VMs) that feel and perform like containers, but provide the -workload isolation and security advantages of VMs. 
https://katacontainers.io/.} +Kata Containers is an open source project and community working to build a +standard implementation of lightweight Virtual Machines (VMs) that feel and +perform like containers, but provide the workload isolation and security +advantages of VMs. https://katacontainers.io/.} %package tools Summary: Kata Tools package @@ -143,12 +127,13 @@ install -m 0755 -D -t %{buildroot}%{katauvmdir} %{SOURCE3} install -m 0644 -D -t %{buildroot}%{katauvmdir} VERSION install -m 0644 -D -t %{buildroot}%{katauvmdir} versions.yaml install -D -m 0644 ci/install_yq.sh %{buildroot}%{katauvmdir}/ci/install_yq.sh -sed -i 's#distro_config_dir="${script_dir}/${distro}#distro_config_dir="${script_dir}/cbl-mariner#g' tools/osbuilder/rootfs-builder/rootfs.sh +sed --follow-symlinks -i 's#distro_config_dir="${script_dir}/${distro}#distro_config_dir="${script_dir}/cbl-mariner#g' tools/osbuilder/rootfs-builder/rootfs.sh pushd src/runtime %make_install %{runtime_make_vars} -sed -i -e "s|image = .*$|initrd = \"%{katainitrddir}/kata-containers-initrd.img\"|" %{buildroot}%{kataconfigdir}/configuration.toml -sed -i -e "s|kernel = .*$|kernel = \"%{kataclhdir}/vmlinux.bin\"|" %{buildroot}%{kataconfigdir}/configuration.toml +# Ensure sed doesn't replace the configuration.toml symlink by a regular file. +sed --follow-symlinks -i -e "s|image = .*$|initrd = \"%{katainitrddir}/kata-containers-initrd.img\"|" %{buildroot}%{kataconfigdir}/configuration.toml +sed --follow-symlinks -i -e "s|kernel = .*$|kernel = \"%{kataclhdir}/vmlinux.bin\"|" %{buildroot}%{kataconfigdir}/configuration.toml popd pushd src/agent @@ -229,6 +214,9 @@ ln -sf %{_bindir}/kata-runtime %{buildroot}%{_prefix}/local/bin/kata-runtime %exclude %{kataosbuilderdir}/rootfs-builder/ubuntu %changelog +* Mon Feb 12 2024 Aurelien Bombo - 3.2.0.azl0-1 +- Use Microsoft sources based on upstream version 3.2.0. + * Fri Feb 02 2024 CBL-Mariner Servicing Account - 3.1.0-11 - Bump release to rebuild with go 1.21.6 diff --git a/SPECS/kata-containers/runtime-reduce-uvm-high-mem-footprint.patch b/SPECS/kata-containers/runtime-reduce-uvm-high-mem-footprint.patch deleted file mode 100644 index 7175f085b22..00000000000 --- a/SPECS/kata-containers/runtime-reduce-uvm-high-mem-footprint.patch +++ /dev/null @@ -1,283 +0,0 @@ -From ff6c016a20f95580e7d1f06e3787c0675675807f Mon Sep 17 00:00:00 2001 -From: Manuel Huber -Date: Wed, 22 Mar 2023 17:12:09 +0000 -Subject: [PATCH] Merged PR 12983: Commit d5ed88f3: Fix 43668151: Resolve high - UVM memory footprint - -Bug: https://microsoft.visualstudio.com/OS/_workitems/edit/43668151 - -Rationale: This is a temporary solution for optimizing memory usage for the -current mechanism of requesting resources through pod Limit annotations: -- if no Limits are specified and hence WorkloadMemMB is 0, set - a default value 'StaticWorkloadDefaultMem' to allocate a default amount - of memory for use for containers in the sandbox in addition to the base memory -- if Limits are specified, the base memory and the sum of Limits are - allocated. 
The end user needs to be aware of the minimum memory - requirements for their pods, otherwise the pod will be stuck in the - ContainerCreating state - -Testing: Manual testing, creating pods with Limits and without limits, and with two containers where each container has a limit, tested with integration in a SPEC file where the config variables were set via environment variables via the make command ---- - src/runtime/Makefile | 8 ++++- - src/runtime/config/configuration-clh.toml.in | 17 +++++---- - src/runtime/config/configuration-fc.toml.in | 5 +++ - src/runtime/config/configuration-qemu.toml.in | 7 +++- - src/runtime/pkg/katautils/config.go | 36 ++++++++++--------- - src/runtime/pkg/oci/utils.go | 11 ++++++ - src/runtime/virtcontainers/hypervisor.go | 2 +- - src/runtime/virtcontainers/sandbox.go | 3 ++ - 8 files changed, 63 insertions(+), 26 deletions(-) - -diff --git a/src/runtime/Makefile b/src/runtime/Makefile -index 99dde7e..1fbac61 100644 ---- a/src/runtime/Makefile -+++ b/src/runtime/Makefile -@@ -158,7 +158,7 @@ DEFVCPUS := 1 - # Default maximum number of vCPUs - DEFMAXVCPUS := 0 - # Default memory size in MiB --DEFMEMSZ := 2048 -+DEFMEMSZ ?= 2048 - # Default memory slots - # Cases to consider : - # - nvdimm rootfs image -@@ -225,6 +225,9 @@ DEFSANDBOXCGROUPONLY ?= false - - DEFSTATICRESOURCEMGMT ?= false - -+# Default memory for use for workloads within the sandbox if no specific workload memory value is requested -+DEFSTATICSANDBOXWORKLOADMEM ?= 2048 -+ - DEFBINDMOUNTS := [] - - SED = sed -@@ -292,6 +295,7 @@ ifneq (,$(CLHCMD)) - # CLH-specific options (all should be suffixed by "_CLH") - # currently, huge pages are required for virtiofsd support - DEFNETWORKMODEL_CLH := tcfilter -+ DEFSTATICRESOURCEMGMT_CLH = true - KERNELTYPE_CLH = uncompressed - KERNEL_NAME_CLH = $(call MAKE_KERNEL_NAME,$(KERNELTYPE_CLH)) - KERNELPATH_CLH = $(KERNELDIR)/$(KERNEL_NAME_CLH) -@@ -501,7 +505,9 @@ USER_VARS += DEFENTROPYSOURCE - USER_VARS += DEFVALIDENTROPYSOURCES - USER_VARS += DEFSANDBOXCGROUPONLY - USER_VARS += DEFSTATICRESOURCEMGMT -+USER_VARS += DEFSTATICRESOURCEMGMT_CLH - USER_VARS += DEFSTATICRESOURCEMGMT_FC -+USER_VARS += DEFSTATICSANDBOXWORKLOADMEM - USER_VARS += DEFBINDMOUNTS - USER_VARS += DEFVFIOMODE - USER_VARS += BUILDFLAGS -diff --git a/src/runtime/config/configuration-clh.toml.in b/src/runtime/config/configuration-clh.toml.in -index df7cc7a..d9e4864 100644 ---- a/src/runtime/config/configuration-clh.toml.in -+++ b/src/runtime/config/configuration-clh.toml.in -@@ -31,7 +31,7 @@ rootfs_type=@DEFROOTFSTYPE@ - # - # Known limitations: - # * Does not work by design: --# - CPU Hotplug -+# - CPU Hotplug - # - Memory Hotplug - # - NVDIMM devices - # -@@ -206,9 +206,9 @@ block_device_driver = "virtio-blk" - # and we strongly advise users to refer the Cloud Hypervisor official - # documentation for a better understanding of its internals: - # https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/docs/io_throttling.md --# -+# - # Bandwidth rate limiter options --# -+# - # net_rate_limiter_bw_max_rate controls network I/O bandwidth (size in bits/sec - # for SB/VM). - # The same value is used for inbound and outbound bandwidth. 
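Taken together, the rationale above reduces to one sizing rule: with static sandbox resource management enabled, VM memory is the base size plus the workload size, and a zero workload request falls back to the configured default. With the values this spec passes to make (DEFMEMSZ=256, DEFSTATICSANDBOXWORKLOADMEM=1792), a pod without Limits gets 256 + 1792 = 2048 MB. A hypothetical restatement in Go:

```go
// Hypothetical restatement of the sizing rule described in the patch
// rationale; the function and parameter names are illustrative only.
package main

import "fmt"

func sandboxMemMB(baseMemMB, workloadMemMB, workloadDefaultMB uint32) uint32 {
	if workloadMemMB == 0 {
		// No Limits in the pod spec: fall back to the default so the
		// containers still get a reasonable amount of memory.
		workloadMemMB = workloadDefaultMB
	}
	return baseMemMB + workloadMemMB
}

func main() {
	// Values from this spec: DEFMEMSZ=256, DEFSTATICSANDBOXWORKLOADMEM=1792.
	fmt.Println(sandboxMemMB(256, 0, 1792))    // 2048 (no Limits set)
	fmt.Println(sandboxMemMB(256, 4096, 1792)) // 4352 (Limits sum to 4096)
}
```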
-@@ -242,9 +242,9 @@ block_device_driver = "virtio-blk" - # and we strongly advise users to refer the Cloud Hypervisor official - # documentation for a better understanding of its internals: - # https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/docs/io_throttling.md --# -+# - # Bandwidth rate limiter options --# -+# - # disk_rate_limiter_bw_max_rate controls disk I/O bandwidth (size in bits/sec - # for SB/VM). - # The same value is used for inbound and outbound bandwidth. -@@ -380,7 +380,12 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@ - # - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O - # does not yet support sandbox sizing annotations. - # - When running single containers using a tool like ctr, container sizing information will be available. --static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT@ -+static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_CLH@ -+ -+# If set, the runtime will use the value as the default workload memory in MB for the sandbox when no workload memory request is passed -+# down to the shim via the OCI when static sandbox resource management is enabled. With this, we ensure that workloads have a proper -+# default amount of memory available within the sandbox. -+static_sandbox_default_workload_mem=@DEFSTATICSANDBOXWORKLOADMEM@ - - # If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path. - # This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory. -diff --git a/src/runtime/config/configuration-fc.toml.in b/src/runtime/config/configuration-fc.toml.in -index 10dc177..6dfe5ce 100644 ---- a/src/runtime/config/configuration-fc.toml.in -+++ b/src/runtime/config/configuration-fc.toml.in -@@ -358,6 +358,11 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@ - # - When running single containers using a tool like ctr, container sizing information will be available. - static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_FC@ - -+# If set, the runtime will use the value as the default workload memory in MB for the sandbox when no workload memory request is passed -+# down to the shim via the OCI when static sandbox resource management is enabled. With this, we ensure that workloads have a proper -+# default amount of memory available within the sandbox. -+static_sandbox_default_workload_mem=@DEFSTATICSANDBOXWORKLOADMEM@ -+ - # If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will - # be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest. - disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@ -diff --git a/src/runtime/config/configuration-qemu.toml.in b/src/runtime/config/configuration-qemu.toml.in -index 4fb5a8b..cb29ca5 100644 ---- a/src/runtime/config/configuration-qemu.toml.in -+++ b/src/runtime/config/configuration-qemu.toml.in -@@ -33,7 +33,7 @@ rootfs_type=@DEFROOTFSTYPE@ - # - # Known limitations: - # * Does not work by design: --# - CPU Hotplug -+# - CPU Hotplug - # - Memory Hotplug - # - NVDIMM devices - # -@@ -622,6 +622,11 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@ - # - When running single containers using a tool like ctr, container sizing information will be available. 
- static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT@ - -+# If set, the runtime will use the value as the default workload memory in MB for the sandbox when no workload memory request is passed -+# down to the shim via the OCI when static sandbox resource management is enabled. With this, we ensure that workloads have a proper -+# default amount of memory available within the sandbox. -+static_sandbox_default_workload_mem=@DEFSTATICSANDBOXWORKLOADMEM@ -+ - # If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path. - # This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory. - # If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts` -diff --git a/src/runtime/pkg/katautils/config.go b/src/runtime/pkg/katautils/config.go -index 997d073..866db0b 100644 ---- a/src/runtime/pkg/katautils/config.go -+++ b/src/runtime/pkg/katautils/config.go -@@ -161,23 +161,24 @@ type hypervisor struct { - } - - type runtime struct { -- InterNetworkModel string `toml:"internetworking_model"` -- JaegerEndpoint string `toml:"jaeger_endpoint"` -- JaegerUser string `toml:"jaeger_user"` -- JaegerPassword string `toml:"jaeger_password"` -- VfioMode string `toml:"vfio_mode"` -- GuestSeLinuxLabel string `toml:"guest_selinux_label"` -- SandboxBindMounts []string `toml:"sandbox_bind_mounts"` -- Experimental []string `toml:"experimental"` -- Tracing bool `toml:"enable_tracing"` -- DisableNewNetNs bool `toml:"disable_new_netns"` -- DisableGuestSeccomp bool `toml:"disable_guest_seccomp"` -- EnableVCPUsPinning bool `toml:"enable_vcpus_pinning"` -- Debug bool `toml:"enable_debug"` -- SandboxCgroupOnly bool `toml:"sandbox_cgroup_only"` -- StaticSandboxResourceMgmt bool `toml:"static_sandbox_resource_mgmt"` -- EnablePprof bool `toml:"enable_pprof"` -- DisableGuestEmptyDir bool `toml:"disable_guest_empty_dir"` -+ InterNetworkModel string `toml:"internetworking_model"` -+ JaegerEndpoint string `toml:"jaeger_endpoint"` -+ JaegerUser string `toml:"jaeger_user"` -+ JaegerPassword string `toml:"jaeger_password"` -+ VfioMode string `toml:"vfio_mode"` -+ GuestSeLinuxLabel string `toml:"guest_selinux_label"` -+ SandboxBindMounts []string `toml:"sandbox_bind_mounts"` -+ Experimental []string `toml:"experimental"` -+ Tracing bool `toml:"enable_tracing"` -+ DisableNewNetNs bool `toml:"disable_new_netns"` -+ DisableGuestSeccomp bool `toml:"disable_guest_seccomp"` -+ EnableVCPUsPinning bool `toml:"enable_vcpus_pinning"` -+ Debug bool `toml:"enable_debug"` -+ SandboxCgroupOnly bool `toml:"sandbox_cgroup_only"` -+ StaticSandboxResourceMgmt bool `toml:"static_sandbox_resource_mgmt"` -+ EnablePprof bool `toml:"enable_pprof"` -+ DisableGuestEmptyDir bool `toml:"disable_guest_empty_dir"` -+ StaticSandboxWorkloadDefaultMem uint32 `toml:"static_sandbox_default_workload_mem"` - } - - type agent struct { -@@ -1372,6 +1373,7 @@ func LoadConfiguration(configPath string, ignoreLogging bool) (resolvedConfigPat - config.EnableVCPUsPinning = tomlConf.Runtime.EnableVCPUsPinning - config.GuestSeLinuxLabel = tomlConf.Runtime.GuestSeLinuxLabel - config.StaticSandboxResourceMgmt = tomlConf.Runtime.StaticSandboxResourceMgmt -+ config.StaticSandboxWorkloadDefaultMem = tomlConf.Runtime.StaticSandboxWorkloadDefaultMem - config.SandboxCgroupOnly = tomlConf.Runtime.SandboxCgroupOnly - config.DisableNewNetNs = tomlConf.Runtime.DisableNewNetNs - config.EnablePprof = 
tomlConf.Runtime.EnablePprof -diff --git a/src/runtime/pkg/oci/utils.go b/src/runtime/pkg/oci/utils.go -index d2d713f..436a0d2 100644 ---- a/src/runtime/pkg/oci/utils.go -+++ b/src/runtime/pkg/oci/utils.go -@@ -143,6 +143,9 @@ type RuntimeConfig struct { - // any later resource updates. - StaticSandboxResourceMgmt bool - -+ // Memory to allocate for workloads within the sandbox when workload memory is unspecified -+ StaticSandboxWorkloadDefaultMem uint32 -+ - // Determines if create a netns for hypervisor process - DisableNewNetNs bool - -@@ -952,6 +955,8 @@ func SandboxConfig(ocispec specs.Spec, runtime RuntimeConfig, bundlePath, cid st - - StaticResourceMgmt: runtime.StaticSandboxResourceMgmt, - -+ StaticWorkloadDefaultMem: runtime.StaticSandboxWorkloadDefaultMem, -+ - ShmSize: shmSize, - - VfioMode: runtime.VfioMode, -@@ -976,6 +981,12 @@ func SandboxConfig(ocispec specs.Spec, runtime RuntimeConfig, bundlePath, cid st - // with the base number of CPU/memory (which is equal to the default CPU/memory specified for the runtime - // configuration or annotations) as well as any specified workload resources. - if sandboxConfig.StaticResourceMgmt { -+ // If no Limits are set in pod config, use StaticWorkloadDefaultMem to ensure the containers generally -+ // have a reasonable amount of memory available -+ if sandboxConfig.SandboxResources.WorkloadMemMB == 0 { -+ sandboxConfig.SandboxResources.WorkloadMemMB = sandboxConfig.StaticWorkloadDefaultMem -+ } -+ - sandboxConfig.SandboxResources.BaseCPUs = sandboxConfig.HypervisorConfig.NumVCPUs - sandboxConfig.SandboxResources.BaseMemMB = sandboxConfig.HypervisorConfig.MemorySize - -diff --git a/src/runtime/virtcontainers/hypervisor.go b/src/runtime/virtcontainers/hypervisor.go -index dee5fec..0d86807 100644 ---- a/src/runtime/virtcontainers/hypervisor.go -+++ b/src/runtime/virtcontainers/hypervisor.go -@@ -74,7 +74,7 @@ const ( - vSockLogsPort = 1025 - - // MinHypervisorMemory is the minimum memory required for a VM. 
-- MinHypervisorMemory = 256 -+ MinHypervisorMemory = 64 - - defaultMsize9p = 8192 - -diff --git a/src/runtime/virtcontainers/sandbox.go b/src/runtime/virtcontainers/sandbox.go -index 523c072..bb36af0 100644 ---- a/src/runtime/virtcontainers/sandbox.go -+++ b/src/runtime/virtcontainers/sandbox.go -@@ -160,6 +160,9 @@ type SandboxConfig struct { - - HypervisorConfig HypervisorConfig - -+ StaticWorkloadDefaultMem uint32 -+ -+ // Memory to allocate for workloads within the sandbox when workload memory is unspecified - ShmSize uint64 - - SandboxResources SandboxResourceSizing --- -2.25.1 - diff --git a/SPECS/kernel/CVE-2018-20169.nopatch b/SPECS/kernel/CVE-2018-20169.nopatch new file mode 100644 index 00000000000..8b65301f1fd --- /dev/null +++ b/SPECS/kernel/CVE-2018-20169.nopatch @@ -0,0 +1,3 @@ +CVE-2018-20169 - patched in 5.15.148.2 - (generated by autopatch tool) +upstream 704620afc70cf47abb9d6a1a57f3825d2bca49cf - stable 704620afc70cf47abb9d6a1a57f3825d2bca49cf + diff --git a/SPECS/kernel/CVE-2024-1312.nopatch b/SPECS/kernel/CVE-2024-1312.nopatch new file mode 100644 index 00000000000..9d379097188 --- /dev/null +++ b/SPECS/kernel/CVE-2024-1312.nopatch @@ -0,0 +1,4 @@ +CVE-2024-1312 - Introducing commit(s) not present in LTS - (generated by autopatch tool) +upstream fix commit: 657b5146955eba331e01b9a6ae89ce2e716ba306 +upstream introducing commit: 5e31275cc997f8ec5d9e8d65fe9840ebed89db19 + diff --git a/SPECS/kubernetes/CVE-2023-48795.patch b/SPECS/kubernetes/CVE-2023-48795.patch new file mode 100644 index 00000000000..b0b6000f36e --- /dev/null +++ b/SPECS/kubernetes/CVE-2023-48795.patch @@ -0,0 +1,270 @@ +From 8e6ebb46718646cadb06d60713aad3b5bdb936a5 Mon Sep 17 00:00:00 2001 +From: Nan Liu +Date: Thu, 15 Feb 2024 18:49:45 +0000 +Subject: [PATCH] address CVE-2023-48795 + +--- +ssh: implement strict KEX protocol changes + +Implement the "strict KEX" protocol changes, as described in section +1.9 of the OpenSSH PROTOCOL file (as of OpenSSH version 9.6/9.6p1). + +Namely this makes the following changes: + * Both the server and the client add an additional algorithm to the + initial KEXINIT message, indicating support for the strict KEX mode. + * When one side of the connection sees the strict KEX extension + algorithm, the strict KEX mode is enabled for messages originating + from the other side of the connection. If the sequence number for + the side which requested the extension is not 1 (indicating that it + has already received non-KEXINIT packets), the connection is + terminated. + * When strict kex mode is enabled, unexpected messages during the + handshake are considered fatal. Additionally when a key change + occurs (on the receipt of the NEWKEYS message) the message sequence + numbers are reset. + +Thanks to Fabian Bäumer, Marcus Brinkmann, and Jörg Schwenk from Ruhr +University Bochum for reporting this issue. 
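In outline, the countermeasure works like this: each side advertises a kex-strict pseudo-algorithm in its first KEXINIT, a peer that sees it enables strict mode, and on every NEWKEYS the packet sequence numbers reset to zero, invalidating anything an attacker injected before the handshake completed. A toy model of that state machine (not the x/crypto/ssh code, whose actual changes follow below):

```go
// Toy model of strict-KEX negotiation as described in the commit message
// above; conn and its methods are illustrative, not the x/crypto/ssh types.
package main

import "fmt"

const (
	kexStrictClient = "kex-strict-c-v00@openssh.com"
	kexStrictServer = "kex-strict-s-v00@openssh.com"
)

func contains(list []string, s string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

type conn struct {
	strictMode bool
	readSeq    uint32
	writeSeq   uint32
}

// onPeerKexInit inspects the algorithm list from the peer's first KEXINIT.
func (c *conn) onPeerKexInit(peerAlgos []string, peerIsServer bool) {
	want := kexStrictClient
	if peerIsServer {
		want = kexStrictServer
	}
	c.strictMode = contains(peerAlgos, want)
}

// onNewKeys models the sequence-number reset that defeats prefix injection.
func (c *conn) onNewKeys() {
	if c.strictMode {
		c.readSeq, c.writeSeq = 0, 0
	}
}

func main() {
	c := &conn{readSeq: 3, writeSeq: 3}
	c.onPeerKexInit([]string{"curve25519-sha256", kexStrictServer}, true)
	c.onNewKeys()
	fmt.Printf("strict=%v seq=%d/%d\n", c.strictMode, c.readSeq, c.writeSeq)
}
```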
+ +Fixes CVE-2023-48795 +Fixes golang/go#64784 + +Change-Id: I96b53afd2bd2fb94d2b6f2a46a5dacf325357604 +Reviewed-on: https://go-review.googlesource.com/c/crypto/+/550715 +Reviewed-by: Nicola Murino +Reviewed-by: Tatiana Bradley +TryBot-Result: Gopher Robot +Run-TryBot: Roland Shoemaker +Reviewed-by: Damien Neil +LUCI-TryBot-Result: Go LUCI + +--- + vendor/golang.org/x/crypto/ssh/handshake.go | 56 +++++++++++++++++++-- + vendor/golang.org/x/crypto/ssh/transport.go | 32 ++++++++++-- + 2 files changed, 79 insertions(+), 9 deletions(-) + +diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go +index 653dc4d..c7ea70f 100644 +--- a/vendor/golang.org/x/crypto/ssh/handshake.go ++++ b/vendor/golang.org/x/crypto/ssh/handshake.go +@@ -34,6 +34,16 @@ type keyingTransport interface { + // direction will be effected if a msgNewKeys message is sent + // or received. + prepareKeyChange(*algorithms, *kexResult) error ++ ++ // setStrictMode sets the strict KEX mode, notably triggering ++ // sequence number resets on sending or receiving msgNewKeys. ++ // If the sequence number is already > 1 when setStrictMode ++ // is called, an error is returned. ++ setStrictMode() error ++ ++ // setInitialKEXDone indicates to the transport that the initial key exchange ++ // was completed ++ setInitialKEXDone() + } + + // handshakeTransport implements rekeying on top of a keyingTransport +@@ -94,6 +104,10 @@ type handshakeTransport struct { + + // The session ID or nil if first kex did not complete yet. + sessionID []byte ++ ++ // strictMode indicates if the other side of the handshake indicated ++ // that we should be following the strict KEX protocol restrictions. ++ strictMode bool + } + + type pendingKex struct { +@@ -201,7 +215,10 @@ func (t *handshakeTransport) readLoop() { + close(t.incoming) + break + } +- if p[0] == msgIgnore || p[0] == msgDebug { ++ // If this is the first kex, and strict KEX mode is enabled, ++ // we don't ignore any messages, as they may be used to manipulate ++ // the packet sequence numbers. ++ if !(t.sessionID == nil && t.strictMode) && (p[0] == msgIgnore || p[0] == msgDebug) { + continue + } + t.incoming <- p +@@ -432,6 +449,11 @@ func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { + return successPacket, nil + } + ++const ( ++ kexStrictClient = "kex-strict-c-v00@openssh.com" ++ kexStrictServer = "kex-strict-s-v00@openssh.com" ++) ++ + // sendKexInit sends a key change message. + func (t *handshakeTransport) sendKexInit() error { + t.mu.Lock() +@@ -445,7 +467,6 @@ func (t *handshakeTransport) sendKexInit() error { + } + + msg := &kexInitMsg{ +- KexAlgos: t.config.KeyExchanges, + CiphersClientServer: t.config.Ciphers, + CiphersServerClient: t.config.Ciphers, + MACsClientServer: t.config.MACs, +@@ -455,6 +476,13 @@ func (t *handshakeTransport) sendKexInit() error { + } + io.ReadFull(rand.Reader, msg.Cookie[:]) + ++ // We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm, ++ // and possibly to add the ext-info extension algorithm. Since the slice may be the ++ // user owned KeyExchanges, we create our own slice in order to avoid using user ++ // owned memory by mistake. ++ msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+2) // room for kex-strict and ext-info ++ msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) 
++ + isServer := len(t.hostKeys) > 0 + if isServer { + for _, k := range t.hostKeys { +@@ -474,17 +502,24 @@ func (t *handshakeTransport) sendKexInit() error { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) + } + } ++ ++ if t.sessionID == nil { ++ msg.KexAlgos = append(msg.KexAlgos, kexStrictServer) ++ } + } else { + msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + + // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what + // algorithms the server supports for public key authentication. See RFC + // 8308, Section 2.1. ++ // ++ // We also send the strict KEX mode extension algorithm, in order to opt ++ // into the strict KEX mode. + if firstKeyExchange := t.sessionID == nil; firstKeyExchange { +- msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1) +- msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) + msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") ++ msg.KexAlgos = append(msg.KexAlgos, kexStrictClient) + } ++ + } + + packet := Marshal(msg) +@@ -581,6 +616,13 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + return err + } + ++ if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) { ++ t.strictMode = true ++ if err := t.conn.setStrictMode(); err != nil { ++ return err ++ } ++ } ++ + // We don't send FirstKexFollows, but we handle receiving it. + // + // RFC 4253 section 7 defines the kex and the agreement method for +@@ -632,6 +674,12 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + return unexpectedMessageError(msgNewKeys, packet[0]) + } + ++ if firstKeyExchange { ++ // Indicates to the transport that the first key exchange is completed ++ // after receiving SSH_MSG_NEWKEYS. ++ t.conn.setInitialKEXDone() ++ } ++ + return nil + } + +diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go +index acf5a21..4df45fc 100644 +--- a/vendor/golang.org/x/crypto/ssh/transport.go ++++ b/vendor/golang.org/x/crypto/ssh/transport.go +@@ -48,6 +48,9 @@ type transport struct { + rand io.Reader + isClient bool + io.Closer ++ ++ strictMode bool ++ initialKEXDone bool + } + + // packetCipher represents a combination of SSH encryption/MAC +@@ -73,6 +76,18 @@ type connectionState struct { + pendingKeyChange chan packetCipher + } + ++func (t *transport) setStrictMode() error { ++ if t.reader.seqNum != 1 { ++ return errors.New("ssh: sequence number != 1 when strict KEX mode requested") ++ } ++ t.strictMode = true ++ return nil ++} ++ ++func (t *transport) setInitialKEXDone() { ++ t.initialKEXDone = true ++} ++ + // prepareKeyChange sets up key material for a keychange. The key changes in + // both directions are triggered by reading and writing a msgNewKey packet + // respectively. +@@ -111,11 +126,12 @@ func (t *transport) printPacket(p []byte, write bool) { + // Read and decrypt next packet. 
+ func (t *transport) readPacket() (p []byte, err error) { + for { +- p, err = t.reader.readPacket(t.bufReader) ++ p, err = t.reader.readPacket(t.bufReader, t.strictMode) + if err != nil { + break + } +- if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { ++ // in strict mode we pass through DEBUG and IGNORE packets only during the initial KEX ++ if len(p) == 0 || (t.strictMode && !t.initialKEXDone) || (p[0] != msgIgnore && p[0] != msgDebug) { + break + } + } +@@ -126,7 +142,7 @@ func (t *transport) readPacket() (p []byte, err error) { + return p, err + } + +-func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { ++func (s *connectionState) readPacket(r *bufio.Reader, strictMode bool) ([]byte, error) { + packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) + s.seqNum++ + if err == nil && len(packet) == 0 { +@@ -139,6 +155,9 @@ func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher ++ if strictMode { ++ s.seqNum = 0 ++ } + default: + return nil, errors.New("ssh: got bogus newkeys message") + } +@@ -169,10 +188,10 @@ func (t *transport) writePacket(packet []byte) error { + if debugTransport { + t.printPacket(packet, true) + } +- return t.writer.writePacket(t.bufWriter, t.rand, packet) ++ return t.writer.writePacket(t.bufWriter, t.rand, packet, t.strictMode) + } + +-func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { ++func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte, strictMode bool) error { + changeKeys := len(packet) > 0 && packet[0] == msgNewKeys + + err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) +@@ -187,6 +206,9 @@ func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet [] + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher ++ if strictMode { ++ s.seqNum = 0 ++ } + default: + panic("ssh: no key material for msgNewKeys") + } +-- +2.25.1 + diff --git a/SPECS/kubernetes/kubernetes.spec b/SPECS/kubernetes/kubernetes.spec index 636da216b93..2ce1567efc2 100644 --- a/SPECS/kubernetes/kubernetes.spec +++ b/SPECS/kubernetes/kubernetes.spec @@ -10,7 +10,7 @@ Summary: Microsoft Kubernetes Name: kubernetes Version: 1.28.4 -Release: 3%{?dist} +Release: 4%{?dist} License: ASL 2.0 Vendor: Microsoft Corporation Distribution: Mariner @@ -19,6 +19,7 @@ URL: https://kubernetes.io/ Source0: https://dl.k8s.io/v%{version}/kubernetes-src.tar.gz#/%{name}-v%{version}.tar.gz Source1: kubelet.service Patch0: CVE-2024-21626.patch +Patch1: CVE-2023-48795.patch BuildRequires: flex-devel BuildRequires: glibc-static >= 2.35-6%{?dist} BuildRequires: golang @@ -93,6 +94,7 @@ Pause component for Microsoft Kubernetes %{version}. 
%prep %setup -q -c -n %{name} %patch 0 -p1 +%patch 1 -p1 %build # set version information using KUBE_GIT_VERSION @@ -265,6 +267,9 @@ fi %{_exec_prefix}/local/bin/pause %changelog +* Thu Feb 15 2024 Nan Liu - 1.28.4-4 +- Address CVE-2023-48795 by patching golang.org/x/crypto + * Thu Feb 15 2024 CBL-Mariner Servicing Account - 1.28.4-3 - Bump release to rebuild with go 1.21.6 diff --git a/SPECS/libgit2/libgit2.signatures.json b/SPECS/libgit2/libgit2.signatures.json index ce4927c7562..468f4647fa9 100644 --- a/SPECS/libgit2/libgit2.signatures.json +++ b/SPECS/libgit2/libgit2.signatures.json @@ -1,5 +1,5 @@ { "Signatures": { - "libgit2-1.4.5.tar.gz": "8487bdda44bb43141d6798f71cab0d071a33fe75aa02a5a31c66ae8f4c9c5adb" + "libgit2-1.6.5.tar.gz": "0f09dd49e409913c94df00eeb5b54f8b597905071b454c7f614f8c6e1ddb8d75" } } \ No newline at end of file diff --git a/SPECS/libgit2/libgit2.spec b/SPECS/libgit2/libgit2.spec index dfa75f2c56a..8032d607ef5 100644 --- a/SPECS/libgit2/libgit2.spec +++ b/SPECS/libgit2/libgit2.spec @@ -1,7 +1,7 @@ Summary: C implementation of the Git core methods as a library with a solid API Name: libgit2 -Version: 1.4.5 -Release: 3%{?dist} +Version: 1.6.5 +Release: 1%{?dist} License: GPLv2 with exceptions Vendor: Microsoft Corporation Distribution: Mariner @@ -66,7 +66,8 @@ rm -vr deps %files %license COPYING -%{_libdir}/libgit2.so.* +%{_libdir}/libgit2.so.1.6* +%{_bindir}/git2 %files devel %doc AUTHORS docs examples README.md @@ -76,6 +77,9 @@ rm -vr deps %{_includedir}/git2/ %changelog +* Wed Feb 21 2024 Sam Meluch - 1.6.5-1 +- Upgrade to version 1.6.5 to fix CVE-2024-24575 + * Wed Jan 17 2024 Harshit Gupta - 1.4.5-3 - Release bump with no changes to force a rebuild and consume new libssh2 build diff --git a/SPECS/libuv/CVE-2024-24806.patch b/SPECS/libuv/CVE-2024-24806.patch new file mode 100644 index 00000000000..ac0db33f8d8 --- /dev/null +++ b/SPECS/libuv/CVE-2024-24806.patch @@ -0,0 +1,53 @@ +From 2c127bf21e7c76e783944b3aae974167099cbad3 Mon Sep 17 00:00:00 2001 +From: Suresh Thelkar +Date: Mon, 19 Feb 2024 10:08:20 +0530 +Subject: [PATCH] Patch for CVE-2024-24806 + +Upstream patch details are given below +https://github.com/libuv/libuv/commit/0f2d7e784a256b54b2385043438848047bc2a629 +--- + src/idna.c | 5 +++-- + test/test-idna.c | 4 ++++ + 2 files changed, 7 insertions(+), 2 deletions(-) + +diff --git a/src/idna.c b/src/idna.c +index b44cb16..9526f85 100644 +--- a/src/idna.c ++++ b/src/idna.c +@@ -307,8 +307,9 @@ long uv__idna_toascii(const char* s, const char* se, char* d, char* de) { + return rc; + } + +- if (d < de) +- *d++ = '\0'; ++ if (d >= de) ++ return UV_EINVAL; + ++ *d++ = '\0'; + return d - ds; /* Number of bytes written. */ + } +diff --git a/test/test-idna.c b/test/test-idna.c +index f4fad96..d079be5 100644 +--- a/test/test-idna.c ++++ b/test/test-idna.c +@@ -99,6 +99,7 @@ TEST_IMPL(utf8_decode1) { + TEST_IMPL(utf8_decode1_overrun) { + const char* p; + char b[1]; ++ char c[1]; + + /* Single byte. 
*/ + p = b; +@@ -112,6 +113,9 @@ TEST_IMPL(utf8_decode1_overrun) { + ASSERT_EQ((unsigned) -1, uv__utf8_decode1(&p, b + 1)); + ASSERT_EQ(p, b + 1); + ++ b[0] = 0x7F; ++ ASSERT_EQ(UV_EINVAL, uv__idna_toascii(b, b + 1, c, c + 1)); ++ + return 0; + } + +-- +2.34.1 + diff --git a/SPECS/libuv/libuv.spec b/SPECS/libuv/libuv.spec index 9daa233f738..938fa027d21 100644 --- a/SPECS/libuv/libuv.spec +++ b/SPECS/libuv/libuv.spec @@ -1,13 +1,14 @@ Summary: Cross-platform asynchronous I/O Name: libuv Version: 1.43.0 -Release: 1%{?dist} +Release: 2%{?dist} License: MIT AND CC-BY Vendor: Microsoft Corporation Distribution: Mariner Group: Applications/System URL: https://libuv.org/ Source0: https://dist.libuv.org/dist/v%{version}/%{name}-v%{version}.tar.gz +Patch0: CVE-2024-24806.patch BuildRequires: build-essential BuildRequires: coreutils %if %{with_check} @@ -35,7 +36,7 @@ Group: Development/Libraries %{summary}. %prep -%setup -q -n %{name}-v%{version} +%autosetup -p1 -n %{name}-v%{version} %build ./autogen.sh @@ -75,6 +76,9 @@ sudo -u test make -k check %{_libdir}/%{name}.a %changelog +* Mon Feb 19 2024 Suresh Thelkar - 1.43.0-2 +- Patch for CVE-2024-24806 + * Tue Jan 25 2022 Henry Li - 1.43.0-1 - Upgrade to version 1.43.0 - License Verified diff --git a/SPECS/mariner-release/mariner-release.spec b/SPECS/mariner-release/mariner-release.spec index 92ead0b6c39..189bba601e5 100644 --- a/SPECS/mariner-release/mariner-release.spec +++ b/SPECS/mariner-release/mariner-release.spec @@ -1,7 +1,7 @@ Summary: CBL-Mariner release files Name: mariner-release Version: 2.0 -Release: 59%{?dist} +Release: 60%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Mariner @@ -62,6 +62,9 @@ EOF %config(noreplace) %{_sysconfdir}/issue.net %changelog +* Thu Feb 29 2024 CBL-Mariner Servicing Account - 2.0-60 +- Bump release for March 2024 Release + * Thu Feb 01 2024 CBL-Mariner Servicing Account - 2.0-59 - Bump release for February 2024 Release diff --git a/SPECS/moby-compose/Change-server-stream-context-handling.patch b/SPECS/moby-compose/Change-server-stream-context-handling.patch new file mode 100644 index 00000000000..2f2f0397203 --- /dev/null +++ b/SPECS/moby-compose/Change-server-stream-context-handling.patch @@ -0,0 +1,354 @@ +From 4ef370839829840761e091b510c9e7e7b5fea022 Mon Sep 17 00:00:00 2001 +From: Sam Meluch +Date: Thu, 22 Feb 2024 14:49:41 -0800 +Subject: [PATCH 2/3] Change server stream context handling + +--- + .../grpc/internal/transport/handler_server.go | 2 +- + .../grpc/internal/transport/http2_server.go | 7 +- + .../grpc/internal/transport/transport.go | 2 +- + vendor/google.golang.org/grpc/server.go | 116 ++++++++---------- + 4 files changed, 56 insertions(+), 71 deletions(-) + +diff --git a/google.golang.org/grpc/internal/transport/handler_server.go b/google.golang.org/grpc/internal/transport/handler_server.go +index 0901209..3af9b4a 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go ++++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go +@@ -326,7 +326,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + return err + } + +-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { ++func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { + // With this transport type there will be exactly 1 stream: this HTTP request. 
+ + ctx := ht.req.Context() +diff --git a/google.golang.org/grpc/internal/transport/http2_server.go b/google.golang.org/grpc/internal/transport/http2_server.go +index 3dd1564..94d441c 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go ++++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go +@@ -345,7 +345,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + } + + // operateHeader takes action on the decoded headers. +-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { ++func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) (fatal bool) { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() +@@ -565,7 +565,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } +- s.ctx = traceCtx(s.ctx, s.method) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ +@@ -603,7 +602,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + // HandleStreams receives incoming streams using the given handler. This is + // typically run in a separate goroutine. + // traceCtx attaches trace to ctx and returns the new context. +-func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { ++func (t *http2Server) HandleStreams(handle func(*Stream)) { + defer close(t.readerDone) + for { + t.controlBuf.throttle() +@@ -641,7 +640,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: +- if t.operateHeaders(frame, handle, traceCtx) { ++ if t.operateHeaders(frame, handle){ + t.Close() + break + } +diff --git a/google.golang.org/grpc/internal/transport/transport.go b/google.golang.org/grpc/internal/transport/transport.go +index 6c3ba85..992fc25 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/transport.go ++++ b/vendor/google.golang.org/grpc/internal/transport/transport.go +@@ -674,7 +674,7 @@ type ClientTransport interface { + // Write methods for a given Stream will be called serially. + type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. +- HandleStreams(func(*Stream), func(context.Context, string) context.Context) ++ HandleStreams(func(*Stream)) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. 
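The common thread in the transport hunks above is that HandleStreams no longer receives a traceCtx callback; the server derives the per-stream context itself once it starts handling the stream, as the server.go changes below show. A rough sketch of that shape using golang.org/x/net/trace, where `stream` and `handleStream` are simplified stand-ins rather than the grpc-go types:

```go
// Rough sketch of the refactor: the server, not the transport, attaches the
// trace to the per-stream context. stream/handleStream are simplified
// stand-ins, not the grpc-go types. Requires golang.org/x/net/trace.
package main

import (
	"context"
	"fmt"

	"golang.org/x/net/trace"
)

var enableTracing = true

type stream struct{ method string }

func (s *stream) Context() context.Context { return context.Background() }

func process(ctx context.Context, st *stream) {
	// Downstream code recovers the trace from the context if one was set.
	if tr, ok := trace.FromContext(ctx); ok {
		tr.LazyPrintf("handling %s", st.method)
	}
	fmt.Println("handled", st.method)
}

func handleStream(st *stream) {
	ctx := st.Context()
	if enableTracing {
		tr := trace.New("grpc.Recv."+st.method, st.method)
		defer tr.Finish()
		ctx = trace.NewContext(ctx, tr)
	}
	process(ctx, st) // ctx now carries the trace, if any
}

func main() {
	handleStream(&stream{method: "/echo.Echo/Say"})
}
```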
+diff --git a/google.golang.org/grpc/server.go b/google.golang.org/grpc/server.go +index b8f9b5e..5bccd48 100644 +--- a/vendor/google.golang.org/grpc/server.go ++++ b/vendor/google.golang.org/grpc/server.go +@@ -577,7 +577,7 @@ func (s *Server) serverWorker() { + + func (s *Server) handleSingleStream(data *serverWorkerData) { + defer data.wg.Done() +- s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) ++ s.handleStream(data.st, data.stream) + } + + // initServerWorkers creates worker goroutines and a channel to process incoming +@@ -955,14 +955,8 @@ func (s *Server) serveStreams(st transport.ServerTransport) { + } + go func() { + defer wg.Done() +- s.handleStream(st, stream, s.traceInfo(st, stream)) ++ s.handleStream(st, stream) + }() +- }, func(ctx context.Context, method string) context.Context { +- if !EnableTracing { +- return ctx +- } +- tr := trace.New("grpc.Recv."+methodFamily(method), method) +- return trace.NewContext(ctx, tr) + }) + wg.Wait() + } +@@ -1010,30 +1004,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.serveStreams(st) + } + +-// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +-// If tracing is not enabled, it returns nil. +-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { +- if !EnableTracing { +- return nil +- } +- tr, ok := trace.FromContext(stream.Context()) +- if !ok { +- return nil +- } +- +- trInfo = &traceInfo{ +- tr: tr, +- firstLine: firstLine{ +- client: false, +- remoteAddr: st.RemoteAddr(), +- }, +- } +- if dl, ok := stream.Context().Deadline(); ok { +- trInfo.firstLine.deadline = time.Until(dl) +- } +- return trInfo +-} +- + func (s *Server) addConn(addr string, st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() +@@ -1094,7 +1064,7 @@ func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) + } + +-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { ++func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) +@@ -1113,7 +1083,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str + err = t.Write(stream, hdr, payload, opts) + if err == nil { + for _, sh := range s.opts.statsHandlers { +- sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) ++ sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + } + } + return err +@@ -1160,7 +1130,7 @@ func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerIn + } + } + +-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { ++func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { +@@ -1174,7 +1144,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
+ IsClientStream: false, + IsServerStream: false, + } +- sh.HandleRPC(stream.Context(), statsBegin) ++ sh.HandleRPC(ctx, statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) +@@ -1206,7 +1176,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } +- sh.HandleRPC(stream.Context(), end) ++ sh.HandleRPC(ctx, end) + } + + if channelz.IsOn() { +@@ -1228,7 +1198,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + } + } + if len(binlogs) != 0 { +- ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, +@@ -1307,7 +1276,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + for _, sh := range shs { +- sh.HandleRPC(stream.Context(), &stats.InPayload{ ++ sh.HandleRPC(ctx, &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + WireLength: payInfo.wireLength + headerLen, +@@ -1328,7 +1297,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + } + return nil + } +- ctx := NewContextWithServerTransportStream(stream.Context(), stream) ++ ctx = NewContextWithServerTransportStream(ctx, stream) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) +@@ -1371,7 +1340,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + } + opts := &transport.Options{Last: true} + +- if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { ++ if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). 
+ return err +@@ -1480,7 +1449,7 @@ func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServe + } + } + +-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { ++func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } +@@ -1494,10 +1463,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + IsServerStream: sd.ServerStreams, + } + for _, sh := range shs { +- sh.HandleRPC(stream.Context(), statsBegin) ++ sh.HandleRPC(ctx, statsBegin) + } + } +- ctx := NewContextWithServerTransportStream(stream.Context(), stream) ++ ctx = NewContextWithServerTransportStream(ctx, stream) + ss := &serverStream{ + ctx: ctx, + t: t, +@@ -1533,7 +1502,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + end.Error = toRPCErr(err) + } + for _, sh := range shs { +- sh.HandleRPC(stream.Context(), end) ++ sh.HandleRPC(ctx, end) + } + } + +@@ -1672,27 +1641,44 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + return err + } + +-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { ++func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { ++ ctx := stream.Context() ++ var ti *traceInfo ++ if EnableTracing { ++ tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) ++ ctx = trace.NewContext(ctx, tr) ++ ti = &traceInfo{ ++ tr: tr, ++ firstLine: firstLine{ ++ client: false, ++ remoteAddr: t.RemoteAddr(), ++ }, ++ } ++ if dl, ok := ctx.Deadline(); ok { ++ ti.firstLine.deadline = time.Until(dl) ++ } ++ } ++ + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { +- if trInfo != nil { +- trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) +- trInfo.tr.SetError() ++ if ti != nil { ++ ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) ++ ti.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { +- if trInfo != nil { +- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) +- trInfo.tr.SetError() ++ if ti != nil { ++ ti.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ++ ti.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } +- if trInfo != nil { +- trInfo.tr.Finish() ++ if ti != nil { ++ ti.tr.Finish() + } + return + } +@@ -1702,17 +1688,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str + srv, knownService := s.services[service] + if knownService { + if md, ok := srv.methods[method]; ok { +- s.processUnaryRPC(t, stream, srv, md, trInfo) ++ s.processUnaryRPC(ctx, t, stream, srv, md, ti) + return + } + if sd, ok := srv.streams[method]; ok { +- s.processStreamingRPC(t, stream, srv, sd, trInfo) ++ s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + return + } + } + // Unknown service, or known server unknown method. 
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { +- s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) ++ s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + return + } + var errDesc string +@@ -1721,19 +1707,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } +- if trInfo != nil { +- trInfo.tr.LazyPrintf("%s", errDesc) +- trInfo.tr.SetError() ++ if ti != nil { ++ ti.tr.LazyPrintf("%s", errDesc) ++ ti.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { +- if trInfo != nil { +- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) +- trInfo.tr.SetError() ++ if ti != nil { ++ ti.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ++ ti.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } +- if trInfo != nil { +- trInfo.tr.Finish() ++ if ti != nil { ++ ti.tr.Finish() + } + } + +-- +2.34.1 + diff --git a/SPECS/moby-compose/generate_source_tarball.sh b/SPECS/moby-compose/generate_source_tarball.sh old mode 100644 new mode 100755 diff --git a/SPECS/moby-compose/moby-compose.signatures.json b/SPECS/moby-compose/moby-compose.signatures.json index e87579e6c1c..42b793ada19 100644 --- a/SPECS/moby-compose/moby-compose.signatures.json +++ b/SPECS/moby-compose/moby-compose.signatures.json @@ -1,6 +1,6 @@ { "Signatures": { - "moby-compose-2.17.2-govendor-v1.tar.gz": "439fded1938c7dfc8d18a4750e8d240a559763b17ef967734aa7c44570092993", - "moby-compose-2.17.2.tar.gz": "d6e6de858ecdb0104991c86c66dde5dd4fb6a1160d707308d8ad3167450c8094" + "moby-compose-2.17.3-govendor-v1.tar.gz": "8abc1f732e9ac9a0843c1c7edf2a0dcd23f7805b859a0e3059bc2f5d4edbe3c8", + "moby-compose-2.17.3.tar.gz": "e5e9bdfc3a827240381b656da88f92b408ea2e203c3f8cfd9e0bbfe03f825f16" } } \ No newline at end of file diff --git a/SPECS/moby-compose/moby-compose.spec b/SPECS/moby-compose/moby-compose.spec index 042f757e56b..3eaa0fa5a5f 100644 --- a/SPECS/moby-compose/moby-compose.spec +++ b/SPECS/moby-compose/moby-compose.spec @@ -1,7 +1,7 @@ Summary: Define and run multi-container applications with Docker Name: moby-compose -Version: 2.17.2 -Release: 7%{?dist} +Version: 2.17.3 +Release: 1%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Mariner @@ -9,6 +9,14 @@ Group: Tools/Container URL: https://github.com/docker/compose Source0: https://github.com/docker/compose/archive/refs/tags/v%{version}.tar.gz#/%{name}-%{version}.tar.gz Patch0: CVE-2023-44487.patch +# Patch can be removed when grpc go module is updated to version v1.62.0, patches backported to v1.50.0 +# These are the patches backported in order to get access to the security fix +# https://github.com/grpc/grpc-go/commit/6eabd7e1834e47b20f55cbe9d473fc607c693358 +# https://github.com/grpc/grpc-go/commit/8eb4ac4c1514c190ee0b5d01a91c63218dac93c0 +# https://github.com/grpc/grpc-go/commit/f2180b4d5403d2210b30b93098eb7da31c05c721 +Patch1: patch-server.go-to-support-single-serverWorkerChannel.patch +Patch2: Change-server-stream-context-handling.patch +Patch3: prohibit-more-than-MaxConcurrentStreams-handlers.patch # Leverage the `generate_source_tarball.sh` to create the vendor sources # NOTE: govendor-v1 format is for inplace CVE updates so that we do not have to overwrite in the blob-store. 
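Of the three grpc backports listed above, the last one caps how many stream handlers may run at once at MaxConcurrentStreams. The general shape of such a cap is a counting semaphore built from a buffered channel; the sketch below follows that assumption and is not the actual grpc-go implementation, which differs in detail:

```go
// Generic sketch of capping concurrent handlers with a buffered-channel
// semaphore; illustrative only, not the grpc-go code.
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const maxConcurrentStreams = 4
	quota := make(chan struct{}, maxConcurrentStreams)

	var wg sync.WaitGroup
	for i := 0; i < 16; i++ {
		quota <- struct{}{} // blocks once 4 handlers are running
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			defer func() { <-quota }() // release the slot
			time.Sleep(10 * time.Millisecond) // pretend to serve a stream
			fmt.Println("served stream", id)
		}(i)
	}
	wg.Wait()
}
```

Acquiring the slot before spawning the goroutine is what bounds the concurrency; releasing it in a defer keeps the slot from leaking if a handler panics.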
@@ -47,6 +55,10 @@ install -D -m0755 bin/build/docker-compose %{buildroot}/%{_libexecdir}/docker/cl %{_libexecdir}/docker/cli-plugins/docker-compose %changelog +* Wed Feb 21 2024 Sam Meluch - 2.17.3-1 +- Upgrade to version 2.17.3 +- Add patch for vendored golang.org/grpc + * Fri Feb 02 2024 Daniel McIlvaney - 2.17.2-7 - Address CVE-2023-44487 by patching vendored golang.org/x/net diff --git a/SPECS/moby-compose/patch-server.go-to-support-single-serverWorkerChannel.patch b/SPECS/moby-compose/patch-server.go-to-support-single-serverWorkerChannel.patch new file mode 100644 index 00000000000..ae95f9d98ad --- /dev/null +++ b/SPECS/moby-compose/patch-server.go-to-support-single-serverWorkerChannel.patch @@ -0,0 +1,122 @@ +From 0af681d0bba0369a9864a4872e8784f69ccaa5b4 Mon Sep 17 00:00:00 2001 +From: Sam Meluch +Date: Thu, 22 Feb 2024 12:24:25 -0800 +Subject: [PATCH 1/3] patch server.go to support single serverWorkerChannel + +--- + vendor/google.golang.org/grpc/server.go | 52 ++++++++++++++------------------ + 1 file changed, 22 insertions(+), 30 deletions(-) + +diff --git a/google.golang.org/grpc/server.go b/google.golang.org/grpc/server.go +index f4dde72..b8f9b5e 100644 +--- a/vendor/google.golang.org/grpc/server.go ++++ b/vendor/google.golang.org/grpc/server.go +@@ -43,7 +43,6 @@ import ( + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" +- "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" +@@ -145,7 +144,7 @@ type Server struct { + channelzID *channelz.Identifier + czData *channelzData + +- serverWorkerChannels []chan *serverWorkerData ++ serverWorkerChannel chan *serverWorkerData + } + + type serverOptions struct { +@@ -560,40 +559,38 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { + const serverWorkerResetThreshold = 1 << 16 + + // serverWorkers blocks on a *transport.Stream channel forever and waits for +-// data to be fed by serveStreams. This allows different requests to be ++// data to be fed by serveStreams. This allows multiple requests to be + // processed by the same goroutine, removing the need for expensive stack + // re-allocations (see the runtime.morestack problem [1]). + // + // [1] https://github.com/golang/go/issues/18138 +-func (s *Server) serverWorker(ch chan *serverWorkerData) { +- // To make sure all server workers don't reset at the same time, choose a +- // random number of iterations before resetting. +- threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) +- for completed := 0; completed < threshold; completed++ { +- data, ok := <-ch ++func (s *Server) serverWorker() { ++ for completed := 0; completed < serverWorkerResetThreshold; completed++ { ++ data, ok := <-s.serverWorkerChannel + if !ok { + return + } +- s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) +- data.wg.Done() ++ s.handleSingleStream(data) + } +- go s.serverWorker(ch) ++ go s.serverWorker() + } + +-// initServerWorkers creates worker goroutines and channels to process incoming ++func (s *Server) handleSingleStream(data *serverWorkerData) { ++ defer data.wg.Done() ++ s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) ++} ++ ++// initServerWorkers creates worker goroutines and a channel to process incoming + // connections to reduce the time spent overall on runtime.morestack. 
+ func (s *Server) initServerWorkers() { +- s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) ++ s.serverWorkerChannel = make(chan *serverWorkerData) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { +- s.serverWorkerChannels[i] = make(chan *serverWorkerData) +- go s.serverWorker(s.serverWorkerChannels[i]) ++ go s.serverWorker() + } + } + + func (s *Server) stopServerWorkers() { +- for i := uint32(0); i < s.opts.numServerWorkers; i++ { +- close(s.serverWorkerChannels[i]) +- } ++ close(s.serverWorkerChannel) + } + + // NewServer creates a gRPC server which has no service registered and has not +@@ -945,26 +942,21 @@ func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + +- var roundRobinCounter uint32 + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + if s.opts.numServerWorkers > 0 { + data := &serverWorkerData{st: st, wg: &wg, stream: stream} + select { +- case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: ++ case s.serverWorkerChannel <- data: ++ return + default: + // If all stream workers are busy, fallback to the default code path. +- go func() { +- s.handleStream(st, stream, s.traceInfo(st, stream)) +- wg.Done() +- }() + } +- } else { +- go func() { +- defer wg.Done() +- s.handleStream(st, stream, s.traceInfo(st, stream)) +- }() + } ++ go func() { ++ defer wg.Done() ++ s.handleStream(st, stream, s.traceInfo(st, stream)) ++ }() + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx +-- +2.34.1 + diff --git a/SPECS/moby-compose/prohibit-more-than-MaxConcurrentStreams-handlers.patch b/SPECS/moby-compose/prohibit-more-than-MaxConcurrentStreams-handlers.patch new file mode 100644 index 00000000000..7298f746cb4 --- /dev/null +++ b/SPECS/moby-compose/prohibit-more-than-MaxConcurrentStreams-handlers.patch @@ -0,0 +1,185 @@ +From e74d387714f6695e7710d53144d88696374f06ab Mon Sep 17 00:00:00 2001 +From: Sam Meluch +Date: Thu, 22 Feb 2024 15:08:47 -0800 +Subject: [PATCH 3/3] prohibit more than MaxConcurrentStreams handlers from + running at once + +--- + .../grpc/internal/transport/http2_server.go | 11 +-- + vendor/google.golang.org/grpc/server.go | 71 +++++++++++++------ + 2 files changed, 53 insertions(+), 29 deletions(-) + +diff --git a/google.golang.org/grpc/internal/transport/http2_server.go b/google.golang.org/grpc/internal/transport/http2_server.go +index 94d441c..2c006a1 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go ++++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go +@@ -165,15 +165,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} +- // TODO(zhaoq): Have a better way to signal "no limit" because 0 is +- // permitted in the HTTP2 spec. 
+- maxStreams := config.MaxStreams +- if maxStreams == 0 { +- maxStreams = math.MaxUint32 +- } else { ++ if config.MaxStreams != math.MaxUint32 { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, +- Val: maxStreams, ++ Val: config.MaxStreams, + }) + } + dynamicWindow := true +@@ -252,7 +247,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), +- maxStreams: maxStreams, ++ maxStreams: config.MaxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, +diff --git a/google.golang.org/grpc/server.go b/google.golang.org/grpc/server.go +index 5bccd48..659361e 100644 +--- a/vendor/google.golang.org/grpc/server.go ++++ b/vendor/google.golang.org/grpc/server.go +@@ -114,12 +114,6 @@ type serviceInfo struct { + mdata interface{} + } + +-type serverWorkerData struct { +- st transport.ServerTransport +- wg *sync.WaitGroup +- stream *transport.Stream +-} +- + // Server is a gRPC server to serve RPC requests. + type Server struct { + opts serverOptions +@@ -144,7 +138,7 @@ type Server struct { + channelzID *channelz.Identifier + czData *channelzData + +- serverWorkerChannel chan *serverWorkerData ++ serverWorkerChannel chan func() + } + + type serverOptions struct { +@@ -176,6 +170,7 @@ type serverOptions struct { + } + + var defaultServerOptions = serverOptions{ ++ maxConcurrentStreams: math.MaxUint32, + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, +@@ -386,6 +381,9 @@ func MaxSendMsgSize(m int) ServerOption { + // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number + // of concurrent streams to each ServerTransport. + func MaxConcurrentStreams(n uint32) ServerOption { ++ if n == 0 { ++ n = math.MaxUint32 ++ } + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +@@ -566,24 +564,19 @@ const serverWorkerResetThreshold = 1 << 16 + // [1] https://github.com/golang/go/issues/18138 + func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { +- data, ok := <-s.serverWorkerChannel ++ f, ok := <-s.serverWorkerChannel + if !ok { + return + } +- s.handleSingleStream(data) ++ f() + } + go s.serverWorker() + } + +-func (s *Server) handleSingleStream(data *serverWorkerData) { +- defer data.wg.Done() +- s.handleStream(data.st, data.stream) +-} +- + // initServerWorkers creates worker goroutines and a channel to process incoming + // connections to reduce the time spent overall on runtime.morestack. 
+ func (s *Server) initServerWorkers() { +- s.serverWorkerChannel = make(chan *serverWorkerData) ++ s.serverWorkerChannel = make(chan func()) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + go s.serverWorker() + } +@@ -942,21 +935,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + ++ streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) ++ ++ streamQuota.acquire() ++ f := func() { ++ defer streamQuota.release() ++ defer wg.Done() ++ s.handleStream(st, stream) ++ } ++ + if s.opts.numServerWorkers > 0 { +- data := &serverWorkerData{st: st, wg: &wg, stream: stream} + select { +- case s.serverWorkerChannel <- data: ++ case s.serverWorkerChannel <- f: + return + default: + // If all stream workers are busy, fallback to the default code path. + } + } +- go func() { +- defer wg.Done() +- s.handleStream(st, stream) +- }() ++ go f() + }) + wg.Wait() + } +@@ -1956,3 +1954,34 @@ type channelzServer struct { + func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() + } ++ ++// atomicSemaphore implements a blocking, counting semaphore. acquire should be ++// called synchronously; release may be called asynchronously. ++type atomicSemaphore struct { ++ n atomic.Int64 ++ wait chan struct{} ++} ++ ++func (q *atomicSemaphore) acquire() { ++ if q.n.Add(-1) < 0 { ++ // We ran out of quota. Block until a release happens. ++ <-q.wait ++ } ++} ++ ++func (q *atomicSemaphore) release() { ++ // N.B. the "<= 0" check below should allow for this to work with multiple ++ // concurrent calls to acquire, but also note that with synchronous calls to ++ // acquire, as our system does, n will never be less than -1. There are ++ // fairness issues (queuing) to consider if this was to be generalized. ++ if q.n.Add(1) <= 0 { ++ // An acquire was waiting on us. Unblock it. ++ q.wait <- struct{}{} ++ } ++} ++ ++func newHandlerQuota(n uint32) *atomicSemaphore { ++ a := &atomicSemaphore{wait: make(chan struct{}, 1)} ++ a.n.Store(int64(n)) ++ return a ++} +-- +2.34.1 + diff --git a/SPECS/nodejs/CVE-2023-42282.patch b/SPECS/nodejs/CVE-2023-42282.patch new file mode 100644 index 00000000000..3b97b26bf4f --- /dev/null +++ b/SPECS/nodejs/CVE-2023-42282.patch @@ -0,0 +1,111 @@ +From 32f468f1245574785ec080705737a579be1223aa Mon Sep 17 00:00:00 2001 +From: Luke McFarlane +Date: Mon, 12 Feb 2024 13:22:18 +1100 +Subject: [PATCH] lib: fixed CVE-2023-42282 and added unit test + +Unit test code is not applicable for NodeJS sources hence not included. 
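Before the ip.js hunks below, it helps to restate the bug: ip.isPrivate() classified addresses purely with regexes over the textual form, so an IPv4 address written in hexadecimal or octal notation (for example 0x7f.0.0.1, which is 127.0.0.1) matched no private-range pattern yet still resolved to a private host; that is the bypass in CVE-2023-42282. The patch therefore normalizes the address with normalizeToLong() before any range check. A short sketch of the same normalize-before-classify idea, written in Go rather than the patched JavaScript (normalizeV4 is an illustrative helper, not part of any library):

package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// normalizeV4 rewrites an IPv4 address whose parts may be hex (0x7f) or
// octal (0177) into canonical dotted-decimal form before classification.
func normalizeV4(addr string) (string, error) {
	parts := strings.Split(addr, ".")
	if len(parts) != 4 {
		return "", fmt.Errorf("sketch only handles 4-part addresses")
	}
	out := make([]string, 4)
	for i, p := range parts {
		// Base 0 lets ParseUint auto-detect 0x (hex) and leading-0 (octal).
		v, err := strconv.ParseUint(p, 0, 8)
		if err != nil {
			return "", err
		}
		out[i] = strconv.FormatUint(v, 10)
	}
	return strings.Join(out, "."), nil
}

func main() {
	// A textual private-range regex misses the first two spellings even
	// though all three reach 127.0.0.1 or 10.0.0.1.
	for _, raw := range []string{"0x7f.0.0.1", "0177.0.0.1", "10.0.0.1"} {
		canon, err := normalizeV4(raw)
		if err != nil {
			fmt.Println(raw, "->", err)
			continue
		}
		ip := net.ParseIP(canon)
		fmt.Printf("%-12s -> %-10s loopback=%v private=%v\n",
			raw, canon, ip.IsLoopback(), ip.IsPrivate())
	}
}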
+ +diff --git a/deps/npm/node_modules/ip/lib/ip.js b/deps/npm/node_modules/ip/lib/ip.js +index 4b2adb5add..9022443ae5 100644 +--- a/deps/npm/node_modules/ip/lib/ip.js ++++ b/deps/npm/node_modules/ip/lib/ip.js +@@ -306,12 +306,26 @@ ip.isEqual = function (a, b) { + }; + + ip.isPrivate = function (addr) { +- return /^(::f{4}:)?10\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$/i +- .test(addr) ++ // check loopback addresses first ++ if (ip.isLoopback(addr)) { ++ return true; ++ } ++ ++ // ensure the ipv4 address is valid ++ if (!ip.isV6Format(addr)) { ++ const ipl = ip.normalizeToLong(addr); ++ if (ipl < 0) { ++ throw new Error('invalid ipv4 address'); ++ } ++ // normalize the address for the private range checks that follow ++ addr = ip.fromLong(ipl); ++ } ++ ++ // check private ranges ++ return /^(::f{4}:)?10\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$/i.test(addr) + || /^(::f{4}:)?192\.168\.([0-9]{1,3})\.([0-9]{1,3})$/i.test(addr) + || /^(::f{4}:)?172\.(1[6-9]|2\d|30|31)\.([0-9]{1,3})\.([0-9]{1,3})$/i + .test(addr) +- || /^(::f{4}:)?127\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$/i.test(addr) + || /^(::f{4}:)?169\.254\.([0-9]{1,3})\.([0-9]{1,3})$/i.test(addr) + || /^f[cd][0-9a-f]{2}:/i.test(addr) + || /^fe80:/i.test(addr) +@@ -324,9 +338,16 @@ ip.isPublic = function (addr) { + }; + + ip.isLoopback = function (addr) { ++ // If addr is an IPv4 address in long integer form (no dots and no colons), convert it ++ if (!/\./.test(addr) && !/:/.test(addr)) { ++ addr = ip.fromLong(Number(addr)); ++ } ++ + return /^(::f{4}:)?127\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/ + .test(addr) +- || /^fe80::1$/.test(addr) ++ || /^0177\./.test(addr) ++ || /^0x7f\./i.test(addr) ++ || /^fe80::1$/i.test(addr) + || /^::1$/.test(addr) + || /^::$/.test(addr); + }; +@@ -420,3 +441,51 @@ ip.fromLong = function (ipl) { + ipl >> 8 & 255}.${ + ipl & 255}`); + }; ++ ++ip.normalizeToLong = function (addr) { ++ const parts = addr.split('.').map(part => { ++ // Handle hexadecimal format ++ if (part.startsWith('0x') || part.startsWith('0X')) { ++ return parseInt(part, 16); ++ } ++ // Handle octal format (strictly digits 0-7 after a leading zero) ++ else if (part.startsWith('0') && part !== '0' && /^[0-7]+$/.test(part)) { ++ return parseInt(part, 8); ++ } ++ // Handle decimal format, reject invalid leading zeros ++ else if (/^[1-9]\d*$/.test(part) || part === '0') { ++ return parseInt(part, 10); ++ } ++ // Return NaN for invalid formats to indicate parsing failure ++ else { ++ return NaN; ++ } ++ }); ++ ++ if (parts.some(isNaN)) return -1; // Indicate error with -1 ++ ++ let val = 0; ++ const n = parts.length; ++ ++ switch (n) { ++ case 1: ++ val = parts[0]; ++ break; ++ case 2: ++ if (parts[0] > 0xff || parts[1] > 0xffffff) return -1; ++ val = (parts[0] << 24) | (parts[1] & 0xffffff); ++ break; ++ case 3: ++ if (parts[0] > 0xff || parts[1] > 0xff || parts[2] > 0xffff) return -1; ++ val = (parts[0] << 24) | (parts[1] << 16) | (parts[2] & 0xffff); ++ break; ++ case 4: ++ if (parts.some(part => part > 0xff)) return -1; ++ val = (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3]; ++ break; ++ default: ++ return -1; // Error case ++ } ++ ++ return val >>> 0; ++}; diff --git a/SPECS/nodejs/CVE-2024-24806.patch b/SPECS/nodejs/CVE-2024-24806.patch new file mode 100644 index 00000000000..f183ff3f72b --- /dev/null +++ b/SPECS/nodejs/CVE-2024-24806.patch @@ -0,0 +1,31 @@ +From 9c2cf90e5b3952a202a0fb8435470eaa527d3f63 Mon Sep 17 00:00:00 2001 +From: Suresh Thelkar +Date: Tue, 27 Feb 2024 10:24:03 +0530 +Subject: [PATCH] Patch 
CVE-2024-24806 + +Upstream patch details are given below. +https://github.com/libuv/libuv/commit/0f2d7e784a256b54b2385043438848047bc2a629 +--- + deps/uv/src/idna.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/deps/uv/src/idna.c b/deps/uv/src/idna.c +index 93d982ca..197650af 100644 +--- a/deps/uv/src/idna.c ++++ b/deps/uv/src/idna.c +@@ -308,8 +308,10 @@ long uv__idna_toascii(const char* s, const char* se, char* d, char* de) { + return rc; + } + +- if (d < de) +- *d++ = '\0'; ++ if (d >= de) ++ return UV_EINVAL; ++ ++ *d++ = '\0'; + + return d - ds; /* Number of bytes written. */ + } +-- +2.34.1 + diff --git a/SPECS/nodejs/nodejs.spec b/SPECS/nodejs/nodejs.spec index 9659860ce38..f4c4fb3b2b9 100644 --- a/SPECS/nodejs/nodejs.spec +++ b/SPECS/nodejs/nodejs.spec @@ -5,7 +5,7 @@ Name: nodejs # WARNINGS: MUST check and update the 'npm_version' macro for every version update of this package. # The version of NPM can be found inside the sources under 'deps/npm/package.json'. Version: 16.20.2 -Release: 2%{?dist} +Release: 3%{?dist} License: BSD AND MIT AND Public Domain AND NAIST-2003 AND Artistic-2.0 Vendor: Microsoft Corporation Distribution: Mariner @@ -18,6 +18,7 @@ Source0: https://nodejs.org/download/release/v%{version}/node-v%{version} Patch0: disable-tlsv1-tlsv1-1.patch Patch1: CVE-2022-25883.patch Patch2: CVE-2023-35945.patch +Patch3: CVE-2023-42282.patch BuildRequires: brotli-devel BuildRequires: c-ares-devel BuildRequires: coreutils >= 8.22 @@ -115,6 +116,10 @@ make cctest %{_datadir}/systemtap/tapset/node.stp %changelog +* Mon Feb 26 2024 Suresh Babu Chalamalasetty - 16.20.2-3 +- Patch CVE-2023-42282 +- Unit test code is not applicable for this NodeJS version sources + * Wed Sep 06 2023 Brian Fjeldstad - 16.20.2-2 - Patch CVE-2023-35945 diff --git a/SPECS/nodejs/nodejs18.spec b/SPECS/nodejs/nodejs18.spec index 4fafb0f54aa..f1b7c2eb028 100644 --- a/SPECS/nodejs/nodejs18.spec +++ b/SPECS/nodejs/nodejs18.spec @@ -6,7 +6,7 @@ Name: nodejs18 # WARNINGS: MUST check and update the 'npm_version' macro for every version update of this package. # The version of NPM can be found inside the sources under 'deps/npm/package.json'. Version: 18.18.2 -Release: 2%{?dist} +Release: 4%{?dist} License: BSD and MIT and Public Domain and NAIST-2003 and Artistic-2.0 Group: Applications/System Vendor: Microsoft Corporation @@ -17,7 +17,8 @@ URL: https://github.com/nodejs/node # !!! => use clean-source-tarball.sh script to create a clean and reproducible source tarball. Source0: https://nodejs.org/download/release/v%{version}/node-v%{version}.tar.xz Patch0: disable-tlsv1-tlsv1-1.patch - +Patch1: CVE-2023-42282.patch +Patch2: CVE-2024-24806.patch BuildRequires: brotli-devel BuildRequires: coreutils >= 8.22 BuildRequires: gcc @@ -116,6 +117,13 @@ make cctest %{_datadir}/systemtap/tapset/node.stp %changelog +* Tue Feb 27 2024 Suresh Thelkar - 18.18.2-4 +- Patch CVE-2024-24806 + +* Mon Feb 26 2024 Suresh Babu Chalamalasetty - 18.18.2-3 +- Patch CVE-2023-42282 +- Unit test code is not applicable for this NodeJS version sources + * Thu Oct 19 2023 Dan Streetman - 18.18.2-2 - Re-enable building debuginfo. We can just ignore the dirs conflict failure in the pipelines! 
:) diff --git a/SPECS/osslsigncode/osslsigncode.signatures.json b/SPECS/osslsigncode/osslsigncode.signatures.json new file mode 100644 index 00000000000..0131771b8f7 --- /dev/null +++ b/SPECS/osslsigncode/osslsigncode.signatures.json @@ -0,0 +1,5 @@ +{ + "Signatures": { + "osslsigncode-2.7.tar.gz": "00fc2b43395d89a2d07ebbd4981e7a9dbc676c7115d122a1385441c0294239b8" + } + } diff --git a/SPECS/osslsigncode/osslsigncode.spec b/SPECS/osslsigncode/osslsigncode.spec new file mode 100644 index 00000000000..423183db9a5 --- /dev/null +++ b/SPECS/osslsigncode/osslsigncode.spec @@ -0,0 +1,39 @@ +Summary: Verify and sign routines for PE binaries +Name: osslsigncode +Version: 2.7 +Release: 1%{?dist} +License: MIT +Group: Applications/System +Vendor: Microsoft Corporation +Distribution: Mariner +Source0: https://github.com/mtrojnar/%{name}/archive/refs/tags/%{version}.tar.gz#/%{name}-%{version}.tar.gz +BuildRequires: python3 +BuildRequires: cmake +BuildRequires: openssl-devel +BuildRequires: libcurl-devel +BuildRequires: zlib-devel + +%description +Verify and sign routines for PE binaries (EXE,DLL) + +%prep +%autosetup -p1 + +%build +mkdir build +cd build +cmake -S .. +cmake --build . + +%install +install -d %{buildroot}%{_bindir} +install -D -m 755 ./build/osslsigncode %{buildroot}%{_bindir}/osslsigncode + +%files +%license LICENSE.txt +%{_bindir}/osslsigncode + +%changelog +* Tue Feb 13 2024 Cameron Baird 2.7-1 +- Original version for CBL-Mariner (license: MIT). +- License verified diff --git a/SPECS/postgresql/postgresql.signatures.json b/SPECS/postgresql/postgresql.signatures.json index d9352fe4d23..caedd1b84e1 100644 --- a/SPECS/postgresql/postgresql.signatures.json +++ b/SPECS/postgresql/postgresql.signatures.json @@ -1,5 +1,5 @@ { - "Signatures": { - "postgresql-14.10.tar.bz2": "c99431c48e9d470b0d0ab946eb2141a3cd19130c2fb4dc4b3284a7774ecc8399" + "Signatures": { + "postgresql-14.11.tar.bz2": "a670bd7dce22dcad4297b261136b3b1d4a09a6f541719562aa14ca63bf2968a8" } -} +} \ No newline at end of file diff --git a/SPECS/postgresql/postgresql.spec b/SPECS/postgresql/postgresql.spec index f6afb4879a9..59b3f2b0861 100644 --- a/SPECS/postgresql/postgresql.spec +++ b/SPECS/postgresql/postgresql.spec @@ -1,6 +1,6 @@ Summary: PostgreSQL database engine Name: postgresql -Version: 14.10 +Version: 14.11 Release: 1%{?dist} License: PostgreSQL Vendor: Microsoft Corporation @@ -35,6 +35,12 @@ Requires: zlib %description PostgreSQL is an object-relational database management system. +%package docs +Summary: Extra documentation for PostgreSQL + +%description docs +The postgresql-docs package includes the documentation. + %package libs Summary: Libraries for use with PostgreSQL Group: Applications/Databases @@ -64,10 +70,11 @@ The postgresql-devel package contains libraries and header files for developing applications that use postgresql. 
%prep -%setup -q +%autosetup -p1 %build -sed -i '/DEFAULT_PGSOCKET_DIR/s@/tmp@/run/postgresql@' src/include/pg_config_manual.h && +sed -i '/DEFAULT_PGSOCKET_DIR/s@/tmp@/run/postgresql@' src/include/pg_config_manual.h + ./configure \ --enable-thread-safety \ --prefix=%{_prefix} \ @@ -78,13 +85,11 @@ sed -i '/DEFAULT_PGSOCKET_DIR/s@/tmp@/run/postgresql@' src/include/pg_config_man --with-readline \ --with-system-tzdata=%{_datadir}/zoneinfo \ --docdir=%{_docdir}/postgresql -make -C ./src/backend generated-headers -make %{?_smp_mflags} -cd contrib && make %{?_smp_mflags} + +%make_build world %install -make install DESTDIR=%{buildroot} -cd contrib && make install DESTDIR=%{buildroot} +%make_install install-world # For postgresql 10+, commands are renamed # Ref: https://wiki.postgresql.org/wiki/New_in_postgres_10 @@ -93,9 +98,19 @@ ln -sf pg_resetwal %{buildroot}%{_bindir}/pg_resetxlog ln -sf pg_waldump %{buildroot}%{_bindir}/pg_xlogdump %{_fixperms} %{buildroot}/* +# Remove anything related to Python 2. These have no need to be +# around as only Python 3 is supported. +rm -f %{buildroot}%{_pgdatadir}/extension/*plpython2u* \ + %{buildroot}%{_pgdatadir}/extension/*plpythonu-* \ + %{buildroot}%{_pgdatadir}/extension/*_plpythonu.control + +# Remove currently unnecessary man pages. +rm -f %{buildroot}%{_mandir}/man1/* \ + %{buildroot}%{_mandir}/man3/* \ + %{buildroot}%{_mandir}/man7/* + %check -sed -i '2219s/",/ ; EXIT_STATUS=$? ; sleep 5 ; exit $EXIT_STATUS",/g' src/test/regress/pg_regress.c -chown -Rv nobody . +chown -Rv nobody:nogroup . sudo -u nobody -s /bin/bash -c "PATH=$PATH make -k check" %ldconfig_scriptlets @@ -133,6 +148,10 @@ sudo -u nobody -s /bin/bash -c "PATH=$PATH make -k check" %exclude %{_datadir}/postgresql/pg_service.conf.sample %exclude %{_datadir}/postgresql/psqlrc.sample +%files docs +%defattr(-,root,root) +%{_docdir}/postgresql/* + %files libs %{_bindir}/clusterdb %{_bindir}/createdb @@ -172,6 +191,10 @@ sudo -u nobody -s /bin/bash -c "PATH=$PATH make -k check" %{_libdir}/libpgtypes.a %changelog +* Tue Feb 27 2024 Thien Trung Vuong - 14.11-1 +- Update to version 14.11 to fix CVE-2024-0985 +- Added the 'docs' subpackage. + * Fri Dec 29 2023 Neha Agarwal - 14.10-1 - Upgrade to 14.10 to fix CVE-2023-5868, CVE-2023-5869 and CVE-2023-5870 diff --git a/SPECS/rust/rust.spec b/SPECS/rust/rust.spec index 62f7196dc73..9313179fced 100644 --- a/SPECS/rust/rust.spec +++ b/SPECS/rust/rust.spec @@ -9,7 +9,7 @@ Summary: Rust Programming Language Name: rust Version: 1.72.0 -Release: 5%{?dist} +Release: 6%{?dist} License: (ASL 2.0 OR MIT) AND BSD AND CC-BY-3.0 Vendor: Microsoft Corporation Distribution: Mariner @@ -168,6 +168,9 @@ rm %{buildroot}%{_bindir}/*.old %{_mandir}/man1/* %changelog +* Wed Feb 21 2024 Sam Meluch - 1.72.0-6 +- Dash roll package to rebuild with new libgit2 + * Mon Oct 30 2023 Rohit Rawat - 1.72.0-5 - Patch CVE-2023-45853 in vendor/libz-sys/src/zlib diff --git a/SPECS/telegraf/CVE-2023-48795.patch b/SPECS/telegraf/CVE-2023-48795.patch new file mode 100644 index 00000000000..b0b6000f36e --- /dev/null +++ b/SPECS/telegraf/CVE-2023-48795.patch @@ -0,0 +1,270 @@ +From 8e6ebb46718646cadb06d60713aad3b5bdb936a5 Mon Sep 17 00:00:00 2001 +From: Nan Liu +Date: Thu, 15 Feb 2024 18:49:45 +0000 +Subject: [PATCH] address CVE-2023-48795 + +--- +ssh: implement strict KEX protocol changes + +Implement the "strict KEX" protocol changes, as described in section +1.9 of the OpenSSH PROTOCOL file (as of OpenSSH version 9.6/9.6p1). 
+ +Namely this makes the following changes: + * Both the server and the client add an additional algorithm to the + initial KEXINIT message, indicating support for the strict KEX mode. + * When one side of the connection sees the strict KEX extension + algorithm, the strict KEX mode is enabled for messages originating + from the other side of the connection. If the sequence number for + the side which requested the extension is not 1 (indicating that it + has already received non-KEXINIT packets), the connection is + terminated. + * When strict kex mode is enabled, unexpected messages during the + handshake are considered fatal. Additionally when a key change + occurs (on the receipt of the NEWKEYS message) the message sequence + numbers are reset. + +Thanks to Fabian Bäumer, Marcus Brinkmann, and Jörg Schwenk from Ruhr +University Bochum for reporting this issue. + +Fixes CVE-2023-48795 +Fixes golang/go#64784 + +Change-Id: I96b53afd2bd2fb94d2b6f2a46a5dacf325357604 +Reviewed-on: https://go-review.googlesource.com/c/crypto/+/550715 +Reviewed-by: Nicola Murino +Reviewed-by: Tatiana Bradley +TryBot-Result: Gopher Robot +Run-TryBot: Roland Shoemaker +Reviewed-by: Damien Neil +LUCI-TryBot-Result: Go LUCI + +--- + vendor/golang.org/x/crypto/ssh/handshake.go | 56 +++++++++++++++++++-- + vendor/golang.org/x/crypto/ssh/transport.go | 32 ++++++++++-- + 2 files changed, 79 insertions(+), 9 deletions(-) + +diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go +index 653dc4d..c7ea70f 100644 +--- a/vendor/golang.org/x/crypto/ssh/handshake.go ++++ b/vendor/golang.org/x/crypto/ssh/handshake.go +@@ -34,6 +34,16 @@ type keyingTransport interface { + // direction will be effected if a msgNewKeys message is sent + // or received. + prepareKeyChange(*algorithms, *kexResult) error ++ ++ // setStrictMode sets the strict KEX mode, notably triggering ++ // sequence number resets on sending or receiving msgNewKeys. ++ // If the sequence number is already > 1 when setStrictMode ++ // is called, an error is returned. ++ setStrictMode() error ++ ++ // setInitialKEXDone indicates to the transport that the initial key exchange ++ // was completed ++ setInitialKEXDone() + } + + // handshakeTransport implements rekeying on top of a keyingTransport +@@ -94,6 +104,10 @@ type handshakeTransport struct { + + // The session ID or nil if first kex did not complete yet. + sessionID []byte ++ ++ // strictMode indicates if the other side of the handshake indicated ++ // that we should be following the strict KEX protocol restrictions. ++ strictMode bool + } + + type pendingKex struct { +@@ -201,7 +215,10 @@ func (t *handshakeTransport) readLoop() { + close(t.incoming) + break + } +- if p[0] == msgIgnore || p[0] == msgDebug { ++ // If this is the first kex, and strict KEX mode is enabled, ++ // we don't ignore any messages, as they may be used to manipulate ++ // the packet sequence numbers. ++ if !(t.sessionID == nil && t.strictMode) && (p[0] == msgIgnore || p[0] == msgDebug) { + continue + } + t.incoming <- p +@@ -432,6 +449,11 @@ func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { + return successPacket, nil + } + ++const ( ++ kexStrictClient = "kex-strict-c-v00@openssh.com" ++ kexStrictServer = "kex-strict-s-v00@openssh.com" ++) ++ + // sendKexInit sends a key change message. 
+ func (t *handshakeTransport) sendKexInit() error { + t.mu.Lock() +@@ -445,7 +467,6 @@ func (t *handshakeTransport) sendKexInit() error { + } + + msg := &kexInitMsg{ +- KexAlgos: t.config.KeyExchanges, + CiphersClientServer: t.config.Ciphers, + CiphersServerClient: t.config.Ciphers, + MACsClientServer: t.config.MACs, +@@ -455,6 +476,13 @@ func (t *handshakeTransport) sendKexInit() error { + } + io.ReadFull(rand.Reader, msg.Cookie[:]) + ++ // We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm, ++ // and possibly to add the ext-info extension algorithm. Since the slice may be the ++ // user owned KeyExchanges, we create our own slice in order to avoid using user ++ // owned memory by mistake. ++ msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+2) // room for kex-strict and ext-info ++ msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) ++ + isServer := len(t.hostKeys) > 0 + if isServer { + for _, k := range t.hostKeys { +@@ -474,17 +502,24 @@ func (t *handshakeTransport) sendKexInit() error { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) + } + } ++ ++ if t.sessionID == nil { ++ msg.KexAlgos = append(msg.KexAlgos, kexStrictServer) ++ } + } else { + msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + + // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what + // algorithms the server supports for public key authentication. See RFC + // 8308, Section 2.1. ++ // ++ // We also send the strict KEX mode extension algorithm, in order to opt ++ // into the strict KEX mode. + if firstKeyExchange := t.sessionID == nil; firstKeyExchange { +- msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1) +- msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) + msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") ++ msg.KexAlgos = append(msg.KexAlgos, kexStrictClient) + } ++ + } + + packet := Marshal(msg) +@@ -581,6 +616,13 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + return err + } + ++ if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) { ++ t.strictMode = true ++ if err := t.conn.setStrictMode(); err != nil { ++ return err ++ } ++ } ++ + // We don't send FirstKexFollows, but we handle receiving it. + // + // RFC 4253 section 7 defines the kex and the agreement method for +@@ -632,6 +674,12 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + return unexpectedMessageError(msgNewKeys, packet[0]) + } + ++ if firstKeyExchange { ++ // Indicates to the transport that the first key exchange is completed ++ // after receiving SSH_MSG_NEWKEYS. 
++ t.conn.setInitialKEXDone() ++ } ++ + return nil + } + +diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go +index acf5a21..4df45fc 100644 +--- a/vendor/golang.org/x/crypto/ssh/transport.go ++++ b/vendor/golang.org/x/crypto/ssh/transport.go +@@ -48,6 +48,9 @@ type transport struct { + rand io.Reader + isClient bool + io.Closer ++ ++ strictMode bool ++ initialKEXDone bool + } + + // packetCipher represents a combination of SSH encryption/MAC +@@ -73,6 +76,18 @@ type connectionState struct { + pendingKeyChange chan packetCipher + } + ++func (t *transport) setStrictMode() error { ++ if t.reader.seqNum != 1 { ++ return errors.New("ssh: sequence number != 1 when strict KEX mode requested") ++ } ++ t.strictMode = true ++ return nil ++} ++ ++func (t *transport) setInitialKEXDone() { ++ t.initialKEXDone = true ++} ++ + // prepareKeyChange sets up key material for a keychange. The key changes in + // both directions are triggered by reading and writing a msgNewKey packet + // respectively. +@@ -111,11 +126,12 @@ func (t *transport) printPacket(p []byte, write bool) { + // Read and decrypt next packet. + func (t *transport) readPacket() (p []byte, err error) { + for { +- p, err = t.reader.readPacket(t.bufReader) ++ p, err = t.reader.readPacket(t.bufReader, t.strictMode) + if err != nil { + break + } +- if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { ++ // in strict mode we pass through DEBUG and IGNORE packets only during the initial KEX ++ if len(p) == 0 || (t.strictMode && !t.initialKEXDone) || (p[0] != msgIgnore && p[0] != msgDebug) { + break + } + } +@@ -126,7 +142,7 @@ func (t *transport) readPacket() (p []byte, err error) { + return p, err + } + +-func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { ++func (s *connectionState) readPacket(r *bufio.Reader, strictMode bool) ([]byte, error) { + packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) + s.seqNum++ + if err == nil && len(packet) == 0 { +@@ -139,6 +155,9 @@ func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher ++ if strictMode { ++ s.seqNum = 0 ++ } + default: + return nil, errors.New("ssh: got bogus newkeys message") + } +@@ -169,10 +188,10 @@ func (t *transport) writePacket(packet []byte) error { + if debugTransport { + t.printPacket(packet, true) + } +- return t.writer.writePacket(t.bufWriter, t.rand, packet) ++ return t.writer.writePacket(t.bufWriter, t.rand, packet, t.strictMode) + } + +-func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { ++func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte, strictMode bool) error { + changeKeys := len(packet) > 0 && packet[0] == msgNewKeys + + err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) +@@ -187,6 +206,9 @@ func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet [] + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher ++ if strictMode { ++ s.seqNum = 0 ++ } + default: + panic("ssh: no key material for msgNewKeys") + } +-- +2.25.1 + diff --git a/SPECS/telegraf/telegraf.spec b/SPECS/telegraf/telegraf.spec index 1a25029797c..60ce99a4a4c 100644 --- a/SPECS/telegraf/telegraf.spec +++ b/SPECS/telegraf/telegraf.spec @@ -1,7 +1,7 @@ Summary: agent for collecting, processing, aggregating, and writing metrics. 
Name: telegraf Version: 1.28.5 -Release: 2%{?dist} +Release: 3%{?dist} License: MIT Vendor: Microsoft Corporation Distribution: Mariner @@ -10,6 +10,7 @@ URL: https://github.com/influxdata/telegraf Source0: %{url}/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz # Use the generate_source_tarball.sh script to get the vendored sources. Source1: %{name}-%{version}-vendor.tar.gz +Patch0: CVE-2023-48795.patch BuildRequires: golang BuildRequires: iana-etc BuildRequires: systemd-devel @@ -33,8 +34,7 @@ the community can easily add support for collecting metrics from well known serv Postgres, or Redis) and third party APIs (like Mailchimp, AWS CloudWatch, or Google Analytics). %prep -%autosetup -p1 -tar -xf %{SOURCE1} +%autosetup -a 1 -p1 %build go build -buildvcs=false -mod=vendor ./cmd/telegraf @@ -81,6 +81,9 @@ fi %dir %{_sysconfdir}/%{name}/telegraf.d %changelog +* Thu Feb 15 2024 Nan Liu - 1.28.5-3 +- Address CVE-2023-48795 by patching vendored golang.org/x/crypto + * Fri Feb 02 2024 CBL-Mariner Servicing Account - 1.28.5-2 - Bump release to rebuild with go 1.21.6 diff --git a/SPECS/unbound/unbound.signatures.json b/SPECS/unbound/unbound.signatures.json index dbd29a4ebc3..9af5948e3f7 100644 --- a/SPECS/unbound/unbound.signatures.json +++ b/SPECS/unbound/unbound.signatures.json @@ -1,6 +1,6 @@ { - "Signatures": { - "unbound-release-1.16.3.tar.gz": "df6359aadca02148f3ad0cc08edc7bdd031fb1dec73f0c51ed82bfec502bcb56", - "unbound.service": "563389e2bf92e13541d68c7bcac6bc6635931aa86509d45393864d24aacc7147" - } + "Signatures": { + "unbound.service": "563389e2bf92e13541d68c7bcac6bc6635931aa86509d45393864d24aacc7147", + "unbound-release-1.19.1.tar.gz": "cc1231e6756c9ec88fadf8425f7302f8884ca8781fb108275f2ad476c284edd8" + } } \ No newline at end of file diff --git a/SPECS/unbound/unbound.spec b/SPECS/unbound/unbound.spec index 5b2c3498865..4acc01c2541 100644 --- a/SPECS/unbound/unbound.spec +++ b/SPECS/unbound/unbound.spec @@ -1,13 +1,13 @@ Summary: unbound dns server Name: unbound -Version: 1.16.3 +Version: 1.19.1 Release: 1%{?dist} License: BSD Vendor: Microsoft Corporation Distribution: Mariner Group: System/Servers URL: https://nlnetlabs.nl/projects/unbound/about/ -Source0: https://github.com/NLnetLabs/%{name}/archive/release-%{version}.tar.gz#/%{name}-release-%{version}.tar.gz +Source0: https://github.com/nlnetlabs/%{name}/archive/release-%{version}.tar.gz#/%{name}-release-%{version}.tar.gz Source1: %{name}.service BuildRequires: expat-devel BuildRequires: libevent-devel @@ -96,6 +96,9 @@ useradd -r -g unbound -d %{_sysconfdir}/unbound -s /sbin/nologin \ %{_mandir}/* %changelog +* Wed Feb 28 2024 CBL-Mariner Servicing Account - 1.19.1-1 +- Auto-upgrade to 1.19.1 - Fix CVE-2023-50387 + * Wed Oct 12 2022 Henry Li - 1.16.3-1 - Upgrade to version 1.16.3 to resolve CVE-2022-3204 diff --git a/SPECS/vim/CVE-2024-22667.patch b/SPECS/vim/CVE-2024-22667.patch new file mode 100644 index 00000000000..b6fb074f910 --- /dev/null +++ b/SPECS/vim/CVE-2024-22667.patch @@ -0,0 +1,400 @@ +From c1c94475a13790d420c03d67d336dfb171ab3aec Mon Sep 17 00:00:00 2001 +From: Christian Brabandt +Date: Wed, 29 Nov 2023 11:34:05 +0100 +Subject: [PATCH] patch 9.0.2142: [security]: stack-buffer-overflow in option + callback functions + +Problem: [security]: stack-buffer-overflow in option callback functions +Solution: pass size of errbuf down the call stack, use snprintf() + instead of sprintf() + +We pass the error buffer down to the option callback functions, but in +some parts of the code, we simply use sprintf(buf) to write 
into the error
+buffer, which can overflow.
+
+So let's pass down the length of the error buffer and use snprintf(buf, size)
+instead.
+
+Reported by @henices, thanks!
+
+Signed-off-by: Christian Brabandt
+---
+ src/map.c | 2 +-
+ src/option.c | 14 +++++----
+ src/option.h | 2 ++
+ src/optionstr.c | 59 ++++++++++++++++++++++++--------------
+ src/proto/optionstr.pro | 4 +--
+ src/structs.h | 2 ++
+ src/testdir/test_crash.vim | 8 ++++++
+ 7 files changed, 60 insertions(+), 31 deletions(-)
+
+diff --git a/src/map.c b/src/map.c
+index 5988445..98785e7 100644
+--- a/src/map.c
++++ b/src/map.c
+@@ -3114,7 +3114,7 @@ did_set_langmap(optset_T *args UNUSED)
+ {
+ if (p[0] != ',')
+ {
+- sprintf(args->os_errbuf,
++ snprintf(args->os_errbuf, args->os_errbuflen,
+ _(e_langmap_extra_characters_after_semicolon_str),
+ p);
+ return args->os_errbuf;
+diff --git a/src/option.c b/src/option.c
+index d5d20d7..5727885 100644
+--- a/src/option.c
++++ b/src/option.c
+@@ -1932,6 +1932,7 @@ do_set_option_string(
+ int cp_val,
+ char_u *varp_arg,
+ char *errbuf,
++ int errbuflen,
+ int *value_checked,
+ char **errmsg)
+ {
+@@ -2030,7 +2031,7 @@ do_set_option_string(
+ // be triggered that can cause havoc.
+ *errmsg = did_set_string_option(
+ opt_idx, (char_u **)varp, oldval, newval, errbuf,
+- opt_flags, op, value_checked);
++ errbuflen, opt_flags, op, value_checked);
+
+ secure = secure_saved;
+ }
+@@ -2287,7 +2288,7 @@ do_set_option_value(
+ {
+ // string option
+ if (do_set_option_string(opt_idx, opt_flags, &arg, nextchar, op,
+- flags, cp_val, varp, errbuf,
++ flags, cp_val, varp, errbuf, errbuflen,
+ &value_checked, &errmsg) == FAIL)
+ {
+ if (errmsg != NULL)
+@@ -2579,12 +2580,12 @@ do_set(
+ {
+ int stopopteval = FALSE;
+ char *errmsg = NULL;
+- char errbuf[80];
++ char errbuf[ERR_BUFLEN];
+ char_u *startarg = arg;
+
+ errmsg = do_set_option(opt_flags, &arg, arg_start, &startarg,
+ &did_show, &stopopteval, errbuf,
+- sizeof(errbuf));
++ ERR_BUFLEN);
+ if (stopopteval)
+ break;
+
+@@ -5347,7 +5348,8 @@ set_option_value(
+ int opt_idx;
+ char_u *varp;
+ long_u flags;
+- static char errbuf[80];
++ static char errbuf[ERR_BUFLEN];
++ int errbuflen = ERR_BUFLEN;
+
+ opt_idx = findoption(name);
+ if (opt_idx < 0)
+@@ -5390,7 +5392,7 @@ set_option_value(
+ }
+ #endif
+ if (flags & P_STRING)
+- return set_string_option(opt_idx, string, opt_flags, errbuf);
++ return set_string_option(opt_idx, string, opt_flags, errbuf, errbuflen);
+
+ varp = get_varp_scope(&(options[opt_idx]), opt_flags);
+ if (varp != NULL) // hidden option is not changed
+diff --git a/src/option.h b/src/option.h
+index 396c568..f620e13 100644
+--- a/src/option.h
++++ b/src/option.h
+@@ -1321,4 +1321,6 @@ enum
+ // Value for b_p_ul indicating the global value must be used.
+ #define NO_LOCAL_UNDOLEVEL (-123456) + ++#define ERR_BUFLEN 80 ++ + #endif // _OPTION_H_ +diff --git a/src/optionstr.c b/src/optionstr.c +index b7cdcc4..84c77cb 100644 +--- a/src/optionstr.c ++++ b/src/optionstr.c +@@ -229,11 +229,12 @@ trigger_optionset_string( + #endif + + static char * +-illegal_char(char *errbuf, int c) ++illegal_char(char *errbuf, int errbuflen, int c) + { + if (errbuf == NULL) + return ""; +- sprintf((char *)errbuf, _(e_illegal_character_str), (char *)transchar(c)); ++ snprintf((char *)errbuf, errbuflen, _(e_illegal_character_str), ++ (char *)transchar(c)); + return errbuf; + } + +@@ -525,7 +526,8 @@ set_string_option( + int opt_idx, + char_u *value, + int opt_flags, // OPT_LOCAL and/or OPT_GLOBAL +- char *errbuf) ++ char *errbuf, ++ int errbuflen) + { + char_u *s; + char_u **varp; +@@ -579,7 +581,7 @@ set_string_option( + } + #endif + if ((errmsg = did_set_string_option(opt_idx, varp, oldval, value, errbuf, +- opt_flags, OP_NONE, &value_checked)) == NULL) ++ errbuflen, opt_flags, OP_NONE, &value_checked)) == NULL) + did_set_option(opt_idx, opt_flags, TRUE, value_checked); + + #if defined(FEAT_EVAL) +@@ -615,7 +617,8 @@ valid_filetype(char_u *val) + check_stl_option(char_u *s) + { + int groupdepth = 0; +- static char errbuf[80]; ++ static char errbuf[ERR_BUFLEN]; ++ int errbuflen = ERR_BUFLEN; + + while (*s) + { +@@ -656,7 +659,7 @@ check_stl_option(char_u *s) + } + if (vim_strchr(STL_ALL, *s) == NULL) + { +- return illegal_char(errbuf, *s); ++ return illegal_char(errbuf, errbuflen, *s); + } + if (*s == '{') + { +@@ -664,7 +667,7 @@ check_stl_option(char_u *s) + + if (reevaluate && *++s == '}') + // "}" is not allowed immediately after "%{%" +- return illegal_char(errbuf, '}'); ++ return illegal_char(errbuf, errbuflen, '}'); + while ((*s != '}' || (reevaluate && s[-1] != '%')) && *s) + s++; + if (*s != '}') +@@ -719,13 +722,17 @@ did_set_opt_strings(char_u *val, char **values, int list) + * An option which is a list of flags is set. Valid values are in 'flags'. 
+ */ + static char * +-did_set_option_listflag(char_u *val, char_u *flags, char *errbuf) ++did_set_option_listflag( ++ char_u *val, ++ char_u *flags, ++ char *errbuf, ++ int errbuflen) + { + char_u *s; + + for (s = val; *s; ++s) + if (vim_strchr(flags, *s) == NULL) +- return illegal_char(errbuf, *s); ++ return illegal_char(errbuf, errbuflen, *s); + + return NULL; + } +@@ -1461,7 +1468,7 @@ did_set_comments(optset_T *args) + if (vim_strchr((char_u *)COM_ALL, *s) == NULL + && !VIM_ISDIGIT(*s) && *s != '-') + { +- errmsg = illegal_char(args->os_errbuf, *s); ++ errmsg = illegal_char(args->os_errbuf, args->os_errbuflen, *s); + break; + } + ++s; +@@ -1517,7 +1524,7 @@ did_set_complete(optset_T *args) + if (!*s) + break; + if (vim_strchr((char_u *)".wbuksid]tU", *s) == NULL) +- return illegal_char(args->os_errbuf, *s); ++ return illegal_char(args->os_errbuf, args->os_errbuflen, *s); + if (*++s != NUL && *s != ',' && *s != ' ') + { + if (s[-1] == 'k' || s[-1] == 's') +@@ -1534,7 +1541,7 @@ did_set_complete(optset_T *args) + { + if (args->os_errbuf != NULL) + { +- sprintf((char *)args->os_errbuf, ++ snprintf((char *)args->os_errbuf, args->os_errbuflen, + _(e_illegal_character_after_chr), *--s); + return args->os_errbuf; + } +@@ -1634,7 +1641,8 @@ did_set_concealcursor(optset_T *args) + { + char_u **varp = (char_u **)args->os_varp; + +- return did_set_option_listflag(*varp, (char_u *)COCU_ALL, args->os_errbuf); ++ return did_set_option_listflag(*varp, (char_u *)COCU_ALL, args->os_errbuf, ++ args->os_errbuflen); + } + + int +@@ -1652,7 +1660,8 @@ did_set_cpoptions(optset_T *args) + { + char_u **varp = (char_u **)args->os_varp; + +- return did_set_option_listflag(*varp, (char_u *)CPO_ALL, args->os_errbuf); ++ return did_set_option_listflag(*varp, (char_u *)CPO_ALL, args->os_errbuf, ++ args->os_errbuflen); + } + + int +@@ -2281,7 +2290,8 @@ did_set_formatoptions(optset_T *args) + { + char_u **varp = (char_u **)args->os_varp; + +- return did_set_option_listflag(*varp, (char_u *)FO_ALL, args->os_errbuf); ++ return did_set_option_listflag(*varp, (char_u *)FO_ALL, args->os_errbuf, ++ args->os_errbuflen); + } + + int +@@ -2422,7 +2432,8 @@ did_set_guioptions(optset_T *args) + char_u **varp = (char_u **)args->os_varp; + char *errmsg; + +- errmsg = did_set_option_listflag(*varp, (char_u *)GO_ALL, args->os_errbuf); ++ errmsg = did_set_option_listflag(*varp, (char_u *)GO_ALL, args->os_errbuf, ++ args->os_errbuflen); + if (errmsg != NULL) + return errmsg; + +@@ -2926,8 +2937,8 @@ did_set_mouse(optset_T *args) + { + char_u **varp = (char_u **)args->os_varp; + +- return did_set_option_listflag(*varp, (char_u *)MOUSE_ALL, +- args->os_errbuf); ++ return did_set_option_listflag(*varp, (char_u *)MOUSE_ALL, args->os_errbuf, ++ args->os_errbuflen); + } + + int +@@ -3364,7 +3375,8 @@ did_set_shortmess(optset_T *args) + { + char_u **varp = (char_u **)args->os_varp; + +- return did_set_option_listflag(*varp, (char_u *)SHM_ALL, args->os_errbuf); ++ return did_set_option_listflag(*varp, (char_u *)SHM_ALL, args->os_errbuf, ++ args->os_errbuflen); + } + + int +@@ -4030,7 +4042,7 @@ did_set_viminfo(optset_T *args) + // Check it's a valid character + if (vim_strchr((char_u *)"!\"%'/:<@cfhnrs", *s) == NULL) + { +- errmsg = illegal_char(args->os_errbuf, *s); ++ errmsg = illegal_char(args->os_errbuf, args->os_errbuflen, *s); + break; + } + if (*s == 'n') // name is always last one +@@ -4057,7 +4069,7 @@ did_set_viminfo(optset_T *args) + { + if (args->os_errbuf != NULL) + { +- sprintf(args->os_errbuf, ++ snprintf(args->os_errbuf, 
args->os_errbuflen, + _(e_missing_number_after_angle_str_angle), + transchar_byte(*(s - 1))); + errmsg = args->os_errbuf; +@@ -4140,7 +4152,8 @@ did_set_whichwrap(optset_T *args) + + // Add ',' to the list flags because 'whichwrap' is a flag + // list that is comma-separated. +- return did_set_option_listflag(*varp, (char_u *)(WW_ALL ","), args->os_errbuf); ++ return did_set_option_listflag(*varp, (char_u *)(WW_ALL ","), ++ args->os_errbuf, args->os_errbuflen); + } + + int +@@ -4341,6 +4354,7 @@ did_set_string_option( + char_u *oldval, // previous value of the option + char_u *value, // new value of the option + char *errbuf, // buffer for errors, or NULL ++ int errbuflen, // length of error buffer + int opt_flags, // OPT_LOCAL and/or OPT_GLOBAL + set_op_T op, // OP_ADDING/OP_PREPENDING/OP_REMOVING + int *value_checked) // value was checked to be safe, no +@@ -4385,6 +4399,7 @@ did_set_string_option( + args.os_oldval.string = oldval; + args.os_newval.string = value; + args.os_errbuf = errbuf; ++ args.os_errbuflen = errbuflen; + // Invoke the option specific callback function to validate and apply + // the new option value. + errmsg = did_set_cb(&args); +diff --git a/src/proto/optionstr.pro b/src/proto/optionstr.pro +index 22601ba..4ce9321 100644 +--- a/src/proto/optionstr.pro ++++ b/src/proto/optionstr.pro +@@ -8,7 +8,7 @@ void check_string_option(char_u **pp); + void set_string_option_direct(char_u *name, int opt_idx, char_u *val, int opt_flags, int set_sid); + void set_string_option_direct_in_win(win_T *wp, char_u *name, int opt_idx, char_u *val, int opt_flags, int set_sid); + void set_string_option_direct_in_buf(buf_T *buf, char_u *name, int opt_idx, char_u *val, int opt_flags, int set_sid); +-char *set_string_option(int opt_idx, char_u *value, int opt_flags, char *errbuf); ++char *set_string_option(int opt_idx, char_u *value, int opt_flags, char *errbuf, int errbuflen); + char *did_set_ambiwidth(optset_T *args); + char *did_set_background(optset_T *args); + char *did_set_backspace(optset_T *args); +@@ -121,7 +121,7 @@ char *did_set_wildmode(optset_T *args); + char *did_set_wildoptions(optset_T *args); + char *did_set_winaltkeys(optset_T *args); + char *did_set_wincolor(optset_T *args); +-char *did_set_string_option(int opt_idx, char_u **varp, char_u *oldval, char_u *value, char *errbuf, int opt_flags, set_op_T op, int *value_checked); ++char *did_set_string_option(int opt_idx, char_u **varp, char_u *oldval, char_u *value, char *errbuf, int errbuflen, int opt_flags, set_op_T op, int *value_checked); + int expand_set_ambiwidth(optexpand_T *args, int *numMatches, char_u ***matches); + int expand_set_background(optexpand_T *args, int *numMatches, char_u ***matches); + int expand_set_backspace(optexpand_T *args, int *numMatches, char_u ***matches); +diff --git a/src/structs.h b/src/structs.h +index 4e081b8..6d9dcbb 100644 +--- a/src/structs.h ++++ b/src/structs.h +@@ -4968,6 +4968,8 @@ typedef struct + // is parameterized, then the "os_errbuf" buffer is used to store the error + // message (when it is not NULL). 
+ char *os_errbuf; ++ // length of the error buffer ++ int os_errbuflen; + } optset_T; + + /* +diff --git a/src/testdir/test_crash.vim b/src/testdir/test_crash.vim +index b093b05..ff0898f 100644 +--- a/src/testdir/test_crash.vim ++++ b/src/testdir/test_crash.vim +@@ -86,6 +86,13 @@ func Test_crash1() + call delete('Xerr') + call delete('@') + ++ let file = 'crash/poc_did_set_langmap' ++ let cmn_args = "%s -u NONE -i NONE -n -X -m -n -e -s -S %s -c ':qa!'" ++ let args = printf(cmn_args, vim, file) ++ call term_sendkeys(buf, args .. ++ \ ' ; echo "crash 11: [OK]" >> '.. result .. "\") ++ call TermWait(buf, 150) ++ + " clean up + exe buf .. "bw!" + +@@ -102,6 +109,7 @@ func Test_crash1() + \ 'crash 8: [OK]', + \ 'crash 9: [OK]', + \ 'crash 10: [OK]', ++ \ 'crash 11: [OK]', + \ ] + + call assert_equal(expected, getline(1, '$')) +-- +2.34.1 + diff --git a/SPECS/vim/vim.spec b/SPECS/vim/vim.spec index 5da5c0d759b..1c1341ed621 100644 --- a/SPECS/vim/vim.spec +++ b/SPECS/vim/vim.spec @@ -2,13 +2,14 @@ Summary: Text editor Name: vim Version: 9.0.2121 -Release: 1%{?dist} +Release: 2%{?dist} License: Vim Vendor: Microsoft Corporation Distribution: Mariner Group: Applications/Editors URL: https://www.vim.org Source0: https://github.com/%{name}/%{name}/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz +Patch0: CVE-2024-22667.patch BuildRequires: ncurses-devel BuildRequires: python3-devel Requires(post): sed @@ -196,6 +197,9 @@ fi %{_bindir}/vimdiff %changelog +* Tue Feb 20 2024 Suresh Thelkar - 9.0.2121-2 +- Patch CVE-2024-22667 + * Tue Dec 05 2023 CBL-Mariner Servicing Account - 9.0.2121-1 - Auto-upgrade to 9.0.2121 - Fix CVE-2023-48706 diff --git a/SPECS/virtiofsd/virtiofsd.spec b/SPECS/virtiofsd/virtiofsd.spec index aafd160e845..d5e5c3f469f 100644 --- a/SPECS/virtiofsd/virtiofsd.spec +++ b/SPECS/virtiofsd/virtiofsd.spec @@ -1,6 +1,6 @@ Name: virtiofsd Version: 1.8.0 -Release: 1%{?dist} +Release: 2%{?dist} Summary: Virtio-fs vhost-user device daemon (Rust version) License: Apache-2.0 AND BSD-3-Clause Vendor: Microsoft Corporation @@ -17,8 +17,6 @@ Source0: https://gitlab.com/virtio-fs/virtiofsd/-/archive/v%{version}/%{n Source1: %{name}-v%{version}-cargo.tar.gz Source2: config.toml -ExclusiveArch: x86_64 - BuildRequires: cargo BuildRequires: libcap-ng-devel BuildRequires: libseccomp-devel @@ -50,6 +48,9 @@ install -D -p -m 0755 target/release/virtiofsd %{buildroot}%{_libexecdir}/virtio %{_libexecdir}/virtiofsd-rs %changelog +* Fri Feb 16 2024 Muhammad Falak - 1.8.0-2 +- Drop ExclusiveArch: x86_64 to build on all supported platforms + * Tue Jan 9 2024 Aurélien Bombo - 1.8.0-1 - Initial CBL-Mariner import from Fedora 39 (license: MIT). - License verified. 
diff --git a/cgmanifest.json b/cgmanifest.json index 4d6c74589c2..b400c576972 100644 --- a/cgmanifest.json +++ b/cgmanifest.json @@ -1087,8 +1087,8 @@ "type": "other", "other": { "name": "bind", - "version": "9.16.44", - "downloadUrl": "https://ftp.isc.org/isc/bind9/9.16.44/bind-9.16.44.tar.xz" + "version": "9.16.48", + "downloadUrl": "https://ftp.isc.org/isc/bind9/9.16.48/bind-9.16.48.tar.xz" } } }, @@ -2748,8 +2748,8 @@ "type": "other", "other": { "name": "dnsmasq", - "version": "2.89", - "downloadUrl": "https://www.thekelleys.org.uk/dnsmasq/dnsmasq-2.89.tar.xz" + "version": "2.90", + "downloadUrl": "https://www.thekelleys.org.uk/dnsmasq/dnsmasq-2.90.tar.xz" } } }, @@ -8041,8 +8041,8 @@ "type": "other", "other": { "name": "kata-containers", - "version": "3.1.0", - "downloadUrl": "https://github.com/kata-containers/kata-containers/archive/refs/tags/3.1.0.tar.gz" + "version": "3.2.0.azl0", + "downloadUrl": "https://github.com/microsoft/kata-containers/archive/refs/tags/3.2.0.azl0.tar.gz" } } }, @@ -8051,8 +8051,8 @@ "type": "other", "other": { "name": "kata-containers-cc", - "version": "0.6.3", - "downloadUrl": "https://github.com/microsoft/kata-containers/archive/refs/tags/cc-0.6.3.tar.gz" + "version": "3.2.0.azl0", + "downloadUrl": "https://github.com/microsoft/kata-containers/archive/refs/tags/3.2.0.azl0.tar.gz" } } }, @@ -9441,8 +9441,8 @@ "type": "other", "other": { "name": "libgit2", - "version": "1.4.5", - "downloadUrl": "https://github.com/libgit2/libgit2/archive/v1.4.5/libgit2-1.4.5.tar.gz" + "version": "1.6.5", + "downloadUrl": "https://github.com/libgit2/libgit2/archive/v1.6.5/libgit2-1.6.5.tar.gz" } } }, @@ -13363,8 +13363,8 @@ "type": "other", "other": { "name": "moby-compose", - "version": "2.17.2", - "downloadUrl": "https://github.com/docker/compose/archive/refs/tags/v2.17.2.tar.gz" + "version": "2.17.3", + "downloadUrl": "https://github.com/docker/compose/archive/refs/tags/v2.17.3.tar.gz" } } }, @@ -15709,6 +15709,16 @@ } } }, + { + "component": { + "type": "other", + "other": { + "name": "osslsigncode", + "version": "2.7", + "downloadUrl": "https://github.com/mtrojnar/osslsigncode/archive/refs/tags/2.7.tar.gz" + } + } + }, { "component": { "type": "other", @@ -21444,8 +21454,8 @@ "type": "other", "other": { "name": "postgresql", - "version": "14.10", - "downloadUrl": "https://ftp.postgresql.org/pub/source/v14.10/postgresql-14.10.tar.bz2" + "version": "14.11", + "downloadUrl": "https://ftp.postgresql.org/pub/source/v14.11/postgresql-14.11.tar.bz2" } } }, @@ -29117,8 +29127,8 @@ "type": "other", "other": { "name": "unbound", - "version": "1.16.3", - "downloadUrl": "https://github.com/NLnetLabs/unbound/archive/release-1.16.3.tar.gz" + "version": "1.19.1", + "downloadUrl": "https://github.com/nlnetlabs/unbound/archive/release-1.19.1.tar.gz" } } }, diff --git a/CodeQL.yml b/codeql3000.yml similarity index 100% rename from CodeQL.yml rename to codeql3000.yml diff --git a/toolkit/scripts/check_entangled_specs.py b/toolkit/scripts/check_entangled_specs.py index 133e15b67cc..0d2e608e9c7 100755 --- a/toolkit/scripts/check_entangled_specs.py +++ b/toolkit/scripts/check_entangled_specs.py @@ -25,6 +25,14 @@ "SPECS-SIGNED/kernel-azure-signed/kernel-azure-signed.spec", "SPECS/kernel-azure/kernel-azure.spec" ]), + frozenset([ + "SPECS-SIGNED/kernel-mshv-signed/kernel-mshv-signed.spec", + "SPECS/kernel-mshv/kernel-mshv.spec" + ]), + frozenset([ + "SPECS-SIGNED/hvloader-signed/hvloader-signed.spec", + "SPECS/hvloader/hvloader.spec" + ]), frozenset([ 
"SPECS-SIGNED/grub2-efi-binary-signed/grub2-efi-binary-signed.spec", "SPECS/grub2/grub2.spec" diff --git a/toolkit/tools/scheduler/schedulerutils/graphbuildstate.go b/toolkit/tools/scheduler/schedulerutils/graphbuildstate.go index 138124dacc6..7d0ac77693d 100644 --- a/toolkit/tools/scheduler/schedulerutils/graphbuildstate.go +++ b/toolkit/tools/scheduler/schedulerutils/graphbuildstate.go @@ -194,8 +194,8 @@ func (g *GraphBuildState) RecordBuildResult(res *BuildResult, allowToolchainRebu delete(g.activeBuilds, res.Node.ID()) - failure := (res.Err != nil) || res.CheckFailed - if failure { + available := res.Err == nil + if !available || res.CheckFailed { g.failures = append(g.failures, res) } @@ -209,7 +209,7 @@ func (g *GraphBuildState) RecordBuildResult(res *BuildResult, allowToolchainRebu } state := &nodeState{ - available: !failure, + available: available, cached: res.UsedCache, usedDelta: res.WasDelta, freshness: freshness, diff --git a/toolkit/tools/scheduler/schedulerutils/printresults.go b/toolkit/tools/scheduler/schedulerutils/printresults.go index 8431b556b84..89984c303bb 100644 --- a/toolkit/tools/scheduler/schedulerutils/printresults.go +++ b/toolkit/tools/scheduler/schedulerutils/printresults.go @@ -59,7 +59,7 @@ func RecordBuildSummary(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, b failedSRPMs, prebuiltSRPMs, prebuiltDeltaSRPMs, builtSRPMs, blockedSRPMs := getSRPMsState(pkgGraph, buildState) failedBuildNodes := buildResultsSetToNodesSet(failedSRPMs) - failedSRPMsTests, _, testedSRPMs, blockedSRPMsTests := getSRPMsTestsState(pkgGraph, buildState) + failedSRPMsTests, _, passedSRPMsTests, blockedSRPMsTests := getSRPMsTestsState(pkgGraph, buildState) failedTestNodes := buildResultsSetToNodesSet(failedSRPMsTests) csvBlob := [][]string{{"Package", "State", "Blocker", "IsTest"}} @@ -71,7 +71,7 @@ func RecordBuildSummary(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, b csvBlob = append(csvBlob, unbuiltPackagesCSVRows(pkgGraph, failedBuildNodes, failedBuildNodes, blockedSRPMs, false)...) csvBlob = append(csvBlob, unbuiltPackagesCSVRows(pkgGraph, blockedSRPMs, failedBuildNodes, blockedSRPMs, false)...) - csvBlob = append(csvBlob, successfulPackagesCSVRows(testedSRPMs, "Built", true)...) + csvBlob = append(csvBlob, successfulPackagesCSVRows(passedSRPMsTests, "Built", true)...) csvBlob = append(csvBlob, unbuiltPackagesCSVRows(pkgGraph, failedTestNodes, failedTestNodes, blockedSRPMsTests, true)...) csvBlob = append(csvBlob, unbuiltPackagesCSVRows(pkgGraph, blockedSRPMsTests, failedTestNodes, blockedSRPMsTests, true)...) 
@@ -95,7 +95,7 @@ func PrintBuildSummary(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, bu defer graphMutex.RUnlock() failedSRPMs, prebuiltSRPMs, prebuiltDeltaSRPMs, builtSRPMs, blockedSRPMs := getSRPMsState(pkgGraph, buildState) - failedSRPMsTests, skippedSRPMsTests, testedSRPMs, blockedSRPMsTests := getSRPMsTestsState(pkgGraph, buildState) + failedSRPMsTests, skippedSRPMsTests, passedSRPMsTests, blockedSRPMsTests := getSRPMsTestsState(pkgGraph, buildState) unresolvedDependencies := make(map[string]bool) rpmConflicts := buildState.ConflictingRPMs() @@ -112,7 +112,7 @@ func PrintBuildSummary(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, bu } } - printSummary(failedSRPMs, failedSRPMsTests, prebuiltSRPMs, prebuiltDeltaSRPMs, builtSRPMs, testedSRPMs, skippedSRPMsTests, unresolvedDependencies, blockedSRPMs, blockedSRPMsTests, rpmConflicts, srpmConflicts, allowToolchainRebuilds, conflictsLogger) + printSummary(failedSRPMs, failedSRPMsTests, prebuiltSRPMs, prebuiltDeltaSRPMs, builtSRPMs, passedSRPMsTests, skippedSRPMsTests, unresolvedDependencies, blockedSRPMs, blockedSRPMsTests, rpmConflicts, srpmConflicts, allowToolchainRebuilds, conflictsLogger) if len(prebuiltSRPMs) != 0 { logger.Log.Info(color.GreenString("Prebuilt SRPMs:")) @@ -146,9 +146,9 @@ func PrintBuildSummary(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, bu } } - if len(testedSRPMs) != 0 { + if len(passedSRPMsTests) != 0 { logger.Log.Info(color.GreenString("Passed SRPMs tests:")) - keys := mapToSortedSlice(testedSRPMs) + keys := mapToSortedSlice(passedSRPMsTests) for _, testedSRPM := range keys { logger.Log.Infof("--> %s", filepath.Base(testedSRPM)) } @@ -212,7 +212,7 @@ func PrintBuildSummary(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, bu } } - printSummary(failedSRPMs, failedSRPMsTests, prebuiltSRPMs, prebuiltDeltaSRPMs, builtSRPMs, testedSRPMs, skippedSRPMsTests, unresolvedDependencies, blockedSRPMs, blockedSRPMsTests, rpmConflicts, srpmConflicts, allowToolchainRebuilds, conflictsLogger) + printSummary(failedSRPMs, failedSRPMsTests, prebuiltSRPMs, prebuiltDeltaSRPMs, builtSRPMs, passedSRPMsTests, skippedSRPMsTests, unresolvedDependencies, blockedSRPMs, blockedSRPMsTests, rpmConflicts, srpmConflicts, allowToolchainRebuilds, conflictsLogger) } func buildResultsSetToNodesSet(statesSet map[string]*BuildResult) (result map[string]*pkggraph.PkgNode) { @@ -261,10 +261,10 @@ func getSRPMsState(pkgGraph *pkggraph.PkgGraph, buildState *GraphBuildState) (fa return } -func getSRPMsTestsState(pkgGraph *pkggraph.PkgGraph, buildState *GraphBuildState) (failedSRPMsTests map[string]*BuildResult, skippedSRPMsTests, testedSRPMs map[string]bool, blockedSRPMsTests map[string]*pkggraph.PkgNode) { +func getSRPMsTestsState(pkgGraph *pkggraph.PkgGraph, buildState *GraphBuildState) (failedSRPMsTests map[string]*BuildResult, skippedSRPMsTests, passedSRPMsTests map[string]bool, blockedSRPMsTests map[string]*pkggraph.PkgNode) { failedSRPMsTests = make(map[string]*BuildResult) skippedSRPMsTests = make(map[string]bool) - testedSRPMs = make(map[string]bool) + passedSRPMsTests = make(map[string]bool) blockedSRPMsTests = make(map[string]*pkggraph.PkgNode) for _, failure := range buildState.BuildFailures() { @@ -277,13 +277,15 @@ func getSRPMsTestsState(pkgGraph *pkggraph.PkgGraph, buildState *GraphBuildState if buildState.IsNodeCached(node) { skippedSRPMsTests[node.SrpmPath] = true continue - } else if buildState.IsNodeAvailable(node) { - testedSRPMs[node.SrpmPath] = true + } + + if _, testFailed := 
failedSRPMsTests[node.SrpmPath]; testFailed { continue } - _, found := failedSRPMsTests[node.SrpmPath] - if !found { + if buildState.IsNodeAvailable(node) { + passedSRPMsTests[node.SrpmPath] = true + } else { blockedSRPMsTests[node.SrpmPath] = node } } @@ -337,7 +339,7 @@ func unbuiltPackagesCSVRows(pkgGraph *pkggraph.PkgGraph, unbuiltPackages, failed } // printSummary prints summarized numbers of the build to the logger. -func printSummary(failedSRPMs, failedSRPMsTests map[string]*BuildResult, prebuiltSRPMs, prebuiltDeltaSRPMs, builtSRPMs, testedSRPMs, skippedSRPMsTests, unresolvedDependencies map[string]bool, blockedSRPMs, blockedSRPMsTests map[string]*pkggraph.PkgNode, rpmConflicts, srpmConflicts []string, allowToolchainRebuilds bool, conflictsLogger func(format string, args ...interface{})) { +func printSummary(failedSRPMs, failedSRPMsTests map[string]*BuildResult, prebuiltSRPMs, prebuiltDeltaSRPMs, builtSRPMs, passedSRPMsTests, skippedSRPMsTests, unresolvedDependencies map[string]bool, blockedSRPMs, blockedSRPMsTests map[string]*pkggraph.PkgNode, rpmConflicts, srpmConflicts []string, allowToolchainRebuilds bool, conflictsLogger func(format string, args ...interface{})) { logger.Log.Info("---------------------------") logger.Log.Info("--------- Summary ---------") logger.Log.Info("---------------------------") @@ -346,7 +348,7 @@ func printSummary(failedSRPMs, failedSRPMsTests map[string]*BuildResult, prebuil logger.Log.Infof(color.GreenString(summaryLine("Number of prebuilt delta SRPMs:", len(prebuiltDeltaSRPMs)))) logger.Log.Infof(color.GreenString(summaryLine("Number of skipped SRPMs tests:", len(skippedSRPMsTests)))) logger.Log.Infof(color.GreenString(summaryLine("Number of built SRPMs:", len(builtSRPMs)))) - logger.Log.Infof(color.GreenString(summaryLine("Number of passed SRPMs tests:", len(testedSRPMs)))) + logger.Log.Infof(color.GreenString(summaryLine("Number of passed SRPMs tests:", len(passedSRPMsTests)))) printErrorInfoByCondition(len(unresolvedDependencies) > 0, summaryLine("Number of unresolved dependencies:", len(unresolvedDependencies))) printErrorInfoByCondition(len(blockedSRPMs) > 0, summaryLine("Number of blocked SRPMs:", len(blockedSRPMs))) printErrorInfoByCondition(len(blockedSRPMsTests) > 0, summaryLine("Number of blocked SRPMs tests:", len(blockedSRPMsTests)))
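The reworked loop in getSRPMsTestsState above settles each test node in a fixed order: cached nodes are counted as skipped, nodes whose SRPM already has a failed test stay in the failed set, available nodes count as passed, and anything left is blocked. A condensed sketch of that classification with simplified stand-in types (the real code keys everything by SrpmPath over pkggraph.PkgNode and BuildResult values):

package main

import "fmt"

// testNode is a simplified stand-in for the toolkit's test node type.
type testNode struct {
	SrpmPath  string
	Cached    bool // result came from the cache
	Available bool // node (and therefore its test) completed successfully
}

// classifyTests mirrors the branch order of the refactored loop:
// skipped, already failed, passed, blocked.
func classifyTests(nodes []testNode, failed map[string]bool) (skipped, passed, blocked map[string]bool) {
	skipped = map[string]bool{}
	passed = map[string]bool{}
	blocked = map[string]bool{}
	for _, n := range nodes {
		if n.Cached {
			skipped[n.SrpmPath] = true
			continue
		}
		if failed[n.SrpmPath] {
			continue // already recorded in the failed set
		}
		if n.Available {
			passed[n.SrpmPath] = true
		} else {
			blocked[n.SrpmPath] = true
		}
	}
	return skipped, passed, blocked
}

func main() {
	nodes := []testNode{
		{SrpmPath: "a.src.rpm", Cached: true},    // skipped
		{SrpmPath: "b.src.rpm", Available: true}, // passed
		{SrpmPath: "c.src.rpm"},                  // blocked
		{SrpmPath: "d.src.rpm", Available: true}, // failed earlier, stays failed
	}
	failed := map[string]bool{"d.src.rpm": true}
	skipped, passed, blocked := classifyTests(nodes, failed)
	fmt.Println("skipped:", skipped, "passed:", passed, "blocked:", blocked)
}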