diff --git a/.gitignore b/.gitignore index 5abb9ee6337..8a85858a472 100644 --- a/.gitignore +++ b/.gitignore @@ -31,7 +31,7 @@ cloud-init_*.buildinfo cloud-init_*.changes cloud-init_*.deb cloud-init_*.dsc -cloud-init_*.orig.tar.gz +cloud-init_*.tar.gz cloud-init_*.tar.xz cloud-init_*.upload diff --git a/.pylintrc b/.pylintrc index c8e2577a010..38a47cb6581 100644 --- a/.pylintrc +++ b/.pylintrc @@ -7,7 +7,6 @@ jobs=4 [MESSAGES CONTROL] # Errors and warnings with some filtered: -# W0201(attribute-defined-outside-init) # W0212(protected-access) # W0221(arguments-differ) # W0222(signature-differs) @@ -27,7 +26,7 @@ jobs=4 # W1514(unspecified-encoding) # E0012(bad-option-value) -disable=C, F, I, R, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401, W1514, E0012 +disable=C, F, I, R, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401, W1514, E0012 [REPORTS] @@ -66,4 +65,3 @@ ignored-classes=argparse.Namespace,optparse.Values,thread._local,ImageManager,Co # system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted. generated-members=types,http.client,command_handlers,m_.*,enter_context - diff --git a/ChangeLog b/ChangeLog index 2a1596d30ff..777dd055de0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,28 @@ +24.1.2 + - test: Don't assume ordering of ThreadPoolExecutor submissions (#5052) + - refactor(ec2): simplify convert_ec2_metadata_network_config + - tests: drop CiTestCase and convert to pytest + - bug(tests): mock reads of host's /sys/class/net via get_sys_class_path + - fix: Fix breaking changes in package install (#5069) + - fix: Undeprecate 'network' in schema route definition (#5072) + - fix(ec2): fix ipv6 policy routing + - fix: document and add 'accept-ra' to network schema (#5060) + - bug(maas): register the correct DatasourceMAASLocal in init-local + (#5068) (LP: #2057763) + +24.1.1 + - fix: Include DataSourceCloudStack attribute in unpickle test (#5039) + - bug(vmware): initialize new DataSourceVMware attributes at unpickle (#5021) + - fix(apt): Don't warn on apt 822 source format (#5028) + - fix: Add "broadcast" to network v1 schema (#5034) + - pro: honor but warn on custom ubuntu_advantage in /etc/cloud/cloud.cfg (#5030) + - net/dhcp: handle timeouts for dhcpcd (#5022) + - fix: Make wait_for_url respect explicit arguments + - bug(wait_for_url): when exceptions occur url is unset, use url_exc + - test: Fix scaleway retry assumptions + - fix: Make DataSourceOracle more resilient to early network issues (#5025) + - tests: Fix wsl test (#5008) + 24.1 - fix: Don't warn on vendor directory (#4986) - apt: kill spawned keyboxd after gpg cmd interaction diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py index f309d26e2ec..8d355b3c243 100644 --- a/cloudinit/atomic_helper.py +++ b/cloudinit/atomic_helper.py @@ -7,6 +7,8 @@ import tempfile from base64 import b64decode, b64encode +from cloudinit import util + _DEF_PERMS = 0o644 LOG = logging.getLogger(__name__) @@ -43,9 +45,9 @@ def write_file( tf = None try: - tf = tempfile.NamedTemporaryFile( - dir=os.path.dirname(filename), delete=False, mode=omode - ) + dirname = os.path.dirname(filename) + util.ensure_dir(dirname) + tf = tempfile.NamedTemporaryFile(dir=dirname, delete=False, mode=omode) LOG.debug( "Atomically writing to file %s (via temporary file %s) - %s: [%o]" " %d bytes/chars", diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 
e077a7fc610..7aa6b445e16 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -35,14 +35,7 @@ from cloudinit.config import cc_set_hostname from cloudinit.config.modules import Modules from cloudinit.config.schema import validate_cloudconfig_schema -from cloudinit.log import ( - LogExporter, - setup_basic_logging, - setup_logging, - reset_logging, - configure_root_logger, - DEPRECATED, -) +from cloudinit import log from cloudinit.reporting import events from cloudinit.safeyaml import load from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG @@ -223,7 +216,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None) -> Tuple[int, str]: if is_cloud_cfg: if cmdline_name == "url": return ( - DEPRECATED, + log.DEPRECATED, str( util.deprecate( deprecated="The kernel command line key `url`", @@ -348,8 +341,8 @@ def main_init(name, args): LOG.debug( "Logging being reset, this logger may no longer be active shortly" ) - reset_logging() - setup_logging(init.cfg) + log.reset_logging() + log.setup_logging(init.cfg) apply_reporting_cfg(init.cfg) # Any log usage prior to setup_logging above did not have local user log @@ -510,7 +503,7 @@ def main_init(name, args): (outfmt, errfmt) = util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to re-adjust output redirection!") - setup_logging(mods.cfg) + log.setup_logging(mods.cfg) # give the activated datasource a chance to adjust init.activate_datasource() @@ -615,13 +608,20 @@ def main_modules(action_name, args): LOG.debug( "Logging being reset, this logger may no longer be active shortly" ) - reset_logging() - setup_logging(mods.cfg) + log.reset_logging() + log.setup_logging(mods.cfg) apply_reporting_cfg(init.cfg) # now that logging is setup and stdout redirected, send welcome welcome(name, msg=w_msg) + if name == "init": + util.deprecate( + deprecated="`--mode init`", + deprecated_version="24.1", + extra_message="Use `cloud-init init` instead.", + ) + # Stage 5 return run_module_section(mods, name, name) @@ -677,8 +677,8 @@ def main_single(name, args): LOG.debug( "Logging being reset, this logger may no longer be active shortly" ) - reset_logging() - setup_logging(mods.cfg) + log.reset_logging() + log.setup_logging(mods.cfg) apply_reporting_cfg(init.cfg) # now that logging is setup and stdout redirected, send welcome @@ -768,7 +768,7 @@ def status_wrapper(name, args, data_d=None, link_d=None): v1["stage"] = mode v1[mode]["start"] = time.time() v1[mode]["recoverable_errors"] = next( - filter(lambda h: isinstance(h, LogExporter), root_logger.handlers) + filter(lambda h: isinstance(h, log.LogExporter), root_logger.handlers) ).export_logs() # Write status.json prior to running init / module code @@ -798,7 +798,7 @@ def status_wrapper(name, args, data_d=None, link_d=None): # Write status.json after running init / module code v1[mode]["recoverable_errors"] = next( - filter(lambda h: isinstance(h, LogExporter), root_logger.handlers) + filter(lambda h: isinstance(h, log.LogExporter), root_logger.handlers) ).export_logs() atomic_helper.write_json(status_path, status) @@ -856,7 +856,7 @@ def main_features(name, args): def main(sysv_args=None): - configure_root_logger() + log.configure_root_logger() if not sysv_args: sysv_args = sys.argv parser = argparse.ArgumentParser(prog=sysv_args.pop(0)) @@ -918,11 +918,20 @@ def main(sysv_args=None): parser_mod = subparsers.add_parser( "modules", help="Activate modules using a given configuration key." 
) + extra_help = util.deprecate( + deprecated="`init`", + deprecated_version="24.1", + extra_message="Use `cloud-init init` instead.", + return_log=True, + ) parser_mod.add_argument( "--mode", "-m", action="store", - help="Module configuration name to use (default: %(default)s).", + help=( + f"Module configuration name to use (default: %(default)s)." + f" {extra_help}" + ), default="config", choices=("init", "config", "final"), ) @@ -1080,9 +1089,11 @@ def main(sysv_args=None): # - if --debug is passed, logging.DEBUG # - if --debug is not passed, logging.WARNING if name not in ("init", "modules"): - setup_basic_logging(logging.DEBUG if args.debug else logging.WARNING) + log.setup_basic_logging( + logging.DEBUG if args.debug else logging.WARNING + ) elif args.debug: - setup_basic_logging() + log.setup_basic_logging() # Setup signal handlers before running signal_handler.attach_handlers() @@ -1132,6 +1143,12 @@ def main(sysv_args=None): args=(name, args), ) reporting.flush_events() + + # handle return code for main_modules, as it is not wrapped by + # status_wrapper when mode == init + if "modules" == name and "init" == args.mode: + retval = len(retval) + return retval diff --git a/cloudinit/config/cc_ansible.py b/cloudinit/config/cc_ansible.py index 659420aff30..2878a68ac42 100644 --- a/cloudinit/config/cc_ansible.py +++ b/cloudinit/config/cc_ansible.py @@ -9,12 +9,12 @@ from textwrap import dedent from typing import Optional +from cloudinit import subp from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema, get_meta_doc from cloudinit.distros import ALL_DISTROS, Distro from cloudinit.settings import PER_INSTANCE -from cloudinit.subp import subp, which from cloudinit.util import Version, get_cfg_by_path meta: MetaSchema = { @@ -100,7 +100,7 @@ def do_as(self, command: list, **kwargs): return self.distro.do_as(command, self.run_user, **kwargs) def subp(self, command, **kwargs): - return subp(command, update_env=self.env, **kwargs) + return subp.subp(command, update_env=self.env, **kwargs) @abc.abstractmethod def is_installed(self): @@ -165,7 +165,7 @@ def install(self, pkg_name: str): self.distro.install_packages([pkg_name]) def is_installed(self) -> bool: - return bool(which("ansible")) + return bool(subp.which("ansible")) def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 65e2b9e5582..99a8f556c4d 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -14,14 +14,14 @@ import pathlib import re import shutil -import signal from textwrap import dedent, indent from typing import Dict, Iterable, List, Mapping -from cloudinit import features, gpg, subp, templater, util +from cloudinit import features, subp, templater, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import MetaSchema, get_meta_doc +from cloudinit.gpg import GPG from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -222,13 +222,13 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: if not isinstance(apt_cfg, dict): raise ValueError( - "Expected dictionary for 'apt' config, found {config_type}".format( - config_type=type(apt_cfg) - ) + "Expected dictionary for 'apt' config, " + "found {config_type}".format(config_type=type(apt_cfg)) ) apply_debconf_selections(apt_cfg) - apply_apt(apt_cfg, cloud) + with GPG() as
gpg_context: + apply_apt(apt_cfg, cloud, gpg_context) def _should_configure_on_empty_apt(): @@ -240,7 +240,7 @@ def _should_configure_on_empty_apt(): return True, "Apt is available." -def apply_apt(cfg, cloud): +def apply_apt(cfg, cloud, gpg): # cfg is the 'apt' top level dictionary already in 'v3' format. if not cfg: should_config, msg = _should_configure_on_empty_apt() @@ -262,7 +262,7 @@ def apply_apt(cfg, cloud): _ensure_dependencies(cfg, matcher, cloud) if util.is_false(cfg.get("preserve_sources_list", False)): - add_mirror_keys(cfg, cloud) + add_mirror_keys(cfg, cloud, gpg) generate_sources_list(cfg, release, mirrors, cloud) rename_apt_lists(mirrors, arch) @@ -280,32 +280,10 @@ def apply_apt(cfg, cloud): add_apt_sources( cfg["sources"], cloud, + gpg, template_params=params, aa_repo_match=matcher, ) - # GH: 4344 - stop gpg-agent/dirmgr daemons spawned by gpg key imports. - # Daemons spawned by cloud-config.service on systemd v253 report (running) - gpg_process_out, _err = subp.subp( - [ - "ps", - "-o", - "ppid,pid", - "-C", - "keyboxd", - "-C", - "dirmngr", - "-C", - "gpg-agent", - ], - capture=True, - rcs=[0, 1], - ) - gpg_pids = re.findall(r"(?P<ppid>\d+)\s+(?P<pid>\d+)", gpg_process_out) - root_gpg_pids = [int(pid[1]) for pid in gpg_pids if pid[0] == "1"] - if root_gpg_pids: - LOG.debug("Killing gpg-agent and dirmngr pids: %s", root_gpg_pids) - for gpg_pid in root_gpg_pids: - os.kill(gpg_pid, signal.SIGKILL) def debconf_set_selections(selections): @@ -558,11 +536,11 @@ def disable_suites(disabled, src, release) -> str: return retsrc -def add_mirror_keys(cfg, cloud): +def add_mirror_keys(cfg, cloud, gpg): """Adds any keys included in the primary/security mirror clauses""" for key in ("primary", "security"): for mirror in cfg.get(key, []): - add_apt_key(mirror, cloud, file_name=key) + add_apt_key(mirror, cloud, gpg, file_name=key) def is_deb822_sources_format(apt_src_content: str) -> bool: @@ -708,7 +686,7 @@ def generate_sources_list(cfg, release, mirrors, cloud): ) if expected_content: if expected_content != util.load_text_file(apt_sources_list): - LOG.warning( + LOG.info( "Replacing %s to favor deb822 source format", apt_sources_list, ) @@ -716,13 +694,13 @@ def generate_sources_list(cfg, release, mirrors, cloud): apt_sources_list, UBUNTU_DEFAULT_APT_SOURCES_LIST ) else: - LOG.warning( + LOG.info( "Removing %s to favor deb822 source format", apt_sources_list ) util.del_file(apt_sources_list) -def add_apt_key_raw(key, file_name, hardened=False): +def add_apt_key_raw(key, file_name, gpg, hardened=False): """ actual adding of a key as defined in key argument to the system @@ -730,7 +708,9 @@ LOG.debug("Adding key:\n'%s'", key) try: name = pathlib.Path(file_name).stem - return apt_key("add", output_file=name, data=key, hardened=hardened) + return apt_key( + "add", gpg, output_file=name, data=key, hardened=hardened + ) except subp.ProcessExecutionError: LOG.exception("failed to add apt GPG Key to apt keyring") raise @@ -770,7 +750,7 @@ def _ensure_dependencies(cfg, aa_repo_match, cloud): cloud.distro.install_packages(sorted(missing_packages)) -def add_apt_key(ent, cloud, hardened=False, file_name=None): +def add_apt_key(ent, cloud, gpg, hardened=False, file_name=None): """ Add key to the system as defined in ent (if any).
Supports raw keys or keyid's @@ -785,7 +765,7 @@ def add_apt_key(ent, cloud, hardened=False, file_name=None): if "key" in ent: return add_apt_key_raw( - ent["key"], file_name or ent["filename"], hardened=hardened + ent["key"], file_name or ent["filename"], gpg, hardened=hardened ) @@ -793,7 +773,9 @@ def update_packages(cloud): cloud.distro.update_package_sources() -def add_apt_sources(srcdict, cloud, template_params=None, aa_repo_match=None): +def add_apt_sources( + srcdict, cloud, gpg, template_params=None, aa_repo_match=None +): """ install keys and repo source .list files defined in 'sources' @@ -834,10 +816,10 @@ def add_apt_sources(srcdict, cloud, template_params=None, aa_repo_match=None): ent["filename"] = filename if "source" in ent and "$KEY_FILE" in ent["source"]: - key_file = add_apt_key(ent, cloud, hardened=True) + key_file = add_apt_key(ent, cloud, gpg, hardened=True) template_params["KEY_FILE"] = key_file else: - add_apt_key(ent, cloud) + add_apt_key(ent, cloud, gpg) if "source" not in ent: continue @@ -1187,7 +1169,12 @@ def apply_apt_config(cfg, proxy_fname, config_fname): def apt_key( - command, output_file=None, data=None, hardened=False, human_output=True + command, + gpg, + output_file=None, + data=None, + hardened=False, + human_output=True, ): """apt-key replacement @@ -1215,7 +1202,7 @@ def _get_key_files(): key_files.append(APT_TRUSTED_GPG_DIR + file) return key_files if key_files else "" - def apt_key_add(): + def apt_key_add(gpg_context): """apt-key add returns filepath to new keyring, or '/dev/null' when an error occurs @@ -1230,7 +1217,7 @@ def apt_key_add(): key_dir = ( CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR ) - stdout = gpg.dearmor(data) + stdout = gpg_context.dearmor(data) file_name = "{}{}.gpg".format(key_dir, output_file) util.write_file(file_name, stdout) except subp.ProcessExecutionError: @@ -1243,7 +1230,7 @@ def apt_key_add(): ) return file_name - def apt_key_list(): + def apt_key_list(gpg_context): """apt-key list returns string of all trusted keys (in /etc/apt/trusted.gpg and @@ -1252,15 +1239,17 @@ def apt_key_list(): key_list = [] for key_file in _get_key_files(): try: - key_list.append(gpg.list(key_file, human_output=human_output)) + key_list.append( + gpg_context.list_keys(key_file, human_output=human_output) + ) except subp.ProcessExecutionError as error: LOG.warning('Failed to list key "%s": %s', key_file, error) return "\n".join(key_list) if command == "add": - return apt_key_add() + return apt_key_add(gpg) elif command == "finger" or command == "list": - return apt_key_list() + return apt_key_list(gpg) else: raise ValueError( "apt_key() commands add, list, and finger are currently supported" diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 1f636d09a8e..40e32338613 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -25,6 +25,7 @@ distros = [ "almalinux", "alpine", + "azurelinux", "centos", "cloudlinux", "cos", @@ -109,6 +110,15 @@ "service_name": "ntpd", }, }, + "azurelinux": { + "chrony": { + "service_name": "chronyd", + }, + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf", + }, + }, "centos": { "ntp": { "service_name": "ntpd", diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py index 3bf1ce0b3b7..42d8c004379 100644 --- a/cloudinit/config/cc_package_update_upgrade_install.py +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ 
-120,7 +120,9 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: try: cloud.distro.install_packages(pkglist) except Exception as e: - util.logexc(LOG, "Failed to install packages: %s", pkglist) + util.logexc( + LOG, "Failure when attempting to install packages: %s", pkglist ) errors.append(e) # TODO(smoser): handle this less violently diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index aa88919cc33..c32e8a4e5cd 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -58,6 +58,7 @@ "description": MODULE_DESCRIPTION, "distros": [ "alpine", + "azurelinux", "fedora", "mariner", "opensuse", diff --git a/cloudinit/config/cc_ubuntu_autoinstall.py b/cloudinit/config/cc_ubuntu_autoinstall.py index c75f7a979f9..ff5286370db 100644 --- a/cloudinit/config/cc_ubuntu_autoinstall.py +++ b/cloudinit/config/cc_ubuntu_autoinstall.py @@ -6,7 +6,7 @@ import re from textwrap import dedent -from cloudinit import util +from cloudinit import subp, util from cloudinit.cloud import Cloud from cloudinit.config import Config from cloudinit.config.schema import ( @@ -16,7 +16,6 @@ get_meta_doc, ) from cloudinit.settings import PER_ONCE -from cloudinit.subp import subp LOG = logging.getLogger(__name__) @@ -85,7 +84,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: return util.wait_for_snap_seeded(cloud) - snap_list, _ = subp(["snap", "list"]) + snap_list, _ = subp.subp(["snap", "list"]) installer_present = None for snap_name in LIVE_INSTALLER_SNAPS: if re.search(snap_name, snap_list): diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 1ab5008f037..4df2234a479 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -27,6 +27,7 @@ distros = [ "almalinux", + "azurelinux", "centos", "cloudlinux", "eurolinux", diff --git a/cloudinit/config/modules.py b/cloudinit/config/modules.py index d19f9994c1a..e1fec671e2f 100644 --- a/cloudinit/config/modules.py +++ b/cloudinit/config/modules.py @@ -35,6 +35,10 @@ "cc_rightscale_userdata", # Removed in 24.1 ] +RENAMED_MODULES = { + "cc_ubuntu_advantage": "cc_ubuntu_pro", # Renamed 24.1 +} + class ModuleDetails(NamedTuple): module: ModuleType @@ -190,14 +194,26 @@ def _fixup_modules(self, raw_mods) -> List[ModuleDetails]: if not mod_name: continue if freq and freq not in FREQUENCIES: - LOG.warning( - "Config specified module %s has an unknown frequency %s", - raw_name, - freq, + util.deprecate( + deprecated=( + f"Config specified module {raw_name} has an unknown" + f" frequency {freq}" + ), + deprecated_version="22.1", ) # Misconfigured in /etc/cloud/cloud.cfg. Reset so cc_* module # default meta attribute "frequency" value is used. freq = None + if mod_name in RENAMED_MODULES: + util.deprecate( + deprecated=( + f"Module has been renamed from {mod_name} to " f"{RENAMED_MODULES[mod_name]}. 
Update any" + " references in /etc/cloud/cloud.cfg" + ), + deprecated_version="24.1", + ) + mod_name = RENAMED_MODULES[mod_name] mod_locs, looked_locs = importer.find_module( mod_name, ["", type_utils.obj_name(config)], ["handle"] ) diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index ff61dcaa60b..a71cec99629 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -835,6 +835,13 @@ "vault_password_file": { "type": "string" }, + "verify_commit": { + "type": "boolean", + "default": false + }, + "inventory": { + "type": "string" + }, "module_name": { "type": "string" }, diff --git a/cloudinit/config/schemas/schema-network-config-v1.json b/cloudinit/config/schemas/schema-network-config-v1.json index 43097af4858..78628178eb6 100644 --- a/cloudinit/config/schemas/schema-network-config-v1.json +++ b/cloudinit/config/schemas/schema-network-config-v1.json @@ -36,6 +36,10 @@ "items": { "$ref": "#/$defs/config_type_subnet" } + }, + "accept-ra": { + "type": "boolean", + "description": "Whether to accept IPv6 Router Advertisements (RA) on this interface. If unset, it will not be rendered" } } }, @@ -446,10 +450,7 @@ }, "network": { "type": "string", - "description": "IPv4 network address with CIDR netmask notation or IPv6 with prefix length. Alias for ``destination`` and only read when ``destination`` key is absent.", - "deprecated": true, - "deprecated_version": "23.3", - "deprecated_description": "Use ``destination`` instead." + "description": "IPv4 network address with CIDR netmask notation or IPv6 with prefix length. Alias for ``destination`` and only read when ``destination`` key is absent. This exists for OpenStack support. OpenStack route definitions are passed through to v1 config and OpenStack's ``network_data.json`` uses ``network`` instead of ``destination``." }, "destination": { "type": "string", @@ -503,6 +504,10 @@ "type": "string", "description": "IPv4 subnet mask in dotted format or CIDR notation" }, + "broadcast": { + "type": "string", + "description": "IPv4 broadcast address in dotted format." + }, "gateway": { "type": "string", "description": "IPv4 address of the default gateway for this subnet." 
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 8b4c7e9345c..2c4d54871ea 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -68,6 +68,7 @@ "redhat": [ "almalinux", "amazon", + "azurelinux", "centos", "cloudlinux", "eurolinux", @@ -238,21 +239,29 @@ def install_packages(self, pkglist: PackageList): # First install packages using package manager(s) # supported by the distro - uninstalled = [] + total_failed: Set[str] = set() for manager in self.package_managers: - to_try = ( - packages_by_manager.get(manager.__class__, set()) - | generic_packages + + manager_packages = packages_by_manager.get( + manager.__class__, set() ) + + to_try = manager_packages | generic_packages + # Remove any failed we will try for this package manager + total_failed.difference_update(to_try) + if not manager.available(): + LOG.debug("Package manager '%s' not available", manager.name) + total_failed.update(to_try) + continue if not to_try: continue - uninstalled = manager.install_packages(to_try) - failed = { - pkg for pkg in uninstalled if pkg not in generic_packages - } + failed = manager.install_packages(to_try) + total_failed.update(failed) if failed: LOG.info(error_message, failed) - generic_packages = set(uninstalled) + # Ensure we don't attempt to install packages specific to + # one particular package manager using another package manager + generic_packages = set(failed) - manager_packages # Now attempt any specified package managers not explicitly supported # by distro @@ -260,14 +269,14 @@ def install_packages(self, pkglist: PackageList): if manager_type.name in [p.name for p in self.package_managers]: # We already installed/attempted these; don't try again continue - uninstalled.extend( + total_failed.update( manager_type.from_config( self._runner, self._cfg ).install_packages(pkglist=packages) ) - if uninstalled: - raise PackageInstallerError(error_message % uninstalled) + if total_failed: + raise PackageInstallerError(error_message % total_failed) @property def dhcp_client(self) -> dhcp.DhcpClient: diff --git a/cloudinit/distros/azurelinux.py b/cloudinit/distros/azurelinux.py new file mode 100644 index 00000000000..5098a45942d --- /dev/null +++ b/cloudinit/distros/azurelinux.py @@ -0,0 +1,72 @@ +# Copyright (C) 2024 Microsoft Corporation +# +# Author: Dan Streetman +# +# This file is part of cloud-init. See LICENSE file for license information. + +import logging + +from cloudinit import subp, util +from cloudinit.distros import rhel +from cloudinit.net.netplan import CLOUDINIT_NETPLAN_FILE + +LOG = logging.getLogger(__name__) + +NETWORK_FILE_HEADER = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. 
To disable cloud-init's +# network configuration capabilities, write a file +# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: +# network: {config: disabled} +""" + + +class Distro(rhel.Distro): + def __init__(self, name, cfg, paths): + super().__init__(name, cfg, paths) + self.osfamily = "azurelinux" + + self.network_conf_dir = "/etc/systemd/network/" + self.systemd_locale_conf_fn = "/etc/locale.conf" + self.resolve_conf_fn = "/etc/systemd/resolved.conf" + self.init_cmd = ["systemctl"] + + self.network_conf_fn = {"netplan": CLOUDINIT_NETPLAN_FILE} + self.renderer_configs = { + "networkd": { + "resolv_conf_fn": self.resolve_conf_fn, + "network_conf_dir": self.network_conf_dir, + }, + "netplan": { + "netplan_path": self.network_conf_fn["netplan"], + "netplan_header": NETWORK_FILE_HEADER, + "postcmds": "True", + }, + } + + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + + if subp.which("dnf"): + LOG.debug("Using DNF for package management") + cmd = ["dnf"] + else: + LOG.debug("Using TDNF for package management") + cmd = ["tdnf"] + # Determines whether or not dnf/tdnf prompts for confirmation + # of critical actions. We don't want to prompt... + cmd.append("-y") + + if args and isinstance(args, str): + cmd.append(args) + elif args and isinstance(args, list): + cmd.extend(args) + + cmd.append(command) + + pkglist = util.expand_package_list("%s-%s", pkgs) + cmd.extend(pkglist) + + # Allow the output of this to flow outwards (ie not be captured) + subp.subp(cmd, capture=False) diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 53b4ac4eb3b..2d8fa02fea6 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -121,14 +121,10 @@ def add_user(self, name, **kwargs): pw_useradd_cmd.append("-d/nonexistent") log_pw_useradd_cmd.append("-d/nonexistent") else: - pw_useradd_cmd.append( - "-d{home_dir}/{name}".format(home_dir=self.home_dir, name=name) - ) + homedir = kwargs.get("homedir", f"{self.home_dir}/{name}") + pw_useradd_cmd.append("-d" + homedir) pw_useradd_cmd.append("-m") - log_pw_useradd_cmd.append( - "-d{home_dir}/{name}".format(home_dir=self.home_dir, name=name) - ) - + log_pw_useradd_cmd.append("-d" + homedir) log_pw_useradd_cmd.append("-m") # Run the command diff --git a/cloudinit/distros/package_management/apt.py b/cloudinit/distros/package_management/apt.py index 627c0348e69..7f39b808c58 100644 --- a/cloudinit/distros/package_management/apt.py +++ b/cloudinit/distros/package_management/apt.py @@ -3,6 +3,7 @@ import functools import logging import os +import re import time from typing import Any, Iterable, List, Mapping, Optional, Sequence, cast @@ -83,11 +84,13 @@ def __init__( ): super().__init__(runner) if apt_get_command is None: - apt_get_command = APT_GET_COMMAND + self.apt_get_command = APT_GET_COMMAND + else: + self.apt_get_command = apt_get_command if apt_get_upgrade_subcommand is None: apt_get_upgrade_subcommand = "dist-upgrade" self.apt_command = tuple(apt_get_wrapper_command) + tuple( - apt_get_command + self.apt_get_command ) self.apt_get_upgrade_subcommand = apt_get_upgrade_subcommand @@ -104,6 +105,9 @@ def from_config(cls, runner: helpers.Runners, cfg: Mapping) -> "Apt": apt_get_upgrade_subcommand=cfg.get("apt_get_upgrade_subcommand"), ) + def available(self) -> bool: + return bool(subp.which(self.apt_get_command[0])) + def update_package_sources(self): self.runner.run( "update-sources", @@ -123,7 +127,17 @@ def get_all_packages(self): return set(resp.splitlines()) def get_unavailable_packages(self, pkglist: 
Iterable[str]): - return [pkg for pkg in pkglist if pkg not in self.get_all_packages()] + # Packages ending with `-` signify to apt to not install a transitive + # dependency. + # Anything after "/" refers to a target release + # "=" allows specifying a specific version + # Strip all off when checking for availability + return [ + pkg + for pkg in pkglist + if re.split("/|=", pkg)[0].rstrip("-") + not in self.get_all_packages() + ] def install_packages(self, pkglist: Iterable) -> UninstalledPackages: self.update_package_sources() @@ -131,11 +145,12 @@ def install_packages(self, pkglist: Iterable) -> UninstalledPackages: unavailable = self.get_unavailable_packages( [x.split("=")[0] for x in pkglist] ) - LOG.debug( - "The following packages were not found by APT so APT will " - "not attempt to install them: %s", - unavailable, - ) + if unavailable: + LOG.debug( + "The following packages were not found by APT so APT will " + "not attempt to install them: %s", + unavailable, + ) to_install = [p for p in pkglist if p not in unavailable] if to_install: self.run_package_command("install", pkgs=to_install) diff --git a/cloudinit/distros/package_management/package_manager.py b/cloudinit/distros/package_management/package_manager.py index 864555f6a4d..d92b11d5a6d 100644 --- a/cloudinit/distros/package_management/package_manager.py +++ b/cloudinit/distros/package_management/package_manager.py @@ -17,6 +17,10 @@ def __init__(self, runner: helpers.Runners, **kwargs): def from_config(cls, runner: helpers.Runners, cfg) -> "PackageManager": return cls(runner) + @abstractmethod + def available(self) -> bool: + """Return if package manager is installed on system.""" + @abstractmethod def update_package_sources(self): ... diff --git a/cloudinit/distros/package_management/snap.py b/cloudinit/distros/package_management/snap.py index 92eb1af8f5f..a5fc2a89db1 100644 --- a/cloudinit/distros/package_management/snap.py +++ b/cloudinit/distros/package_management/snap.py @@ -14,6 +14,9 @@ class Snap(PackageManager): name = "snap" + def available(self) -> bool: + return bool(subp.which("snap")) + def update_package_sources(self): pass diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py index fca316821d8..6e7fc36a63a 100644 --- a/cloudinit/gpg.py +++ b/cloudinit/gpg.py @@ -8,132 +8,229 @@ """gpg.py - Collection of gpg key related functions""" import logging +import os +import re +import signal import time +from tempfile import TemporaryDirectory +from typing import Dict, Optional from cloudinit import subp LOG = logging.getLogger(__name__) -GPG_LIST = [ - "gpg", - "--with-fingerprint", - "--no-default-keyring", - "--list-keys", - "--keyring", -] - - -def export_armour(key): - """Export gpg key, armoured key gets returned""" - try: - (armour, _) = subp.subp( - ["gpg", "--export", "--armour", key], capture=True - ) - except subp.ProcessExecutionError as error: - # debug, since it happens for any key not on the system initially - LOG.debug('Failed to export armoured key "%s": %s', key, error) - armour = None - return armour - - -def dearmor(key): - """Dearmor gpg key, dearmored key gets returned - - note: man gpg(1) makes no mention of an --armour spelling, only --armor - """ - return subp.subp(["gpg", "--dearmor"], data=key, decode=False).stdout - - -def list(key_file, human_output=False): - """List keys from a keyring with fingerprints. Default to a stable machine - parseable format. 
- - @param key_file: a string containing a filepath to a key - @param human_output: return output intended for human parsing - """ - cmd = [] - cmd.extend(GPG_LIST) - if not human_output: - cmd.append("--with-colons") - - cmd.append(key_file) - (stdout, stderr) = subp.subp(cmd, capture=True) - if stderr: - LOG.warning('Failed to export armoured key "%s": %s', key_file, stderr) - return stdout - - -def recv_key(key, keyserver, retries=(1, 1)): - """Receive gpg key from the specified keyserver. - - Retries are done by default because keyservers can be unreliable. - Additionally, there is no way to determine the difference between - a non-existent key and a failure. In both cases gpg (at least 2.2.4) - exits with status 2 and stderr: "keyserver receive failed: No data" - It is assumed that a key provided to cloud-init exists on the keyserver - so re-trying makes better sense than failing. - - @param key: a string key fingerprint (as passed to gpg --recv-keys). - @param keyserver: the keyserver to request keys from. - @param retries: an iterable of sleep lengths for retries. - Use None to indicate no retries.""" - LOG.debug("Importing key '%s' from keyserver '%s'", key, keyserver) - cmd = ["gpg", "--no-tty", "--keyserver=%s" % keyserver, "--recv-keys", key] - if retries is None: - retries = [] - trynum = 0 - error = None - sleeps = iter(retries) - while True: - trynum += 1 +HOME = "GNUPGHOME" + + +class GPG: + def __init__(self): + self.gpg_started = False + self._env = {} + self.temp_dir = TemporaryDirectory() + + def __enter__(self): + return self + + @property + def env(self) -> Dict[str, str]: + """when this env property gets invoked, set up our temporary + directory, and also set gpg_started to tell the cleanup() + method whether or not gpg was used and needs to be cleaned up + + why put this here and not in __init__? pytest seems unhappy + and it's not obvious how to work around it + """ + if self._env: + return self._env + self.gpg_started = True + self._env = {HOME: self.temp_dir.name} + return self._env + + def __exit__(self, exc_typ, exc_value, traceback): + self.cleanup() + + def cleanup(self) -> None: + """cleanup the gpg temporary directory and kill gpg""" + self.kill_gpg() + if self.temp_dir and os.path.isdir(self.temp_dir.name): + self.temp_dir.cleanup() + + def export_armour(self, key: str) -> Optional[str]: + """Export gpg key, armoured key gets returned""" try: - subp.subp(cmd, capture=True) - LOG.debug( - "Imported key '%s' from keyserver '%s' on try %d", - key, - keyserver, - trynum, + return subp.subp( + ["gpg", "--export", "--armour", key], + capture=True, + update_env=self.env, + ).stdout + except subp.ProcessExecutionError as error: + # debug, since it happens for any key not on the system initially + LOG.debug('Failed to export armoured key "%s": %s', key, error) + return None + + def dearmor(self, key: str) -> str: + """Dearmor gpg key, dearmored key gets returned + + note: man gpg(1) makes no mention of an --armour spelling, only --armor + """ + return subp.subp( + ["gpg", "--dearmor"], data=key, decode=False, update_env=self.env + ).stdout + + def list_keys(self, key_file: str, human_output=False) -> str: + """List keys from a keyring with fingerprints. Default to a + stable machine parseable format. 
+ + @param key_file: a string containing a filepath to a key + @param human_output: return output intended for human parsing + """ + cmd = [ + "gpg", + "--no-options", + "--with-fingerprint", + "--no-default-keyring", + "--list-keys", + "--keyring", + ] + if not human_output: + cmd.append("--with-colons") + + cmd.append(key_file) + stdout, stderr = subp.subp(cmd, update_env=self.env, capture=True) + if stderr: + LOG.warning( + 'Failed to list keys in keyring "%s": %s', key_file, stderr ) + return stdout + + def recv_key(self, key: str, keyserver: str, retries=(1, 1)) -> None: + """Receive gpg key from the specified keyserver. + + Retries are done by default because keyservers can be unreliable. + Additionally, there is no way to determine the difference between + a non-existent key and a failure. In both cases gpg (at least 2.2.4) + exits with status 2 and stderr: "keyserver receive failed: No data" + It is assumed that a key provided to cloud-init exists on the keyserver + so re-trying makes better sense than failing. + + @param key: a string key fingerprint (as passed to gpg --recv-keys). + @param keyserver: the keyserver to request keys from. + @param retries: an iterable of sleep lengths for retries. + Use None to indicate no retries.""" + LOG.debug("Importing key '%s' from keyserver '%s'", key, keyserver) + trynum = 0 + error = None + sleeps = iter(retries or []) + while True: + trynum += 1 + try: + subp.subp( + [ + "gpg", + "--no-tty", + "--keyserver=%s" % keyserver, + "--recv-keys", + key, + ], + capture=True, + update_env=self.env, + ) + LOG.debug( + "Imported key '%s' from keyserver '%s' on try %d", + key, + keyserver, + trynum, + ) + return + except subp.ProcessExecutionError as e: + error = e + try: + naplen = next(sleeps) + LOG.debug( + "Import failed with exit code %d, will try again in %ss", + error.exit_code, + naplen, + ) + time.sleep(naplen) + except StopIteration as e: + raise ValueError( + "Failed to import key '%s' from keyserver '%s' " + "after %d tries: %s" % (key, keyserver, trynum, error) + ) from e + + def delete_key(self, key: str) -> None: + """Delete the specified key from the local gpg ring""" try: + subp.subp( + ["gpg", "--batch", "--yes", "--delete-keys", key], + capture=True, + update_env=self.env, ) - subp.subp( - ["gpg", "--batch", "--yes", "--delete-keys", key], capture=True - ) - except subp.ProcessExecutionError as error: - LOG.warning('Failed delete key "%s": %s', key, error) - - -def getkeybyid(keyid, keyserver="keyserver.ubuntu.com"): - """get gpg keyid from keyserver""" - armour = export_armour(keyid) - if not armour: + except subp.ProcessExecutionError as error: + LOG.warning('Failed to delete key "%s": %s', key, error) + + def getkeybyid( + self, keyid: str, keyserver: str = "keyserver.ubuntu.com" + ) -> Optional[str]: + """get gpg keyid from keyserver""" + armour = self.export_armour(keyid) + if not armour: try: - recv_key(keyid, keyserver=keyserver) - armour = export_armour(keyid) - except ValueError: - LOG.exception("Failed to obtain gpg key %s", keyid) - raise - finally: - # delete just imported key to leave environment as it was before - delete_key(keyid) - - return armour + self.recv_key(keyid, keyserver=keyserver) + armour = self.export_armour(keyid) + except ValueError: + LOG.exception("Failed to obtain gpg key %s", keyid) + raise + finally: + # 
delete just imported key to leave environment as it + # was before + self.delete_key(keyid) + return armour + + def kill_gpg(self) -> None: + """killing with gpgconf is best practice, but when it isn't available + failover is possible + + GH: 4344 - stop gpg-agent/dirmgr daemons spawned by gpg + key imports. Daemons spawned by cloud-config.service on systemd + v253 report (running) + """ try: + if not self.gpg_started: + return + if subp.which("gpgconf"): + gpg_process_out = subp.subp( + ["gpgconf", "--kill", "all"], + capture=True, + update_env=self.env, + ).stdout + else: + gpg_process_out = subp.subp( + [ + "ps", + "-o", + "ppid,pid", + "-C", + "keyboxd", + "-C", + "dirmngr", + "-C", + "gpg-agent", + ], + capture=True, + rcs=[0, 1], + ).stdout + gpg_pids = re.findall( + r"(?P<ppid>\d+)\s+(?P<pid>\d+)", gpg_process_out + ) + root_gpg_pids = [ + int(pid[1]) for pid in gpg_pids if pid[0] == "1" + ] + if root_gpg_pids: + LOG.debug( + "Killing gpg-agent and dirmngr pids: %s", root_gpg_pids + ) + for gpg_pid in root_gpg_pids: + os.kill(gpg_pid, signal.SIGKILL) + except subp.ProcessExecutionError as e: + LOG.warning("Failed to clean up gpg process: %s", e) diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 1b80ade56eb..048dfb0549a 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -1283,6 +1283,48 @@ def is_ipv6_network(address: str) -> bool: ) +def is_ip_in_subnet(address: str, subnet: str) -> bool: + """Returns a bool indicating if ``address`` is in subnet. + + :param address: + The string of IP address. + + :param subnet: + The string of subnet. + + :return: + A bool indicating if ``address`` is in ``subnet``. + """ + ip_address = ipaddress.ip_address(address) + subnet_network = ipaddress.ip_network(subnet, strict=False) + return ip_address in subnet_network + + +def should_add_gateway_onlink_flag(gateway: str, subnet: str) -> bool: + """Returns a bool indicating whether the gateway onlink flag should be added. + + :param gateway: + The string of gateway address. + + :param subnet: + The string of subnet. + + :return: + A bool indicating whether the gateway should be marked on-link because it falls outside ``subnet``. 
+ """ + try: + return not is_ip_in_subnet(gateway, subnet) + except ValueError as e: + LOG.warning( + "Failed to check whether gateway %s" + " is contained within subnet %s: %s", + gateway, + subnet, + e, + ) + return False + + def subnet_is_ipv6(subnet) -> bool: """Common helper for checking network_state subnets for ipv6.""" # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py index a84d965f6a5..e544eae106b 100644 --- a/cloudinit/net/activators.py +++ b/cloudinit/net/activators.py @@ -108,7 +108,7 @@ def available(target=None) -> bool: """Return true if ifconfig can be used on this system.""" expected = "ifconfig" search = ["/sbin"] - return subp.which(expected, search=search, target=target) + return bool(subp.which(expected, search=search, target=target)) @staticmethod def bring_up_interface(device_name: str) -> bool: diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index ed323d7fdab..83b99803bfb 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -15,6 +15,7 @@ import time from contextlib import suppress from io import StringIO +from subprocess import TimeoutExpired from typing import Any, Callable, Dict, List, Optional, Tuple import configobj @@ -698,9 +699,17 @@ def dhcp_discovery( return lease raise NoDHCPLeaseError("No lease found") + except TimeoutExpired as error: + LOG.debug( + "dhcpcd timed out after %s seconds: stderr: %r stdout: %r", + error.timeout, + error.stderr, + error.stdout, + ) + raise NoDHCPLeaseError from error except subp.ProcessExecutionError as error: LOG.debug( - "dhclient exited with code: %s stderr: %r stdout: %r", + "dhcpcd exited with code: %s stderr: %r stdout: %r", error.exit_code, error.stderr, error.stdout, diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 9e36fe16a03..32047c0c90b 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -2,7 +2,6 @@ import copy import io -import ipaddress import logging import os import textwrap @@ -14,6 +13,7 @@ SYS_CLASS_NET, get_devicelist, renderer, + should_add_gateway_onlink_flag, subnet_is_ipv6, ) from cloudinit.net.network_state import NET_CONFIG_TO_V2, NetworkState @@ -123,28 +123,17 @@ def _listify(obj, token=" "): "via": subnet.get("gateway"), "to": "default", } - try: - subnet_gateway = ipaddress.ip_address(subnet["gateway"]) - subnet_network = ipaddress.ip_network(addr, strict=False) - # If the gateway is not contained within the subnet's - # network, mark it as on-link so that it can still be - # reached. - if subnet_gateway not in subnet_network: - LOG.debug( - "Gateway %s is not contained within subnet %s," - " adding on-link flag", - subnet["gateway"], - addr, - ) - new_route["on-link"] = True - except ValueError as e: - LOG.warning( - "Failed to check whether gateway %s" - " is contained within subnet %s: %s", + # If the gateway is not contained within the subnet's + # network, mark it as on-link so that it can still be + # reached. 
+ if should_add_gateway_onlink_flag(subnet["gateway"], addr): + LOG.debug( + "Gateway %s is not contained within subnet %s," + " adding on-link flag", subnet["gateway"], addr, - e, ) + new_route["on-link"] = True routes.append(new_route) if "dns_nameservers" in subnet: nameservers += _listify(subnet.get("dns_nameservers", [])) diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py index 29f466eda54..7a511288077 100644 --- a/cloudinit/net/networkd.py +++ b/cloudinit/net/networkd.py @@ -9,7 +9,7 @@ from typing import Optional from cloudinit import subp, util -from cloudinit.net import renderer +from cloudinit.net import renderer, should_add_gateway_onlink_flag from cloudinit.net.network_state import NetworkState LOG = logging.getLogger(__name__) @@ -68,7 +68,7 @@ def get_final_conf(self): contents += "[" + k + "]\n" for e in sorted(v[n]): contents += e + "\n" - contents += "\n" + contents += "\n" else: contents += "[" + k + "]\n" for e in sorted(v): @@ -169,6 +169,9 @@ def parse_subnets(self, iface, cfg: CfgParser): self.parse_routes(f"r{rid}", i, cfg) rid = rid + 1 if "address" in e: + addr = e["address"] + if "prefix" in e: + addr += "/" + str(e["prefix"]) subnet_cfg_map = { "address": "Address", "gateway": "Gateway", @@ -177,24 +180,30 @@ def parse_subnets(self, iface, cfg: CfgParser): } for k, v in e.items(): if k == "address": - if "prefix" in e: - v += "/" + str(e["prefix"]) - cfg.update_section("Address", subnet_cfg_map[k], v) + cfg.update_section("Address", subnet_cfg_map[k], addr) elif k == "gateway": # Use "a" as a dict key prefix for this route to # isolate it from other sources of routes cfg.update_route_section( "Route", f"a{rid}", subnet_cfg_map[k], v ) + if should_add_gateway_onlink_flag(v, addr): + LOG.debug( + "Gateway %s is not contained within subnet %s," + " adding GatewayOnLink flag", + v, + addr, + ) + cfg.update_route_section( + "Route", f"a{rid}", "GatewayOnLink", "yes" + ) rid = rid + 1 elif k == "dns_nameservers" or k == "dns_search": cfg.update_section(sec, subnet_cfg_map[k], " ".join(v)) cfg.update_section(sec, "DHCP", dhcp) - if dhcp in ["ipv6", "yes"] and isinstance( - iface.get("accept-ra", ""), bool - ): + if isinstance(iface.get("accept-ra", ""), bool): cfg.update_section(sec, "IPv6AcceptRA", iface["accept-ra"]) return dhcp diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 418e4ac69ff..aeecd15d05f 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -77,6 +77,12 @@ def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None self.supported_seed_starts = ("/", "file://") + self.source = sources.METADATA_UNKNOWN + + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + if not hasattr(self, "source"): + self.source = sources.METADATA_UNKNOWN def __str__(self): root = sources.DataSource.__str__(self) @@ -167,8 +173,6 @@ def _get_data(self): def _get_subplatform(self): """Return the subplatform metadata details.""" cloud_type = self.get_cloud_type() - if not hasattr(self, "source"): - self.source = sources.METADATA_UNKNOWN if cloud_type == "RHEV": self.source = "/dev/fd0" return "%s (%s)" % (cloud_type.lower(), self.source) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 057daea776e..9ddf275d8d9 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ 
-142,7 +142,6 @@ def _get_data(self): self.files.update(results.get("files", {})) vd = results.get("vendordata") - self.vendordata_pure = vd try: self.vendordata_raw = sources.convert_vendordata(vd) except ValueError as e: @@ -150,7 +149,6 @@ def _get_data(self): self.vendordata_raw = None vd2 = results.get("vendordata2") - self.vendordata2_pure = vd2 try: self.vendordata2_raw = sources.convert_vendordata(vd2) except ValueError as e: diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 5145bde7bd0..f445aefc6af 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -48,6 +48,7 @@ def __init__(self, sys_cfg, distro, paths): self.use_ip4LL = self.ds_cfg.get("use_ip4LL", MD_USE_IPV4LL) self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY) self._network_config = None + self.metadata_full = None def _unpickle(self, ci_pkl_version: int) -> None: super()._unpickle(ci_pkl_version) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 4a2f542bff4..1b49ad16525 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -17,6 +17,7 @@ from cloudinit import dmi, net, sources from cloudinit import url_helper as uhelp from cloudinit import util, warnings +from cloudinit.distros import Distro from cloudinit.event import EventScope, EventType from cloudinit.net import activators from cloudinit.net.dhcp import NoDHCPLeaseError @@ -116,6 +117,7 @@ class DataSourceEc2(sources.DataSource): def __init__(self, sys_cfg, distro, paths): super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) self.metadata_address = None + self.identity = None def _unpickle(self, ci_pkl_version: int) -> None: super()._unpickle(ci_pkl_version) @@ -202,9 +204,6 @@ def launch_index(self): @property def platform(self): - # Handle upgrade path of pickled ds - if not hasattr(self, "_platform_type"): - self._platform_type = DataSourceEc2.dsname.lower() if not self._platform_type: self._platform_type = DataSourceEc2.dsname.lower() return self._platform_type @@ -949,6 +948,82 @@ def _get_key_as_int_or(dikt, key, alt_value): } +def _configure_policy_routing( + dev_config: dict, + *, + nic_name: str, + nic_metadata: dict, + distro: Distro, + is_ipv4: bool, + table: int, +) -> None: + """ + Configure policy-based routing on secondary NICs / secondary IPs to + ensure outgoing packets are routed via the correct interface. + + @param: dev_config: network cfg v2 to be updated inplace. + @param: nic_name: nic name. Only used if ipv4. + @param: nic_metadata: nic metadata from IMDS. + @param: distro: Instance of Distro. Only used if ipv4. + @param: is_ipv4: Boolean indicating if we are acting over ipv4 or not. + @param: table: Routing table id. + """ + if not dev_config.get("routes"): + dev_config["routes"] = [] + if is_ipv4: + subnet_prefix_routes = nic_metadata["subnet-ipv4-cidr-block"] + ips = nic_metadata["local-ipv4s"] + try: + lease = distro.dhcp_client.dhcp_discovery(nic_name, distro=distro) + gateway = lease["routers"] + except NoDHCPLeaseError as e: + LOG.warning( + "Could not perform dhcp discovery on %s to find its " + "gateway. Not adding default route via the gateway. 
" + "Error: %s", + nic_name, + e, + ) + else: + # Add default route via the NIC's gateway + dev_config["routes"].append( + { + "to": "0.0.0.0/0", + "via": gateway, + "table": table, + }, + ) + else: + subnet_prefix_routes = nic_metadata["subnet-ipv6-cidr-blocks"] + ips = nic_metadata["ipv6s"] + + subnet_prefix_routes = ( + [subnet_prefix_routes] + if isinstance(subnet_prefix_routes, str) + else subnet_prefix_routes + ) + for prefix_route in subnet_prefix_routes: + dev_config["routes"].append( + { + "to": prefix_route, + "table": table, + }, + ) + + if not dev_config.get("routing-policy"): + dev_config["routing-policy"] = [] + # Packets coming from any IP associated with the current NIC + # will be routed using `table` routing table + ips = [ips] if isinstance(ips, str) else ips + for ip in ips: + dev_config["routing-policy"].append( + { + "from": ip, + "table": table, + }, + ) + + def convert_ec2_metadata_network_config( network_md, distro, @@ -1015,72 +1090,29 @@ def convert_ec2_metadata_network_config( "match": {"macaddress": mac.lower()}, "set-name": nic_name, } - # Configure policy-based routing on secondary NICs / secondary IPs to - # ensure outgoing packets are routed via the correct interface. - # # This config only works on systems using Netplan because Networking # config V2 does not support `routing-policy`, but this config is # passed through on systems using Netplan. + # See: https://github.com/canonical/cloud-init/issues/4862 # # If device-number is not present (AliYun or other ec2-like platforms), # do not configure source-routing as we cannot determine which is the # primary NIC. + table = 100 + nic_idx if ( is_netplan and nic_metadata.get("device-number") and not is_primary_nic ): dhcp_override["use-routes"] = True - table = 100 + nic_idx - dev_config["routes"] = [] - try: - lease = distro.dhcp_client.dhcp_discovery( - nic_name, distro=distro - ) - gateway = lease["routers"] - except NoDHCPLeaseError as e: - LOG.warning( - "Could not perform dhcp discovery on %s to find its " - "gateway. Not adding default route via the gateway. 
" - "Error: %s", - nic_name, - e, - ) - else: - # Add default route via the NIC's gateway - dev_config["routes"].append( - { - "to": "0.0.0.0/0", - "via": gateway, - "table": table, - }, - ) - subnet_prefix_routes = nic_metadata["subnet-ipv4-cidr-block"] - subnet_prefix_routes = ( - [subnet_prefix_routes] - if isinstance(subnet_prefix_routes, str) - else subnet_prefix_routes + _configure_policy_routing( + dev_config, + distro=distro, + nic_name=nic_name, + nic_metadata=nic_metadata, + is_ipv4=True, + table=table, ) - for prefix_route in subnet_prefix_routes: - dev_config["routes"].append( - { - "to": prefix_route, - "table": table, - }, - ) - - dev_config["routing-policy"] = [] - # Packets coming from any IPv4 associated with the current NIC - # will be routed using `table` routing table - ipv4s = nic_metadata["local-ipv4s"] - ipv4s = [ipv4s] if isinstance(ipv4s, str) else ipv4s - for ipv4 in ipv4s: - dev_config["routing-policy"].append( - { - "from": ipv4, - "table": table, - }, - ) if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured dev_config["dhcp6"] = True dev_config["dhcp6-overrides"] = dhcp_override @@ -1089,31 +1121,14 @@ def convert_ec2_metadata_network_config( and nic_metadata.get("device-number") and not is_primary_nic ): - table = 100 + nic_idx - subnet_prefix_routes = nic_metadata["subnet-ipv6-cidr-block"] - subnet_prefix_routes = ( - [subnet_prefix_routes] - if isinstance(subnet_prefix_routes, str) - else subnet_prefix_routes + _configure_policy_routing( + dev_config, + distro=distro, + nic_name=nic_name, + nic_metadata=nic_metadata, + is_ipv4=False, + table=table, ) - for prefix_route in subnet_prefix_routes: - dev_config["routes"].append( - { - "to": prefix_route, - "table": table, - }, - ) - - dev_config["routing-policy"] = [] - ipv6s = nic_metadata["ipv6s"] - ipv6s = [ipv6s] if isinstance(ipv6s, str) else ipv6s - for ipv6 in ipv6s: - dev_config["routing-policy"].append( - { - "from": ipv6, - "table": table, - }, - ) dev_config["addresses"] = get_secondary_addresses(nic_metadata, mac) if not dev_config["addresses"]: dev_config.pop("addresses") # Since we found none configured diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py index 110f7ebeffc..36403e9a25f 100644 --- a/cloudinit/sources/DataSourceHetzner.py +++ b/cloudinit/sources/DataSourceHetzner.py @@ -48,6 +48,7 @@ def __init__(self, sys_cfg, distro, paths): self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY) self._network_config = sources.UNSET self.dsmode = sources.DSMODE_NETWORK + self.metadata_full = None def _get_data(self): (on_hetzner, serial) = get_hcloud_data() diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py index 366f73ef98e..1bff11fa1db 100644 --- a/cloudinit/sources/DataSourceIBMCloud.py +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -147,7 +147,6 @@ def _get_data(self): self.userdata_raw = results.get("userdata") self.network_json = results.get("networkdata") vd = results.get("vendordata") - self.vendordata_pure = vd self.system_uuid = results["system-uuid"] try: self.vendordata_raw = sources.convert_vendordata(vd) diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py index cd316101669..79e203b8924 100644 --- a/cloudinit/sources/DataSourceLXD.py +++ b/cloudinit/sources/DataSourceLXD.py @@ -112,6 +112,7 @@ class SocketHTTPConnection(HTTPConnection): def __init__(self, socket_path): super().__init__("localhost") self.socket_path = socket_path + self.sock = None def 
connect(self): self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) @@ -166,7 +167,7 @@ class DataSourceLXD(sources.DataSource): dsname = "LXD" _network_config: Union[Dict, str] = sources.UNSET - _crawled_metadata: Union[Dict, str] = sources.UNSET + _crawled_metadata: Optional[Union[Dict, str]] = sources.UNSET sensitive_metadata_keys: Tuple[ str, ... diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 1e88ac10a71..7125c723fb1 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -104,7 +104,6 @@ def _set_data(self, url, data): ud, md, vd = data self.userdata_raw = ud self.metadata = md - self.vendordata_pure = vd if vd: try: self.vendordata_raw = sources.convert_vendordata(vd) @@ -320,7 +319,7 @@ class MAASSeedDirMalformed(Exception): # Used to match classes to dependencies datasources = [ - (DataSourceMAAS, (sources.DEP_FILESYSTEM,)), + (DataSourceMAASLocal, (sources.DEP_FILESYSTEM,)), (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceNWCS.py b/cloudinit/sources/DataSourceNWCS.py index 1ebd6e82191..03a86254891 100644 --- a/cloudinit/sources/DataSourceNWCS.py +++ b/cloudinit/sources/DataSourceNWCS.py @@ -43,6 +43,7 @@ def __init__(self, sys_cfg, distro, paths): self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY) self._network_config = sources.UNSET self.dsmode = sources.DSMODE_NETWORK + self.metadata_full = None def _get_data(self): md = self.get_metadata() diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 7581f0a508f..e40c2a2207a 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -32,6 +32,8 @@ def __init__(self, sys_cfg, distro, paths): ] self.seed_dir = None self.supported_seed_starts = ("/", "file://") + self._network_config = None + self._network_eni = None def __str__(self): root = sources.DataSource.__str__(self) @@ -209,9 +211,6 @@ def _pp2d_callback(mp, data): @property def platform_type(self): - # Handle upgrade path of pickled ds - if not hasattr(self, "_platform_type"): - self._platform_type = None if not self._platform_type: self._platform_type = "lxd" if util.is_lxd() else "nocloud" return self._platform_type diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index dd930473147..a78d861e52e 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -37,6 +37,7 @@ def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None self.seed_dir = os.path.join(paths.seed_dir, "opennebula") + self.network = None def __str__(self): root = sources.DataSource.__str__(self) diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index ef407bd31da..22ecee7af61 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -184,7 +184,6 @@ def _get_data(self): self.files.update(results.get("files", {})) vd = results.get("vendordata") - self.vendordata_pure = vd try: self.vendordata_raw = sources.convert_vendordata(vd) except ValueError as e: @@ -192,7 +191,6 @@ def _get_data(self): self.vendordata_raw = None vd2 = results.get("vendordata2") - self.vendordata2_pure = vd2 try: self.vendordata2_raw = sources.convert_vendordata(vd2) except ValueError as e: diff --git a/cloudinit/sources/DataSourceOracle.py 
b/cloudinit/sources/DataSourceOracle.py index 062c0059c56..8987278b94e 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -15,9 +15,11 @@ import base64 import ipaddress +import json import logging +import time from collections import namedtuple -from typing import Optional, Tuple +from typing import Dict, Optional, Tuple from cloudinit import atomic_helper, dmi, net, sources, util from cloudinit.distros.networking import NetworkConfig @@ -27,7 +29,7 @@ get_interfaces_by_mac, is_netfail_master, ) -from cloudinit.url_helper import UrlError, readurl +from cloudinit.url_helper import wait_for_url LOG = logging.getLogger(__name__) @@ -114,7 +116,6 @@ class DataSourceOracle(sources.DataSource): dsname = "Oracle" system_uuid = None - vendordata_pure = None network_config_sources: Tuple[sources.NetworkConfigSource, ...] = ( sources.NetworkConfigSource.CMD_LINE, sources.NetworkConfigSource.SYSTEM_CFG, @@ -123,6 +124,11 @@ class DataSourceOracle(sources.DataSource): ) _network_config: dict = {"config": [], "version": 1} + perform_dhcp_setup = True + + # Careful...these can be overridden in __init__ + url_max_wait = 30 + url_timeout = 5 def __init__(self, sys_cfg, *args, **kwargs): super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs) @@ -136,6 +142,21 @@ def __init__(self, sys_cfg, *args, **kwargs): ) self._network_config_source = KlibcOracleNetworkConfigSource() + url_params = self.get_url_params() + self.url_max_wait = url_params.max_wait_seconds + self.url_timeout = url_params.timeout_seconds + + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + if not hasattr(self, "_vnics_data"): + setattr(self, "_vnics_data", None) + if not hasattr(self, "_network_config_source"): + setattr( + self, + "_network_config_source", + KlibcOracleNetworkConfigSource(), + ) + def _has_network_config(self) -> bool: return bool(self._network_config.get("config", [])) @@ -148,23 +169,31 @@ def _get_data(self): self.system_uuid = _read_system_uuid() - network_context = ephemeral.EphemeralDHCPv4( - self.distro, - iface=net.find_fallback_nic(), - connectivity_url_data={ - "url": METADATA_PATTERN.format(version=2, path="instance"), - "headers": V2_HEADERS, - }, - ) + if self.perform_dhcp_setup: + network_context = ephemeral.EphemeralDHCPv4( + self.distro, + iface=net.find_fallback_nic(), + connectivity_url_data={ + "url": METADATA_PATTERN.format(version=2, path="instance"), + "headers": V2_HEADERS, + }, + ) + else: + network_context = util.nullcontext() fetch_primary_nic = not self._is_iscsi_root() fetch_secondary_nics = self.ds_cfg.get( "configure_secondary_nics", BUILTIN_DS_CONFIG["configure_secondary_nics"], ) + with network_context: fetched_metadata = read_opc_metadata( - fetch_vnics_data=fetch_primary_nic or fetch_secondary_nics + fetch_vnics_data=fetch_primary_nic or fetch_secondary_nics, + max_wait=self.url_max_wait, + timeout=self.url_timeout, ) + if not fetched_metadata: + return False data = self._crawled_metadata = fetched_metadata.instance_data self.metadata_address = METADATA_ROOT.format( @@ -332,6 +361,10 @@ def _add_network_config_from_opc_imds(self, set_primary: bool = False): self._network_config["ethernets"][name] = interface_config +class DataSourceOracleNet(DataSourceOracle): + perform_dhcp_setup = False + + def _read_system_uuid() -> Optional[str]: sys_uuid = dmi.read_dmi_data("system-uuid") return None if sys_uuid is None else sys_uuid.lower() @@ -342,15 +375,20 @@ def _is_platform_viable() -> bool: return 
asset_tag == CHASSIS_ASSET_TAG -def _fetch(metadata_version: int, path: str, retries: int = 2) -> dict: - return readurl( - url=METADATA_PATTERN.format(version=metadata_version, path=path), - headers=V2_HEADERS if metadata_version > 1 else None, - retries=retries, - )._response.json() +def _url_version(url: str) -> int: + return 2 if url.startswith("http://169.254.169.254/opc/v2") else 1 + + +def _headers_cb(url: str) -> Optional[Dict[str, str]]: + return V2_HEADERS if _url_version(url) == 2 else None -def read_opc_metadata(*, fetch_vnics_data: bool = False) -> OpcMetadata: +def read_opc_metadata( + *, + fetch_vnics_data: bool = False, + max_wait=DataSourceOracle.url_max_wait, + timeout=DataSourceOracle.url_timeout, +) -> Optional[OpcMetadata]: """Fetch metadata from the /opc/ routes. :return: @@ -359,30 +397,60 @@ def read_opc_metadata(*, fetch_vnics_data: bool = False) -> OpcMetadata: The JSON-decoded value of the instance data endpoint on the IMDS The JSON-decoded value of the vnics data endpoint if `fetch_vnics_data` is True, else None + or None if fetching metadata failed """ # Per Oracle, there are short windows (measured in milliseconds) throughout # an instance's lifetime where the IMDS is being updated and may 404 as a - # result. To work around these windows, we retry a couple of times. - metadata_version = 2 - try: - instance_data = _fetch(metadata_version, path="instance") - except UrlError: - metadata_version = 1 - instance_data = _fetch(metadata_version, path="instance") + # result. + urls = [ + METADATA_PATTERN.format(version=2, path="instance"), + METADATA_PATTERN.format(version=1, path="instance"), + ] + start_time = time.time() + instance_url, instance_response = wait_for_url( + urls, + max_wait=max_wait, + timeout=timeout, + headers_cb=_headers_cb, + sleep_time=0, + ) + if not instance_url: + LOG.warning("Failed to fetch IMDS metadata!") + return None + instance_data = json.loads(instance_response.decode("utf-8")) + + metadata_version = _url_version(instance_url) vnics_data = None if fetch_vnics_data: - try: - vnics_data = _fetch(metadata_version, path="vnics") - except UrlError: - util.logexc(LOG, "Failed to fetch IMDS network configuration!") + # This allows us to go over the max_wait time by the timeout length, + # but if we were able to retrieve instance metadata, that seems + # like a worthwhile tradeoff rather than having incomplete metadata. 
+ vnics_url, vnics_response = wait_for_url( + [METADATA_PATTERN.format(version=metadata_version, path="vnics")], + max_wait=max_wait - (time.time() - start_time), + timeout=timeout, + headers_cb=_headers_cb, + sleep_time=0, + ) + if vnics_url: + vnics_data = json.loads(vnics_response.decode("utf-8")) + else: + LOG.warning("Failed to fetch IMDS network configuration!") return OpcMetadata(metadata_version, instance_data, vnics_data) # Used to match classes to dependencies datasources = [ (DataSourceOracle, (sources.DEP_FILESYSTEM,)), + ( + DataSourceOracleNet, + ( + sources.DEP_FILESYSTEM, + sources.DEP_NETWORK, + ), + ), ] diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py index 6729e846056..2fba1149d86 100644 --- a/cloudinit/sources/DataSourceRbxCloud.py +++ b/cloudinit/sources/DataSourceRbxCloud.py @@ -220,6 +220,8 @@ class DataSourceRbxCloud(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed = None + self.gratuitous_arp = None + self.cfg = None def __str__(self): root = sources.DataSource.__str__(self) diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 221df256f7e..a2e1e089429 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -173,6 +173,7 @@ def __init__(self, sys_cfg, distro, paths): self.max_wait = int(self.ds_cfg.get("max_wait", DEF_MD_MAX_WAIT)) self._network_config = sources.UNSET self.metadata_urls = DS_BASE_URLS + self.metadata_url = None self.userdata_url = None self.vendordata_url = None self.ephemeral_fixed_address = None @@ -180,6 +181,20 @@ def __init__(self, sys_cfg, distro, paths): if "metadata_urls" in self.ds_cfg.keys(): self.metadata_urls += self.ds_cfg["metadata_urls"] + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + attr_defaults = { + "ephemeral_fixed_address": None, + "has_ipv4": True, + "max_wait": DEF_MD_MAX_WAIT, + "metadata_urls": DS_BASE_URLS, + "userdata_url": None, + "vendordata_url": None, + } + for attr in attr_defaults: + if not hasattr(self, attr): + setattr(self, attr, attr_defaults[attr]) + def _set_metadata_url(self, urls): """ Define metadata_url based upon api-metadata URL availability. 
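The Scaleway hunk above is one instance of a backfill-on-unpickle pattern that recurs throughout this patch (VMware and the datasource base class below use the same shape). As a rough, self-contained sketch of the idea, with an invented class and attribute name rather than anything from cloud-init:

.. code-block:: python

    import pickle


    class ExampleDataSource:
        def __init__(self):
            # Attribute introduced in a newer release; caches pickled by
            # older releases will not contain it.
            self.metadata_url = None

        def _unpickle(self) -> None:
            # Backfill attributes missing from an old pickle so that
            # later code can rely on them existing.
            attr_defaults = {"metadata_url": None}
            for attr, default in attr_defaults.items():
                if not hasattr(self, attr):
                    setattr(self, attr, default)


    # Simulate a cache written by an older release that lacked the attribute.
    old = ExampleDataSource()
    del old.metadata_url

    restored = pickle.loads(pickle.dumps(old))
    restored._unpickle()
    assert restored.metadata_url is None

Because unpickling restores ``__dict__`` without calling ``__init__``, an attribute added in a newer release is simply absent from an old cache, which makes ``hasattr`` the natural guard.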
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 7c526a160c6..801b56b135e 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -190,6 +190,7 @@ def __init__(self, sys_cfg, distro, paths): self.metadata = {} self.network_data = None self._network_config = None + self.routes_data = None self.script_base_d = os.path.join(self.paths.get_cpath("scripts")) diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py index cb47ca7df8c..c69678378e7 100644 --- a/cloudinit/sources/DataSourceUpCloud.py +++ b/cloudinit/sources/DataSourceUpCloud.py @@ -46,6 +46,7 @@ def __init__(self, sys_cfg, distro, paths): self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT) self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY) self._network_config = None + self.metadata_full = None def _get_sysinfo(self): return uc_helper.read_sysinfo() diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py index ff25b3fa074..2f8322c4dbb 100644 --- a/cloudinit/sources/DataSourceVMware.py +++ b/cloudinit/sources/DataSourceVMware.py @@ -113,6 +113,32 @@ def __init__(self, sys_cfg, distro, paths, ud_proc=None): (DATA_ACCESS_METHOD_IMC, self.get_imc_data_fn, True), ] + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + for attr in ("rpctool", "rpctool_fn"): + if not hasattr(self, attr): + setattr(self, attr, None) + if not hasattr(self, "cfg"): + setattr(self, "cfg", {}) + if not hasattr(self, "possible_data_access_method_list"): + setattr( + self, + "possible_data_access_method_list", + [ + ( + DATA_ACCESS_METHOD_ENVVAR, + self.get_envvar_data_fn, + False, + ), + ( + DATA_ACCESS_METHOD_GUESTINFO, + self.get_guestinfo_data_fn, + True, + ), + (DATA_ACCESS_METHOD_IMC, self.get_imc_data_fn, True), + ], + ) + def __str__(self): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.data_access_method) @@ -821,19 +847,18 @@ def is_valid_ip_addr(val): ) -def convert_to_netifaces_format(addr): +def convert_to_netifaces_ipv4_format(addr: dict) -> dict: """ Takes a cloudinit.netinfo formatted address and converts to netifaces format, since this module was originally written with netifaces as the network introspection module. - netifaces format: + netifaces ipv4 format: { "broadcast": "10.15.255.255", "netmask": "255.240.0.0", "addr": "10.0.1.4" } - - cloudinit.netinfo format: + cloudinit.netinfo ipv4 format: { "ip": "10.0.1.4", "mask": "255.240.0.0", @@ -841,10 +866,37 @@ def convert_to_netifaces_format(addr): "scope": "global", } """ + if not addr.get("ip"): + return {} + return { + "broadcast": addr.get("bcast"), + "netmask": addr.get("mask"), + "addr": addr.get("ip"), + } + + +def convert_to_netifaces_ipv6_format(addr: dict) -> dict: + """ + Takes a cloudinit.netinfo formatted address and converts to netifaces + format, since this module was originally written with netifaces as the + network introspection module. 
+ netifaces ipv6 format: + { + "netmask": "ffff:ffff:ffff:ffff::/64", + "addr": "2001:db8:abcd:1234::1" + } + cloudinit.netinfo ipv6 format: + { + "ip": "2001:db8:abcd:1234::1/64", + "scope6": "global", + } + """ + if not addr.get("ip"): + return {} + ipv6 = ipaddress.IPv6Interface(addr.get("ip")) return { - "broadcast": addr["bcast"], - "netmask": addr["mask"], - "addr": addr["ip"], + "netmask": f"{ipv6.netmask}/{ipv6.network.prefixlen}", + "addr": str(ipv6.ip), } @@ -852,7 +904,6 @@ def get_host_info(): """ Returns host information such as the host name and network interfaces. """ - # TODO(look to promote netifices use up in cloud-init netinfo funcs) host_info = { "network": { "interfaces": { @@ -883,9 +934,9 @@ af_inet4 = [] af_inet6 = [] for addr in ifaces[dev_name]["ipv4"]: - af_inet4.append(convert_to_netifaces_format(addr)) + af_inet4.append(convert_to_netifaces_ipv4_format(addr)) for addr in ifaces[dev_name]["ipv6"]: - af_inet6.append(convert_to_netifaces_format(addr)) + af_inet6.append(convert_to_netifaces_ipv6_format(addr)) mac = ifaces[dev_name].get("hwaddr") diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py index d03ae5882da..2d7f1f31a1e 100644 --- a/cloudinit/sources/DataSourceVultr.py +++ b/cloudinit/sources/DataSourceVultr.py @@ -42,6 +42,7 @@ def __init__(self, sys_cfg, distro, paths): BUILTIN_DS_CONFIG, ] ) + self.netcfg = None @staticmethod def ds_detect(): diff --git a/cloudinit/sources/DataSourceWSL.py b/cloudinit/sources/DataSourceWSL.py index 0fccf61f776..3d9cf58ebb4 100644 --- a/cloudinit/sources/DataSourceWSL.py +++ b/cloudinit/sources/DataSourceWSL.py @@ -11,6 +11,8 @@ from typing import List, cast from cloudinit import sources, subp, util +from cloudinit.distros import Distro +from cloudinit.helpers import Paths LOG = logging.getLogger(__name__) @@ -180,6 +182,10 @@ def load_instance_metadata(cloudinitdir: PurePath, instance_name: str) -> dict: class DataSourceWSL(sources.DataSource): dsname = "WSL" + def __init__(self, sys_cfg, distro: Distro, paths: Paths, ud_proc=None): + super().__init__(sys_cfg, distro, paths, ud_proc) + self.instance_name = instance_name() + def find_user_data_file(self, seed_dir: PurePath) -> PurePath: """ Finds the most precedent of the candidate files that may contain @@ -233,7 +239,6 @@ def check_instance_id(self, sys_cfg) -> bool: def _get_data(self) -> bool: self.vendordata_raw = None seed_dir = cloud_init_data_dir() - self.instance_name = instance_name() try: self.metadata = load_instance_metadata( diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 4ea1fc561e3..65222b29b37 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -17,7 +17,7 @@ import re from collections import namedtuple from enum import Enum, unique -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple, Union from cloudinit import atomic_helper, dmi, importer, net, type_utils from cloudinit import user_data as ud @@ -195,6 +195,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): # - seed-dir () _subplatform = None + _crawled_metadata: Optional[Union[Dict, str]] = None + # The network configuration sources that should be considered for this data # source. (The first source in this list that provides network # configuration will be used without considering any that follow.)
This @@ -312,6 +314,9 @@ def __init__(self, sys_cfg, distro: Distro, paths: Paths, ud_proc=None): self.vendordata2 = None self.vendordata_raw = None self.vendordata2_raw = None + self.metadata_address = None + self.network_json = UNSET + self.ec2_metadata = UNSET self.ds_cfg = util.get_cfg_by_path( self.sys_cfg, ("datasource", self.dsname), {} @@ -326,12 +331,22 @@ def __init__(self, sys_cfg, distro: Distro, paths: Paths, ud_proc=None): def _unpickle(self, ci_pkl_version: int) -> None: """Perform deserialization fixes for Paths.""" - if not hasattr(self, "vendordata2"): - self.vendordata2 = None - if not hasattr(self, "vendordata2_raw"): - self.vendordata2_raw = None - if not hasattr(self, "skip_hotplug_detect"): - self.skip_hotplug_detect = False + expected_attrs = { + "_crawled_metadata": None, + "_platform_type": None, + "_subplatform": None, + "ec2_metadata": UNSET, + "extra_hotplug_udev_rules": None, + "metadata_address": None, + "network_json": UNSET, + "skip_hotplug_detect": False, + "vendordata2": None, + "vendordata2_raw": None, + } + for key, value in expected_attrs.items(): + if not hasattr(self, key): + setattr(self, key, value) + if hasattr(self, "userdata") and self.userdata is not None: # If userdata stores MIME data, on < python3.6 it will be # missing the 'policy' attribute that exists on >=python3.6. @@ -347,8 +362,6 @@ def _unpickle(self, ci_pkl_version: int) -> None: e, ) raise DatasourceUnpickleUserDataError() from e - if not hasattr(self, "extra_hotplug_udev_rules"): - self.extra_hotplug_udev_rules = None def __str__(self): return type_utils.obj_name(self) @@ -476,25 +489,19 @@ def persist_instance_data(self, write_cache=True): """ if write_cache and os.path.lexists(self.paths.instance_link): pkl_store(self, self.paths.get_ipath_cur("obj_pkl")) - if hasattr(self, "_crawled_metadata"): + if self._crawled_metadata is not None: # Any datasource with _crawled_metadata will best represent # most recent, 'raw' metadata - crawled_metadata = copy.deepcopy( - getattr(self, "_crawled_metadata") - ) + crawled_metadata = copy.deepcopy(self._crawled_metadata) crawled_metadata.pop("user-data", None) crawled_metadata.pop("vendor-data", None) instance_data = {"ds": crawled_metadata} else: instance_data = {"ds": {"meta_data": self.metadata}} - if hasattr(self, "network_json"): - network_json = getattr(self, "network_json") - if network_json != UNSET: - instance_data["ds"]["network_json"] = network_json - if hasattr(self, "ec2_metadata"): - ec2_metadata = getattr(self, "ec2_metadata") - if ec2_metadata != UNSET: - instance_data["ds"]["ec2_metadata"] = ec2_metadata + if self.network_json != UNSET: + instance_data["ds"]["network_json"] = self.network_json + if self.ec2_metadata != UNSET: + instance_data["ds"]["ec2_metadata"] = self.ec2_metadata instance_data["ds"]["_doc"] = EXPERIMENTAL_TEXT # Add merged cloud.cfg and sys info for jinja templates and cli query instance_data["merged_cfg"] = copy.deepcopy(self.sys_cfg) @@ -631,9 +638,6 @@ def get_vendordata2(self): @property def platform_type(self): - if not hasattr(self, "_platform_type"): - # Handle upgrade path where pickled datasource has no _platform. - self._platform_type = self.dsname.lower() if not self._platform_type: self._platform_type = self.dsname.lower() return self._platform_type @@ -650,17 +654,14 @@ def subplatform(self): nocloud: seed-dir (/seed/dir/path) lxd: nocloud (/seed/dir/path) """ - if not hasattr(self, "_subplatform"): - # Handle upgrade path where pickled datasource has no _platform. 
- self._subplatform = self._get_subplatform() if not self._subplatform: self._subplatform = self._get_subplatform() return self._subplatform def _get_subplatform(self): """Subclasses should implement to return a "slug (detail)" string.""" - if hasattr(self, "metadata_address"): - return "metadata (%s)" % getattr(self, "metadata_address") + if self.metadata_address: + return f"metadata ({self.metadata_address})" return METADATA_UNKNOWN @property diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 031ac8c9360..094c889caef 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -763,7 +763,7 @@ def convert_net_json(network_json=None, known_macs=None): cfg["type"] = "infiniband" for service in services: - cfg = service + cfg = copy.deepcopy(service) cfg.update({"type": "nameserver"}) config.append(cfg) diff --git a/cloudinit/subp.py b/cloudinit/subp.py index c94b44e7db1..2bfe6ab5200 100644 --- a/cloudinit/subp.py +++ b/cloudinit/subp.py @@ -8,7 +8,7 @@ import time from errno import ENOEXEC from io import TextIOWrapper -from typing import List, Union +from typing import List, Optional, Union LOG = logging.getLogger(__name__) @@ -322,7 +322,7 @@ def target_path(target=None, path=None): return os.path.join(target, path) -def which(program, search=None, target=None): +def which(program, search=None, target=None) -> Optional[str]: target = target_path(target) if os.path.sep in program and is_exe(target_path(target, program)): diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index a465252bb90..6853fc6a1b2 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -471,14 +471,14 @@ def dual_stack( def wait_for_url( urls, - max_wait=None, - timeout=None, + max_wait: float = float("inf"), + timeout: Optional[float] = None, status_cb: Callable = LOG.debug, # some sources use different log levels headers_cb: Optional[Callable] = None, headers_redact=None, - sleep_time: int = 1, + sleep_time: Optional[float] = None, exception_cb: Optional[Callable] = None, - sleep_time_cb: Optional[Callable[[Any, int], int]] = None, + sleep_time_cb: Optional[Callable[[Any, float], float]] = None, request_method: str = "", connect_synchronously: bool = True, async_delay: float = 0.150, @@ -494,10 +494,15 @@ def wait_for_url( headers_cb: call method with single argument of url to get headers for request. headers_redact: a list of header names to redact from the log + sleep_time: Amount of time to sleep between retries. If this and + sleep_time_cb are None, the sleep time defaults to 1 second + and increases by 1 second every 5 tries. Cannot be + specified along with `sleep_time_cb`. exception_cb: call method with 2 arguments 'msg' (per status_cb) and 'exception', the exception that occurred. sleep_time_cb: call method with 2 arguments (response, loop_n) that - generates the next sleep time. + generates the next sleep time. Cannot be specified + along with `sleep_time`. request_method: indicate the type of HTTP request, GET, PUT, or POST connect_synchronously: if false, enables executing requests in parallel async_delay: delay before parallel metadata requests, see RFC 6555 @@ -518,17 +523,19 @@ data host (169.254.169.254) may be firewalled off entirely for a system, meaning that the connection will block forever unless a timeout is set. - A value of None for max_wait will retry indefinitely. + The default value for max_wait will retry indefinitely.
""" - def default_sleep_time(_, loop_number: int) -> int: - return int(loop_number / 5) + 1 + def default_sleep_time(_, loop_number: int) -> float: + return sleep_time if sleep_time is not None else loop_number // 5 + 1 - def timeup(max_wait, start_time): + def timeup(max_wait: float, start_time: float, sleep_time: float = 0): """Check if time is up based on start time and max wait""" - if max_wait is None: + if max_wait in (float("inf"), None): return False - return (max_wait <= 0) or (time.time() - start_time > max_wait) + return (max_wait <= 0) or ( + time.time() - start_time + sleep_time > max_wait + ) def handle_url_response(response, url): """Map requests response code/contents to internal "UrlError" type""" @@ -573,7 +580,7 @@ def read_url_handle_exceptions( time_taken = int(time.time() - start_time) max_wait_str = "%ss" % max_wait if max_wait else "unlimited" status_msg = "Calling '%s' failed [%s/%s]: %s" % ( - url, + url or getattr(url_exc, "url", "url ? None"), time_taken, max_wait_str, reason, @@ -639,6 +646,8 @@ def read_url_parallel(start_time, timeout, exc_cb, log_cb): return out start_time = time.time() + if sleep_time and sleep_time_cb: + raise ValueError("sleep_time and sleep_time_cb are mutually exclusive") # Dual-stack support factored out serial and parallel execution paths to # allow the retry loop logic to exist separately from the http calls. @@ -654,25 +663,30 @@ def read_url_parallel(start_time, timeout, exc_cb, log_cb): loop_n: int = 0 response = None while True: - sleep_time = calculate_sleep_time(response, loop_n) + current_sleep_time = calculate_sleep_time(response, loop_n) url = do_read_url(start_time, timeout, exception_cb, status_cb) if url: address, response = url return (address, response.contents) - if timeup(max_wait, start_time): + if timeup(max_wait, start_time, current_sleep_time): break loop_n = loop_n + 1 LOG.debug( - "Please wait %s seconds while we wait to try again", sleep_time + "Please wait %s seconds while we wait to try again", + current_sleep_time, ) - time.sleep(sleep_time) + time.sleep(current_sleep_time) # shorten timeout to not run way over max_time - # timeout=0.0 causes exceptions in urllib, set to None if zero - timeout = int((start_time + max_wait) - time.time()) or None + current_time = time.time() + if timeout and current_time + timeout > start_time + max_wait: + timeout = max_wait - (current_time - start_time) + if timeout <= 0: + # We've already exceeded our max_wait. Time to bail. + break LOG.error("Timed out, no response from urls: %s", urls) return False, None diff --git a/cloudinit/util.py b/cloudinit/util.py index eeed511d8ba..d9d5bef3540 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -36,15 +36,17 @@ import time from base64 import b64decode from collections import deque, namedtuple -from contextlib import suppress +from contextlib import contextmanager, suppress from errno import ENOENT from functools import lru_cache, total_ordering from pathlib import Path from typing import ( TYPE_CHECKING, + Any, Callable, Deque, Dict, + Generator, List, Mapping, Optional, @@ -649,6 +651,7 @@ def _get_variant(info): "almalinux", "alpine", "arch", + "azurelinux", "centos", "cloudlinux", "debian", @@ -3293,3 +3296,12 @@ def read_hotplug_enabled_file(paths: "Paths") -> dict: if "scopes" not in content: content["scopes"] = [] return content + + +@contextmanager +def nullcontext() -> Generator[None, Any, None]: + """Context manager that does nothing. 
+ + Note: In python-3.7+, this can be substituted by contextlib.nullcontext + """ + yield diff --git a/cloudinit/version.py b/cloudinit/version.py index 56275dc13d1..032e18306a4 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "24.1" +__VERSION__ = "24.1.2" _PACKAGED_VERSION = "@@PACKAGED_VERSION@@" FEATURES = [ diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index e21770326d0..5219b946205 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -4,14 +4,15 @@ {% set is_bsd = variant in ["dragonfly", "freebsd", "netbsd", "openbsd"] %} {% set is_rhel = variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "miraclelinux", "rhel", "rocky", "virtuozzo"] %} -{% set gecos = ({"amazon": "EC2 Default User", "centos": "Cloud User", - "debian": "Debian", "dragonfly": "DragonFly", - "freebsd": "FreeBSD", "mariner": "MarinerOS", - "rhel": "Cloud User", "netbsd": "NetBSD", - "openbsd": "openBSD", "openmandriva": "OpenMandriva admin", - "photon": "PhotonOS", "ubuntu": "Ubuntu", - "unknown": "Ubuntu"}) %} +{% set gecos = ({"amazon": "EC2 Default User", "azurelinux": "Azure Linux", + "centos": "Cloud User", "debian": "Debian", + "dragonfly": "DragonFly", "freebsd": "FreeBSD", + "mariner": "MarinerOS", "rhel": "Cloud User", + "netbsd": "NetBSD", "openbsd": "openBSD", + "openmandriva": "OpenMandriva admin", "photon": "PhotonOS", + "ubuntu": "Ubuntu", "unknown": "Ubuntu"}) %} {% set groups = ({"alpine": "adm, wheel", "arch": "wheel, users", + "azurelinux": "wheel", "debian": "adm, audio, cdrom, dialout, dip, floppy, netdev, plugdev, sudo, video", "gentoo": "users, wheel", "mariner": "wheel", "photon": "wheel", @@ -165,8 +166,8 @@ cloud_config_modules: {% if variant == "ubuntu" %} - ubuntu_pro {% endif %} -{% elif variant in ["fedora", "mariner", "openeuler", "openmandriva", - "photon"] or is_rhel %} +{% elif variant in ["azurelinux", "fedora", "mariner", "openeuler", + "openmandriva", "photon"] or is_rhel %} {% if is_rhel %} - rh_subscription {% endif %} @@ -219,10 +220,10 @@ cloud_final_modules: # (not accessible to handlers/transforms) system_info: # This will affect which distro class gets used -{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "freebsd", - "gentoo", "mariner", "netbsd", "openbsd", "OpenCloudOS", - "openeuler", "openmandriva", "photon", "suse", "TencentOS", - "ubuntu"] or is_rhel %} +{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", + "freebsd", "gentoo", "mariner", "netbsd", "openbsd", + "OpenCloudOS", "openeuler", "openmandriva", "photon", "suse", + "TencentOS", "ubuntu"] or is_rhel %} distro: {{ variant }} {% elif variant == "dragonfly" %} distro: dragonflybsd @@ -237,9 +238,10 @@ system_info: {% else %} name: {{ variant }} {% endif %} -{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "gentoo", - "mariner", "OpenCloudOS", "openeuler", "openmandriva", - "photon", "suse", "TencentOS", "ubuntu", "unknown"] +{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", + "gentoo", "mariner", "OpenCloudOS", "openeuler", + "openmandriva", "photon", "suse", "TencentOS", "ubuntu", + "unknown"] or is_bsd or is_rhel %} lock_passwd: True {% endif %} @@ -292,7 +294,7 @@ system_info: {% elif variant in ["freebsd", "netbsd", "openbsd"] %} network: renderers: ['{{ variant }}'] -{% elif variant in ["mariner", "photon"] %} +{% elif variant in ["azurelinux", "mariner", 
"photon"] %} network: renderers: ['networkd'] {% elif variant == "openmandriva" %} @@ -318,9 +320,10 @@ system_info: # Automatically discover the best ntp_client ntp_client: auto {% endif %} -{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "gentoo", - "mariner", "OpenCloudOS", "openeuler", "openmandriva", - "photon", "suse", "TencentOS", "ubuntu", "unknown"] +{% if variant in ["alpine", "amazon", "arch", "azurelinux", "debian", "fedora", + "gentoo", "mariner", "OpenCloudOS", "openeuler", + "openmandriva", "photon", "suse", "TencentOS", "ubuntu", + "unknown"] or is_rhel %} # Other config here will be given to the distro class and/or path classes paths: @@ -365,8 +368,9 @@ system_info: {% endif %} {% if variant in ["debian", "ubuntu", "unknown"] %} ssh_svcname: ssh -{% elif variant in ["alpine", "amazon", "arch", "fedora", "gentoo", - "mariner", "OpenCloudOS", "openeuler", "openmandriva", - "photon", "suse", "TencentOS"] or is_rhel %} +{% elif variant in ["alpine", "amazon", "arch", "azurelinux", "fedora", + "gentoo", "mariner", "OpenCloudOS", "openeuler", + "openmandriva", "photon", "suse", "TencentOS"] + or is_rhel %} ssh_svcname: sshd {% endif %} diff --git a/doc-requirements.txt b/doc-requirements.txt index fccef782051..e6ec2e7280a 100644 --- a/doc-requirements.txt +++ b/doc-requirements.txt @@ -2,6 +2,7 @@ doc8 furo m2r2 pyyaml +setuptools sphinx==7.1.2 sphinx-design sphinx-copybutton diff --git a/doc/rtd/explanation/failure_states.rst b/doc/rtd/explanation/failure_states.rst index 0f1680c52e2..21a1c442231 100644 --- a/doc/rtd/explanation/failure_states.rst +++ b/doc/rtd/explanation/failure_states.rst @@ -46,6 +46,8 @@ module-level keys: ``init-local``, ``init``, ``modules-config``, See :ref:`this more detailed explanation` for to learn how to use cloud-init's exported errors. +.. _error_codes: + Cloud-init error codes ---------------------- diff --git a/doc/rtd/explanation/index.rst b/doc/rtd/explanation/index.rst index 0dd248c6db6..2d481ff212a 100644 --- a/doc/rtd/explanation/index.rst +++ b/doc/rtd/explanation/index.rst @@ -22,3 +22,4 @@ knowledge and become better at using and configuring ``cloud-init``. kernel-cmdline.rst failure_states.rst exported_errors.rst + return_codes.rst diff --git a/doc/rtd/explanation/return_codes.rst b/doc/rtd/explanation/return_codes.rst new file mode 100644 index 00000000000..31a0f40fb67 --- /dev/null +++ b/doc/rtd/explanation/return_codes.rst @@ -0,0 +1,150 @@ +.. _return_codes: + +Why did `cloud-init status` start returning exit code 2? +======================================================== + +Cloud-init introduced :ref:`a new error code` +in 23.4. This page describes the purpose of this change and +gives some context for why this change was made. + +.. _return_codes_history: + +Background +---------- + +Since cloud-init provides access to cloud instances, the +paradigm for handling errors was "log errors, but proceed". +Exiting on failure conditions doesn't make sense when that +may prevent one from accessing the system to debug it. + +Since cloud-init's behavior is heavily tied to specific cloud +platforms, reproducing cloud-init bugs without exactly +reproducing a specific cloud environment is often impossible, +and often requires guesswork. To make debugging cloud-init +possible without reproducing exactly, cloud-init logs are +quite verbose. + +.. _return_codes_pain_points: + +Pain points +----------- + +1) Invalid configurations were historically ignored. 
+2) Log verbosity is unfriendly to end users who may not know + what to look for. Verbose logs mean users often ignore real + errors. +3) Cloud-init's reported status was only capable of telling the user + whether cloud-init crashed. Cloud-init would report a status of + "done" in the following cases: + + * the user's configuration was invalid + * the operating system or cloud environment experienced some error that + prevented cloud-init from configuring the instance + * cloud-init internally experienced an error + +.. _return_codes_improvements: + +Efforts to improve cloud-init +----------------------------- + +Several changes have been introduced to cloud-init to address the pain +points described above. + +JSON schema +^^^^^^^^^^^ + +Cloud-init has defined a JSON schema which fully documents the user-data +cloud-config. This JSON schema may be used in several different ways: + +Text editor integration +""""""""""""""""""""""" + +Thanks to `yaml-language-server`_, cloud-init's JSON schema may be +used for YAML syntax checking, warnings when invalid keys are used, and +autocompletion. Several different text editors are capable of this. +See this `blog post on configuring this for neovim`_, or for VS Code one +can install the `extension`_ and then a file named ``cloud-config.yaml`` +will automatically use cloud-init's JSON schema. + + +Cloud-init schema subcommand +"""""""""""""""""""""""""""" + +The cloud-init package includes a cloud-init subcommand, +:ref:`cloud-init schema`, which uses the schema +to validate either the configuration passed to the instance you are +running the command on, or an arbitrary text file containing a +configuration. + +Return codes +^^^^^^^^^^^^ + +Cloud-init historically used two return codes from the +:code:`cloud-init status` subcommand: 0 to indicate success and 1 to indicate +failure. These return codes lacked nuance. Return code 0 (success) also +covered the in-between case in which something went wrong but cloud-init was +still able to finish. + +Many users of cloud-init run :code:`cloud-init status --wait` and expect that +when complete, cloud-init has finished. Since cloud-init is not guaranteed to +succeed, users should also check the return code of this command. + +As of 23.4, errors that do not crash cloud-init will have an exit code of 2. +An exit code of 1 means that cloud-init crashed, and an exit code of 0 now +more precisely means that cloud-init succeeded. Anyone who previously checked +for exit code 0 should probably update their assumptions in one of the +following two ways: + +Users that wish to take advantage of cloud-init's error reporting +capabilities should check for an exit code of 2 from :code:`cloud-init status`. +An example of this: + +..
code-block:: python + + from logging import getLogger + from json import loads + from subprocess import run + from sys import exit + + logger = getLogger(__name__) + completed = run( + ["cloud-init", "status", "--format", "json"], + capture_output=True, + text=True, + ) + output = loads(completed.stdout) + + if 2 == completed.returncode: + # something bad might have happened - we should check it out + logger.warning("cloud-init experienced a recoverable error") + logger.warning("status: %s", output.get("extended_status")) + logger.warning("recoverable error: %s", output.get("recoverable_errors")) + + elif 1 == completed.returncode: + # cloud-init completely failed + logger.error("cloud-init crashed, all bets are off!") + exit(1) + +Users that wish to ignore cloud-init's errors and check the return code in +a backwards-compatible way should check that the return code is not equal to +1. This will provide the same behavior before and after the changed exit code. +See an example of this: + +.. code-block:: python + + from logging import getLogger + from subprocess import run + from sys import exit + + logger = getLogger(__name__) + completed = run(["cloud-init", "status", "--format", "json"]) + + if 1 == completed.returncode: + # cloud-init completely failed + logger.error("cloud-init crashed, all bets are off!") + exit(1) + + # cloud-init might have failed, but this code ignores that possibility + # in preference of backwards compatibility + +See :ref:`our explanation of failure states` for more +information. + +.. _yaml-language-server: https://github.com/redhat-developer/yaml-language-server +.. _extension: https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml +.. _blog post on configuring this for neovim: https://phoenix-labs.xyz/blog/setup-neovim-cloud-init-completion/
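The two snippets above can also be folded into a single helper that distinguishes all three exit codes. This is a hedged sketch in the same spirit; the helper name and the list-form ``run`` invocation are illustrative, not part of cloud-init:

.. code-block:: python

    from json import loads
    from logging import getLogger
    from subprocess import run

    logger = getLogger(__name__)


    def check_cloud_init() -> int:
        """Return cloud-init's exit code, logging anything noteworthy."""
        completed = run(
            ["cloud-init", "status", "--format", "json"],
            capture_output=True,
            text=True,
        )
        if 1 == completed.returncode:
            # cloud-init completely failed
            logger.error("cloud-init crashed, all bets are off!")
        elif 2 == completed.returncode:
            # cloud-init finished, but hit recoverable errors
            output = loads(completed.stdout)
            logger.warning(
                "recoverable errors: %s", output.get("recoverable_errors")
            )
        return completed.returncode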
diff --git a/doc/rtd/reference/cli.rst b/doc/rtd/reference/cli.rst index 2488df5201b..441d415bd19 100644 --- a/doc/rtd/reference/cli.rst +++ b/doc/rtd/reference/cli.rst @@ -242,6 +242,10 @@ to semaphores in :file:`/var/lib/cloud/`. See :ref:`boot_stages` for more info. * :command:`--file` : Use additional yaml configuration files. +.. warning:: + ``--mode init`` is deprecated in 24.1 and scheduled to be removed in 29.1. + Use :command:`cloud-init init` instead. + .. _cli_query: :command:`query` ================= @@ -400,12 +404,14 @@ module default frequency of ``instance``: :command:`status` ================= -Report whether ``cloud-init`` is running, done, disabled or errored. Exits -non-zero if an error is detected in ``cloud-init``. +Report cloud-init's current status. + +Exits 1 if ``cloud-init`` crashed, 2 if ``cloud-init`` finished but experienced +recoverable errors, and 0 if ``cloud-init`` ran without error. * :command:`--long`: Detailed status information. * :command:`--wait`: Block until ``cloud-init`` completes. -* :command:`--format [yaml|json|tabular]`: Machine-readable JSON or YAML +* :command:`--format [yaml|json]`: Machine-readable JSON or YAML detailed output. The :command:`status` command can be used simply as follows: .. code-block:: $ cloud-init status Which shows whether ``cloud-init`` is currently running, done, disabled, or in -error, as in this example output: +error. Note that the ``extended_status`` key in ``--long`` or ``--format json`` +contains more accurate and complete status information. Example output: .. code-block:: @@ -432,19 +439,24 @@ Example output when ``cloud-init`` is running: .. code-block:: status: running - time: Fri, 26 Jan 2018 21:39:43 +0000 - detail: - Running in stage: init-local + extended_status: running + boot_status_code: enabled-by-generator + last_update: Wed, 13 Mar 2024 18:46:26 +0000 + detail: DataSourceLXD + errors: [] + recoverable_errors: {} Example output when ``cloud-init`` is done: .. code-block:: status: done + extended_status: done boot_status_code: enabled-by-generator - last_update: Tue, 16 Aug 2022 19:12:58 +0000 - detail: - DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net] + last_update: Wed, 13 Mar 2024 18:46:26 +0000 + detail: DataSourceLXD + errors: [] + recoverable_errors: {} The detailed output can be shown in machine-readable JSON or YAML with the :command:`format` option, for example: @@ -457,13 +469,40 @@ Which would produce the following example output: .. code-block:: - { - "boot_status_code": "enabled-by-generator", - "datasource": "nocloud", - "detail": "DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]", - "errors": [], - "last_update": "Tue, 16 Aug 2022 19:12:58 +0000", - "status": "done" - } + { + "boot_status_code": "enabled-by-generator", + "datasource": "lxd", + "detail": "DataSourceLXD", + "errors": [], + "extended_status": "done", + "init": { + "errors": [], + "finished": 1710355584.3603137, + "recoverable_errors": {}, + "start": 1710355584.2216876 + }, + "init-local": { + "errors": [], + "finished": 1710355582.279756, + "recoverable_errors": {}, + "start": 1710355582.2255273 + }, + "last_update": "Wed, 13 Mar 2024 18:46:26 +0000", + "modules-config": { + "errors": [], + "finished": 1710355585.5042186, + "recoverable_errors": {}, + "start": 1710355585.334438 + }, + "modules-final": { + "errors": [], + "finished": 1710355586.9038777, + "recoverable_errors": {}, + "start": 1710355586.8076844 + }, + "recoverable_errors": {}, + "stage": null, + "status": "done" + } .. _More details on machine-id: https://www.freedesktop.org/software/systemd/man/machine-id.html diff --git a/doc/rtd/reference/datasources/oracle.rst b/doc/rtd/reference/datasources/oracle.rst index 74bfb3e393e..05b93fbe28e 100644 --- a/doc/rtd/reference/datasources/oracle.rst +++ b/doc/rtd/reference/datasources/oracle.rst @@ -39,6 +39,18 @@ to configure the non-primary network interface controllers in the system. If set to True on an OCI Bare Metal Machine, it will have no effect (though this may change in the future). +``max_wait`` +------------ + +An integer, defaulting to 30. The maximum time in seconds to wait for the +metadata service to become available. If the metadata service is not +available within this time, the datasource will fail. + +``timeout`` +----------- + +An integer, defaulting to 5. The time in seconds to wait for a response from +the metadata service before retrying. + Example configuration --------------------- @@ -49,5 +61,7 @@ An example configuration with the default values is provided below: datasource: Oracle: configure_secondary_nics: false + max_wait: 30 + timeout: 5 .. _Oracle Compute Infrastructure: https://cloud.oracle.com/
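To make the new ``max_wait`` and ``timeout`` settings concrete: the Oracle datasource passes them to ``cloudinit.url_helper.wait_for_url``, trying the v2 metadata endpoint before falling back to v1, as in the ``read_opc_metadata`` hunk earlier. A rough sketch of that call shape follows; the exact URL pattern and the v2 Authorization header value are assumptions based on the datasource, not shown verbatim in this patch:

.. code-block:: python

    import json

    from cloudinit.url_helper import wait_for_url

    # Header cloud-init sends for /opc/v2 requests (assumed value).
    V2_HEADERS = {"Authorization": "Bearer Oracle"}

    url, contents = wait_for_url(
        urls=[
            "http://169.254.169.254/opc/v2/instance/",
            "http://169.254.169.254/opc/v1/instance/",
        ],
        max_wait=30,  # datasource config: Oracle.max_wait
        timeout=5,  # datasource config: Oracle.timeout
        headers_cb=lambda u: V2_HEADERS if "/opc/v2/" in u else None,
        sleep_time=0,  # retry immediately, as the datasource does
    )
    if url:
        instance_data = json.loads(contents.decode("utf-8"))

Whichever URL answers determines the metadata version used for the follow-up vnics request, and a falsy ``url`` means the deadline expired.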
diff --git a/doc/rtd/reference/network-config-format-v1.rst b/doc/rtd/reference/network-config-format-v1.rst index 42f2dc2265a..78e725fd2a8 100644 --- a/doc/rtd/reference/network-config-format-v1.rst +++ b/doc/rtd/reference/network-config-format-v1.rst @@ -82,6 +82,13 @@ be sent in a packet- or frame-based network. Specifying ``mtu`` is optional. configuration time. It's possible to specify a value too large or too small for a device, and it may be ignored by the device. +``accept-ra: <boolean>`` +^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``accept-ra`` key is a boolean value that specifies whether or not to +accept Router Advertisements (RA) for this interface. Specifying ``accept-ra`` +is optional. + Physical example ^^^^^^^^^^^^^^^^ @@ -296,6 +303,8 @@ Valid keys for ``subnets`` include the following: interface will be handled during boot. - ``address``: IPv4 or IPv6 address. It may include CIDR netmask notation. - ``netmask``: IPv4 subnet mask in dotted format or CIDR notation. +- ``broadcast``: IPv4 broadcast address in dotted format. This is + only rendered if :file:`/etc/network/interfaces` is used. - ``gateway``: IPv4 address of the default gateway for this subnet. - ``dns_nameservers``: Specify a list of IPv4 dns server IPs to end up in :file:`resolv.conf`. diff --git a/doc/rtd/reference/network-config.rst b/doc/rtd/reference/network-config.rst index d9e67cf7ea5..028f306f806 100644 --- a/doc/rtd/reference/network-config.rst +++ b/doc/rtd/reference/network-config.rst @@ -273,7 +273,7 @@ Example output: .. code-block:: usage: /usr/bin/cloud-init devel net-convert [-h] -p PATH -k {eni,network_data.json,yaml,azure-imds,vmware-imc} -d PATH -D - {alpine,arch,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler} + {alpine,arch,azurelinux,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler} [-m name,mac] [--debug] -O {eni,netplan,networkd,sysconfig,network-manager} options: @@ -284,7 +284,7 @@ Example output: The format of the given network config -d PATH, --directory PATH directory to place output in - -D {alpine,arch,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openeuler}, --distro {alpine,arch,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler} + -D {alpine,arch,azurelinux,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openeuler}, --distro {alpine,arch,azurelinux,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler} -m name,mac, --mac name,mac interface name to mac mapping --debug enable debug logging to stderr. diff --git a/packages/debian/control.in b/packages/debian/control.in index 30cf406b7bc..fb1cffc7151 100644 --- a/packages/debian/control.in +++ b/packages/debian/control.in @@ -12,7 +12,6 @@ Architecture: all Depends: ${misc:Depends}, ${python3:Depends}, iproute2, - isc-dhcp-client, python3-debconf Recommends: eatmydata, sudo, software-properties-common, gdisk Suggests: ssh-import-id, openssh-server diff --git a/templates/hosts.azurelinux.tmpl b/templates/hosts.azurelinux.tmpl new file mode 100644 index 00000000000..8e3c23f6f12 --- /dev/null +++ b/templates/hosts.azurelinux.tmpl @@ -0,0 +1,22 @@ +## template:jinja +{# +This file /etc/cloud/templates/hosts.azurelinux.tmpl is only utilized +if enabled in cloud-config.
Specifically, in order to enable it +you need to add the following to config: + manage_etc_hosts: True +-#} +# Your system has configured 'manage_etc_hosts' as True. +# As a result, if you wish for changes to this file to persist +# then you will need to either +# a.) make changes to the master file in /etc/cloud/templates/hosts.azurelinux.tmpl +# b.) change or remove the value of 'manage_etc_hosts' in +# /etc/cloud/cloud.cfg or cloud-config from user-data +# +# The following lines are desirable for IPv4 capable hosts +127.0.0.1 {{fqdn}} {{hostname}} +127.0.0.1 localhost.localdomain localhost +127.0.0.1 localhost4.localdomain4 localhost4 + +# The following lines are desirable for IPv6 capable hosts +::1 {{fqdn}} {{hostname}} +::1 localhost6.localdomain6 localhost6 diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 1b09cba1273..d0c3c07bd72 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -7,6 +7,7 @@ from tempfile import NamedTemporaryFile from typing import Union +from pycloudlib.gce.instance import GceInstance from pycloudlib.instance import BaseInstance from pycloudlib.result import Result @@ -67,7 +68,10 @@ def __init__( self._ip = "" def destroy(self): - self.instance.delete() + if isinstance(self.instance, GceInstance): + self.instance.delete(wait=False) + else: + self.instance.delete() def restart(self): """Restart this instance (via cloud mechanism) and wait for boot. diff --git a/tests/integration_tests/modules/test_apt_functionality.py b/tests/integration_tests/modules/test_apt_functionality.py index b69c3445a41..06c6ef28ef7 100644 --- a/tests/integration_tests/modules/test_apt_functionality.py +++ b/tests/integration_tests/modules/test_apt_functionality.py @@ -5,7 +5,6 @@ import pytest -from cloudinit import gpg from cloudinit.config import cc_apt_configure from cloudinit.util import is_true from tests.integration_tests.clouds import IntegrationCloud @@ -19,6 +18,7 @@ DEB822_SOURCES_FILE = "/etc/apt/sources.list.d/ubuntu.sources" ORIG_SOURCES_FILE = "/etc/apt/sources.list" +GET_TEMPDIR = "python3 -c 'import tempfile;print(tempfile.mkdtemp());'" USER_DATA = """\ #cloud-config @@ -136,15 +136,26 @@ def get_keys(self, class_client: IntegrationInstance): """Return all keys in /etc/apt/trusted.gpg.d/ and /etc/apt/trusted.gpg in human readable format. 
Mimics the output of apt-key finger """ - list_cmd = " ".join(gpg.GPG_LIST) + " " + class_client.execute("mkdir /root/tmpdir && chmod 700 /root/tmpdir") + GPG_LIST = [ + "gpg", + "--no-options", + "--with-fingerprint", + "--homedir /root/tmpdir", + "--no-default-keyring", + "--list-keys", + "--keyring", + ] + + list_cmd = " ".join(GPG_LIST) + " " keys = class_client.execute(list_cmd + cc_apt_configure.APT_LOCAL_KEYS) - print(keys) files = class_client.execute( "ls " + cc_apt_configure.APT_TRUSTED_GPG_DIR ) for file in files.split(): path = cc_apt_configure.APT_TRUSTED_GPG_DIR + file keys += class_client.execute(list_cmd + path) or "" + class_client.execute("gpgconf --homedir /root/tmpdir --kill all") return keys def test_sources_list(self, class_client: IntegrationInstance): @@ -203,8 +214,10 @@ def test_signed_by(self, class_client: IntegrationInstance): ) assert path_contents == source + temp = class_client.execute(GET_TEMPDIR) key = class_client.execute( - "gpg --no-default-keyring --with-fingerprint --list-keys " + f"gpg --no-options --homedir {temp} --no-default-keyring " + "--with-fingerprint --list-keys " "--keyring /etc/apt/cloud-init.gpg.d/test_signed_by.gpg" )
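The rewritten key-listing helper above amounts to a reusable recipe: point GnuPG at a throwaway home directory so listing keys can neither create nor touch the default keyring, then kill whatever daemons it spawned. A minimal local sketch of the same steps; the keyring path here is illustrative:

.. code-block:: python

    import subprocess
    import tempfile

    with tempfile.TemporaryDirectory() as homedir:
        # List keys from an explicit keyring using a private GnuPG home.
        subprocess.run(
            [
                "gpg",
                "--no-options",
                "--homedir",
                homedir,
                "--no-default-keyring",
                "--with-fingerprint",
                "--list-keys",
                "--keyring",
                "/etc/apt/trusted.gpg",  # illustrative keyring path
            ],
            check=False,
        )
        # Stop any gpg-agent/keyboxd/dirmngr spawned under this homedir.
        subprocess.run(
            ["gpgconf", "--homedir", homedir, "--kill", "all"],
            check=False,
        )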
diff --git a/tests/integration_tests/modules/test_cli.py b/tests/integration_tests/modules/test_cli.py index 81fbf1248f8..eecb72644c0 100644 --- a/tests/integration_tests/modules/test_cli.py +++ b/tests/integration_tests/modules/test_cli.py @@ -20,6 +20,14 @@ - echo 'hi' > /var/tmp/test """ +FAILING_USER_DATA = """\ +#cloud-config +bootcmd: + - exit 1 +runcmd: + - exit 1 +""" + # The '-' in 'hashed-password' fails schema validation INVALID_USER_DATA_SCHEMA = """\ #cloud-config @@ -36,19 +44,26 @@ @pytest.mark.user_data(VALID_USER_DATA) -def test_valid_userdata(client: IntegrationInstance): - """Test `cloud-init schema` with valid userdata. +class TestValidUserData: + def test_schema_status(self, class_client: IntegrationInstance): + """Test `cloud-init schema` with valid userdata. - PR #575 - """ - result = client.execute("cloud-init schema --system") - assert result.ok - assert "Valid schema user-data" in result.stdout.strip() - result = client.execute("cloud-init status --long") - assert 0 == result.return_code, ( - f"Unexpected exit {result.return_code} from cloud-init status:" - f" {result}" - ) + PR #575 + """ + result = class_client.execute("cloud-init schema --system") + assert result.ok + assert "Valid schema user-data" in result.stdout.strip() + result = class_client.execute("cloud-init status --long") + assert 0 == result.return_code, ( + f"Unexpected exit {result.return_code} from cloud-init status:" + f" {result}" + ) + + def test_modules_init(self, class_client: IntegrationInstance): + for mode in ("init", "config", "final"): + result = class_client.execute(f"cloud-init modules --mode {mode}") + assert result.ok + assert f"'modules:{mode}'" in result.stdout.strip() @pytest.mark.skipif( @@ -98,3 +113,12 @@ def test_invalid_userdata_schema(client: IntegrationInstance): ) assert warning in log assert "asdfasdf" not in log + + +@pytest.mark.user_data(FAILING_USER_DATA) +def test_failing_userdata_modules_exit_codes(client: IntegrationInstance): + """Test that failures in modules are represented in exit status""" + for mode in ("init", "config", "final"): + result = client.execute(f"cloud-init modules --mode {mode}") + assert result.failed if mode == "init" else result.ok + assert f"'modules:{mode}'" in result.stdout.strip() diff --git a/tests/integration_tests/modules/test_ubuntu_pro.py b/tests/integration_tests/modules/test_ubuntu_pro.py index 3127d17339b..c26ea699c99 100644 --- a/tests/integration_tests/modules/test_ubuntu_pro.py +++ b/tests/integration_tests/modules/test_ubuntu_pro.py @@ -144,6 +144,19 @@ def test_valid_token(self, client: IntegrationInstance): log = client.read_from_file("/var/log/cloud-init.log") verify_clean_log(log) assert is_attached(client) + client.execute("pro detach") + # Replace ubuntu_pro with previously named ubuntu_advantage + client.execute( + "sed -i 's/ubuntu_pro$/ubuntu_advantage/' /etc/cloud/cloud.cfg" + ) + client.restart() + status_resp = client.execute("cloud-init status --format json") + status = json.loads(status_resp.stdout) + assert ( + "Module has been renamed from cc_ubuntu_advantage to cc_ubuntu_pro" + in "\n".join(status["recoverable_errors"]["DEPRECATED"]) + ) + assert is_attached(client) @pytest.mark.user_data(ATTACH.format(token=CLOUD_INIT_UA_TOKEN)) def test_idempotency(self, client: IntegrationInstance): diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index 79e7620bb48..8ee3631d0b6 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -70,9 +70,6 @@ def verify_clean_log(log: str, ignore_deprecations: bool = True): # Ubuntu lxd storage "thinpool by default on Ubuntu due to LP #1982780", "WARNING]: Could not match supplied host pattern, ignoring:", - # Old Ubuntu cloud-images contain /etc/apt/sources.list - "WARNING]: Replacing /etc/apt/sources.list to favor deb822 source" - " format", # https://bugs.launchpad.net/ubuntu/+source/netplan.io/+bug/2041727 "Cannot call Open vSwitch: ovsdb-server.service is not running.", ] diff --git a/tests/unittests/cmd/devel/test_net_convert.py b/tests/unittests/cmd/devel/test_net_convert.py index fb72963f842..be2fcdd6543 100644 --- a/tests/unittests/cmd/devel/test_net_convert.py +++ b/tests/unittests/cmd/devel/test_net_convert.py @@ -90,6 +90,18 @@ """ +@pytest.fixture +def mock_setup_logging(): + """Mock
setup_basic_logging to avoid changing log level. + + net_convert.handle_args() can call setup_basic_logging() with a + WARNING level, which would be a side-effect for future tests. + Its behavior isn't checked in these tests, so mock it out. + """ + with mock.patch(f"{M_PATH}log.setup_basic_logging"): + yield + + class TestNetConvert: missing_required_args = itertools.combinations( @@ -155,7 +167,13 @@ def test_argparse_error_on_missing_args(self, cmdargs, capsys, tmpdir): ), ) def test_convert_output_kind_artifacts( - self, output_kind, outfile_content, debug, capsys, tmpdir + self, + output_kind, + outfile_content, + debug, + capsys, + tmpdir, + mock_setup_logging, ): """Assert proper output-kind artifacts are written.""" network_data = tmpdir.join("network_data") @@ -186,7 +204,9 @@ def test_convert_output_kind_artifacts( ] == chown.call_args_list @pytest.mark.parametrize("debug", (False, True)) - def test_convert_netplan_passthrough(self, debug, tmpdir): + def test_convert_netplan_passthrough( + self, debug, tmpdir, mock_setup_logging + ): """Assert that if the network config's version is 2 and the renderer is Netplan, then the config is passed through as-is. """ diff --git a/tests/unittests/cmd/test_main.py b/tests/unittests/cmd/test_main.py index 2a9e063fe4d..7f580203e82 100644 --- a/tests/unittests/cmd/test_main.py +++ b/tests/unittests/cmd/test_main.py @@ -54,6 +54,18 @@ def setUp(self): self.patchUtils(self.new_root) self.stderr = StringIO() self.patchStdoutAndStderr(stderr=self.stderr) + # Every cc_ module calls get_meta_doc on import. + # This call will fail if filesystem redirection mocks are in place + # and the module hasn't already been imported, which can depend + # on test ordering. + self.m_doc = mock.patch( + "cloudinit.config.schema.get_meta_doc", return_value={} + ) + self.m_doc.start() + + def tearDown(self): + self.m_doc.stop() + super().tearDown() def test_main_init_run_net_runs_modules(self): """Modules like write_files are run in 'net' mode.""" diff --git a/tests/unittests/config/test_apt_configure_sources_list_v1.py b/tests/unittests/config/test_apt_configure_sources_list_v1.py index 96c0a0dfa0b..76f37f2e098 100644 --- a/tests/unittests/config/test_apt_configure_sources_list_v1.py +++ b/tests/unittests/config/test_apt_configure_sources_list_v1.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
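An aside on the test_main.py hunk above: it starts a patcher in setUp() and stops it in tearDown(). A minimal sketch of the same idea using addCleanup(), which guarantees the patch is undone even when a later setUp() step raises; the ExampleTest name is illustrative only, not part of the change:

import unittest
from unittest import mock


class ExampleTest(unittest.TestCase):
    def setUp(self):
        # Patch get_meta_doc for the lifetime of each test...
        patcher = mock.patch(
            "cloudinit.config.schema.get_meta_doc", return_value={}
        )
        self.m_doc = patcher.start()
        # ...and register the undo instead of overriding tearDown().
        self.addCleanup(patcher.stop)

Either form works; addCleanup() simply removes the need for a matching tearDown() override.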
+# pylint: disable=attribute-defined-outside-init """ test_handler_apt_configure_sources_list Test templating of sources list @@ -9,6 +10,7 @@ from cloudinit import subp, util from cloudinit.config import cc_apt_configure +from cloudinit.subp import SubpResult from tests.unittests.util import get_cloud EXAMPLE_TMPL = """\ @@ -86,7 +88,7 @@ class TestAptSourceConfigSourceList: @pytest.fixture(autouse=True) def common_mocks(self, mocker): self.subp = mocker.patch.object( - subp, "subp", return_value=("PPID PID", "") + subp, "subp", return_value=SubpResult("PPID PID", "") ) mocker.patch("cloudinit.config.cc_apt_configure._ensure_dependencies") lsb = mocker.patch("cloudinit.util.lsb_release") @@ -160,22 +162,6 @@ def test_apt_v1_source_list_by_distro(self, distro, mirror, tmpdir): ) assert 0o644 == stat.S_IMODE(sources_file.stat().mode) - self.subp.assert_called_once_with( - [ - "ps", - "-o", - "ppid,pid", - "-C", - "keyboxd", - "-C", - "dirmngr", - "-C", - "gpg-agent", - ], - capture=True, - rcs=[0, 1], - ) - @staticmethod def myresolve(name): """Fake util.is_resolvable for mirrorfail tests""" @@ -229,21 +215,6 @@ def test_apt_v1_srcl_distro_mirrorfail( mockresolve.assert_any_call("http://does.not.exist") mockresolve.assert_any_call(mirrorcheck) - self.subp.assert_called_once_with( - [ - "ps", - "-o", - "ppid,pid", - "-C", - "keyboxd", - "-C", - "dirmngr", - "-C", - "gpg-agent", - ], - capture=True, - rcs=[0, 1], - ) @pytest.mark.parametrize( "deb822,cfg,apt_file,expected", @@ -301,18 +272,3 @@ def test_apt_v1_srcl_custom( sources_file = tmpdir.join(apt_file) assert expected == sources_file.read() assert 0o644 == stat.S_IMODE(sources_file.stat().mode) - self.subp.assert_called_once_with( - [ - "ps", - "-o", - "ppid,pid", - "-C", - "keyboxd", - "-C", - "dirmngr", - "-C", - "gpg-agent", - ], - capture=True, - rcs=[0, 1], - ) diff --git a/tests/unittests/config/test_apt_configure_sources_list_v3.py b/tests/unittests/config/test_apt_configure_sources_list_v3.py index 3770b26c757..3ae086cd076 100644 --- a/tests/unittests/config/test_apt_configure_sources_list_v3.py +++ b/tests/unittests/config/test_apt_configure_sources_list_v3.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
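A note on the SubpResult substitutions in the hunks above and below: a bare 2-tuple only supports index access, while code under test may read the result's attributes. A small sketch, assuming SubpResult is the stdout/stderr named tuple exported by cloudinit.subp:

from unittest import mock

from cloudinit.subp import SubpResult

m_subp = mock.Mock(return_value=SubpResult("PPID PID", ""))
result = m_subp(["ps", "-o", "ppid,pid"])
assert result.stdout == "PPID PID"  # attribute access now works
assert result[1] == ""  # index access keeps working for tuple-style callers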
+# pylint: disable=attribute-defined-outside-init """ test_apt_custom_sources_list Test templating of custom sources list @@ -10,6 +11,7 @@ from cloudinit import subp, util from cloudinit.config import cc_apt_configure from cloudinit.distros.debian import Distro +from cloudinit.subp import SubpResult from tests.unittests.util import get_cloud TARGET = "/" @@ -158,7 +160,7 @@ def common_mocks(self, mocker): self.subp = mocker.patch.object( subp, "subp", - return_value=("PPID PID", ""), + return_value=SubpResult("PPID PID", ""), ) lsb = mocker.patch("cloudinit.util.lsb_release") lsb.return_value = {"codename": "fakerel"} @@ -193,7 +195,7 @@ def test_apt_v3_empty_cfg_source_list_by_distro( mock_shouldcfg = mocker.patch.object( cc_apt_configure, "_should_configure_on_empty_apt", - return_value=(True, "test"), + return_value=SubpResult(True, "test"), ) cc_apt_configure.handle("test", {"apt": {}}, mycloud, None) @@ -330,18 +332,3 @@ def test_apt_v3_srcl_custom_deb822_feature_aware( sources_file = tmpdir.join(apt_file) assert expected == sources_file.read() assert 0o644 == stat.S_IMODE(sources_file.stat().mode) - self.subp.assert_called_once_with( - [ - "ps", - "-o", - "ppid,pid", - "-C", - "keyboxd", - "-C", - "dirmngr", - "-C", - "gpg-agent", - ], - capture=True, - rcs=[0, 1], - ) diff --git a/tests/unittests/config/test_apt_key.py b/tests/unittests/config/test_apt_key.py index bbffe7d2299..4c6d613f3ee 100644 --- a/tests/unittests/config/test_apt_key.py +++ b/tests/unittests/config/test_apt_key.py @@ -41,85 +41,104 @@ class TestAptKey: @mock.patch.object(subp, "subp", return_value=SubpResult("fakekey", "")) @mock.patch.object(util, "write_file") - def _apt_key_add_success_helper(self, directory, *args, hardened=False): + def _apt_key_add_success_helper( + self, directory, gpg, *args, hardened=False + ): file = cc_apt_configure.apt_key( - "add", output_file="my-key", data="fakekey", hardened=hardened + "add", + gpg=gpg, + output_file="my-key", + data="fakekey", + hardened=hardened, ) assert file == directory + "/my-key.gpg" - def test_apt_key_add_success(self): + def test_apt_key_add_success(self, m_gpg): """Verify the right directory path gets returned for unhardened case""" - self._apt_key_add_success_helper("/etc/apt/trusted.gpg.d") + self._apt_key_add_success_helper("/etc/apt/trusted.gpg.d", m_gpg) - def test_apt_key_add_success_hardened(self): + def test_apt_key_add_success_hardened(self, m_gpg): """Verify the right directory path gets returned for hardened case""" self._apt_key_add_success_helper( - "/etc/apt/cloud-init.gpg.d", hardened=True + "/etc/apt/cloud-init.gpg.d", m_gpg, hardened=True ) - def test_apt_key_add_fail_no_file_name(self): + def test_apt_key_add_fail_no_file_name(self, m_gpg): """Verify that null filename gets handled correctly""" - file = cc_apt_configure.apt_key("add", output_file=None, data="") + file = cc_apt_configure.apt_key( + "add", gpg=m_gpg, output_file=None, data="" + ) assert "/dev/null" == file - def _apt_key_fail_helper(self): + def _apt_key_fail_helper(self, m_gpg): file = cc_apt_configure.apt_key( - "add", output_file="my-key", data="fakekey" + "add", gpg=m_gpg, output_file="my-key", data="fakekey" ) assert file == "/dev/null" - @mock.patch.object(subp, "subp", side_effect=subp.ProcessExecutionError) - def test_apt_key_add_fail_no_file_name_subproc(self, *args): + def test_apt_key_add_fail_no_file_name_subproc(self, m_gpg): """Verify that bad key value gets handled correctly""" - self._apt_key_fail_helper() + m_gpg.dearmor = 
mock.Mock(side_effect=subp.ProcessExecutionError) + self._apt_key_fail_helper(m_gpg) - @mock.patch.object( - subp, "subp", side_effect=UnicodeDecodeError("test", b"", 1, 1, "") - ) - def test_apt_key_add_fail_no_file_name_unicode(self, *args): + def test_apt_key_add_fail_no_file_name_unicode(self, m_gpg): """Verify that bad key encoding gets handled correctly""" - self._apt_key_fail_helper() + m_gpg.dearmor = mock.Mock( + side_effect=UnicodeDecodeError("test", b"", 1, 1, "") + ) + self._apt_key_fail_helper(m_gpg) - def _apt_key_list_success_helper(self, finger, key, human_output=True): + def _apt_key_list_success_helper( + self, finger, key, gpg, human_output=True + ): @mock.patch.object(os, "listdir", return_value=("/fake/dir/key.gpg",)) @mock.patch.object(subp, "subp", return_value=(key, "")) def mocked_list(*a): - keys = cc_apt_configure.apt_key("list", human_output) + keys = cc_apt_configure.apt_key("list", gpg, human_output) assert finger in keys mocked_list() - def test_apt_key_list_success_human(self): + def test_apt_key_list_success_human(self, m_gpg): """Verify expected key output, human""" + m_gpg.list_keys = mock.Mock( + return_value="3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85" + ) self._apt_key_list_success_helper( - TEST_KEY_FINGERPRINT_HUMAN, TEST_KEY_HUMAN + TEST_KEY_FINGERPRINT_HUMAN, TEST_KEY_HUMAN, m_gpg ) - def test_apt_key_list_success_machine(self): + def test_apt_key_list_success_machine(self, m_gpg): """Verify expected key output, machine""" + m_gpg.list_keys = mock.Mock( + return_value="3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85" + ) self._apt_key_list_success_helper( - TEST_KEY_FINGERPRINT_MACHINE, TEST_KEY_MACHINE, human_output=False + TEST_KEY_FINGERPRINT_MACHINE, + TEST_KEY_MACHINE, + m_gpg, + human_output=False, ) - @mock.patch.object(os, "listdir", return_value=()) - @mock.patch.object(subp, "subp", return_value=("", "")) - def test_apt_key_list_fail_no_keys(self, *args): + @mock.patch.object(cc_apt_configure.os, "listdir", return_value=()) + @mock.patch.object(cc_apt_configure.os.path, "isfile", return_value=False) + def test_apt_key_list_fail_no_keys(self, m_listdir, m_gpg): """Ensure falsy output for no keys""" - keys = cc_apt_configure.apt_key("list") + keys = cc_apt_configure.apt_key("list", m_gpg) assert not keys - @mock.patch.object(os, "listdir", return_value="file_not_gpg_key.txt") - @mock.patch.object(subp, "subp", return_value=("", "")) - def test_apt_key_list_fail_no_keys_file(self, *args): + @mock.patch.object(os, "listdir", return_value=["file_not_gpg_key.txt"]) + def test_apt_key_list_fail_no_keys_file(self, m_listdir, m_gpg, *args): """Ensure non-gpg file is not returned. 
apt-key used file extensions for this, so we do too """ - assert not cc_apt_configure.apt_key("list") + assert "file_not_gpg_key.txt" not in cc_apt_configure.apt_key( + "list", m_gpg + ) - @mock.patch.object(subp, "subp", side_effect=subp.ProcessExecutionError) - @mock.patch.object(os, "listdir", return_value="bad_gpg_key.gpg") - def test_apt_key_list_fail_bad_key_file(self, *args): + def test_apt_key_list_fail_bad_key_file(self, m_gpg): """Ensure bad gpg key doesn't throw an exception.""" - assert not cc_apt_configure.apt_key("list") + m_gpg.list_keys = mock.Mock(side_effect=subp.ProcessExecutionError) + assert not cc_apt_configure.apt_key("list", m_gpg) diff --git a/tests/unittests/config/test_apt_source_v1.py b/tests/unittests/config/test_apt_source_v1.py index aa00e245241..5ae83c84f5a 100644 --- a/tests/unittests/config/test_apt_source_v1.py +++ b/tests/unittests/config/test_apt_source_v1.py @@ -8,15 +8,16 @@ import os import pathlib import re -import signal from functools import partial +from textwrap import dedent from unittest import mock from unittest.mock import call import pytest -from cloudinit import gpg, subp, util +from cloudinit import subp, util from cloudinit.config import cc_apt_configure +from cloudinit.subp import SubpResult from tests.unittests.util import get_cloud original_join = os.path.join @@ -76,7 +77,9 @@ def common_mocks(self, mocker): mocker.patch( "cloudinit.util.get_dpkg_architecture", return_value="amd64" ) - mocker.patch.object(subp, "subp", return_value=("PPID PID", "")) + mocker.patch.object( + subp, "subp", return_value=SubpResult("PPID PID", "") + ) mocker.patch("cloudinit.config.cc_apt_configure._ensure_dependencies") def _get_default_params(self): @@ -109,13 +112,15 @@ def myjoin(self, tmpfile, *args, **kwargs): else: return original_join(*args, **kwargs) - def apt_src_basic(self, filename, cfg): + def apt_src_basic(self, filename, cfg, gpg): """apt_src_basic Test Fix deb source string, has to overwrite mirror conf in params """ cfg = self.wrapv1conf(cfg) - cc_apt_configure.handle("test", cfg, get_cloud(), []) + with mock.patch.object(cc_apt_configure, "GPG") as my_gpg: + my_gpg.return_value = gpg + cc_apt_configure.handle("test", cfg, get_cloud(), []) assert os.path.isfile(filename) @@ -132,7 +137,7 @@ def apt_src_basic(self, filename, cfg): flags=re.IGNORECASE, ) - def test_apt_src_basic(self, apt_lists): + def test_apt_src_basic(self, apt_lists, m_gpg): """Test deb source string, overwrite mirror and filename""" cfg = { "source": ( "deb http://archive.ubuntu.com/ubuntu" " karmic-backports" " main universe multiverse restricted" ), "filename": apt_lists[0], } - self.apt_src_basic(apt_lists[0], [cfg]) + self.apt_src_basic(apt_lists[0], [cfg], m_gpg) - def test_apt_src_basic_dict(self, apt_lists): + def test_apt_src_basic_dict(self, apt_lists, m_gpg): """Test deb source string, overwrite mirror and filename (dict)""" cfg = { apt_lists[0]: { "source": ( "deb http://archive.ubuntu.com/ubuntu" " karmic-backports" " main universe multiverse restricted" ) } } - self.apt_src_basic(apt_lists[0], cfg) + self.apt_src_basic(apt_lists[0], cfg, m_gpg) - def apt_src_basic_tri(self, cfg, apt_lists): + def apt_src_basic_tri(self, cfg, apt_lists, m_gpg): """apt_src_basic_tri Test Fix three deb source string, has to overwrite mirror conf in params. Test with filenames provided in config. 
generic part to check three files with different content """ - self.apt_src_basic(apt_lists[0], cfg) + self.apt_src_basic(apt_lists[0], cfg, m_gpg) # extra verify on two extra files of this test contents = util.load_text_file(apt_lists[1]) @@ -191,7 +196,7 @@ def apt_src_basic_tri(self, cfg, apt_lists): flags=re.IGNORECASE, ) - def test_apt_src_basic_tri(self, apt_lists): + def test_apt_src_basic_tri(self, apt_lists, m_gpg): """Test Fix three deb source string with filenames""" cfg1 = { "source": ( @@ -217,9 +222,9 @@ def test_apt_src_basic_tri(self, apt_lists): ), "filename": apt_lists[2], } - self.apt_src_basic_tri([cfg1, cfg2, cfg3], apt_lists) + self.apt_src_basic_tri([cfg1, cfg2, cfg3], apt_lists, m_gpg) - def test_apt_src_basic_dict_tri(self, apt_lists): + def test_apt_src_basic_dict_tri(self, apt_lists, m_gpg): """Test Fix three deb source string with filenames (dict)""" cfg = { apt_lists[0]: { @@ -244,9 +249,9 @@ def test_apt_src_basic_dict_tri(self, apt_lists): ) }, } - self.apt_src_basic_tri(cfg, apt_lists) + self.apt_src_basic_tri(cfg, apt_lists, m_gpg) - def test_apt_src_basic_nofn(self, fallback_path, tmpdir): + def test_apt_src_basic_nofn(self, fallback_path, tmpdir, m_gpg): """Test Fix three deb source string without filenames (dict)""" cfg = { "source": ( @@ -258,7 +263,7 @@ def test_apt_src_basic_nofn(self, fallback_path, tmpdir): with mock.patch.object( os.path, "join", side_effect=partial(self.myjoin, tmpdir) ): - self.apt_src_basic(fallback_path, [cfg]) + self.apt_src_basic(fallback_path, [cfg], m_gpg) def apt_src_replacement(self, filename, cfg): """apt_src_replace @@ -347,14 +352,19 @@ def test_apt_src_replace_nofn(self, fallback_path, tmpdir): ): self.apt_src_replacement(fallback_path, [cfg]) - def apt_src_keyid(self, filename, cfg, keynum): + def apt_src_keyid(self, filename, cfg, keynum, gpg): """apt_src_keyid Test specification of a source + keyid """ cfg = self.wrapv1conf(cfg) cloud = get_cloud() - with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj: + with mock.patch.object( + cc_apt_configure, "GPG" + ) as this_gpg, mock.patch.object( + cc_apt_configure, "add_apt_key" + ) as mockobj: + this_gpg.return_value = gpg cc_apt_configure.handle("test", cfg, cloud, []) # check if it added the right number of keys @@ -362,7 +372,7 @@ def apt_src_keyid(self, filename, cfg, keynum): sources = cfg["apt"]["sources"] for src in sources: print(sources[src]) - calls.append(call(sources[src], cloud)) + calls.append(call(sources[src], cloud, gpg)) mockobj.assert_has_calls(calls, any_order=True) @@ -381,7 +391,7 @@ def apt_src_keyid(self, filename, cfg, keynum): flags=re.IGNORECASE, ) - def test_apt_src_keyid(self, apt_lists): + def test_apt_src_keyid(self, apt_lists, m_gpg): """Test specification of a source + keyid with filename being set""" cfg = { "source": ( @@ -393,9 +403,9 @@ def test_apt_src_keyid(self, apt_lists): "keyid": "03683F77", "filename": apt_lists[0], } - self.apt_src_keyid(apt_lists[0], [cfg], 1) + self.apt_src_keyid(apt_lists[0], [cfg], 1, m_gpg) - def test_apt_src_keyid_tri(self, apt_lists): + def test_apt_src_keyid_tri(self, apt_lists, m_gpg): """Test 3x specification of a source + keyid with filename being set""" cfg1 = { "source": ( @@ -428,7 +438,7 @@ def test_apt_src_keyid_tri(self, apt_lists): "filename": apt_lists[2], } - self.apt_src_keyid(apt_lists[0], [cfg1, cfg2, cfg3], 3) + self.apt_src_keyid(apt_lists[0], [cfg1, cfg2, cfg3], 3, m_gpg) contents = util.load_text_file(apt_lists[1]) assert re.search( r"%s %s %s %s\n" @@ -454,7 +464,7 @@ 
def test_apt_src_keyid_tri(self, apt_lists): flags=re.IGNORECASE, ) - def test_apt_src_keyid_nofn(self, fallback_path, tmpdir): + def test_apt_src_keyid_nofn(self, fallback_path, tmpdir, m_gpg): """Test specification of a source + keyid without filename being set""" cfg = { "source": ( @@ -468,24 +478,28 @@ def test_apt_src_keyid_nofn(self, fallback_path, tmpdir): with mock.patch.object( os.path, "join", side_effect=partial(self.myjoin, tmpdir) ): - self.apt_src_keyid(fallback_path, [cfg], 1) + self.apt_src_keyid(fallback_path, [cfg], 1, m_gpg) - def apt_src_key(self, filename, cfg): + def apt_src_key(self, filename, cfg, gpg): """apt_src_key Test specification of a source + key """ cfg = self.wrapv1conf([cfg]) cloud = get_cloud() - with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj: + with mock.patch.object( + cc_apt_configure, "GPG" + ) as this_gpg, mock.patch.object( + cc_apt_configure, "add_apt_key" + ) as mockobj: + this_gpg.return_value = gpg cc_apt_configure.handle("test", cfg, cloud, []) # check if it added the right amount of keys sources = cfg["apt"]["sources"] calls = [] for src in sources: - print(sources[src]) - calls.append(call(sources[src], cloud)) + calls.append(call(sources[src], cloud, gpg)) mockobj.assert_has_calls(calls, any_order=True) @@ -504,7 +518,7 @@ def apt_src_key(self, filename, cfg): flags=re.IGNORECASE, ) - def test_apt_src_key(self, apt_lists): + def test_apt_src_key(self, apt_lists, m_gpg): """Test specification of a source + key with filename being set""" cfg = { "source": ( @@ -516,9 +530,9 @@ def test_apt_src_key(self, apt_lists): "key": "fakekey 4321", "filename": apt_lists[0], } - self.apt_src_key(apt_lists[0], cfg) + self.apt_src_key(apt_lists[0], cfg, m_gpg) - def test_apt_src_key_nofn(self, fallback_path, tmpdir): + def test_apt_src_key_nofn(self, fallback_path, tmpdir, m_gpg): """Test specification of a source + key without filename being set""" cfg = { "source": ( @@ -532,18 +546,22 @@ def test_apt_src_key_nofn(self, fallback_path, tmpdir): with mock.patch.object( os.path, "join", side_effect=partial(self.myjoin, tmpdir) ): - self.apt_src_key(fallback_path, cfg) + self.apt_src_key(fallback_path, cfg, m_gpg) - def test_apt_src_keyonly(self, apt_lists): + def test_apt_src_keyonly(self, apt_lists, m_gpg): """Test specifying key without source""" cfg = {"key": "fakekey 4242", "filename": apt_lists[0]} cfg = self.wrapv1conf([cfg]) - with mock.patch.object(cc_apt_configure, "apt_key") as mockobj: + with mock.patch.object( + cc_apt_configure, "GPG" + ) as gpg, mock.patch.object(cc_apt_configure, "apt_key") as mockobj: + gpg.return_value = m_gpg cc_apt_configure.handle("test", cfg, get_cloud(), []) calls = ( call( "add", + m_gpg, output_file=pathlib.Path(apt_lists[0]).stem, data="fakekey 4242", hardened=False, @@ -554,88 +572,93 @@ def test_apt_src_keyonly(self, apt_lists): # filename should be ignored on key only assert not os.path.isfile(apt_lists[0]) - def test_apt_src_keyidonly(self, apt_lists): + def test_apt_src_keyidonly(self, apt_lists, m_gpg): """Test specification of a keyid without source""" cfg = {"keyid": "03683F77", "filename": apt_lists[0]} cfg = self.wrapv1conf([cfg]) - SAMPLE_GPG_AGENT_DIRMNGR_PIDS = """\ - PPID PID - 1 1057 - 1 1095 - 1511 2493 - 1511 2509 -""" + m_gpg.getkeybyid = mock.Mock(return_value="fakekey 1212") + SAMPLE_GPG_AGENT_DIRMNGR_PIDS = dedent( + """\ + PPID PID + 1 1057 + 1 1095 + 1511 2493 + 1511 2509 + """ + ) with mock.patch.object( subp, "subp", side_effect=[ - ("fakekey 1212", ""), - 
(SAMPLE_GPG_AGENT_DIRMNGR_PIDS, ""), + SubpResult("fakekey 1212", ""), + SubpResult(SAMPLE_GPG_AGENT_DIRMNGR_PIDS, ""), ], - ): - with mock.patch.object(cc_apt_configure, "apt_key") as mockobj: - with mock.patch.object(cc_apt_configure.os, "kill") as m_kill: - cc_apt_configure.handle("test", cfg, get_cloud(), []) + ), mock.patch.object( + cc_apt_configure, "GPG" + ) as gpg, mock.patch.object( + cc_apt_configure, "apt_key" + ) as mockobj: + gpg.return_value = m_gpg + cc_apt_configure.handle("test", cfg, get_cloud(), []) calls = ( call( "add", + m_gpg, output_file=pathlib.Path(apt_lists[0]).stem, data="fakekey 1212", hardened=False, ), ) mockobj.assert_has_calls(calls, any_order=True) - assert ( - [call(1057, signal.SIGKILL), call(1095, signal.SIGKILL)] - ) == m_kill.call_args_list # filename should be ignored on key only assert not os.path.isfile(apt_lists[0]) def apt_src_keyid_real( - self, apt_lists, cfg, expectedkey, is_hardened=None + self, apt_lists, cfg, expectedkey, gpg, is_hardened=None ): """apt_src_keyid_real Test specification of a keyid without source including up to addition of the key (add_apt_key_raw mocked to keep the environment as is) """ - key = cfg["keyid"] - keyserver = cfg.get("keyserver", "keyserver.ubuntu.com") cfg = self.wrapv1conf([cfg]) + gpg.getkeybyid = mock.Mock(return_value=expectedkey) with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockkey: - with mock.patch.object( - gpg, "getkeybyid", return_value=expectedkey - ) as mockgetkey: + with mock.patch.object(cc_apt_configure, "GPG") as my_gpg: + my_gpg.return_value = gpg cc_apt_configure.handle("test", cfg, get_cloud(), []) if is_hardened is not None: mockkey.assert_called_with( - expectedkey, apt_lists[0], hardened=is_hardened + expectedkey, apt_lists[0], gpg, hardened=is_hardened ) else: - mockkey.assert_called_with(expectedkey, apt_lists[0]) - mockgetkey.assert_called_with(key, keyserver) + mockkey.assert_called_with(expectedkey, apt_lists[0], gpg) # filename should be ignored on key only assert not os.path.isfile(apt_lists[0]) - def test_apt_src_keyid_real(self, apt_lists): + def test_apt_src_keyid_real(self, apt_lists, m_gpg): """test_apt_src_keyid_real - Test keyid including key add""" keyid = "03683F77" cfg = {"keyid": keyid, "filename": apt_lists[0]} - self.apt_src_keyid_real(apt_lists, cfg, EXPECTEDKEY, is_hardened=False) + self.apt_src_keyid_real( + apt_lists, cfg, EXPECTEDKEY, m_gpg, is_hardened=False + ) - def test_apt_src_longkeyid_real(self, apt_lists): + def test_apt_src_longkeyid_real(self, apt_lists, m_gpg): """test_apt_src_longkeyid_real - Test long keyid including key add""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" cfg = {"keyid": keyid, "filename": apt_lists[0]} - self.apt_src_keyid_real(apt_lists, cfg, EXPECTEDKEY, is_hardened=False) + self.apt_src_keyid_real( + apt_lists, cfg, EXPECTEDKEY, m_gpg, is_hardened=False + ) - def test_apt_src_longkeyid_ks_real(self, apt_lists): + def test_apt_src_longkeyid_ks_real(self, apt_lists, m_gpg): """test_apt_src_longkeyid_ks_real - Test long keyid from other ks""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" cfg = { @@ -644,20 +667,25 @@ def test_apt_src_longkeyid_ks_real(self, apt_lists): "filename": apt_lists[0], } - self.apt_src_keyid_real(apt_lists, cfg, EXPECTEDKEY, is_hardened=False) + self.apt_src_keyid_real( + apt_lists, cfg, EXPECTEDKEY, m_gpg, is_hardened=False + ) - def test_apt_src_ppa(self, apt_lists, mocker): + def test_apt_src_ppa(self, apt_lists, mocker, m_gpg): """Test adding a ppa""" 
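The SAMPLE_GPG_AGENT_DIRMNGR_PIDS change above swaps a column-zero string literal for textwrap.dedent(), so the sample `ps` output can be indented along with the surrounding code. A self-contained illustration:

from textwrap import dedent

SAMPLE = dedent(
    """\
    PPID PID
    1 1057
    1 1095
    """
)
# dedent() strips the common leading whitespace at runtime.
assert SAMPLE.startswith("PPID PID\n")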
m_subp = mocker.patch.object( - subp, "subp", return_value=("PPID PID", "") + subp, "subp", return_value=SubpResult("PPID PID", "") ) + mocker.patch("cloudinit.gpg.subp.which", return_value=False) cfg = { "source": "ppa:smoser/cloud-init-test", "filename": apt_lists[0], } cfg = self.wrapv1conf([cfg]) - cc_apt_configure.handle("test", cfg, get_cloud(), []) + with mock.patch.object(cc_apt_configure, "GPG") as my_gpg: + my_gpg.return_value = m_gpg + cc_apt_configure.handle("test", cfg, get_cloud(), []) assert m_subp.call_args_list == [ mock.call( [ @@ -666,26 +694,11 @@ def test_apt_src_ppa(self, apt_lists, mocker): "ppa:smoser/cloud-init-test", ], ), - mock.call( - [ - "ps", - "-o", - "ppid,pid", - "-C", - "keyboxd", - "-C", - "dirmngr", - "-C", - "gpg-agent", - ], - capture=True, - rcs=[0, 1], - ), ] # adding ppa should ignore filename (uses add-apt-repository) assert not os.path.isfile(apt_lists[0]) - def test_apt_src_ppa_tri(self, apt_lists): + def test_apt_src_ppa_tri(self, apt_lists, m_gpg): """Test adding three ppa's""" cfg1 = { "source": "ppa:smoser/cloud-init-test", @@ -702,9 +715,11 @@ def test_apt_src_ppa_tri(self, apt_lists): cfg = self.wrapv1conf([cfg1, cfg2, cfg3]) with mock.patch.object( - subp, "subp", return_value=("PPID PID", "") + subp, "subp", return_value=SubpResult("PPID PID", "") ) as mockobj: - cc_apt_configure.handle("test", cfg, get_cloud(), []) + with mock.patch.object(cc_apt_configure, "GPG") as my_gpg: + my_gpg.return_value = m_gpg + cc_apt_configure.handle("test", cfg, get_cloud(), []) calls = [ call( [ @@ -802,7 +817,7 @@ def test_convert_to_new_format_collision(self): with pytest.raises(ValueError, match=match): cc_apt_configure.convert_to_v3_apt_format(cfgconflict) - def test_convert_to_new_format_dict_collision(self, apt_lists): + def test_convert_to_new_format_dict_collision(self, apt_lists, m_gpg): cfg1 = { "source": "deb $MIRROR $RELEASE multiverse", "filename": apt_lists[0], diff --git a/tests/unittests/config/test_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py index 5971e2f4a24..a1c75b328ef 100644 --- a/tests/unittests/config/test_apt_source_v3.py +++ b/tests/unittests/config/test_apt_source_v3.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
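Most of the v1 tests above repeat one pattern: patch the GPG name where cc_apt_configure looks it up and route its constructor to the fixture mock. A minimal sketch of that pattern; the run_handle_with_fake_gpg helper is ours for illustration, not part of the change:

from unittest import mock

from cloudinit.config import cc_apt_configure


def run_handle_with_fake_gpg(cfg, cloud, fake_gpg):
    # Any GPG() instantiation inside handle() now returns fake_gpg,
    # because the name is replaced in cc_apt_configure's namespace.
    with mock.patch.object(cc_apt_configure, "GPG") as gpg_cls:
        gpg_cls.return_value = fake_gpg
        cc_apt_configure.handle("test", cfg, cloud, [])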
+# pylint: disable=attribute-defined-outside-init """test_handler_apt_source_v3 Testing various config variations of the apt_source custom config @@ -63,9 +64,9 @@ def setup(self, mocker, tmpdir): self.matcher = re.compile(ADD_APT_REPO_MATCH).search @staticmethod - def _add_apt_sources(*args, **kwargs): + def _add_apt_sources(cfg, cloud, gpg, **kwargs): with mock.patch.object(cc_apt_configure, "update_packages"): - cc_apt_configure.add_apt_sources(*args, **kwargs) + cc_apt_configure.add_apt_sources(cfg, cloud, gpg, **kwargs) @staticmethod def _get_default_params(): @@ -80,7 +81,7 @@ def _get_default_params(): ] return params - def _apt_src_basic(self, filename, cfg, tmpdir): + def _apt_src_basic(self, filename, cfg, tmpdir, gpg): """_apt_src_basic Test Fix deb source string, has to overwrite mirror conf in params """ @@ -89,6 +90,7 @@ def _apt_src_basic(self, filename, cfg, tmpdir): self._add_apt_sources( cfg, cloud=None, + gpg=gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -110,7 +112,7 @@ def _apt_src_basic(self, filename, cfg, tmpdir): flags=re.IGNORECASE, ), f"Unexpected APT config in {filename}: {contents}" - def test_apt_v3_src_basic(self, tmpdir): + def test_apt_v3_src_basic(self, tmpdir, m_gpg): """test_apt_v3_src_basic - Test fix deb source string""" cfg = { self.aptlistfile: { @@ -121,9 +123,9 @@ def test_apt_v3_src_basic(self, tmpdir): ) } } - self._apt_src_basic(self.aptlistfile, cfg, tmpdir) + self._apt_src_basic(self.aptlistfile, cfg, tmpdir, m_gpg) - def test_apt_v3_src_basic_tri(self, tmpdir): + def test_apt_v3_src_basic_tri(self, tmpdir, m_gpg): """test_apt_v3_src_basic_tri - Test multiple fix deb source strings""" cfg = { self.aptlistfile: { @@ -148,7 +150,7 @@ def test_apt_v3_src_basic_tri(self, tmpdir): ) }, } - self._apt_src_basic(self.aptlistfile, cfg, tmpdir) + self._apt_src_basic(self.aptlistfile, cfg, tmpdir, m_gpg) # extra verify on two extra files of this test contents = util.load_text_file(self.aptlistfile2) @@ -176,7 +178,7 @@ def test_apt_v3_src_basic_tri(self, tmpdir): flags=re.IGNORECASE, ), f"Unexpected APT format of {self.aptlistfile3}: contents" - def _apt_src_replacement(self, filename, cfg, tmpdir): + def _apt_src_replacement(self, filename, cfg, tmpdir, gpg): """apt_src_replace Test Autoreplacement of MIRROR and RELEASE in source specs """ @@ -184,6 +186,7 @@ def _apt_src_replacement(self, filename, cfg, tmpdir): self._add_apt_sources( cfg, cloud=None, + gpg=gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -198,12 +201,12 @@ def _apt_src_replacement(self, filename, cfg, tmpdir): flags=re.IGNORECASE, ) - def test_apt_v3_src_replace(self, tmpdir): + def test_apt_v3_src_replace(self, tmpdir, m_gpg): """test_apt_v3_src_replace - Test replacement of MIRROR & RELEASE""" cfg = {self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"}} - self._apt_src_replacement(self.aptlistfile, cfg, tmpdir) + self._apt_src_replacement(self.aptlistfile, cfg, tmpdir, m_gpg) - def test_apt_v3_src_replace_fn(self, tmpdir): + def test_apt_v3_src_replace_fn(self, tmpdir, m_gpg): """test_apt_v3_src_replace_fn - Test filename overwritten in dict""" cfg = { "ignored": { @@ -212,14 +215,14 @@ def test_apt_v3_src_replace_fn(self, tmpdir): } } # second file should overwrite the dict key - self._apt_src_replacement(self.aptlistfile, cfg, tmpdir) + self._apt_src_replacement(self.aptlistfile, cfg, tmpdir, m_gpg) - def _apt_src_replace_tri(self, cfg, tmpdir): + def _apt_src_replace_tri(self, cfg, tmpdir, gpg): """_apt_src_replace_tri Test three 
autoreplacements of MIRROR and RELEASE in source specs with generic part """ - self._apt_src_replacement(self.aptlistfile, cfg, tmpdir) + self._apt_src_replacement(self.aptlistfile, cfg, tmpdir, gpg) # extra verify on two extra files of this test params = self._get_default_params() @@ -238,7 +241,7 @@ def _apt_src_replace_tri(self, cfg, tmpdir): flags=re.IGNORECASE, ), f"Unexpected APT format {self.aptlistfile3}: {contents}" - def test_apt_v3_src_replace_tri(self, tmpdir): + def test_apt_v3_src_replace_tri(self, tmpdir, m_gpg): """test_apt_v3_src_replace_tri - Test multiple replace/overwrites""" cfg = { self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"}, @@ -248,9 +251,11 @@ def test_apt_v3_src_replace_tri(self, tmpdir): }, self.aptlistfile3: {"source": "deb $MIRROR $RELEASE universe"}, } - self._apt_src_replace_tri(cfg, tmpdir) + self._apt_src_replace_tri(cfg, tmpdir, m_gpg) - def _apt_src_keyid(self, filename, cfg, keynum, tmpdir, is_hardened=None): + def _apt_src_keyid( + self, filename, cfg, keynum, tmpdir, gpg, is_hardened=None + ): """_apt_src_keyid Test specification of a source + keyid """ @@ -260,6 +265,7 @@ def _apt_src_keyid(self, filename, cfg, keynum, tmpdir, is_hardened=None): self._add_apt_sources( cfg, cloud=None, + gpg=gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -268,9 +274,9 @@ def _apt_src_keyid(self, filename, cfg, keynum, tmpdir, is_hardened=None): calls = [] for key in cfg: if is_hardened is not None: - calls.append(call(cfg[key], None, hardened=is_hardened)) + calls.append(call(cfg[key], None, gpg, hardened=is_hardened)) else: - calls.append(call(cfg[key], None)) + calls.append(call(cfg[key], None, gpg)) mockobj.assert_has_calls(calls, any_order=True) @@ -289,7 +295,7 @@ def _apt_src_keyid(self, filename, cfg, keynum, tmpdir, is_hardened=None): flags=re.IGNORECASE, ) - def test_apt_v3_src_keyid(self, tmpdir): + def test_apt_v3_src_keyid(self, tmpdir, m_gpg): """test_apt_v3_src_keyid - Test source + keyid with filename""" cfg = { self.aptlistfile: { @@ -303,9 +309,9 @@ def test_apt_v3_src_keyid(self, tmpdir): "keyid": "03683F77", } } - self._apt_src_keyid(self.aptlistfile, cfg, 1, tmpdir) + self._apt_src_keyid(self.aptlistfile, cfg, 1, tmpdir, m_gpg) - def test_apt_v3_src_keyid_tri(self, tmpdir): + def test_apt_v3_src_keyid_tri(self, tmpdir, m_gpg): """test_apt_v3_src_keyid_tri - Test multiple src+key+file writes""" cfg = { self.aptlistfile: { @@ -339,7 +345,7 @@ def test_apt_v3_src_keyid_tri(self, tmpdir): }, } - self._apt_src_keyid(self.aptlistfile, cfg, 3, tmpdir) + self._apt_src_keyid(self.aptlistfile, cfg, 3, tmpdir, m_gpg) contents = util.load_text_file(self.aptlistfile2) assert re.search( r"%s %s %s %s\n" @@ -365,7 +371,7 @@ def test_apt_v3_src_keyid_tri(self, tmpdir): flags=re.IGNORECASE, ) - def test_apt_v3_src_key(self, mocker): + def test_apt_v3_src_key(self, mocker, m_gpg): """test_apt_v3_src_key - Test source + key""" params = self._get_default_params() cfg = { @@ -384,6 +390,7 @@ def test_apt_v3_src_key(self, mocker): self._add_apt_sources( cfg, cloud=None, + gpg=m_gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -391,6 +398,7 @@ def test_apt_v3_src_key(self, mocker): calls = ( call( "add", + m_gpg, output_file=pathlib.Path(self.aptlistfile).stem, data="fakekey 4321", hardened=False, @@ -410,8 +418,9 @@ def test_apt_v3_src_key(self, mocker): flags=re.IGNORECASE, ) - def test_apt_v3_src_keyonly(self, mocker): + def test_apt_v3_src_keyonly(self, mocker, m_gpg): """test_apt_v3_src_keyonly - Test key without 
source""" + m_gpg.getkeybyid = mock.Mock(return_value="fakekey 4242") params = self._get_default_params() cfg = {self.aptlistfile: {"key": "fakekey 4242"}} @@ -419,6 +428,7 @@ def test_apt_v3_src_keyonly(self, mocker): self._add_apt_sources( cfg, cloud=None, + gpg=m_gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -426,6 +436,7 @@ def test_apt_v3_src_keyonly(self, mocker): calls = ( call( "add", + m_gpg, output_file=pathlib.Path(self.aptlistfile).stem, data="fakekey 4242", hardened=False, @@ -436,8 +447,9 @@ def test_apt_v3_src_keyonly(self, mocker): # filename should be ignored on key only assert os.path.isfile(self.aptlistfile) is False - def test_apt_v3_src_keyidonly(self): + def test_apt_v3_src_keyidonly(self, m_gpg): """test_apt_v3_src_keyidonly - Test keyid without source""" + m_gpg.getkeybyid = mock.Mock(return_value="fakekey 1212") params = self._get_default_params() cfg = {self.aptlistfile: {"keyid": "03683F77"}} with mock.patch.object( @@ -447,6 +459,7 @@ def test_apt_v3_src_keyidonly(self): self._add_apt_sources( cfg, cloud=None, + gpg=m_gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -454,6 +467,7 @@ def test_apt_v3_src_keyidonly(self): calls = ( call( "add", + m_gpg, output_file=pathlib.Path(self.aptlistfile).stem, data="fakekey 1212", hardened=False, @@ -466,7 +480,7 @@ def test_apt_v3_src_keyidonly(self): os.path.isfile(self.aptlistfile) is False ), f"Unexpected file {self.aptlistfile} found" - def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): + def apt_src_keyid_real(self, cfg, expectedkey, gpg, is_hardened=None): """apt_src_keyid_real Test specification of a keyid without source including up to addition of the key (add_apt_key_raw mocked to keep the @@ -481,6 +495,7 @@ def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): self._add_apt_sources( cfg, cloud=None, + gpg=gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -493,27 +508,28 @@ def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): mockkey.assert_called_with( expectedkey, keycfg["keyfile"], + gpg, hardened=is_hardened, ) # filename should be ignored on key only assert os.path.isfile(self.aptlistfile) is False - def test_apt_v3_src_keyid_real(self): + def test_apt_v3_src_keyid_real(self, m_gpg): """test_apt_v3_src_keyid_real - Test keyid including key add""" keyid = "03683F77" cfg = {self.aptlistfile: {"keyid": keyid, "keyfile": self.aptlistfile}} - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, m_gpg, is_hardened=False) - def test_apt_v3_src_longkeyid_real(self): + def test_apt_v3_src_longkeyid_real(self, m_gpg): """test_apt_v3_src_longkeyid_real Test long keyid including key add""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" cfg = {self.aptlistfile: {"keyid": keyid, "keyfile": self.aptlistfile}} - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, m_gpg, is_hardened=False) - def test_apt_v3_src_longkeyid_ks_real(self): + def test_apt_v3_src_longkeyid_ks_real(self, m_gpg): """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" cfg = { @@ -524,9 +540,9 @@ def test_apt_v3_src_longkeyid_ks_real(self): } } - self.apt_src_keyid_real(cfg, EXPECTEDKEY) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, m_gpg) - def test_apt_v3_src_keyid_keyserver(self): + def test_apt_v3_src_keyid_keyserver(self, m_gpg): """test_apt_v3_src_keyid_keyserver - Test custom 
keyserver""" keyid = "03683F77" params = self._get_default_params() @@ -540,30 +556,27 @@ def test_apt_v3_src_keyid_keyserver(self): # in some test environments only *.ubuntu.com is reachable # so mock the call and check if the config got there - with mock.patch.object( - gpg, "getkeybyid", return_value="fakekey" - ) as mockgetkey: - with mock.patch.object( - cc_apt_configure, "add_apt_key_raw" - ) as mockadd: - self._add_apt_sources( - cfg, - cloud=None, - template_params=params, - aa_repo_match=self.matcher, - ) + with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockadd: + self._add_apt_sources( + cfg, + cloud=None, + gpg=m_gpg, + template_params=params, + aa_repo_match=self.matcher, + ) - mockgetkey.assert_called_with("03683F77", "test.random.com") + m_gpg.getkeybyid.assert_called_with("03683F77", "test.random.com") mockadd.assert_called_with( - "fakekey", + "", self.aptlistfile, + m_gpg, hardened=False, ) # filename should be ignored on key only assert os.path.isfile(self.aptlistfile) is False - def test_apt_v3_src_ppa(self): + def test_apt_v3_src_ppa(self, m_gpg): """test_apt_v3_src_ppa - Test specification of a ppa""" params = self._get_default_params() cfg = {self.aptlistfile: {"source": "ppa:smoser/cloud-init-test"}} @@ -572,6 +585,7 @@ def test_apt_v3_src_ppa(self): self._add_apt_sources( cfg, cloud=None, + gpg=m_gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -588,7 +602,7 @@ def test_apt_v3_src_ppa(self): os.path.isfile(self.aptlistfile) is False ), f"Unexpected file found {self.aptlistfile}" - def test_apt_v3_src_ppa_tri(self): + def test_apt_v3_src_ppa_tri(self, m_gpg): """test_apt_v3_src_ppa_tri - Test specification of multiple ppa's""" params = self._get_default_params() cfg = { @@ -601,6 +615,7 @@ def test_apt_v3_src_ppa_tri(self): self._add_apt_sources( cfg, cloud=None, + gpg=m_gpg, template_params=params, aa_repo_match=self.matcher, ) @@ -1202,7 +1217,7 @@ def test_apt_v3_mirror_search_dns(self, m_get_hostname): assert mirrors["PRIMARY"] == pmir assert mirrors["SECURITY"] == smir - def test_apt_v3_add_mirror_keys(self, tmpdir): + def test_apt_v3_add_mirror_keys(self, tmpdir, m_gpg): """test_apt_v3_add_mirror_keys - Test adding key for mirrors""" arch = "amd64" cfg = { @@ -1225,10 +1240,10 @@ def test_apt_v3_add_mirror_keys(self, tmpdir): } with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockadd: - cc_apt_configure.add_mirror_keys(cfg, None) + cc_apt_configure.add_mirror_keys(cfg, None, gpg) calls = [ - mock.call("fakekey_primary", "primary", hardened=False), - mock.call("fakekey_security", "security", hardened=False), + mock.call("fakekey_primary", "primary", gpg, hardened=False), + mock.call("fakekey_security", "security", gpg, hardened=False), ] mockadd.assert_has_calls(calls, any_order=True) diff --git a/tests/unittests/config/test_cc_ansible.py b/tests/unittests/config/test_cc_ansible.py index 685dbd70ff9..271d9d037ec 100644 --- a/tests/unittests/config/test_cc_ansible.py +++ b/tests/unittests/config/test_cc_ansible.py @@ -287,8 +287,8 @@ def test_filter_args(self): ), ) def test_required_keys(self, cfg, exception, mocker): - mocker.patch(M_PATH + "subp", return_value=("", "")) - mocker.patch(M_PATH + "which", return_value=True) + mocker.patch(M_PATH + "subp.subp", return_value=("", "")) + mocker.patch(M_PATH + "subp.which", return_value=True) mocker.patch(M_PATH + "AnsiblePull.check_deps") mocker.patch( M_PATH + "AnsiblePull.get_version", @@ -319,28 +319,30 @@ def test_required_keys(self, cfg, exception, mocker): 
["python3-pip"] ) - @mock.patch(M_PATH + "which", return_value=False) + @mock.patch(M_PATH + "subp.which", return_value=False) def test_deps_not_installed(self, m_which): """assert exception raised if package not installed""" with raises(ValueError): cc_ansible.AnsiblePullDistro(get_cloud().distro).check_deps() - @mock.patch(M_PATH + "which", return_value=True) + @mock.patch(M_PATH + "subp.which", return_value=True) def test_deps(self, m_which): """assert exception not raised if package installed""" cc_ansible.AnsiblePullDistro(get_cloud().distro).check_deps() - @mock.patch(M_PATH + "subp", return_value=("stdout", "stderr")) - @mock.patch(M_PATH + "which", return_value=False) + @mock.patch(M_PATH + "subp.subp", return_value=("stdout", "stderr")) + @mock.patch(M_PATH + "subp.which", return_value=False) def test_pip_bootstrap(self, m_which, m_subp): distro = get_cloud(mocked_distro=True).distro with mock.patch("builtins.__import__", side_effect=ImportError): cc_ansible.AnsiblePullPip(distro, "ansible").install("") distro.install_packages.assert_called_once() - @mock.patch(M_PATH + "which", return_value=True) - @mock.patch(M_PATH + "subp", return_value=("stdout", "stderr")) - @mock.patch("cloudinit.distros.subp", return_value=("stdout", "stderr")) + @mock.patch(M_PATH + "subp.which", return_value=True) + @mock.patch(M_PATH + "subp.subp", return_value=("stdout", "stderr")) + @mock.patch( + "cloudinit.distros.subp.subp", return_value=("stdout", "stderr") + ) @mark.parametrize( ("cfg", "expected"), ( @@ -406,7 +408,8 @@ def test_do_not_run(self, m_validate): assert not m_validate.called @mock.patch( - "cloudinit.config.cc_ansible.subp", side_effect=[(distro_version, "")] + "cloudinit.config.cc_ansible.subp.subp", + side_effect=[(distro_version, "")], ) def test_parse_version_distro(self, m_subp): """Verify that the expected version is returned""" @@ -424,8 +427,8 @@ def test_parse_version_pip(self, m_subp): expected = util.Version(2, 13, 2) assert received == expected - @mock.patch(M_PATH + "subp", return_value=("stdout", "stderr")) - @mock.patch(M_PATH + "which", return_value=True) + @mock.patch(M_PATH + "subp.subp", return_value=("stdout", "stderr")) + @mock.patch(M_PATH + "subp.which", return_value=True) def test_ansible_env_var(self, m_which, m_subp): cc_ansible.handle("", CFG_FULL_PULL, get_cloud(), []) diff --git a/tests/unittests/config/test_cc_apt_configure.py b/tests/unittests/config/test_cc_apt_configure.py index a75acd3cdde..0b0a4e8515a 100644 --- a/tests/unittests/config/test_cc_apt_configure.py +++ b/tests/unittests/config/test_cc_apt_configure.py @@ -298,6 +298,7 @@ class TestAptConfigure: ), ) @mock.patch(M_PATH + "get_apt_cfg") + @mock.patch.object(features, "APT_DEB822_SOURCE_LIST_FILE", True) def test_remove_source( self, m_get_apt_cfg, @@ -312,7 +313,6 @@ def test_remove_source( "sourceparts": f"{tmpdir}/etc/apt/sources.list.d/", } cloud = get_cloud(distro_name) - features.APT_DEB822_SOURCE_LIST_FILE = True sources_file = tmpdir.join("/etc/apt/sources.list") deb822_sources_file = tmpdir.join( f"/etc/apt/sources.list.d/{distro_name}.sources" diff --git a/tests/unittests/config/test_cc_ca_certs.py b/tests/unittests/config/test_cc_ca_certs.py index 46b2a63523b..7013a95dbe8 100644 --- a/tests/unittests/config/test_cc_ca_certs.py +++ b/tests/unittests/config/test_cc_ca_certs.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+# pylint: disable=attribute-defined-outside-init import re import shutil import tempfile @@ -379,6 +380,7 @@ def test_non_existent_cert_cfg(self): cc_ca_certs.disable_default_ca_certs(distro_name, conf) +@pytest.mark.usefixtures("clear_deprecation_log") class TestCACertsSchema: """Directly test schema rather than through handle.""" diff --git a/tests/unittests/config/test_cc_growpart.py b/tests/unittests/config/test_cc_growpart.py index 3913542d099..5505b8b049e 100644 --- a/tests/unittests/config/test_cc_growpart.py +++ b/tests/unittests/config/test_cc_growpart.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import errno import logging diff --git a/tests/unittests/config/test_cc_package_update_upgrade_install.py b/tests/unittests/config/test_cc_package_update_upgrade_install.py index a77c305eb99..08db05a03fb 100644 --- a/tests/unittests/config/test_cc_package_update_upgrade_install.py +++ b/tests/unittests/config/test_cc_package_update_upgrade_install.py @@ -37,6 +37,14 @@ def common_mocks(mocker): "cloudinit.distros.package_management.apt.Apt._apt_lock_available", return_value=True, ) + mocker.patch( + "cloudinit.distros.package_management.apt.Apt.available", + return_value=True, + ) + mocker.patch( + "cloudinit.distros.package_management.snap.Snap.available", + return_value=True, + ) class TestRebootIfRequired: @@ -275,7 +283,7 @@ def _new_subp(*args, **kwargs): assert caplog.records[-3].levelname == "WARNING" assert ( caplog.records[-3].message - == "Failed to install packages: ['pkg1']" + == "Failure when attempting to install packages: ['pkg1']" ) diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py index ef34a8c6052..d37faedd4c6 100644 --- a/tests/unittests/config/test_cc_set_passwords.py +++ b/tests/unittests/config/test_cc_set_passwords.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. +import copy import logging from unittest import mock @@ -508,6 +509,7 @@ def test_chpasswd_parity(self, list_def, users_def): class TestExpire: @pytest.mark.parametrize("cfg", expire_cases) def test_expire(self, cfg, mocker, caplog): + cfg = copy.deepcopy(cfg) cloud = get_cloud() mocker.patch(f"{MODPATH}subp.subp") mocker.patch.object(cloud.distro, "chpasswd") @@ -533,7 +535,9 @@ def test_expire(self, cfg, mocker, caplog): def test_expire_old_behavior(self, cfg, mocker, caplog): # Previously expire didn't apply to hashed passwords. 
# Ensure we can preserve that case on older releases - features.EXPIRE_APPLIES_TO_HASHED_USERS = False + mocker.patch.object(features, "EXPIRE_APPLIES_TO_HASHED_USERS", False) + + cfg = copy.deepcopy(cfg) cloud = get_cloud() mocker.patch(f"{MODPATH}subp.subp") mocker.patch.object(cloud.distro, "chpasswd") diff --git a/tests/unittests/config/test_cc_ssh.py b/tests/unittests/config/test_cc_ssh.py index 544e0b67b8a..102519ebf58 100644 --- a/tests/unittests/config/test_cc_ssh.py +++ b/tests/unittests/config/test_cc_ssh.py @@ -38,8 +38,10 @@ def publish_hostkey_test_setup(tmpdir): with open(filepath, "w") as f: f.write(" ".join(test_hostkeys[key_type])) - cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, "ssh_host_%s_key") - yield test_hostkeys, test_hostkey_files + with mock.patch.object( + cc_ssh, "KEY_FILE_TPL", os.path.join(hostkey_tmpdir, "ssh_host_%s_key") + ): + yield test_hostkeys, test_hostkey_files def _replace_options(user: Optional[str] = None) -> str: @@ -255,6 +257,7 @@ def test_handle_default_root( @mock.patch(MODPATH + "ug_util.normalize_users_groups") @mock.patch(MODPATH + "os.path.exists") @mock.patch(MODPATH + "util.fips_enabled", return_value=False) + @mock.patch.object(cc_ssh, "PUBLISH_HOST_KEYS", True) def test_handle_publish_hostkeys( self, m_fips, @@ -268,7 +271,6 @@ def test_handle_publish_hostkeys( ): """Test handle with various configs for ssh_publish_hostkeys.""" test_hostkeys, test_hostkey_files = publish_hostkey_test_setup - cc_ssh.PUBLISH_HOST_KEYS = True keys = ["key1"] user = "clouduser" # Return no matching keys for first glob, test keys for second. @@ -282,7 +284,6 @@ def test_handle_publish_hostkeys( m_path_exists.return_value = True m_nug.return_value = ({user: {"default": user}}, {}) cloud = get_cloud(distro="ubuntu", metadata={"public-keys": keys}) - cloud.datasource.publish_host_keys = mock.Mock() expected_calls = [] if expected_key_types is not None: @@ -294,10 +295,15 @@ def test_handle_publish_hostkeys( ] ) ] - cc_ssh.handle("name", cfg, cloud, None) - assert ( - expected_calls == cloud.datasource.publish_host_keys.call_args_list - ) + + with mock.patch.object( + cloud.datasource, "publish_host_keys", mock.Mock() + ): + cc_ssh.handle("name", cfg, cloud, None) + assert ( + expected_calls + == cloud.datasource.publish_host_keys.call_args_list + ) @pytest.mark.parametrize( "ssh_keys_group_exists,sshd_version,expected_private_permissions", diff --git a/tests/unittests/config/test_cc_ubuntu_autoinstall.py b/tests/unittests/config/test_cc_ubuntu_autoinstall.py index 4b56669091d..1a492ad0f4c 100644 --- a/tests/unittests/config/test_cc_ubuntu_autoinstall.py +++ b/tests/unittests/config/test_cc_ubuntu_autoinstall.py @@ -66,7 +66,7 @@ def test_runtime_validation_errors(self, src_cfg, error_msg): @mock.patch(MODPATH + "util.wait_for_snap_seeded") -@mock.patch(MODPATH + "subp") +@mock.patch(MODPATH + "subp.subp") class TestHandleAutoinstall: """Test cc_ubuntu_autoinstall handling of config.""" diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py index 3300b77b2b7..0f42b05f31e 100644 --- a/tests/unittests/config/test_cc_users_groups.py +++ b/tests/unittests/config/test_cc_users_groups.py @@ -97,6 +97,7 @@ def test_handle_users_in_cfg_calls_create_users_on_bsd( "lock_passwd": True, "groups": ["wheel"], "shell": "/bin/tcsh", + "homedir": "/home/freebsd", } } metadata = {} @@ -116,6 +117,7 @@ def test_handle_users_in_cfg_calls_create_users_on_bsd( groups="wheel", lock_passwd=True, shell="/bin/tcsh", + 
homedir="/home/freebsd", ), mock.call("me2", uid=1234, default=False), ], diff --git a/tests/unittests/conftest.py b/tests/unittests/conftest.py index c99f5d860ec..20dc5cc594e 100644 --- a/tests/unittests/conftest.py +++ b/tests/unittests/conftest.py @@ -1,15 +1,33 @@ import builtins import glob import os +import shutil from pathlib import Path from unittest import mock import pytest from cloudinit import atomic_helper, log, util +from cloudinit.gpg import GPG from tests.hypothesis import HAS_HYPOTHESIS from tests.unittests.helpers import retarget_many_wrapper + +@pytest.fixture +def m_gpg(): + MockGPG = mock.Mock(spec=GPG) + MockGPG.configure_mock(**{"getkeybyid.return_value": "fakekey"}) + gpg = MockGPG() + gpg.list_keys = mock.Mock(return_value="") + gpg.getkeybyid = mock.Mock(return_value="") + + # to make tests for cc_apt_configure behave, we need the mocked GPG + # to actually behave like a context manager + gpg.__enter__ = GPG.__enter__ + gpg.__exit__ = GPG.__exit__ + yield gpg + + FS_FUNCS = { os.path: [ ("isfile", 1), @@ -22,6 +40,7 @@ os: [ ("listdir", 1), ("mkdir", 1), + ("rmdir", 1), ("lstat", 1), ("symlink", 2), ("stat", 1), @@ -49,12 +68,21 @@ ("write_file", 1), ("write_json", 1), ], + shutil: [ + ("rmtree", 1), + ], } @pytest.fixture def fake_filesystem(mocker, tmpdir): """Mocks fs functions to operate under `tmpdir`""" + # This allows fake_filesystem to be used with production code that + # creates temporary directories. Functions like TemporaryDirectory() + # attempt to create a directory under "/tmp" assuming that it already + # exists, but then it fails because of the retargeting that happens here. + tmpdir.mkdir("tmp") + for (mod, funcs) in FS_FUNCS.items(): for f, nargs in funcs: func = getattr(mod, f) @@ -63,6 +91,16 @@ def fake_filesystem(mocker, tmpdir): yield str(tmpdir) +@pytest.fixture(scope="session", autouse=True) +def disable_sysfs_net(tmpdir_factory): + """Avoid tests which read the underlying host's /sys/class/net.""" + mock_sysfs = f"{tmpdir_factory.mktemp('sysfs')}/" + with mock.patch( + "cloudinit.net.get_sys_class_path", return_value=mock_sysfs + ): + yield mock_sysfs + + @pytest.fixture(autouse=True) def disable_dns_lookup(request): if "allow_dns_lookup" in request.keywords: @@ -91,12 +129,21 @@ def dhclient_exists(): log.configure_root_logger() -@pytest.fixture(autouse=True) -def disable_root_logger_setup(request): - with mock.patch("cloudinit.cmd.main.configure_root_logger", autospec=True): yield +@pytest.fixture(autouse=True, scope="session") +def disable_root_logger_setup(): + with mock.patch("cloudinit.log.configure_root_logger", autospec=True): yield +@pytest.fixture +def clear_deprecation_log(): + """Clear any deprecation warnings before and after running tests.""" + # Since deprecations are de-duped, the existence (or non-existence) of + # a deprecation warning in a previous test can cause the next test to + # fail. + util.deprecate._log = set() + PYTEST_VERSION_TUPLE = tuple(map(int, pytest.__version__.split("."))) if PYTEST_VERSION_TUPLE < (3, 9, 0): diff --git a/tests/unittests/distros/package_management/test_apt.py b/tests/unittests/distros/package_management/test_apt.py index d261e757ae6..8b5c707aa15 100644 --- a/tests/unittests/distros/package_management/test_apt.py +++ b/tests/unittests/distros/package_management/test_apt.py @@ -7,6 +7,8 @@ from cloudinit import subp from cloudinit.distros.package_management.apt import APT_GET_COMMAND, Apt +M_PATH = "cloudinit.distros.package_management.apt.Apt." 
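One subtlety in the m_gpg fixture above: a plain mock.Mock does not implement the context-manager protocol, so `with GPG() as gpg:` call sites would fail; the fixture therefore copies the real __enter__/__exit__ onto the mock. A self-contained illustration of the same trick with a stand-in class:

from unittest import mock


class Resource:
    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        return False


fake = mock.Mock(spec=Resource)
# Borrow the real dunder methods so the mock honors `with` blocks.
fake.__enter__ = Resource.__enter__
fake.__exit__ = Resource.__exit__

with fake as handle:
    assert handle is fake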
+ @mock.patch.dict("os.environ", {}, clear=True) @mock.patch("cloudinit.distros.debian.subp.which", return_value=True) @@ -86,3 +88,21 @@ def test_lock_exception_timeout( ) with pytest.raises(TimeoutError): apt._wait_for_apt_command("stub", {"args": "stub2"}, timeout=5) + + def test_search_stem(self, m_subp, m_which, mocker): + """Test that containing `-`, `/`, or `=` is handled correctly.""" + mocker.patch(f"{M_PATH}update_package_sources") + mocker.patch( + f"{M_PATH}get_all_packages", + return_value=["cloud-init", "pkg2", "pkg3", "pkg4"], + ) + m_install = mocker.patch(f"{M_PATH}run_package_command") + + apt = Apt(runner=mock.Mock()) + apt.install_packages( + ["cloud-init", "pkg2-", "pkg3/jammy-updates", "pkg4=1.2"] + ) + m_install.assert_called_with( + "install", + pkgs=["cloud-init", "pkg2-", "pkg3/jammy-updates", "pkg4=1.2"], + ) diff --git a/tests/unittests/distros/test_azurelinux.py b/tests/unittests/distros/test_azurelinux.py new file mode 100644 index 00000000000..03c895bc2a2 --- /dev/null +++ b/tests/unittests/distros/test_azurelinux.py @@ -0,0 +1,25 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from tests.unittests.helpers import CiTestCase + +from . import _get_distro + +SYSTEM_INFO = { + "paths": { + "cloud_dir": "/var/lib/cloud/", + "templates_dir": "/etc/cloud/templates/", + }, + "network": {"renderers": "networkd"}, +} + + +class TestAzurelinux(CiTestCase): + with_logs = True + distro = _get_distro("azurelinux", SYSTEM_INFO) + expected_log_line = "Rely on Azure Linux default network config" + + def test_network_renderer(self): + self.assertEqual(self.distro._cfg["network"]["renderers"], "networkd") + + def test_get_distro(self): + self.assertEqual(self.distro.osfamily, "azurelinux") diff --git a/tests/unittests/distros/test_init.py b/tests/unittests/distros/test_init.py index f7994f1ca7c..986ccafcbca 100644 --- a/tests/unittests/distros/test_init.py +++ b/tests/unittests/distros/test_init.py @@ -256,6 +256,17 @@ def test_valid_substitution( class TestInstall: """Tests for cloudinit.distros.Distro.install_packages.""" + @pytest.fixture(autouse=True) + def ensure_available(self, mocker): + mocker.patch( + "cloudinit.distros.package_management.apt.Apt.available", + return_value=True, + ) + mocker.patch( + "cloudinit.distros.package_management.snap.Snap.available", + return_value=True, + ) + @pytest.fixture def m_apt_install(self, mocker): return mocker.patch( @@ -318,7 +329,7 @@ def test_non_default_package_manager_fail( ) with pytest.raises( PackageInstallerError, - match="Failed to install the following packages: \\['pkg3'\\]", + match="Failed to install the following packages: {'pkg3'}", ): _get_distro("debian").install_packages( [{"apt": ["pkg1"]}, "pkg2", {"snap": ["pkg3"]}] @@ -339,3 +350,120 @@ def test_default_and_specific_package_manager( assert "pkg3" not in apt_install_args m_snap_install.assert_not_called() + + def test_specific_package_manager_fail_doesnt_retry( + self, mocker, m_snap_install + ): + """Test fail from package manager doesn't retry as generic.""" + m_apt_install = mocker.patch( + "cloudinit.distros.package_management.apt.Apt.install_packages", + return_value=["pkg1"], + ) + with pytest.raises(PackageInstallerError): + _get_distro("ubuntu").install_packages([{"apt": ["pkg1"]}]) + apt_install_args = m_apt_install.call_args_list[0][0][0] + assert "pkg1" in apt_install_args + m_snap_install.assert_not_called() + + def test_no_attempt_if_no_package_manager( + self, mocker, m_apt_install, m_snap_install, caplog + ): 
+ """Test that no attempt is made if there are no package manager.""" + mocker.patch( + "cloudinit.distros.package_management.apt.Apt.available", + return_value=False, + ) + mocker.patch( + "cloudinit.distros.package_management.snap.Snap.available", + return_value=False, + ) + with pytest.raises(PackageInstallerError): + _get_distro("ubuntu").install_packages( + ["pkg1", "pkg2", {"other": "pkg3"}] + ) + m_apt_install.assert_not_called() + m_snap_install.assert_not_called() + + assert "Package manager 'apt' not available" in caplog.text + assert "Package manager 'snap' not available" in caplog.text + + @pytest.mark.parametrize( + "distro,pkg_list,apt_available,apt_failed,snap_failed,total_failed", + [ + pytest.param( + "debian", + ["pkg1", {"apt": ["pkg2"], "snap": ["pkg3"]}], + False, + [], + ["pkg1", "pkg3"], + ["pkg1", "pkg2", "pkg3"], + id="debian_no_apt", + ), + pytest.param( + "debian", + ["pkg1", {"apt": ["pkg2"], "snap": ["pkg3"]}], + True, + ["pkg2"], + ["pkg3"], + ["pkg2", "pkg3"], + id="debian_with_apt", + ), + pytest.param( + "ubuntu", + ["pkg1", {"apt": ["pkg2"], "snap": ["pkg3"]}], + False, + [], + [], + ["pkg2"], + id="ubuntu_no_apt", + ), + pytest.param( + "ubuntu", + ["pkg1", {"apt": ["pkg2"], "snap": ["pkg3"]}], + True, + ["pkg1"], + ["pkg3"], + ["pkg3"], + id="ubuntu_with_apt", + ), + ], + ) + def test_failed( + self, + distro, + pkg_list, + apt_available, + apt_failed, + snap_failed, + total_failed, + mocker, + m_apt_install, + m_snap_install, + ): + """Test that failed packages are properly tracked. + + We need to ensure that the failed packages are properly tracked: + 1. When package install fails normally + 2. When package manager is not available + 3. When package manager is not explicitly supported by the distro + + So test various combinations of these scenarios. 
+ """ + mocker.patch( + "cloudinit.distros.package_management.apt.Apt.available", + return_value=apt_available, + ) + mocker.patch( + "cloudinit.distros.package_management.apt.Apt.install_packages", + return_value=apt_failed, + ) + mocker.patch( + "cloudinit.distros.package_management.snap.Snap.install_packages", + return_value=snap_failed, + ) + with pytest.raises(PackageInstallerError) as exc: + _get_distro(distro).install_packages(pkg_list) + message = exc.value.args[0] + assert "Failed to install the following packages" in message + for pkg in total_failed: + assert pkg in message diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py index 48690d7128d..b8a22a89aff 100644 --- a/tests/unittests/distros/test_netconfig.py +++ b/tests/unittests/distros/test_netconfig.py @@ -303,7 +303,7 @@ def setUp(self): def _get_distro(self, dname, renderers=None, activators=None): cls = distros.fetch(dname) - cfg = settings.CFG_BUILTIN + cfg = copy.deepcopy(settings.CFG_BUILTIN) cfg["system_info"]["distro"] = dname system_info_network_cfg = {} if renderers: @@ -1293,6 +1293,131 @@ def test_mariner_network_config_v1_with_duplicates(self): ) +class TestNetCfgDistroAzureLinux(TestNetCfgDistroBase): + def setUp(self): + super().setUp() + self.distro = self._get_distro("azurelinux", renderers=["networkd"]) + + def create_conf_dict(self, contents): + content_dict = {} + for line in contents: + if line: + line = line.strip() + if line and re.search(r"^\[(.+)\]$", line): + content_dict[line] = [] + key = line + elif line: + assert key + content_dict[key].append(line) + + return content_dict + + def compare_dicts(self, actual, expected): + for k, v in actual.items(): + self.assertEqual(sorted(expected[k]), sorted(v)) + + def _apply_and_verify( + self, apply_fn, config, expected_cfgs=None, bringup=False + ): + if not expected_cfgs: + raise ValueError("expected_cfg must not be None") + + tmpd = None + with mock.patch("cloudinit.net.networkd.available") as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + actual = self.create_conf_dict(results[cfgpath].splitlines()) + self.compare_dicts(actual, expected) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def nwk_file_path(self, ifname): + return "/etc/systemd/network/10-cloud-init-%s.network" % ifname + + def net_cfg_1(self, ifname): + ret = ( + """\ + [Match] + Name=%s + [Network] + DHCP=no + [Address] + Address=192.168.1.5/24 + [Route] + Gateway=192.168.1.254""" + % ifname + ) + return ret + + def net_cfg_2(self, ifname): + ret = ( + """\ + [Match] + Name=%s + [Network] + DHCP=ipv4""" + % ifname + ) + return ret + + def test_azurelinux_network_config_v1(self): + tmp = self.net_cfg_1("eth0").splitlines() + expected_eth0 = self.create_conf_dict(tmp) + + tmp = self.net_cfg_2("eth1").splitlines() + expected_eth1 = self.create_conf_dict(tmp) + + expected_cfgs = { + self.nwk_file_path("eth0"): expected_eth0, + self.nwk_file_path("eth1"): expected_eth1, + } + + self._apply_and_verify( + self.distro.apply_network_config, V1_NET_CFG, expected_cfgs.copy() + ) + + def test_azurelinux_network_config_v2(self): + tmp = self.net_cfg_1("eth7").splitlines() + expected_eth7 = self.create_conf_dict(tmp) + + tmp = self.net_cfg_2("eth9").splitlines() + expected_eth9 = self.create_conf_dict(tmp) + + expected_cfgs = { + self.nwk_file_path("eth7"): expected_eth7, + self.nwk_file_path("eth9"): expected_eth9, 
+ } + + self._apply_and_verify( + self.distro.apply_network_config, V2_NET_CFG, expected_cfgs.copy() + ) + + def test_azurelinux_network_config_v1_with_duplicates(self): + expected = """\ + [Match] + Name=eth0 + [Network] + DHCP=no + DNS=1.2.3.4 + Domains=test.com + [Address] + Address=192.168.0.102/24""" + + net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS) + + expected = self.create_conf_dict(expected.splitlines()) + expected_cfgs = { + self.nwk_file_path("eth0"): expected, + } + + self._apply_and_verify( + self.distro.apply_network_config, net_cfg, expected_cfgs.copy() + ) + + def get_mode(path, target=None): # Mask upper st_mode bits like S_IFREG bit preserve sticky and isuid/osgid return os.stat(subp.target_path(target, path)).st_mode & 0o777 diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index bed234dfbfa..70ca9dff912 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import functools import io @@ -161,6 +162,7 @@ def setUp(self): self.old_handlers = self.logger.handlers self.logger.handlers = [handler] self.old_level = logging.root.level + self.logger.level = logging.DEBUG if self.allowed_subp is True: subp.subp = _real_subp else: diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py index c6a3518445a..c38ee676d97 100644 --- a/tests/unittests/net/test_dhcp.py +++ b/tests/unittests/net/test_dhcp.py @@ -3,6 +3,7 @@ import os import signal import socket +import subprocess from textwrap import dedent import pytest @@ -374,15 +375,13 @@ def test_redhat_format_with_a_space_too_much_after_comma(self): ) -class TestDHCPDiscoveryClean(CiTestCase): - with_logs = True - +class TestDHCPDiscoveryClean: @mock.patch("cloudinit.distros.net.find_fallback_nic", return_value="eth9") @mock.patch("cloudinit.net.dhcp.os.remove") @mock.patch("cloudinit.net.dhcp.subp.subp") @mock.patch("cloudinit.net.dhcp.subp.which") def test_dhcpcd_exits_with_error( - self, m_which, m_subp, m_remove, m_fallback + self, m_which, m_subp, m_remove, m_fallback, caplog ): """Log and do nothing when nic is absent and no fallback is found.""" m_subp.side_effect = [ @@ -393,16 +392,15 @@ def test_dhcpcd_exits_with_error( with pytest.raises(NoDHCPLeaseError): maybe_perform_dhcp_discovery(Distro("fake but not", {}, None)) - self.assertIn( - "DHCP client selected: dhcpcd", - self.logs.getvalue(), - ) + assert "DHCP client selected: dhcpcd" in caplog.text @mock.patch("cloudinit.distros.net.find_fallback_nic", return_value="eth9") @mock.patch("cloudinit.net.dhcp.os.remove") @mock.patch("cloudinit.net.dhcp.subp.subp") @mock.patch("cloudinit.net.dhcp.subp.which") - def test_dhcp_client_failover(self, m_which, m_subp, m_remove, m_fallback): + def test_dhcp_client_failover( + self, m_which, m_subp, m_remove, m_fallback, caplog + ): """Log and do nothing when nic is absent and no fallback client is found.""" m_subp.side_effect = [ @@ -414,40 +412,22 @@ def test_dhcp_client_failover(self, m_which, m_subp, m_remove, m_fallback): with pytest.raises(NoDHCPLeaseError): maybe_perform_dhcp_discovery(Distro("somename", {}, None)) - self.assertIn( - "DHCP client not found: dhclient", - self.logs.getvalue(), - ) - self.assertIn( - "DHCP client not found: dhcpcd", - self.logs.getvalue(), - ) - self.assertIn( - "DHCP client not found: udhcpc", - self.logs.getvalue(), - ) + assert "DHCP client not found: dhclient" in caplog.text + assert "DHCP 
client not found: dhcpcd" in caplog.text + assert "DHCP client not found: udhcpc" in caplog.text @mock.patch("cloudinit.net.dhcp.subp.which") @mock.patch("cloudinit.distros.net.find_fallback_nic") - def test_absent_dhclient_command(self, m_fallback, m_which): + def test_absent_dhclient_command(self, m_fallback, m_which, caplog): """When dhclient doesn't exist in the OS, log the issue and no-op.""" m_fallback.return_value = "eth9" m_which.return_value = None # dhclient isn't found with pytest.raises(NoDHCPLeaseMissingDhclientError): maybe_perform_dhcp_discovery(Distro("whoa", {}, None)) - self.assertIn( - "DHCP client not found: dhclient", - self.logs.getvalue(), - ) - self.assertIn( - "DHCP client not found: dhcpcd", - self.logs.getvalue(), - ) - self.assertIn( - "DHCP client not found: udhcpc", - self.logs.getvalue(), - ) + assert "DHCP client not found: dhclient" in caplog.text + assert "DHCP client not found: dhcpcd" in caplog.text + assert "DHCP client not found: udhcpc" in caplog.text @mock.patch("cloudinit.net.dhcp.os.remove") @mock.patch("time.sleep", mock.MagicMock()) @@ -456,7 +436,7 @@ def test_absent_dhclient_command(self, m_fallback, m_which): @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/dhclient") @mock.patch("cloudinit.net.dhcp.util.wait_for_files", return_value=False) def test_dhcp_discovery_warns_invalid_pid( - self, m_wait, m_which, m_subp, m_kill, m_remove + self, m_wait, m_which, m_subp, m_kill, m_remove, caplog ): """dhcp_discovery logs a warning when pidfile contains invalid content. @@ -478,22 +458,18 @@ def test_dhcp_discovery_warns_invalid_pid( with mock.patch( "cloudinit.util.load_text_file", return_value=lease_content ): - self.assertCountEqual( - { - "interface": "eth9", - "fixed-address": "192.168.2.74", - "subnet-mask": "255.255.255.0", - "routers": "192.168.2.1", - }, - IscDhclient().get_newest_lease("eth0"), - ) - with self.assertRaises(InvalidDHCPLeaseFileError): + assert { + "interface": "eth9", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + } == IscDhclient().get_newest_lease("eth0") + with pytest.raises(InvalidDHCPLeaseFileError): with mock.patch("cloudinit.util.load_text_file", return_value=""): IscDhclient().dhcp_discovery("eth9", distro=MockDistro()) - self.assertIn( - "dhclient(pid=, parentpid=unknown) failed " - "to daemonize after 10.0 seconds", - self.logs.getvalue(), + assert ( + "dhclient(pid=, parentpid=unknown) failed to daemonize after" + " 10.0 seconds" in caplog.text ) m_kill.assert_not_called() @@ -503,23 +479,20 @@ def test_dhcp_discovery_warns_invalid_pid( @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/dhclient") @mock.patch("cloudinit.net.dhcp.subp.subp") def test_dhcp_discovery_waits_on_lease_and_pid( - self, m_subp, m_which, m_wait, m_kill, m_remove + self, m_subp, m_which, m_wait, m_kill, m_remove, caplog ): """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" m_subp.return_value = ("", "") # Don't create pid or leases file m_wait.return_value = [PID_F] # Return the missing pidfile wait for - self.assertEqual( - {}, IscDhclient().dhcp_discovery("eth9", distro=MockDistro()) + assert {} == IscDhclient().dhcp_discovery("eth9", distro=MockDistro()) + m_wait.assert_called_once_with( + [PID_F, LEASE_F], maxwait=5, naplen=0.01 ) - self.assertEqual( - mock.call([PID_F, LEASE_F], maxwait=5, naplen=0.01), - m_wait.call_args_list[0], - ) - self.assertIn( - "WARNING: dhclient did not produce expected files: dhclient.pid", - 
self.logs.getvalue(), + assert ( + "dhclient did not produce expected files: dhclient.pid" + in caplog.text ) m_kill.assert_not_called() @@ -557,15 +530,12 @@ def test_dhcp_discovery( with mock.patch( "cloudinit.util.load_text_file", side_effect=["1", lease_content] ): - self.assertCountEqual( - { - "interface": "eth9", - "fixed-address": "192.168.2.74", - "subnet-mask": "255.255.255.0", - "routers": "192.168.2.1", - }, - IscDhclient().dhcp_discovery("eth9", distro=MockDistro()), - ) + assert { + "interface": "eth9", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + } == IscDhclient().dhcp_discovery("eth9", distro=MockDistro()) # Interface was brought up before dhclient called m_subp.assert_has_calls( [ @@ -633,15 +603,12 @@ def test_dhcp_discovery_ib( with mock.patch( "cloudinit.util.load_text_file", side_effect=["1", lease_content] ): - self.assertCountEqual( - { - "interface": "ib0", - "fixed-address": "192.168.2.74", - "subnet-mask": "255.255.255.0", - "routers": "192.168.2.1", - }, - IscDhclient().dhcp_discovery("ib0", distro=MockDistro()), - ) + assert { + "interface": "ib0", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + } == IscDhclient().dhcp_discovery("ib0", distro=MockDistro()) # Interface was brought up before dhclient called m_subp.assert_has_calls( [ @@ -682,14 +649,13 @@ def test_dhcp_discovery_ib( @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/dhclient") @mock.patch("cloudinit.util.wait_for_files") def test_dhcp_output_error_stream( - self, m_wait, m_which, m_subp, m_kill, m_remove + self, m_wait, m_which, m_subp, m_kill, m_remove, tmpdir ): """ "dhcp_log_func is called with the output and error streams of dhclient when the callable is passed.""" dhclient_err = "FAKE DHCLIENT ERROR" dhclient_out = "FAKE DHCLIENT OUT" m_subp.return_value = (dhclient_out, dhclient_err) - tmpdir = self.tmp_dir() lease_content = dedent( """ lease { @@ -707,8 +673,8 @@ def test_dhcp_output_error_stream( write_file(pid_file, "%d\n" % my_pid) def dhcp_log_func(out, err): - self.assertEqual(out, dhclient_out) - self.assertEqual(err, dhclient_err) + assert out == dhclient_out + assert err == dhclient_err IscDhclient().dhcp_discovery( "eth9", dhcp_log_func=dhcp_log_func, distro=MockDistro() @@ -1167,9 +1133,11 @@ def test_get_newest_lease_file_from_distro_debian(self, *_): # otherwise mock a reply with leasefile @mock.patch( "os.listdir", - side_effect=lambda x: [] - if x == "/var/lib/NetworkManager" - else ["some_file", "!@#$-eth0.lease", "some_other_file"], + side_effect=lambda x: ( + [] + if x == "/var/lib/NetworkManager" + else ["some_file", "!@#$-eth0.lease", "some_other_file"] + ), ) @mock.patch("os.path.getmtime", return_value=123.45) def test_fallback_when_nothing_found(self, *_): @@ -1319,6 +1287,51 @@ def test_dhcpcd_discovery_ib( ] ) + @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/dhcpcd") + @mock.patch("cloudinit.net.dhcp.os.killpg") + @mock.patch("cloudinit.net.dhcp.subp.subp") + @mock.patch("cloudinit.util.load_json") + @mock.patch("cloudinit.util.load_binary_file") + @mock.patch("cloudinit.util.write_file") + def test_dhcpcd_discovery_timeout( + self, + m_write_file, + m_load_file, + m_loadjson, + m_subp, + m_remove, + m_which, + ): + """Verify dhcpcd timeout results in NoDHCPLeaseError exception.""" + m_subp.side_effect = [ + SubpResult("a=b", ""), + subprocess.TimeoutExpired( + "/sbin/dhcpcd", timeout=6, output="testout", stderr="testerr" + ), + ] 
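+        # The first mocked subp call brings the link up; the second (the
+        # dhcpcd invocation itself) raises TimeoutExpired, which
+        # dhcp_discovery is expected to translate into NoDHCPLeaseError.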
+        with pytest.raises(NoDHCPLeaseError):
+            Dhcpcd().dhcp_discovery("eth0", distro=MockDistro())
+
+        m_subp.assert_has_calls(
+            [
+                mock.call(
+                    ["ip", "link", "set", "dev", "eth0", "up"],
+                ),
+                mock.call(
+                    [
+                        "/sbin/dhcpcd",
+                        "--ipv4only",
+                        "--waitip",
+                        "--persistent",
+                        "--noarp",
+                        "--script=/bin/true",
+                        "eth0",
+                    ],
+                    timeout=Dhcpcd.timeout,
+                ),
+            ]
+        )
+
 
 class TestMaybePerformDhcpDiscovery:
     def test_none_and_missing_fallback(self):
diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py
index 60a44186efa..6131b1a16ca 100644
--- a/tests/unittests/net/test_init.py
+++ b/tests/unittests/net/test_init.py
@@ -1,4 +1,5 @@
 # This file is part of cloud-init. See LICENSE file for license information.
+# pylint: disable=attribute-defined-outside-init
 
 import copy
 import errno
@@ -17,49 +18,52 @@
 from cloudinit.net.ephemeral import EphemeralIPv4Network, EphemeralIPv6Network
 from cloudinit.subp import ProcessExecutionError
 from cloudinit.util import ensure_file, write_file
-from tests.unittests.helpers import CiTestCase, ResponsesTestCase
+from tests.unittests.helpers import (
+    CiTestCase,
+    ResponsesTestCase,
+    random_string,
+)
 from tests.unittests.util import MockDistro
 
 
-class TestSysDevPath(CiTestCase):
+class TestSysDevPath:
     def test_sys_dev_path(self):
         """sys_dev_path returns a path under SYS_CLASS_NET for a device."""
         dev = "something"
         path = "attribute"
-        expected = net.SYS_CLASS_NET + dev + "/" + path
-        self.assertEqual(expected, net.sys_dev_path(dev, path))
+        expected = net.get_sys_class_path() + dev + "/" + path
+        assert expected == net.sys_dev_path(dev, path)
 
     def test_sys_dev_path_without_path(self):
         """When path param isn't provided it defaults to empty string."""
         dev = "something"
-        expected = net.SYS_CLASS_NET + dev + "/"
-        self.assertEqual(expected, net.sys_dev_path(dev))
-
+        expected = net.get_sys_class_path() + dev + "/"
+        assert expected == net.sys_dev_path(dev)
 
-class TestReadSysNet(CiTestCase):
-    with_logs = True
 
-    def setUp(self):
-        super(TestReadSysNet, self).setUp()
-        sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
-        self.m_sys_path = sys_mock.start()
-        self.sysdir = self.tmp_dir() + "/"
-        self.m_sys_path.return_value = self.sysdir
-        self.addCleanup(sys_mock.stop)
+class TestReadSysNet:
+    @pytest.fixture(autouse=True)
+    def setup(self, tmpdir_factory):
+        # We mock individual numbered tmpdirs here because these tests write
+        # to the sysfs directory and stale test artifacts break later tests.
+        mock_sysfs = f"{tmpdir_factory.mktemp('sysfs', numbered=True)}/"
+        with mock.patch(
+            "cloudinit.net.get_sys_class_path", return_value=mock_sysfs
+        ):
+            self.sysdir = mock_sysfs
+            yield
 
     def test_read_sys_net_strips_contents_of_sys_path(self):
         """read_sys_net strips whitespace from the contents of a sys file."""
         content = "some stuff with trailing whitespace\t\r\n"
         write_file(os.path.join(self.sysdir, "dev", "attr"), content)
-        self.assertEqual(content.strip(), net.read_sys_net("dev", "attr"))
+        assert content.strip() == net.read_sys_net("dev", "attr")
 
     def test_read_sys_net_reraises_oserror(self):
         """read_sys_net raises OSError/IOError when file doesn't exist."""
         # Non-specific Exception because versions of python OSError vs IOError.
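+        # Matching on the message keeps this assertion version-agnostic.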
- with self.assertRaises(Exception) as context_manager: # noqa: H202 + with pytest.raises(Exception, match="No such file or directory"): net.read_sys_net("dev", "attr") - error = context_manager.exception - self.assertIn("No such file or directory", str(error)) def test_read_sys_net_handles_error_with_on_enoent(self): """read_sys_net handles OSError/IOError with on_enoent if provided.""" @@ -70,30 +74,27 @@ def on_enoent(e): net.read_sys_net("dev", "attr", on_enoent=on_enoent) error = handled_errors[0] - self.assertIsInstance(error, Exception) - self.assertIn("No such file or directory", str(error)) + assert isinstance(error, Exception) + assert "No such file or directory" in str(error) def test_read_sys_net_translates_content(self): """read_sys_net translates content when translate dict is provided.""" content = "you're welcome\n" write_file(os.path.join(self.sysdir, "dev", "attr"), content) translate = {"you're welcome": "de nada"} - self.assertEqual( - "de nada", net.read_sys_net("dev", "attr", translate=translate) + assert "de nada" == net.read_sys_net( + "dev", "attr", translate=translate ) - def test_read_sys_net_errors_on_translation_failures(self): + def test_read_sys_net_errors_on_translation_failures(self, caplog): """read_sys_net raises a KeyError and logs details on failure.""" content = "you're welcome\n" write_file(os.path.join(self.sysdir, "dev", "attr"), content) - with self.assertRaises(KeyError) as context_manager: + with pytest.raises(KeyError, match='"you\'re welcome"'): net.read_sys_net("dev", "attr", translate={}) - error = context_manager.exception - self.assertEqual('"you\'re welcome"', str(error)) - self.assertIn( + assert ( "Found unexpected (not translatable) value 'you're welcome' in " - "'{0}dev/attr".format(self.sysdir), - self.logs.getvalue(), + "'{0}dev/attr".format(self.sysdir) in caplog.text ) def test_read_sys_net_handles_handles_with_onkeyerror(self): @@ -107,63 +108,63 @@ def on_keyerror(e): net.read_sys_net("dev", "attr", translate={}, on_keyerror=on_keyerror) error = handled_errors[0] - self.assertIsInstance(error, KeyError) - self.assertEqual('"you\'re welcome"', str(error)) + assert isinstance(error, KeyError) + assert '"you\'re welcome"' == str(error) def test_read_sys_net_safe_false_on_translate_failure(self): """read_sys_net_safe returns False on translation failures.""" content = "you're welcome\n" write_file(os.path.join(self.sysdir, "dev", "attr"), content) - self.assertFalse(net.read_sys_net_safe("dev", "attr", translate={})) + assert not net.read_sys_net_safe("dev", "attr", translate={}) def test_read_sys_net_safe_returns_false_on_noent_failure(self): """read_sys_net_safe returns False on file not found failures.""" - self.assertFalse(net.read_sys_net_safe("dev", "attr")) + assert not net.read_sys_net_safe("dev", "attr") def test_read_sys_net_int_returns_none_on_error(self): """read_sys_net_safe returns None on failures.""" - self.assertFalse(net.read_sys_net_int("dev", "attr")) + assert not net.read_sys_net_int("dev", "attr") def test_read_sys_net_int_returns_none_on_valueerror(self): """read_sys_net_safe returns None when content is not an int.""" write_file(os.path.join(self.sysdir, "dev", "attr"), "NOTINT\n") - self.assertFalse(net.read_sys_net_int("dev", "attr")) + assert not net.read_sys_net_int("dev", "attr") def test_read_sys_net_int_returns_integer_from_content(self): """read_sys_net_safe returns None on failures.""" write_file(os.path.join(self.sysdir, "dev", "attr"), "1\n") - self.assertEqual(1, net.read_sys_net_int("dev", 
"attr")) + assert 1 == net.read_sys_net_int("dev", "attr") def test_is_up_true(self): """is_up is True if sys/net/devname/operstate is 'up' or 'unknown'.""" for state in ["up", "unknown"]: write_file(os.path.join(self.sysdir, "eth0", "operstate"), state) - self.assertTrue(net.is_up("eth0")) + assert net.is_up("eth0") def test_is_up_false(self): """is_up is False if sys/net/devname/operstate is 'down' or invalid.""" for state in ["down", "incomprehensible"]: write_file(os.path.join(self.sysdir, "eth0", "operstate"), state) - self.assertFalse(net.is_up("eth0")) + assert not net.is_up("eth0") def test_is_bridge(self): """is_bridge is True when /sys/net/devname/bridge exists.""" - self.assertFalse(net.is_bridge("eth0")) + assert not net.is_bridge("eth0") ensure_file(os.path.join(self.sysdir, "eth0", "bridge")) - self.assertTrue(net.is_bridge("eth0")) + assert net.is_bridge("eth0") def test_is_bond(self): """is_bond is True when /sys/net/devname/bonding exists.""" - self.assertFalse(net.is_bond("eth0")) + assert not net.is_bond("eth0") ensure_file(os.path.join(self.sysdir, "eth0", "bonding")) - self.assertTrue(net.is_bond("eth0")) + assert net.is_bond("eth0") def test_get_master(self): """get_master returns the path when /sys/net/devname/master exists.""" - self.assertIsNone(net.get_master("enP1s1")) + assert net.get_master("enP1s1") is None master_path = os.path.join(self.sysdir, "enP1s1", "master") ensure_file(master_path) - self.assertEqual(master_path, net.get_master("enP1s1")) + assert master_path == net.get_master("enP1s1") def test_master_is_bridge_or_bond(self): bridge_mac = "aa:bb:cc:aa:bb:cc" @@ -173,8 +174,8 @@ def test_master_is_bridge_or_bond(self): write_file(os.path.join(self.sysdir, "eth1", "address"), bridge_mac) write_file(os.path.join(self.sysdir, "eth2", "address"), bond_mac) - self.assertFalse(net.master_is_bridge_or_bond("eth1")) - self.assertFalse(net.master_is_bridge_or_bond("eth2")) + assert not net.master_is_bridge_or_bond("eth1") + assert not net.master_is_bridge_or_bond("eth2") # masters without bridge/bonding => False write_file(os.path.join(self.sysdir, "br0", "address"), bridge_mac) @@ -183,15 +184,15 @@ def test_master_is_bridge_or_bond(self): os.symlink("../br0", os.path.join(self.sysdir, "eth1", "master")) os.symlink("../bond0", os.path.join(self.sysdir, "eth2", "master")) - self.assertFalse(net.master_is_bridge_or_bond("eth1")) - self.assertFalse(net.master_is_bridge_or_bond("eth2")) + assert not net.master_is_bridge_or_bond("eth1") + assert not net.master_is_bridge_or_bond("eth2") # masters with bridge/bonding => True write_file(os.path.join(self.sysdir, "br0", "bridge"), "") write_file(os.path.join(self.sysdir, "bond0", "bonding"), "") - self.assertTrue(net.master_is_bridge_or_bond("eth1")) - self.assertTrue(net.master_is_bridge_or_bond("eth2")) + assert net.master_is_bridge_or_bond("eth1") + assert net.master_is_bridge_or_bond("eth2") def test_master_is_openvswitch(self): ovs_mac = "bb:cc:aa:bb:cc:aa" @@ -199,7 +200,7 @@ def test_master_is_openvswitch(self): # No master => False write_file(os.path.join(self.sysdir, "eth1", "address"), ovs_mac) - self.assertFalse(net.master_is_bridge_or_bond("eth1")) + assert not net.master_is_bridge_or_bond("eth1") # masters without ovs-system => False write_file(os.path.join(self.sysdir, "ovs-system", "address"), ovs_mac) @@ -208,7 +209,7 @@ def test_master_is_openvswitch(self): "../ovs-system", os.path.join(self.sysdir, "eth1", "master") ) - self.assertFalse(net.master_is_openvswitch("eth1")) + assert not 
net.master_is_openvswitch("eth1") # masters with ovs-system => True os.symlink( @@ -216,15 +217,15 @@ def test_master_is_openvswitch(self): os.path.join(self.sysdir, "eth1", "upper_ovs-system"), ) - self.assertTrue(net.master_is_openvswitch("eth1")) + assert net.master_is_openvswitch("eth1") def test_is_vlan(self): """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan.""" ensure_file(os.path.join(self.sysdir, "eth0", "uevent")) - self.assertFalse(net.is_vlan("eth0")) + assert not net.is_vlan("eth0") content = "junk\nDEVTYPE=vlan\njunk\n" write_file(os.path.join(self.sysdir, "eth0", "uevent"), content) - self.assertTrue(net.is_vlan("eth0")) + assert net.is_vlan("eth0") class TestGenerateFallbackConfig(CiTestCase): @@ -1457,134 +1458,121 @@ def test_runtime_error_on_unknown_netcfg_version(self): net.extract_physdevs({"version": 3, "awesome_config": []}) -class TestNetFailOver(CiTestCase): - def setUp(self): - super(TestNetFailOver, self).setUp() - self.add_patch("cloudinit.net.util", "m_util") - self.add_patch("cloudinit.net.read_sys_net", "m_read_sys_net") - self.add_patch("cloudinit.net.device_driver", "m_device_driver") +class TestNetFailOver: + @pytest.fixture(autouse=True) + def setup(self, mocker): + mocker.patch("cloudinit.net.util") + self.device_driver = mocker.patch("cloudinit.net.device_driver") + self.read_sys_net = mocker.patch("cloudinit.net.read_sys_net") def test_get_dev_features(self): - devname = self.random_string() - features = self.random_string() - self.m_read_sys_net.return_value = features + devname = random_string() + features = random_string() + self.read_sys_net.return_value = features - self.assertEqual(features, net.get_dev_features(devname)) - self.assertEqual(1, self.m_read_sys_net.call_count) - self.assertEqual( - mock.call(devname, "device/features"), - self.m_read_sys_net.call_args_list[0], - ) + assert features == net.get_dev_features(devname) + assert 1 == self.read_sys_net.call_count + self.read_sys_net.assert_called_once_with(devname, "device/features") def test_get_dev_features_none_returns_empty_string(self): - devname = self.random_string() - self.m_read_sys_net.side_effect = Exception("error") - self.assertEqual("", net.get_dev_features(devname)) - self.assertEqual(1, self.m_read_sys_net.call_count) - self.assertEqual( - mock.call(devname, "device/features"), - self.m_read_sys_net.call_args_list[0], - ) + devname = random_string() + self.read_sys_net.side_effect = Exception("error") + assert "" == net.get_dev_features(devname) + assert 1 == self.read_sys_net.call_count + self.read_sys_net.assert_called_once_with(devname, "device/features") @mock.patch("cloudinit.net.get_dev_features") def test_has_netfail_standby_feature(self, m_dev_features): - devname = self.random_string() + devname = random_string() standby_features = ("0" * 62) + "1" + "0" m_dev_features.return_value = standby_features - self.assertTrue(net.has_netfail_standby_feature(devname)) + assert net.has_netfail_standby_feature(devname) @mock.patch("cloudinit.net.get_dev_features") def test_has_netfail_standby_feature_short_is_false(self, m_dev_features): - devname = self.random_string() - standby_features = self.random_string() + devname = random_string() + standby_features = random_string() m_dev_features.return_value = standby_features - self.assertFalse(net.has_netfail_standby_feature(devname)) + assert not net.has_netfail_standby_feature(devname) @mock.patch("cloudinit.net.get_dev_features") def test_has_netfail_standby_feature_not_present_is_false( self, m_dev_features ): 
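+        # All 64 feature bits are zero, so the standby bit (62) is unset
+        # and the check is expected to return False.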
- devname = self.random_string() + devname = random_string() standby_features = "0" * 64 m_dev_features.return_value = standby_features - self.assertFalse(net.has_netfail_standby_feature(devname)) + assert not net.has_netfail_standby_feature(devname) @mock.patch("cloudinit.net.get_dev_features") def test_has_netfail_standby_feature_no_features_is_false( self, m_dev_features ): - devname = self.random_string() + devname = random_string() standby_features = None m_dev_features.return_value = standby_features - self.assertFalse(net.has_netfail_standby_feature(devname)) + assert not net.has_netfail_standby_feature(devname) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_master(self, m_exists, m_standby): - devname = self.random_string() + devname = random_string() driver = "virtio_net" m_exists.return_value = False # no master sysfs attr m_standby.return_value = True # has standby feature flag - self.assertTrue(net.is_netfail_master(devname, driver)) + assert net.is_netfail_master(devname, driver) @mock.patch("cloudinit.net.sys_dev_path") def test_is_netfail_master_checks_master_attr(self, m_sysdev): - devname = self.random_string() + devname = random_string() driver = "virtio_net" - m_sysdev.return_value = self.random_string() - self.assertFalse(net.is_netfail_master(devname, driver)) - self.assertEqual(1, m_sysdev.call_count) - self.assertEqual( - mock.call(devname, path="master"), m_sysdev.call_args_list[0] - ) + m_sysdev.return_value = random_string() + assert not net.is_netfail_master(devname, driver) + assert 1 == m_sysdev.call_count + m_sysdev.assert_called_once_with(devname, path="master") @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_master_wrong_driver(self, m_exists, m_standby): - devname = self.random_string() - driver = self.random_string() - self.assertFalse(net.is_netfail_master(devname, driver)) + devname = random_string() + driver = random_string() + assert not net.is_netfail_master(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_master_has_master_attr(self, m_exists, m_standby): - devname = self.random_string() + devname = random_string() driver = "virtio_net" m_exists.return_value = True # has master sysfs attr - self.assertFalse(net.is_netfail_master(devname, driver)) + assert not net.is_netfail_master(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_master_no_standby_feat(self, m_exists, m_standby): - devname = self.random_string() + devname = random_string() driver = "virtio_net" m_exists.return_value = False # no master sysfs attr m_standby.return_value = False # no standby feature flag - self.assertFalse(net.is_netfail_master(devname, driver)) + assert not net.is_netfail_master(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") @mock.patch("cloudinit.net.sys_dev_path") def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby): - devname = self.random_string() - driver = self.random_string() # device not virtio_net - master_devname = self.random_string() + devname = random_string() + driver = random_string() # device not virtio_net + master_devname = random_string() m_sysdev.return_value = "%s/%s" % ( - self.random_string(), + random_string(), master_devname, ) 
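+        # Only the basename of the mocked master path matters: it is what
+        # the driver and standby checks are asserted against below.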
m_exists.return_value = True # has master sysfs attr - self.m_device_driver.return_value = "virtio_net" # master virtio_net + self.device_driver.return_value = "virtio_net" # master virtio_net m_standby.return_value = True # has standby feature flag - self.assertTrue(net.is_netfail_primary(devname, driver)) - self.assertEqual(1, self.m_device_driver.call_count) - self.assertEqual( - mock.call(master_devname), self.m_device_driver.call_args_list[0] - ) - self.assertEqual(1, m_standby.call_count) - self.assertEqual( - mock.call(master_devname), m_standby.call_args_list[0] - ) + assert net.is_netfail_primary(devname, driver) + self.device_driver.assert_called_once_with(master_devname) + assert 1 == m_standby.call_count + m_standby.assert_called_once_with(master_devname) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") @@ -1592,18 +1580,18 @@ def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby): def test_is_netfail_primary_wrong_driver( self, m_sysdev, m_exists, m_standby ): - devname = self.random_string() + devname = random_string() driver = "virtio_net" - self.assertFalse(net.is_netfail_primary(devname, driver)) + assert not net.is_netfail_primary(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") @mock.patch("cloudinit.net.sys_dev_path") def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby): - devname = self.random_string() - driver = self.random_string() # device not virtio_net + devname = random_string() + driver = random_string() # device not virtio_net m_exists.return_value = False # no master sysfs attr - self.assertFalse(net.is_netfail_primary(devname, driver)) + assert not net.is_netfail_primary(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") @@ -1611,16 +1599,16 @@ def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby): def test_is_netfail_primary_bad_master( self, m_sysdev, m_exists, m_standby ): - devname = self.random_string() - driver = self.random_string() # device not virtio_net - master_devname = self.random_string() + devname = random_string() + driver = random_string() # device not virtio_net + master_devname = random_string() m_sysdev.return_value = "%s/%s" % ( - self.random_string(), + random_string(), master_devname, ) m_exists.return_value = True # has master sysfs attr - self.m_device_driver.return_value = "XXXX" # master not virtio_net - self.assertFalse(net.is_netfail_primary(devname, driver)) + self.device_driver.return_value = "XXXX" # master not virtio_net + assert not net.is_netfail_primary(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") @@ -1628,77 +1616,77 @@ def test_is_netfail_primary_bad_master( def test_is_netfail_primary_no_standby( self, m_sysdev, m_exists, m_standby ): - devname = self.random_string() - driver = self.random_string() # device not virtio_net - master_devname = self.random_string() + devname = random_string() + driver = random_string() # device not virtio_net + master_devname = random_string() m_sysdev.return_value = "%s/%s" % ( - self.random_string(), + random_string(), master_devname, ) m_exists.return_value = True # has master sysfs attr - self.m_device_driver.return_value = "virtio_net" # master virtio_net + self.device_driver.return_value = "virtio_net" # master virtio_net m_standby.return_value = False # master has no standby 
feature flag - self.assertFalse(net.is_netfail_primary(devname, driver)) + assert not net.is_netfail_primary(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_standby(self, m_exists, m_standby): - devname = self.random_string() + devname = random_string() driver = "virtio_net" m_exists.return_value = True # has master sysfs attr m_standby.return_value = True # has standby feature flag - self.assertTrue(net.is_netfail_standby(devname, driver)) + assert net.is_netfail_standby(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_standby_wrong_driver(self, m_exists, m_standby): - devname = self.random_string() - driver = self.random_string() - self.assertFalse(net.is_netfail_standby(devname, driver)) + devname = random_string() + driver = random_string() + assert not net.is_netfail_standby(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_standby_no_master(self, m_exists, m_standby): - devname = self.random_string() + devname = random_string() driver = "virtio_net" m_exists.return_value = False # has master sysfs attr - self.assertFalse(net.is_netfail_standby(devname, driver)) + assert not net.is_netfail_standby(devname, driver) @mock.patch("cloudinit.net.has_netfail_standby_feature") @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_standby_no_standby_feature(self, m_exists, m_standby): - devname = self.random_string() + devname = random_string() driver = "virtio_net" m_exists.return_value = True # has master sysfs attr m_standby.return_value = False # has standby feature flag - self.assertFalse(net.is_netfail_standby(devname, driver)) + assert not net.is_netfail_standby(devname, driver) @mock.patch("cloudinit.net.is_netfail_standby") @mock.patch("cloudinit.net.is_netfail_primary") def test_is_netfailover_primary(self, m_primary, m_standby): - devname = self.random_string() - driver = self.random_string() + devname = random_string() + driver = random_string() m_primary.return_value = True m_standby.return_value = False - self.assertTrue(net.is_netfailover(devname, driver)) + assert net.is_netfailover(devname, driver) @mock.patch("cloudinit.net.is_netfail_standby") @mock.patch("cloudinit.net.is_netfail_primary") def test_is_netfailover_standby(self, m_primary, m_standby): - devname = self.random_string() - driver = self.random_string() + devname = random_string() + driver = random_string() m_primary.return_value = False m_standby.return_value = True - self.assertTrue(net.is_netfailover(devname, driver)) + assert net.is_netfailover(devname, driver) @mock.patch("cloudinit.net.is_netfail_standby") @mock.patch("cloudinit.net.is_netfail_primary") def test_is_netfailover_returns_false(self, m_primary, m_standby): - devname = self.random_string() - driver = self.random_string() + devname = random_string() + driver = random_string() m_primary.return_value = False m_standby.return_value = False - self.assertFalse(net.is_netfailover(devname, driver)) + assert not net.is_netfailover(devname, driver) class TestOpenvswitchIsInstalled: @@ -1904,3 +1892,29 @@ class TestIsIpNetwork: ) def test_is_ip_network(self, func, arg, expected_return): assert func(arg) == expected_return + + +class TestIsIpInSubnet: + """Tests for net.is_ip_in_subnet().""" + + @pytest.mark.parametrize( + "func,ip,subnet,expected_return", + ( + (net.is_ip_in_subnet, 
"192.168.1.1", "2001:67c::1/64", False), + (net.is_ip_in_subnet, "2001:67c::1", "192.168.1.1/24", False), + (net.is_ip_in_subnet, "192.168.1.1", "192.168.1.1/24", True), + (net.is_ip_in_subnet, "192.168.1.1", "192.168.1.1/32", True), + (net.is_ip_in_subnet, "192.168.1.2", "192.168.1.1/24", True), + (net.is_ip_in_subnet, "192.168.1.2", "192.168.1.1/32", False), + (net.is_ip_in_subnet, "192.168.2.2", "192.168.1.1/24", False), + (net.is_ip_in_subnet, "192.168.2.2", "192.168.1.1/32", False), + (net.is_ip_in_subnet, "2001:67c1::1", "2001:67c1::1/64", True), + (net.is_ip_in_subnet, "2001:67c1::1", "2001:67c1::1/128", True), + (net.is_ip_in_subnet, "2001:67c1::2", "2001:67c1::1/64", True), + (net.is_ip_in_subnet, "2001:67c1::2", "2001:67c1::1/128", False), + (net.is_ip_in_subnet, "2002:67c1::1", "2001:67c1::1/8", True), + (net.is_ip_in_subnet, "2002:67c1::1", "2001:67c1::1/16", False), + ), + ) + def test_is_ip_in_subnet(self, func, ip, subnet, expected_return): + assert func(ip, subnet) == expected_return diff --git a/tests/unittests/net/test_networkd.py b/tests/unittests/net/test_networkd.py index bb781b9835e..4a76e96ec33 100644 --- a/tests/unittests/net/test_networkd.py +++ b/tests/unittests/net/test_networkd.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. +from configparser import ConfigParser from string import Template from unittest import mock @@ -195,6 +196,7 @@ [Route] Gateway=10.0.0.1 +GatewayOnLink=yes [Route] Gateway=2a01:4f8:10a:19d2::2 @@ -231,6 +233,7 @@ [Route] Gateway=192.168.254.254 +GatewayOnLink=yes [Route] Gateway=fec0::ffff @@ -243,6 +246,204 @@ """ +V1_CONFIG_MULTI_SUBNETS_NOT_ONLINK = """ +network: + version: 1 + config: + - type: physical + name: eth0 + mac_address: 'ae:98:25:fa:36:9e' + subnets: + - type: static + address: '10.0.0.2' + netmask: '255.255.255.0' + gateway: '10.0.0.1' + - type: static6 + address: '2a01:4f8:10a:19d2::4/64' + gateway: '2a01:4f8:10a:19d2::2' + - type: nameserver + address: + - '100.100.100.100' + search: + - 'rgrunbla.github.beta.tailscale.net' +""" + +V1_CONFIG_MULTI_SUBNETS_NOT_ONLINK_RENDERED = """\ +[Address] +Address=10.0.0.2/24 + +[Address] +Address=2a01:4f8:10a:19d2::4/64 + +[Match] +MACAddress=ae:98:25:fa:36:9e +Name=eth0 + +[Network] +DHCP=no +DNS=100.100.100.100 +Domains=rgrunbla.github.beta.tailscale.net + +[Route] +Gateway=10.0.0.1 + +[Route] +Gateway=2a01:4f8:10a:19d2::2 + +""" + +V2_CONFIG_MULTI_SUBNETS_NOT_ONLINK = """ +network: + version: 2 + ethernets: + eth0: + addresses: + - 192.168.1.1/24 + - fec0::1/64 + gateway4: 192.168.1.254 + gateway6: "fec0::ffff" + routes: + - to: 169.254.1.1/32 + - to: "fe80::1/128" +""" + +V2_CONFIG_MULTI_SUBNETS_NOT_ONLINK_RENDERED = """\ +[Address] +Address=192.168.1.1/24 + +[Address] +Address=fec0::1/64 + +[Match] +Name=eth0 + +[Network] +DHCP=no + +[Route] +Gateway=192.168.1.254 + +[Route] +Gateway=fec0::ffff + +[Route] +Destination=169.254.1.1/32 + +[Route] +Destination=fe80::1/128 + +""" + +V1_CONFIG_MULTI_SUBNETS_ONLINK = """ +network: + version: 1 + config: + - type: physical + name: eth0 + mac_address: 'ae:98:25:fa:36:9e' + subnets: + - type: static + address: '10.0.0.2' + netmask: '255.255.255.0' + gateway: '192.168.0.1' + - type: static6 + address: '2a01:4f8:10a:19d2::4/64' + gateway: '2000:4f8:10a:19d2::2' + - type: nameserver + address: + - '100.100.100.100' + search: + - 'rgrunbla.github.beta.tailscale.net' +""" + +V1_CONFIG_MULTI_SUBNETS_ONLINK_RENDERED = """\ +[Address] +Address=10.0.0.2/24 + +[Address] +Address=2a01:4f8:10a:19d2::4/64 
+ +[Match] +MACAddress=ae:98:25:fa:36:9e +Name=eth0 + +[Network] +DHCP=no +DNS=100.100.100.100 +Domains=rgrunbla.github.beta.tailscale.net + +[Route] +Gateway=192.168.0.1 +GatewayOnLink=yes + +[Route] +Gateway=2000:4f8:10a:19d2::2 +GatewayOnLink=yes + +""" + +V2_CONFIG_MULTI_SUBNETS_ONLINK = """ +network: + version: 2 + ethernets: + eth0: + addresses: + - 192.168.1.1/32 + - fec0::1/128 + gateway4: 192.168.254.254 + gateway6: "fec0::ffff" + routes: + - to: 169.254.1.1/32 + - to: "fe80::1/128" +""" + +V2_CONFIG_MULTI_SUBNETS_ONLINK_RENDERED = """\ +[Address] +Address=192.168.1.1/32 + +[Address] +Address=fec0::1/128 + +[Match] +Name=eth0 + +[Network] +DHCP=no + +[Route] +Gateway=192.168.254.254 +GatewayOnLink=yes + +[Route] +Gateway=fec0::ffff +GatewayOnLink=yes + +[Route] +Destination=169.254.1.1/32 + +[Route] +Destination=fe80::1/128 + +""" + +V1_CONFIG_ACCEPT_RA_YAML = """\ +network: + version: 1 + config: + - type: physical + name: eth0 + mac_address: "00:11:22:33:44:55" +""" + +V2_CONFIG_ACCEPT_RA_YAML = """\ +network: + version: 2 + ethernets: + eth0: + match: + macaddress: "00:11:22:33:44:55" +""" + class TestNetworkdRenderState: def _parse_network_state_from_config(self, config): @@ -364,3 +565,127 @@ def test_networkd_render_v2_multi_subnets(self): rendered_content = renderer._render_content(ns) assert rendered_content["eth0"] == V2_CONFIG_MULTI_SUBNETS_RENDERED + + def test_networkd_render_v1_multi_subnets_not_onlink(self): + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + ns = self._parse_network_state_from_config( + V1_CONFIG_MULTI_SUBNETS_NOT_ONLINK + ) + renderer = networkd.Renderer() + rendered_content = renderer._render_content(ns) + + assert ( + rendered_content["eth0"] + == V1_CONFIG_MULTI_SUBNETS_NOT_ONLINK_RENDERED + ) + + def test_networkd_render_v2_multi_subnets_not_onlink(self): + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + ns = self._parse_network_state_from_config( + V2_CONFIG_MULTI_SUBNETS_NOT_ONLINK + ) + renderer = networkd.Renderer() + rendered_content = renderer._render_content(ns) + + assert ( + rendered_content["eth0"] + == V2_CONFIG_MULTI_SUBNETS_NOT_ONLINK_RENDERED + ) + + def test_networkd_render_v1_multi_subnets_onlink(self): + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + ns = self._parse_network_state_from_config( + V1_CONFIG_MULTI_SUBNETS_ONLINK + ) + renderer = networkd.Renderer() + rendered_content = renderer._render_content(ns) + + assert ( + rendered_content["eth0"] == V1_CONFIG_MULTI_SUBNETS_ONLINK_RENDERED + ) + + def test_networkd_render_v2_multi_subnets_onlink(self): + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + ns = self._parse_network_state_from_config( + V2_CONFIG_MULTI_SUBNETS_ONLINK + ) + renderer = networkd.Renderer() + rendered_content = renderer._render_content(ns) + + assert ( + rendered_content["eth0"] == V2_CONFIG_MULTI_SUBNETS_ONLINK_RENDERED + ) + + @pytest.mark.parametrize("version", ["v1", "v2"]) + @pytest.mark.parametrize( + "address", ["4", "6", "10.0.0.10/24", "2001:db8::1/64"] + ) + @pytest.mark.parametrize("accept_ra", [True, False, None]) + def test_networkd_render_accept_ra(self, version, address, accept_ra): + with mock.patch("cloudinit.net.get_interfaces_by_mac"): + # network-config v1 inputs + if version == "v1": + config = safeyaml.load(V1_CONFIG_ACCEPT_RA_YAML) + if address == "4" or address == "6": + config["network"]["config"][0]["subnets"] = [ + {"type": f"dhcp{address}"} + ] + else: + config["network"]["config"][0]["subnets"] = [ + {"type": "static", "address": 
address} + ] + if accept_ra is not None: + config["network"]["config"][0]["accept-ra"] = accept_ra + # network-config v2 inputs + elif version == "v2": + config = safeyaml.load(V2_CONFIG_ACCEPT_RA_YAML) + if address == "4" or address == "6": + config["network"]["ethernets"]["eth0"][ + f"dhcp{address}" + ] = True + else: + config["network"]["ethernets"]["eth0"]["addresses"] = [ + address + ] + if isinstance(accept_ra, bool): + config["network"]["ethernets"]["eth0"][ + "accept-ra" + ] = accept_ra + else: + raise ValueError(f"Unknown network-config version: {version}") + config = safeyaml.dumps(config) + + # render + ns = self._parse_network_state_from_config(config) + renderer = networkd.Renderer() + rendered_content = renderer._render_content(ns) + + # dump the input/output for debugging test failures + print(config) + print(rendered_content["eth0"]) + + # validate the rendered content + c = ConfigParser() + c.read_string(rendered_content["eth0"]) + + if address in ["4", "6"]: + expected_dhcp = f"ipv{address}" + expected_address = None + else: + expected_dhcp = False + expected_address = address + try: + got_dhcp = c.getboolean("Network", "DHCP") + except ValueError: + got_dhcp = c.get("Network", "DHCP", fallback=None) + got_address = c.get("Address", "Address", fallback=None) + got_accept_ra = c.getboolean("Network", "IPv6AcceptRA", fallback=None) + assert ( + got_dhcp == expected_dhcp + ), f"DHCP={got_dhcp}, expected {expected_dhcp}" + assert ( + got_address == expected_address + ), f"Address={got_address}, expected {expected_address}" + assert ( + got_accept_ra == accept_ra + ), f"IPv6AcceptRA={got_accept_ra}, expected {accept_ra}" diff --git a/tests/unittests/runs/test_simple_run.py b/tests/unittests/runs/test_simple_run.py index eec2db00bdf..7cb5a28e71f 100644 --- a/tests/unittests/runs/test_simple_run.py +++ b/tests/unittests/runs/test_simple_run.py @@ -2,6 +2,7 @@ import copy import os +from unittest import mock from cloudinit import atomic_helper, safeyaml, stages, util from cloudinit.config.modules import Modules @@ -45,6 +46,15 @@ def setUp(self): self.patchOS(self.new_root) self.patchUtils(self.new_root) + self.m_doc = mock.patch( + "cloudinit.config.schema.get_meta_doc", return_value={} + ) + self.m_doc.start() + + def tearDown(self): + self.m_doc.stop() + super().tearDown() + def test_none_ds_populates_var_lib_cloud(self): """Init and run_section default behavior creates appropriate dirs.""" # Now start verifying whats created diff --git a/tests/unittests/sources/helpers/test_openstack.py b/tests/unittests/sources/helpers/test_openstack.py index ac8e2a35418..4d85ec3c61f 100644 --- a/tests/unittests/sources/helpers/test_openstack.py +++ b/tests/unittests/sources/helpers/test_openstack.py @@ -3,14 +3,13 @@ from unittest import mock from cloudinit.sources.helpers import openstack -from tests.unittests import helpers as test_helpers @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestConvertNetJson(test_helpers.CiTestCase): +class TestConvertNetJson: def test_phy_types(self): """Verify the different known physical types are handled.""" # network_data.json example from @@ -54,11 +53,8 @@ def test_phy_types(self): for t in openstack.KNOWN_PHYSICAL_TYPES: net_json["links"][0]["type"] = t - self.assertEqual( - expected, - openstack.convert_net_json( - network_json=net_json, known_macs=macs - ), + assert expected == openstack.convert_net_json( + network_json=net_json, known_macs=macs ) def test_subnet_dns(self): @@ -113,9 +109,6 @@ 
def test_subnet_dns(self): for t in openstack.KNOWN_PHYSICAL_TYPES: net_json["links"][0]["type"] = t - self.assertEqual( - expected, - openstack.convert_net_json( - network_json=net_json, known_macs=macs - ), + assert expected == openstack.convert_net_json( + network_json=net_json, known_macs=macs ) diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index 566af4ec9c5..7b6001c5061 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import copy import datetime diff --git a/tests/unittests/sources/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py index dcb1cc71ee4..0a69596584e 100644 --- a/tests/unittests/sources/test_azure_helper.py +++ b/tests/unittests/sources/test_azure_helper.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import os import re diff --git a/tests/unittests/sources/test_common.py b/tests/unittests/sources/test_common.py index 88d22b8dd08..5b3339fd89d 100644 --- a/tests/unittests/sources/test_common.py +++ b/tests/unittests/sources/test_common.py @@ -47,7 +47,7 @@ Hetzner.DataSourceHetzner, IBMCloud.DataSourceIBMCloud, LXD.DataSourceLXD, - MAAS.DataSourceMAAS, + MAAS.DataSourceMAASLocal, NoCloud.DataSourceNoCloud, OpenNebula.DataSourceOpenNebula, Oracle.DataSourceOracle, @@ -78,6 +78,7 @@ MAAS.DataSourceMAAS, NoCloud.DataSourceNoCloudNet, OpenStack.DataSourceOpenStack, + Oracle.DataSourceOracleNet, OVF.DataSourceOVFNet, UpCloud.DataSourceUpCloud, Akamai.DataSourceAkamai, diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py index 364dba6df68..e6e1aec9277 100644 --- a/tests/unittests/sources/test_ec2.py +++ b/tests/unittests/sources/test_ec2.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+# pylint: disable=attribute-defined-outside-init import copy import json @@ -172,6 +173,28 @@ "vpc-ipv4-cidr-blocks": "172.31.0.0/16", } +NIC2_MD_IPV4_IPV6_MULTI_IP = { + "device-number": "1", + "interface-id": "eni-043cdce36ded5e79f", + "ipv6s": [ + "2600:1f16:292:100:c187:593c:4349:136", + "2600:1f16:292:100:f153:12a3:c37c:11f9", + ], + "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", + "local-ipv4s": "172.31.47.221", + "mac": "0a:75:69:92:e2:16", + "owner-id": "329910648901", + "security-group-ids": "sg-0d68fef37d8cc9b77", + "security-groups": "launch-wizard-17", + "subnet-id": "subnet-9d7ba0d1", + "subnet-ipv4-cidr-block": "172.31.32.0/20", + "subnet-ipv6-cidr-blocks": "2600:1f16:292:100::/64", + "vpc-id": "vpc-a07f62c8", + "vpc-ipv4-cidr-block": "172.31.0.0/16", + "vpc-ipv4-cidr-blocks": "172.31.0.0/16", + "vpc-ipv6-cidr-blocks": "2600:1f16:292:100::/56", +} + SECONDARY_IP_METADATA_2018_09_24 = { "ami-id": "ami-0986c2ac728528ac2", "ami-launch-index": "0", @@ -693,22 +716,6 @@ def test_aws_inaccessible_imds_service_fails_with_retries(self): with mock.patch("cloudinit.url_helper.readurl") as m_readurl: # yikes, this endpoint needs help m_readurl.side_effect = ( - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, - conn_error, conn_error, conn_error, mock_success, @@ -717,7 +724,7 @@ def test_aws_inaccessible_imds_service_fails_with_retries(self): self.assertTrue(ds.wait_for_metadata_service()) # Just one /latest/api/token request - self.assertEqual(19, len(m_readurl.call_args_list)) + self.assertEqual(3, len(m_readurl.call_args_list)) for readurl_call in m_readurl.call_args_list: self.assertIn("latest/api/token", readurl_call[0][0]) @@ -1239,7 +1246,7 @@ def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self): ), ) - def test_convert_ec2_metadata_network_config_handles_multiple_nics(self): + def test_convert_ec2_metadata_network_config_multi_nics_ipv4(self): """DHCP route-metric increases on secondary NICs for IPv4 and IPv6. Source-routing configured for secondary NICs (routing-policy and extra routing table).""" @@ -1297,6 +1304,83 @@ def test_convert_ec2_metadata_network_config_handles_multiple_nics(self): ), ) + def test_convert_ec2_metadata_network_config_multi_nics_ipv4_ipv6_multi_ip( + self, + ): + """DHCP route-metric increases on secondary NICs for IPv4 and IPv6. 
+ Source-routing configured for secondary NICs (routing-policy and extra + routing table).""" + mac2 = "06:17:04:d7:26:08" + macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} + network_metadata_both = copy.deepcopy(self.network_metadata) + # Add 2nd nic info + network_metadata_both["interfaces"]["macs"][ + mac2 + ] = NIC2_MD_IPV4_IPV6_MULTI_IP + nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] + nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" + nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg + nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc + expected = { + "version": 2, + "ethernets": { + "eth9": { + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": True, + "match": {"macaddress": "06:17:04:d7:26:09"}, + "set-name": "eth9", + "dhcp6-overrides": {"route-metric": 100}, + }, + "eth10": { + "dhcp4": True, + "dhcp4-overrides": { + "route-metric": 200, + "use-routes": True, + }, + "dhcp6": True, + "match": {"macaddress": "06:17:04:d7:26:08"}, + "set-name": "eth10", + "routes": [ + # via DHCP gateway + {"to": "0.0.0.0/0", "via": "172.31.1.0", "table": 101}, + # to NIC2_MD["subnet-ipv4-cidr-block"] + {"to": "172.31.32.0/20", "table": 101}, + # to NIC2_MD["subnet-ipv6-cidr-blocks"] + {"to": "2600:1f16:292:100::/64", "table": 101}, + ], + "routing-policy": [ + # NIC2_MD["local-ipv4s"] + {"from": "172.31.47.221", "table": 101}, + { + "from": "2600:1f16:292:100:c187:593c:4349:136", + "table": 101, + }, + { + "from": "2600:1f16:292:100:f153:12a3:c37c:11f9", + "table": 101, + }, + ], + "dhcp6-overrides": { + "route-metric": 200, + "use-routes": True, + }, + "addresses": ["2600:1f16:292:100:f153:12a3:c37c:11f9/128"], + }, + }, + } + distro = mock.Mock() + distro.network_activator = activators.NetplanActivator + distro.dhcp_client.dhcp_discovery.return_value = { + "routers": "172.31.1.0" + } + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config( + network_metadata_both, distro, macs_to_nics + ), + ) + def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self): """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists.""" macs_to_nics = {self.mac1: "eth9"} diff --git a/tests/unittests/sources/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py index 850fbf88a86..bee486f4dd5 100644 --- a/tests/unittests/sources/test_ibmcloud.py +++ b/tests/unittests/sources/test_ibmcloud.py @@ -416,7 +416,6 @@ def test_get_data_processes_read_md(self): self.assertEqual({}, self.ds.metadata) self.assertEqual("ud", self.ds.userdata_raw) self.assertEqual("net", self.ds.network_json) - self.assertEqual("vd", self.ds.vendordata_pure) self.assertEqual("uuid", self.ds.system_uuid) self.assertEqual("ibmcloud", self.ds.cloud_name) self.assertEqual("ibmcloud", self.ds.platform_type) diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py index d617219e3ba..de8ded4a32f 100644 --- a/tests/unittests/sources/test_init.py +++ b/tests/unittests/sources/test_init.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+# pylint: disable=attribute-defined-outside-init import copy import inspect @@ -904,9 +905,8 @@ def test_clear_cached_attrs_noops_on_clean_cache(self): def test_clear_cached_attrs_skips_non_attr_class_attributes(self): """Skip any cached_attr_defaults which aren't class attributes.""" self.datasource._dirty_cache = True - self.datasource.clear_cached_attrs() - for attr in ("ec2_metadata", "network_json"): - self.assertFalse(hasattr(self.datasource, attr)) + self.datasource.clear_cached_attrs(attr_defaults=(("some", "value"),)) + self.assertFalse(hasattr(self.datasource, "some")) def test_clear_cached_attrs_of_custom_attrs(self): """Custom attr_values can be passed to clear_cached_attrs.""" diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py index efc24883893..7ec67b4577d 100644 --- a/tests/unittests/sources/test_lxd.py +++ b/tests/unittests/sources/test_lxd.py @@ -333,13 +333,13 @@ def test_network_config_when_unset(self, lxd_ds): assert NETWORK_V1 == lxd_ds.network_config assert LXD_V1_METADATA == lxd_ds._crawled_metadata + @mock.patch.object(lxd, "generate_network_config", return_value=NETWORK_V1) def test_network_config_crawled_metadata_no_network_config( - self, lxd_ds_no_network_config + self, m_generate, lxd_ds_no_network_config ): """network_config is correctly computed when _network_config is unset and _crawled_metadata does not contain network_config. """ - lxd.generate_network_config = mock.Mock(return_value=NETWORK_V1) assert UNSET == lxd_ds_no_network_config._crawled_metadata assert UNSET == lxd_ds_no_network_config._network_config assert None is lxd_ds_no_network_config.userdata_raw @@ -349,7 +349,7 @@ def test_network_config_crawled_metadata_no_network_config( LXD_V1_METADATA_NO_NETWORK_CONFIG == lxd_ds_no_network_config._crawled_metadata ) - assert 1 == lxd.generate_network_config.call_count + assert 1 == m_generate.call_count class TestIsPlatformViable: diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py index 97cc8c94e6a..38691f718eb 100644 --- a/tests/unittests/sources/test_openstack.py +++ b/tests/unittests/sources/test_openstack.py @@ -136,7 +136,7 @@ def get_request_callback(request): responses_mock.add_callback( responses.GET, - re.compile(r"http://169.254.169.254/.*"), + re.compile(r"http://(169.254.169.254|\[fe80::a9fe:a9fe\])/.*"), callback=get_request_callback, ) @@ -315,8 +315,6 @@ def test_datasource(self, m_dhcp): self.assertEqual(EC2_META, ds_os.ec2_metadata) self.assertEqual(USER_DATA, ds_os.userdata_raw) self.assertEqual(2, len(ds_os.files)) - self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) - self.assertEqual(VENDOR_DATA2, ds_os.vendordata2_pure) self.assertIsNone(ds_os.vendordata_raw) m_dhcp.assert_not_called() @@ -362,8 +360,6 @@ def test_local_datasource(self, m_dhcp, m_net): self.assertEqual(EC2_META, ds_os_local.ec2_metadata) self.assertEqual(USER_DATA, ds_os_local.userdata_raw) self.assertEqual(2, len(ds_os_local.files)) - self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure) - self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure) self.assertIsNone(ds_os_local.vendordata_raw) m_dhcp.assert_called_with(distro, "eth9", None) @@ -388,10 +384,10 @@ def test_bad_datasource_meta(self): found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) - self.assertIn( - "InvalidMetaDataException: Broken metadata address" - " http://169.254.169.25", + self.assertRegex( self.logs.getvalue(), + r"InvalidMetaDataException: Broken metadata address" + 
r" http://(169.254.169.254|\[fe80::a9fe:a9fe\])", ) def test_no_datasource(self): diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py index 0f0d9011894..459d24593b6 100644 --- a/tests/unittests/sources/test_oracle.py +++ b/tests/unittests/sources/test_oracle.py @@ -4,6 +4,7 @@ import copy import json import logging +from itertools import count from unittest import mock import pytest @@ -14,7 +15,6 @@ from cloudinit.sources.DataSourceOracle import OpcMetadata from cloudinit.url_helper import UrlError from tests.unittests import helpers as test_helpers -from tests.unittests.helpers import does_not_raise DS_PATH = "cloudinit.sources.DataSourceOracle" @@ -730,84 +730,70 @@ def test_metadata_returned( assert instance_data == metadata.instance_data assert vnics_data == metadata.vnics_data - # No need to actually wait between retries in the tests @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) - @pytest.mark.parametrize( - "v2_failure_count,v1_failure_count,expected_body,expectation", - [ - (1, 0, json.loads(OPC_V2_METADATA), does_not_raise()), - (2, 0, json.loads(OPC_V2_METADATA), does_not_raise()), - (3, 0, json.loads(OPC_V1_METADATA), does_not_raise()), - (3, 1, json.loads(OPC_V1_METADATA), does_not_raise()), - (3, 2, json.loads(OPC_V1_METADATA), does_not_raise()), - (3, 3, None, pytest.raises(UrlError)), - ], - ) - def test_retries( - self, - v2_failure_count, - v1_failure_count, - expected_body, - expectation, - mocked_responses, - ): - # Workaround https://github.com/getsentry/responses/pull/171 - # This mocking can be unrolled when Bionic is EOL - url_v2_call_count = 0 - - def url_v2_callback(request): - nonlocal url_v2_call_count - url_v2_call_count += 1 - if url_v2_call_count <= v2_failure_count: - return ( - 404, - request.headers, - f"403 Client Error: Forbidden for url: {url_v2}", - ) - return 200, request.headers, OPC_V2_METADATA - - url_v2 = "http://169.254.169.254/opc/v2/instance/" - mocked_responses.add_callback( - responses.GET, url_v2, callback=url_v2_callback + @mock.patch("cloudinit.url_helper.time.time", side_effect=count(0, 1)) + @mock.patch("cloudinit.url_helper.readurl", side_effect=UrlError) + def test_retry(self, m_readurl, m_time): + # Since wait_for_url has its own retry tests, just verify that we + # attempted to contact both endpoints multiple times + oracle.read_opc_metadata() + assert len(m_readurl.call_args_list) > 3 + assert ( + m_readurl.call_args_list[0][0][0] + == "http://169.254.169.254/opc/v2/instance/" ) - - # Workaround https://github.com/getsentry/responses/pull/171 - # This mocking can be unrolled when Bionic is EOL - url_v1_call_count = 0 - - def url_v1_callback(request): - nonlocal url_v1_call_count - url_v1_call_count += 1 - if url_v1_call_count <= v1_failure_count: - return ( - 404, - request.headers, - f"403 Client Error: Forbidden for url: {url_v1}", - ) - return 200, request.headers, OPC_V1_METADATA - - url_v1 = "http://169.254.169.254/opc/v1/instance/" - mocked_responses.add_callback( - responses.GET, url_v1, callback=url_v1_callback + assert ( + m_readurl.call_args_list[1][0][0] + == "http://169.254.169.254/opc/v1/instance/" + ) + assert ( + m_readurl.call_args_list[2][0][0] + == "http://169.254.169.254/opc/v2/instance/" + ) + assert ( + m_readurl.call_args_list[3][0][0] + == "http://169.254.169.254/opc/v1/instance/" ) - with expectation: - assert expected_body == oracle.read_opc_metadata().instance_data + @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) + 
@mock.patch("cloudinit.url_helper.time.time", side_effect=[0, 11]) + @mock.patch( + "cloudinit.sources.DataSourceOracle.wait_for_url", + return_value=("http://hi", b'{"some": "value"}'), + ) + def test_fetch_vnics_max_wait(self, m_wait_for_url, m_time): + oracle.read_opc_metadata(fetch_vnics_data=True) + assert m_wait_for_url.call_count == 2 + # 19 because start time was 0, next time was 11 and max wait is 30 + assert m_wait_for_url.call_args_list[-1][1]["max_wait"] == 19 + + @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) + @mock.patch("cloudinit.url_helper.time.time", side_effect=[0, 1000]) + @mock.patch( + "cloudinit.sources.DataSourceOracle.wait_for_url", + return_value=("http://hi", b'{"some": "value"}'), + ) + def test_attempt_vnics_after_max_wait_expire(self, m_wait_for_url, m_time): + oracle.read_opc_metadata(fetch_vnics_data=True) + assert m_wait_for_url.call_count == 2 + assert m_wait_for_url.call_args_list[-1][1]["max_wait"] < 0 # No need to actually wait between retries in the tests @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) def test_fetch_vnics_error(self, caplog): - def mocked_fetch(*args, path="instance", **kwargs): - if path == "vnics": - raise UrlError("cause") + def m_wait(*args, **kwargs): + for url in args[0]: + if "vnics" in url: + return False, None + return ("http://localhost", b"{}") - with mock.patch(DS_PATH + "._fetch", side_effect=mocked_fetch): + with mock.patch(DS_PATH + ".wait_for_url", side_effect=m_wait): opc_metadata = oracle.read_opc_metadata(fetch_vnics_data=True) assert None is opc_metadata.vnics_data assert ( logging.WARNING, "Failed to fetch IMDS network configuration!", - ) == caplog.record_tuples[-2][1:] + ) == caplog.record_tuples[-1][1:], caplog.record_tuples @pytest.mark.parametrize( diff --git a/tests/unittests/sources/test_scaleway.py b/tests/unittests/sources/test_scaleway.py index 9ac73deddd4..d6c9e327795 100644 --- a/tests/unittests/sources/test_scaleway.py +++ b/tests/unittests/sources/test_scaleway.py @@ -476,6 +476,7 @@ def test_metadata_ipv4_404(self, dhcpv4, ds_detect): self.assertIsNone(self.datasource.get_userdata_raw()) self.assertIsNone(self.datasource.get_vendordata_raw()) + @mock.patch("cloudinit.url_helper.time.sleep", lambda x: None) @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4") def test_metadata_connection_errors_legacy_ipv4_url(self, dhcpv4): """ @@ -497,11 +498,6 @@ def test_metadata_connection_errors_legacy_ipv4_url(self, dhcpv4): callback=ConnectionError, ) self.datasource._set_metadata_url(self.datasource.metadata_urls) - if sys.version_info.minor >= 7: - self.responses.assert_call_count( - f"{self.datasource.metadata_urls[0]}/", - self.datasource.retries, - ) self.assertEqual(self.datasource.metadata, {}) self.assertIsNone(self.datasource.get_userdata_raw()) self.assertIsNone(self.datasource.get_vendordata_raw()) diff --git a/tests/unittests/sources/test_smartos.py b/tests/unittests/sources/test_smartos.py index 4bcec4fcbfd..b4d7dbb1945 100644 --- a/tests/unittests/sources/test_smartos.py +++ b/tests/unittests/sources/test_smartos.py @@ -5,6 +5,8 @@ # # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init + """This is a testcase for the SmartOS datasource. 
It replicates a serial console and acts like the SmartOS console does in diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py index 33193f89f5e..cfeff6d53a7 100644 --- a/tests/unittests/sources/test_vmware.py +++ b/tests/unittests/sources/test_vmware.py @@ -77,6 +77,11 @@ "mask": "255.255.255.0", "scope": "global", } +VMW_IPV4_NETIFACES_ADDR = { + "broadcast": "10.85.130.255", + "netmask": "255.255.255.0", + "addr": "10.85.130.116", +} VMW_IPV6_ROUTEINFO = { "destination": "::/0", "flags": "UG", @@ -88,6 +93,18 @@ "ip": "fd42:baa2:3dd:17a:216:3eff:fe16:db54/64", "scope6": "global", } +VMW_IPV6_NETIFACES_ADDR = { + "netmask": "ffff:ffff:ffff:ffff::/64", + "addr": "fd42:baa2:3dd:17a:216:3eff:fe16:db54", +} +VMW_IPV6_NETDEV_PEER_ADDR = { + "ip": "fd42:baa2:3dd:17a:216:3eff:fe16:db54", + "scope6": "global", +} +VMW_IPV6_NETIFACES_PEER_ADDR = { + "netmask": "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128", + "addr": "fd42:baa2:3dd:17a:216:3eff:fe16:db54", +} def generate_test_netdev_data(ipv4=None, ipv6=None): @@ -147,6 +164,22 @@ def test_no_data_access_method(self): ret = ds.get_data() self.assertFalse(ret) + def test_convert_to_netifaces_ipv4_format(self): + netifaces_format = DataSourceVMware.convert_to_netifaces_ipv4_format( + VMW_IPV4_NETDEV_ADDR + ) + self.assertEqual(netifaces_format, VMW_IPV4_NETIFACES_ADDR) + + def test_convert_to_netifaces_ipv6_format(self): + netifaces_format = DataSourceVMware.convert_to_netifaces_ipv6_format( + VMW_IPV6_NETDEV_ADDR + ) + self.assertEqual(netifaces_format, VMW_IPV6_NETIFACES_ADDR) + netifaces_format = DataSourceVMware.convert_to_netifaces_ipv6_format( + VMW_IPV6_NETDEV_PEER_ADDR + ) + self.assertEqual(netifaces_format, VMW_IPV6_NETIFACES_PEER_ADDR) + @mock.patch("cloudinit.sources.DataSourceVMware.get_default_ip_addrs") def test_get_host_info_ipv4(self, m_fn_ipaddr): m_fn_ipaddr.return_value = ("10.10.10.1", None) diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py index 7fa02b1c9bb..117fdab0f60 100644 --- a/tests/unittests/sources/test_vultr.py +++ b/tests/unittests/sources/test_vultr.py @@ -5,7 +5,7 @@ # Vultr Metadata API: # https://www.vultr.com/metadata/ -import json +import copy from cloudinit import helpers, settings from cloudinit.net.dhcp import NoDHCPLeaseError @@ -13,6 +13,20 @@ from cloudinit.sources.helpers import vultr from tests.unittests.helpers import CiTestCase, mock +VENDOR_DATA = """\ +#cloud-config +package_upgrade: true +disable_root: 0 +ssh_pwauth: 1 +chpasswd: + expire: false + list: + - root:$6$SxXx...k2mJNIzZB5vMCDBlYT1 +system_info: + default_user: + name: root +""" + # Vultr metadata test data VULTR_V1_1 = { "bgp": { @@ -58,18 +72,7 @@ "startup-script": "echo No configured startup script", "raid1-script": "", "user-data": [], - "vendor-data": [ - { - "package_upgrade": "true", - "disable_root": 0, - "ssh_pwauth": 1, - "chpasswd": { - "expire": False, - "list": ["root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/"], - }, - "system_info": {"default_user": {"name": "root"}}, - } - ], + "vendor-data": VENDOR_DATA, } VULTR_V1_2 = { @@ -130,22 +133,9 @@ "user-defined": [], "startup-script": "echo No configured startup script", "user-data": [], - "vendor-data": [ - { - "package_upgrade": "true", - "disable_root": 0, - "ssh_pwauth": 1, - "chpasswd": { - "expire": False, - "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"], - }, - "system_info": {"default_user": {"name": "root"}}, - } - ], + "vendor-data": VENDOR_DATA, } -VULTR_V1_3 = None - SSH_KEYS_1 = ["ssh-rsa 
AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"] CLOUD_INTERFACES = { @@ -190,20 +180,6 @@ FILTERED_INTERFACES = ["eth1", "eth2", "eth0"] -# Expected generated objects - -# Expected config -EXPECTED_VULTR_CONFIG = { - "package_upgrade": "true", - "disable_root": 0, - "ssh_pwauth": 1, - "chpasswd": { - "expire": False, - "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"], - }, - "system_info": {"default_user": {"name": "root"}}, -} - # Expected network config object from generator EXPECTED_VULTR_NETWORK_1 = { "version": 1, @@ -271,28 +247,9 @@ } -FINAL_INTERFACE_USED = "" - - class TestDataSourceVultr(CiTestCase): def setUp(self): - global VULTR_V1_3 super(TestDataSourceVultr, self).setUp() - - # Create v3 - VULTR_V1_3 = VULTR_V1_2.copy() - VULTR_V1_3["cloud_interfaces"] = CLOUD_INTERFACES.copy() - VULTR_V1_3["interfaces"] = [] - - # Stored as a dict to make it easier to maintain - raw1 = json.dumps(VULTR_V1_1["vendor-data"][0]) - raw2 = json.dumps(VULTR_V1_2["vendor-data"][0]) - - # Make expected format - VULTR_V1_1["vendor-data"] = [raw1] - VULTR_V1_2["vendor-data"] = [raw2] - VULTR_V1_3["vendor-data"] = [raw2] - self.tmp = self.tmp_dir() # Test the datasource itself @@ -330,8 +287,8 @@ def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap): # Test vendor config self.assertEqual( - EXPECTED_VULTR_CONFIG, - json.loads(vendordata[0].replace("#cloud-config", "")), + VENDOR_DATA, + vendordata, ) self.maxDiff = orig_val @@ -339,6 +296,15 @@ def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap): # Test network config generation self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config) + def _get_metadata(self): + # Create v1_3 + vultr_v1_3 = VULTR_V1_2.copy() + vultr_v1_3["cloud_interfaces"] = CLOUD_INTERFACES.copy() + vultr_v1_3["interfaces"] = [] + vultr_v1_3["vendor-data"] = copy.deepcopy(VULTR_V1_2["vendor-data"]) + + return vultr_v1_3 + # Test the datasource with new network config type @mock.patch("cloudinit.net.get_interfaces_by_mac") @mock.patch("cloudinit.sources.helpers.vultr.is_vultr") @@ -346,7 +312,7 @@ def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap): def test_datasource_cloud_interfaces( self, mock_getmeta, mock_isvultr, mock_netmap ): - mock_getmeta.return_value = VULTR_V1_3 + mock_getmeta.return_value = self._get_metadata() mock_isvultr.return_value = True mock_netmap.return_value = INTERFACE_MAP @@ -375,7 +341,7 @@ def test_network_config(self, mock_netmap): @mock.patch("cloudinit.net.get_interfaces_by_mac") def test_private_network_config(self, mock_netmap): mock_netmap.return_value = INTERFACE_MAP - interf = VULTR_V1_2["interfaces"].copy() + interf = copy.deepcopy(VULTR_V1_2["interfaces"]) # Test configuring self.assertEqual( @@ -384,27 +350,10 @@ def test_private_network_config(self, mock_netmap): # Test unconfigured interf[1]["unconfigured"] = True - expected = EXPECTED_VULTR_NETWORK_2.copy() + expected = copy.deepcopy(EXPECTED_VULTR_NETWORK_2) expected["config"].pop(2) self.assertEqual(expected, vultr.generate_network_config(interf)) - # Override ephemeral for proper unit testing - def ephemeral_init( - self, distro, iface="", connectivity_url_data=None, tmp_dir=None - ): - global FINAL_INTERFACE_USED - FINAL_INTERFACE_USED = iface - if iface == "eth0": - return - raise NoDHCPLeaseError("Generic for testing") - - # Override ephemeral for proper unit testing - def ephemeral_init_always( - self, iface="", connectivity_url_data=None, tmp_dir=None - ): - global FINAL_INTERFACE_USED - FINAL_INTERFACE_USED = iface - # Override 
ephemeral for proper unit testing def override_enter(self): return @@ -415,7 +364,8 @@ def override_exit(self, excp_type, excp_value, excp_traceback): # Test interface seeking to ensure we are able to find the correct one @mock.patch( - "cloudinit.net.ephemeral.EphemeralDHCPv4.__init__", ephemeral_init + "cloudinit.net.ephemeral.EphemeralDHCPv4.__init__", + side_effect=(NoDHCPLeaseError("Generic for testing"), None), ) @mock.patch( "cloudinit.net.ephemeral.EphemeralDHCPv4.__enter__", override_enter @@ -431,6 +381,7 @@ def test_interface_seek( mock_interface_list, mock_read_metadata, mock_isvultr, + mock_eph_init, ): mock_read_metadata.return_value = {} mock_isvultr.return_value = True @@ -447,36 +398,4 @@ def test_interface_seek( except Exception: pass - self.assertEqual(FINAL_INTERFACE_USED, INTERFACES[3]) - - # Test route checking sucessful DHCPs - @mock.patch( - "cloudinit.net.ephemeral.EphemeralDHCPv4.__init__", - ephemeral_init_always, - ) - @mock.patch( - "cloudinit.net.ephemeral.EphemeralDHCPv4.__enter__", override_enter - ) - @mock.patch( - "cloudinit.net.ephemeral.EphemeralDHCPv4.__exit__", override_exit - ) - @mock.patch("cloudinit.sources.helpers.vultr.get_interface_list") - @mock.patch("cloudinit.sources.helpers.vultr.is_vultr") - @mock.patch("cloudinit.sources.helpers.vultr.read_metadata") - def test_interface_seek_route_check( - self, mock_read_metadata, mock_isvultr, mock_interface_list - ): - mock_read_metadata.return_value = {} - mock_interface_list.return_value = FILTERED_INTERFACES - mock_isvultr.return_value = True - - source = DataSourceVultr.DataSourceVultr( - settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) - ) - - try: - source._get_data() - except Exception: - pass - - self.assertEqual(FINAL_INTERFACE_USED, INTERFACES[3]) + assert mock_eph_init.call_args[1]["iface"] == FILTERED_INTERFACES[1] diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py index aae2e9f4ee2..0925c520a03 100644 --- a/tests/unittests/test_atomic_helper.py +++ b/tests/unittests/test_atomic_helper.py @@ -63,3 +63,9 @@ def check_file(self, path, content, omode=None, perms=0o644): def check_perms(self, path, perms): file_stat = os.stat(path) self.assertEqual(perms, stat.S_IMODE(file_stat.st_mode)) + + def test_write_file_ensure_dirs(self): + path = self.tmp_path("ensure_dirs") + "/ensure/dir" + contents = b"Hey there\n" + atomic_helper.write_file(path, contents) + self.check_file(path, contents) diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index 24762450295..2681cddde7e 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -25,6 +25,14 @@ def mock_get_user_data_file(mocker, tmpdir): ) +@pytest.fixture(autouse=True, scope="module") +def disable_setup_logging(): + # setup_basic_logging can change the logging level to WARNING, so + # ensure it is always mocked + with mock.patch(f"{M_PATH}log.setup_basic_logging", autospec=True): + yield + + class TestCLI: def _call_main(self, sysv_args=None): if not sysv_args: @@ -193,7 +201,7 @@ def test_all_subcommands_represented_in_help(self, capsys): ), ), ) - @mock.patch("cloudinit.cmd.main.setup_basic_logging") + @mock.patch("cloudinit.cmd.main.log.setup_basic_logging") def test_subcommands_log_to_stderr_via_setup_basic_logging( self, setup_basic_logging, subcommand, log_to_stderr, mocks ): @@ -300,13 +308,12 @@ def test_wb_schema_subcommand_parser(self, m_read_cfg, capsys): ["all"], [ "**Supported distros:** all", - "**Supported distros:** almalinux, alpine, 
centos, " - "cloudlinux, cos, debian, eurolinux, fedora, freebsd, " - "mariner, miraclelinux, " - "openbsd, openeuler, OpenCloudOS, openmandriva, " - "opensuse, opensuse-microos, opensuse-tumbleweed, " - "opensuse-leap, photon, rhel, rocky, sle_hpc, " - "sle-micro, sles, TencentOS, ubuntu, virtuozzo", + "**Supported distros:** almalinux, alpine, azurelinux, " + "centos, cloudlinux, cos, debian, eurolinux, fedora, " + "freebsd, mariner, miraclelinux, openbsd, openeuler, " + "OpenCloudOS, openmandriva, opensuse, opensuse-microos, " + "opensuse-tumbleweed, opensuse-leap, photon, rhel, rocky, " + "sle_hpc, sle-micro, sles, TencentOS, ubuntu, virtuozzo", " **resize_rootfs:** ", "(``true``/``false``/``noblock``)", "runcmd:\n - [ ls, -l, / ]\n", diff --git a/tests/unittests/test_gpg.py b/tests/unittests/test_gpg.py index caa2aeb4dd7..5bc0b7f2188 100644 --- a/tests/unittests/test_gpg.py +++ b/tests/unittests/test_gpg.py @@ -1,10 +1,10 @@ +import os from unittest import mock import pytest from cloudinit import gpg, subp from cloudinit.subp import SubpResult -from tests.unittests.helpers import CiTestCase TEST_KEY_HUMAN = """ /etc/apt/cloud-init.gpg.d/my_key.gpg @@ -35,21 +35,38 @@ TEST_KEY_FINGERPRINT_MACHINE = "3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85" +@pytest.fixture() +def m_subp(): + with mock.patch.object( + gpg.subp, "subp", return_value=SubpResult("", "") + ) as m_subp, mock.patch.object(gpg.time, "sleep"): + yield m_subp + + +@pytest.fixture() +def m_sleep(): + with mock.patch("cloudinit.gpg.time.sleep") as sleep: + yield sleep + + class TestGPGCommands: def test_dearmor_bad_value(self): """This exception is handled by the callee. Ensure it is not caught internally. """ + gpg_instance = gpg.GPG() with mock.patch.object( subp, "subp", side_effect=subp.ProcessExecutionError ): with pytest.raises(subp.ProcessExecutionError): - gpg.dearmor("garbage key value") + gpg_instance.dearmor("garbage key value") - def test_gpg_list_args(self): + def test_gpg_list_args(self, m_subp): """Verify correct command gets called to list keys""" + gpg_instance = gpg.GPG() no_colons = [ "gpg", + "--no-options", "--with-fingerprint", "--no-default-keyring", "--list-keys", @@ -58,6 +75,7 @@ def test_gpg_list_args(self): ] colons = [ "gpg", + "--no-options", "--with-fingerprint", "--no-default-keyring", "--list-keys", @@ -65,80 +83,149 @@ def test_gpg_list_args(self): "--with-colons", "key", ] - with mock.patch.object( - subp, "subp", return_value=SubpResult("", "") - ) as m_subp: - gpg.list("key") - assert mock.call(colons, capture=True) == m_subp.call_args - - gpg.list("key", human_output=True) - test_calls = mock.call((no_colons), capture=True) - assert test_calls == m_subp.call_args - - def test_gpg_dearmor_args(self): + gpg_instance.list_keys("key") + assert ( + mock.call(colons, capture=True, update_env=gpg_instance.env) + == m_subp.call_args + ) + + gpg_instance = gpg.GPG() + gpg_instance.list_keys("key", human_output=True) + assert m_subp.call_args == mock.call( + no_colons, capture=True, update_env=gpg_instance.env + ) + + def test_gpg_dearmor_args(self, m_subp): """Verify correct command gets called to dearmor keys""" - with mock.patch.object( - subp, "subp", return_value=SubpResult("", "") - ) as m_subp: - gpg.dearmor("key") - test_call = mock.call( - ["gpg", "--dearmor"], data="key", decode=False - ) - assert test_call == m_subp.call_args - - @mock.patch("cloudinit.gpg.time.sleep") - @mock.patch("cloudinit.gpg.subp.subp") - class TestReceiveKeys(CiTestCase): - """Test the recv_key method.""" - - def 
test_retries_on_subp_exc(self, m_subp, m_sleep): - """retry should be done on gpg receive keys failure.""" - retries = (1, 2, 4) - my_exc = subp.ProcessExecutionError( - stdout="", stderr="", exit_code=2, cmd=["mycmd"] - ) - m_subp.side_effect = (my_exc, my_exc, ("", "")) - gpg.recv_key("ABCD", "keyserver.example.com", retries=retries) - self.assertEqual( - [mock.call(1), mock.call(2)], m_sleep.call_args_list + gpg_instance = gpg.GPG() + retries = (1, 2, 4) + my_exc = subp.ProcessExecutionError( + stdout="", stderr="", exit_code=2, cmd=["mycmd"] + ) + m_subp.side_effect = (my_exc, my_exc, ("", "")) + gpg_instance.recv_key("ABCD", "keyserver.example.com", retries=retries) + assert [mock.call(1), mock.call(2)] == m_sleep.call_args_list + + def test_raises_error_after_retries(self, m_subp, m_sleep): + """If the final run fails, error should be raised.""" + gpg_instance = gpg.GPG() + naplen = 1 + keyid, keyserver = ("ABCD", "keyserver.example.com") + m_subp.side_effect = subp.ProcessExecutionError( + stdout="", stderr="", exit_code=2, cmd=["mycmd"] + ) + with pytest.raises( + ValueError, match=f"{keyid}.*{keyserver}|{keyserver}.*{keyid}" + ): + gpg_instance.recv_key(keyid, keyserver, retries=(naplen,)) + m_sleep.assert_called_once() + + def test_no_retries_on_none(self, m_subp, m_sleep): + """retry should not be done if retries is None.""" + gpg_instance = gpg.GPG() + m_subp.side_effect = subp.ProcessExecutionError( + stdout="", stderr="", exit_code=2, cmd=["mycmd"] + ) + with pytest.raises(ValueError): + gpg_instance.recv_key( + "ABCD", "keyserver.example.com", retries=None + ) + m_sleep.assert_not_called() + + def test_expected_gpg_command(self, m_subp, m_sleep): + """Verify gpg is called with expected args.""" + gpg_instance = gpg.GPG() + key, keyserver = ("DEADBEEF", "keyserver.example.com") + retries = (1, 2, 4) + m_subp.return_value = ("", "") + gpg_instance.recv_key(key, keyserver, retries=retries) + m_subp.assert_called_once_with( + [ + "gpg", + "--no-tty", + "--keyserver=%s" % keyserver, + "--recv-keys", + key, + ], + capture=True, + update_env=gpg_instance.env, + ) + m_sleep.assert_not_called() + + def test_kill_gpg_succeeds(self, m_subp): + """ensure that when gpgconf is available, it is used to kill gpg + processes. 
Also test that the context manager does cleanup - def test_raises_error_after_retries(self, m_subp, m_sleep): - """If the final run fails, error should be raised.""" - naplen = 1 - keyid, keyserver = ("ABCD", "keyserver.example.com") - m_subp.side_effect = subp.ProcessExecutionError( - stdout="", stderr="", exit_code=2, cmd=["mycmd"] - ) - with self.assertRaises(ValueError) as rcm: - gpg.recv_key(keyid, keyserver, retries=(naplen,)) - self.assertIn(keyid, str(rcm.exception)) - self.assertIn(keyserver, str(rcm.exception)) - m_sleep.assert_called_with(naplen) - - def test_no_retries_on_none(self, m_subp, m_sleep): - """retry should not be done if retries is None.""" - m_subp.side_effect = subp.ProcessExecutionError( - stdout="", stderr="", exit_code=2, cmd=["mycmd"] - ) - with self.assertRaises(ValueError): - gpg.recv_key("ABCD", "keyserver.example.com", retries=None) - m_sleep.assert_not_called() - - def test_expected_gpg_command(self, m_subp, m_sleep): - """Verify gpg is called with expected args.""" - key, keyserver = ("DEADBEEF", "keyserver.example.com") - retries = (1, 2, 4) - m_subp.return_value = ("", "") - gpg.recv_key(key, keyserver, retries=retries) - m_subp.assert_called_once_with( - [ - "gpg", - "--no-tty", - "--keyserver=%s" % keyserver, - "--recv-keys", - key, - ], - capture=True, - ) - m_sleep.assert_not_called() + """ + with pytest.raises(ZeroDivisionError): + with gpg.GPG() as gpg_context: + + # run a gpg command so that we have "started" gpg + gpg_context.list_keys("") + 1 / 0 # pylint: disable=pointless-statement + m_subp.assert_has_calls( + [ + mock.call( + ["gpgconf", "--kill", "all"], + capture=True, + update_env=gpg_context.env, + ) + ] + ) + assert not os.path.isdir(str(gpg_context.temp_dir)) + + def test_do_not_kill_unstarted_gpg(self, m_subp): + """ensure that when gpg isn't started, gpg isn't killed, but the + directory is cleaned up. + """ + with pytest.raises(ZeroDivisionError): + with gpg.GPG() as gpg_context: + 1 / 0 # pylint: disable=pointless-statement + m_subp.assert_not_called() + assert not os.path.isdir(str(gpg_context.temp_dir)) + + def test_kill_gpg_failover_succeeds(self, m_subp): + """ensure that when gpgconf isn't found, processes are manually + cleaned up + """ + with mock.patch("cloudinit.gpg.subp.which", return_value=None): + gpg_instance = gpg.GPG() + + # "start" gpg (if we don't, we won't kill gpg) + gpg_instance.recv_key("", "") + gpg_instance.kill_gpg() + m_subp.assert_has_calls( + [ + mock.call( + [ + "ps", + "-o", + "ppid,pid", + "-C", + "keyboxd", + "-C", + "dirmngr", + "-C", + "gpg-agent", + ], + capture=True, + rcs=[0, 1], + ) + ] + ) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index e84523a2138..e9b2009d16c 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import base64 import copy diff --git a/tests/unittests/test_render_template.py b/tests/unittests/test_render_template.py index 150e61b1d8d..0ed9464821d 100644 --- a/tests/unittests/test_render_template.py +++ b/tests/unittests/test_render_template.py @@ -11,6 +11,7 @@ DISTRO_VARIANTS = [ "amazon", "arch", + "azurelinux", "centos", "debian", "eurolinux", diff --git a/tests/unittests/test_stages.py b/tests/unittests/test_stages.py index bb5f4012627..3466732952c 100644 --- a/tests/unittests/test_stages.py +++ b/tests/unittests/test_stages.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. 
See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init """Tests related to cloudinit.stages module.""" import json diff --git a/tests/unittests/test_upgrade.py b/tests/unittests/test_upgrade.py index 531ed3cffc4..0aca3683b86 100644 --- a/tests/unittests/test_upgrade.py +++ b/tests/unittests/test_upgrade.py @@ -13,13 +13,15 @@ import operator import pathlib +from unittest import mock import pytest -from cloudinit.sources import pkl_load +from cloudinit import importer, settings, sources, type_utils from cloudinit.sources.DataSourceAzure import DataSourceAzure from cloudinit.sources.DataSourceNoCloud import DataSourceNoCloud from tests.unittests.helpers import resourceLocation +from tests.unittests.util import MockDistro DSNAME_TO_CLASS = { "Azure": DataSourceAzure, @@ -28,6 +30,137 @@ class TestUpgrade: + # Expect the following "gaps" in unpickling per-datasource. + # The presence of these attributes existed in 20.1. + ds_expected_unpickle_attrs = { + "AltCloud": {"seed", "supported_seed_starts"}, + "AliYun": {"identity", "metadata_address", "default_update_events"}, + "Azure": { + "_ephemeral_dhcp_ctx", + "_iso_dev", + "_network_config", + "_reported_ready_marker_file", + "_route_configured_for_imds", + "_route_configured_for_wireserver", + "_wireserver_endpoint", + "cfg", + "seed", + "seed_dir", + }, + "CloudSigma": {"cepko", "ssh_public_key"}, + "CloudStack": { + "api_ver", + "cfg", + "metadata_address", + "seed_dir", + "vr_addr", + }, + "ConfigDrive": { + "_network_config", + "ec2_metadata", + "files", + "known_macs", + "network_eni", + "network_json", + "seed_dir", + "source", + "version", + }, + "DigitalOcean": { + "_network_config", + "metadata_address", + "metadata_full", + "retries", + "timeout", + "use_ip4LL", + "wait_retry", + }, + "Ec2": {"identity", "metadata_address"}, + "Exoscale": { + "api_version", + "extra_config", + "metadata_url", + "password_server_port", + "url_retries", + "url_timeout", + }, + "GCE": {"default_user", "metadata_address"}, + "Hetzner": { + "_network_config", + "dsmode", + "metadata_address", + "metadata_full", + "retries", + "timeout", + "userdata_address", + "wait_retry", + }, + "IBMCloud": {"source", "_network_config", "network_json", "platform"}, + "RbxCloud": {"cfg", "gratuitous_arp", "seed"}, + "Scaleway": { + "_network_config", + "metadata_url", + "retries", + "timeout", + }, + "Joyent": { + "_network_config", + "network_data", + "routes_data", + "script_base_d", + }, + "MAAS": {"base_url", "seed_dir"}, + "NoCloud": { + "_network_eni", + "_network_config", + "supported_seed_starts", + "seed_dir", + "seed", + "seed_dirs", + }, + "NWCS": { + "_network_config", + "dsmode", + "metadata_address", + "metadata_full", + "retries", + "timeout", + "wait_retry", + }, + "OpenNebula": {"network", "seed", "seed_dir"}, + "OpenStack": { + "ec2_metadata", + "files", + "metadata_address", + "network_json", + "ssl_details", + "version", + }, + "OVF": { + "cfg", + "environment", + "_network_config", + "seed", + "seed_dir", + "supported_seed_starts", + }, + "UpCloud": { + "_network_config", + "metadata_address", + "metadata_full", + "retries", + "timeout", + "wait_retry", + }, + "Vultr": {"netcfg"}, + "VMware": { + "data_access_method", + "rpctool", + "rpctool_fn", + }, + "WSL": {"instance_name"}, + } + @pytest.fixture( params=pathlib.Path(resourceLocation("old_pickles")).glob("*.pkl"), scope="class", @@ -39,7 +172,102 @@ def previous_obj_pkl(self, request): Test implementations _must not_ modify the ``previous_obj_pkl`` which they 
are passed, as that will affect tests that run after them. """ - return pkl_load(str(request.param)) + return sources.pkl_load(str(request.param)) + + @pytest.mark.parametrize( + "mode", + ( + [sources.DEP_FILESYSTEM], + [sources.DEP_FILESYSTEM, sources.DEP_NETWORK], + ), + ) + @mock.patch.object( + importer, + "match_case_insensitive_module_name", + lambda name: f"DataSource{name}", + ) + def test_all_ds_init_vs_unpickle_attributes( + self, mode, mocker, paths, tmpdir + ): + """Unpickle resets any instance attributes created in __init__ + + This test asserts that deserialization of a datasource cache + does proper initialization of any 'new' instance attributes + created as a side-effect of the __init__ method. + + Without proper _unpickle coverage for newly introduced attributes, + the new deserialized instance will hit AttributeErrors at runtime. + """ + # Load all cloud-init init-local time-frame DataSource classes + for ds_class in sources.list_sources( + settings.CFG_BUILTIN["datasource_list"], + mode, + [type_utils.obj_name(sources)], + ): + # Expected common instance attrs from __init__ that are typically + # handled via existing _unpickling and setup in _get_data + common_instance_attrs = { + "paths", + "vendordata2", + "sys_cfg", + "ud_proc", + "vendordata", + "vendordata2_raw", + "ds_cfg", + "distro", + "userdata", + "userdata_raw", + "metadata", + "vendordata_raw", + } + # Grab initial specific-class attributes from magic method + class_attrs = set(ds_class.__dict__) + + # Mock known subp calls from some datasource __init__ setup + mocker.patch("cloudinit.util.is_container", return_value=False) + mocker.patch("cloudinit.dmi.read_dmi_data", return_value="") + mocker.patch("cloudinit.subp.subp", return_value=("", "")) + + # Initialize the class to grab the instance attributes from + # instance.__dict__ magic method. + ds = ds_class(sys_cfg={}, distro=MockDistro(), paths=paths) + + if getattr(ds.__class__.__bases__[0], "dsname", None) == ds.dsname: + # We are a subclass in a different boot mode (Local/Net) and + # share a common parent with class attributes + class_attrs.update(ds.__class__.__bases__[0].__dict__) + + # Determine new instance attributes created by __init__ + # by calling the __dict__ magic method on the instance. + # Then, subtract common_instance_attrs and + # ds_expected_unpickle_attrs from the list of current attributes. + # What's left is our 'new' instance attributes added as a + # side-effect of __init__. + init_attrs = ( + set(ds.__dict__) + - class_attrs + - common_instance_attrs + - self.ds_expected_unpickle_attrs.get(ds_class.dsname, set()) + ) + + # Remove all side-effect attributes added by __init__ + for side_effect_attr in init_attrs: + delattr(ds, side_effect_attr) + + # Pickle the version of the DataSource with all init_attrs removed + sources.pkl_store(ds, tmpdir.join(f"{ds.dsname}.obj.pkl")) + + # Reload the pickled bare-bones datasource to ensure all instance + # attributes are reconstituted by _unpickle helpers. 
+ ds2 = sources.pkl_load(tmpdir.join(f"{ds.dsname}.obj.pkl")) + unpickled_attrs = ( + set(ds2.__dict__) - class_attrs - common_instance_attrs + ) + missing_unpickled_attrs = init_attrs - unpickled_attrs + assert not missing_unpickled_attrs, ( + f"New {ds_class.dsname} attributes need unpickle coverage:" + f" {missing_unpickled_attrs}" + ) def test_pkl_load_defines_all_init_side_effect_attributes( self, previous_obj_pkl @@ -51,14 +279,15 @@ def test_pkl_load_defines_all_init_side_effect_attributes( paths = previous_obj_pkl.paths ds = ds_class(sys_cfg, distro, paths) if ds.dsname == "NoCloud" and previous_obj_pkl.__dict__: - expected = ( - set({"seed_dirs"}), # LP: #1568150 handled with getattr checks - set(), - ) + # seed_dirs is covered by _unpickle + # _network_config and _network_eni were already initialized + # outside of __init__ so shouldn't need unpickling + expected = {"seed_dirs", "_network_config", "_network_eni"} else: expected = (set(),) missing_attrs = ds.__dict__.keys() - previous_obj_pkl.__dict__.keys() - assert missing_attrs in expected + for attr in missing_attrs: + assert attr in expected def test_networking_set_on_distro(self, previous_obj_pkl): """We always expect to have ``.networking`` on ``Distro`` objects.""" diff --git a/tests/unittests/test_url_helper.py b/tests/unittests/test_url_helper.py index bcb150c83c4..bc3d9857e0e 100644 --- a/tests/unittests/test_url_helper.py +++ b/tests/unittests/test_url_helper.py @@ -1,9 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import logging +import re from functools import partial from threading import Event from time import process_time +from unittest.mock import ANY, call import pytest import requests @@ -444,20 +447,72 @@ def test_dual_stack_staggered(self): """Assert expected call intervals occur""" stagger = 0.1 with mock.patch(M_PATH + "_run_func_with_delay") as delay_func: + + def identity_of_first_arg(x, _): + return x + dual_stack( - lambda x, _y: x, + identity_of_first_arg, ["you", "and", "me", "and", "dog"], stagger_delay=stagger, timeout=1, ) - # ensure that stagger delay for each subsequent call is: + # ensure that stagger delay for each call is made with args: # [ 0 * N, 1 * N, 2 * N, 3 * N, 4 * N, 5 * N] where N = stagger # it appears that without an explicit wait/join we can't assert # number of calls - for delay, call_item in enumerate(delay_func.call_args_list): - _, kwargs = call_item - assert stagger * delay == kwargs.get("delay") + calls = [ + call( + func=identity_of_first_arg, + addr="you", + timeout=1, + event=ANY, + delay=stagger * 0, + ), + call( + func=identity_of_first_arg, + addr="and", + timeout=1, + event=ANY, + delay=stagger * 1, + ), + call( + func=identity_of_first_arg, + addr="me", + timeout=1, + event=ANY, + delay=stagger * 2, + ), + call( + func=identity_of_first_arg, + addr="and", + timeout=1, + event=ANY, + delay=stagger * 3, + ), + call( + func=identity_of_first_arg, + addr="dog", + timeout=1, + event=ANY, + delay=stagger * 4, + ), + ] + num_calls = 0 + for call_instance in calls: + if call_instance in delay_func.call_args_list: + num_calls += 1 + + # we can't know the order of the submitted functions' execution + # we can't know how many of the submitted functions get called + # in advance + # + # we _do_ know what the possible arg combinations are + # we _do_ know from the mocked function how many got called + # assert that all calls that occurred had known valid arguments + # by checking for the 
correct number of matches + assert num_calls == len(delay_func.call_args_list) ADDR1 = "https://addr1/" @@ -470,6 +525,21 @@ class TestUrlHelper: fail = "FAIL" event = Event() + @pytest.fixture + def retry_mocks(self, mocker): + self.mock_time_value = 0 + m_readurl = mocker.patch( + f"{M_PATH}readurl", side_effect=self.readurl_side_effect + ) + m_sleep = mocker.patch( + f"{M_PATH}time.sleep", side_effect=self.sleep_side_effect + ) + mocker.patch(f"{M_PATH}time.time", side_effect=self.time_side_effect) + + yield m_readurl, m_sleep + + self.mock_time_value = 0 + @classmethod def response_wait(cls, _request): cls.event.wait(0.1) @@ -530,7 +600,7 @@ def test_order(self, addresses, expected_address_index, response): assert response.encode() == response_contents @responses.activate - def test_timeout(self): + def test_timeout(self, caplog): """If no endpoint responds in time, expect no response""" self.event.clear() @@ -540,7 +610,7 @@ def test_timeout(self): responses.GET, address, callback=( - self.response_wait + requests.ConnectTimeout if "sleep" in address else self.response_nowait ), @@ -558,3 +628,74 @@ def test_timeout(self): self.event.set() assert not url assert not response_contents + assert re.search( + r"open 'https:\/\/sleep1\/'.*Timed out", caplog.text, re.DOTALL + ) + + def test_explicit_arguments(self, retry_mocks): + """Ensure that explicit arguments are respected""" + m_readurl, m_sleep = retry_mocks + wait_for_url( + urls=["http://localhost/"], + max_wait=23, + timeout=5, + sleep_time=3, + ) + + assert len(m_readurl.call_args_list) == 3 + assert len(m_sleep.call_args_list) == 2 + + for readurl_call in m_readurl.call_args_list: + assert readurl_call[1]["timeout"] == 5 + for sleep_call in m_sleep.call_args_list: + assert sleep_call[0][0] == 3 + + # Call 1 starts 0 + # Call 2 starts at 8-ish after 5 second timeout and 3 second sleep + # Call 3 starts at 16-ish for same reasons + # The 5 second timeout puts us at 21-ish and now we break + # because 21-ish + the sleep time puts us over max wait of 23 + assert pytest.approx(self.mock_time_value) == 21 + + def test_shortened_timeout(self, retry_mocks): + """Test that we shorten the last timeout to align with max_wait""" + m_readurl, _m_sleep = retry_mocks + wait_for_url( + urls=["http://localhost/"], max_wait=10, timeout=9, sleep_time=0 + ) + + assert len(m_readurl.call_args_list) == 2 + assert m_readurl.call_args_list[-1][1]["timeout"] == pytest.approx(1) + + def test_default_sleep_time(self, retry_mocks): + """Test default sleep behavior when not specified""" + _m_readurl, m_sleep = retry_mocks + wait_for_url( + urls=["http://localhost/"], + max_wait=50, + timeout=1, + ) + + expected_sleep_times = [1] * 5 + [2] * 5 + [3] * 5 + actual_sleep_times = [ + m_sleep.call_args_list[i][0][0] + for i in range(len(m_sleep.call_args_list)) + ] + assert actual_sleep_times == expected_sleep_times + + # These side effect methods are a way of having a somewhat predictable + # output for time.time(). Otherwise, we have to track too many calls + # to time.time() and unrelated changes to code being called could cause + # these tests to fail. 
+ # 0.0000001 is added to simulate additional execution time but keep it + # small enough for pytest.approx() to work + def sleep_side_effect(self, sleep_time): + self.mock_time_value += sleep_time + 0.0000001 + + def time_side_effect(self): + return self.mock_time_value + + def readurl_side_effect(self, *args, **kwargs): + if "timeout" in kwargs: + self.mock_time_value += kwargs["timeout"] + 0.0000001 + raise UrlError("test") diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 70edb40bbf6..d1127fdae15 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -400,6 +400,20 @@ """ ) +OS_RELEASE_AZURELINUX = dedent( + """\ + NAME="Microsoft Azure Linux" + VERSION="3.0.20240206" + ID=azurelinux + VERSION_ID="3.0" + PRETTY_NAME="Microsoft Azure Linux 3.0" + ANSI_COLOR="1;34" + HOME_URL="https://aka.ms/azurelinux" + BUG_REPORT_URL="https://aka.ms/azurelinux" + SUPPORT_URL="https://aka.ms/azurelinux" +""" +) + @pytest.mark.usefixtures("fake_filesystem") class TestUtil: @@ -1249,6 +1263,16 @@ def test_get_linux_mariner_os_release(self, m_os_release, m_path_exists): dist = util.get_linux_distro() self.assertEqual(("mariner", "2.0", ""), dist) + @mock.patch("cloudinit.util.load_text_file") + def test_get_linux_azurelinux_os_release( + self, m_os_release, m_path_exists + ): + """Verify we get the correct name and machine arch on Azure Linux""" + m_os_release.return_value = OS_RELEASE_AZURELINUX + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(("azurelinux", "3.0", ""), dist) + @mock.patch(M_PATH + "load_text_file") def test_get_linux_openmandriva(self, m_os_release, m_path_exists): """Verify we get the correct name and machine arch on OpenMandriva""" @@ -1310,6 +1334,7 @@ class TestGetVariant: ({"system": "Linux", "dist": ("almalinux",)}, "almalinux"), ({"system": "linux", "dist": ("alpine",)}, "alpine"), ({"system": "linux", "dist": ("arch",)}, "arch"), + ({"system": "linux", "dist": ("azurelinux",)}, "azurelinux"), ({"system": "linux", "dist": ("centos",)}, "centos"), ({"system": "linux", "dist": ("cloudlinux",)}, "cloudlinux"), ({"system": "linux", "dist": ("debian",)}, "debian"), diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 43de88b19ad..06f92583682 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -46,6 +46,8 @@ dankm dark2phoenix david-caro dbungert +ddstreet +ddstreetmicrosoft ddymko dermotbradley dhalturin @@ -59,6 +61,7 @@ einsibjarni emmanuelthome eslerm esposem +fionn frantisekz GabrielNagy garzdin @@ -128,6 +131,7 @@ onitake orndorffgrant Oursin outscale-mdr +philsphicas phsm phunyguy qubidt diff --git a/tools/ds-identify b/tools/ds-identify index e994d43f150..b4d0da687ea 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -74,6 +74,7 @@ PATH_SYS_CLASS_BLOCK=${PATH_SYS_CLASS_BLOCK:-${PATH_ROOT}/sys/class/block} PATH_DEV_DISK="${PATH_DEV_DISK:-${PATH_ROOT}/dev/disk}" PATH_VAR_LIB_CLOUD="${PATH_VAR_LIB_CLOUD:-${PATH_ROOT}/var/lib/cloud}" PATH_DI_CONFIG="${PATH_DI_CONFIG:-${PATH_ROOT}/etc/cloud/ds-identify.cfg}" +PATH_DI_ENV="${PATH_DI_ENV:-${PATH_ROOT}/usr/libexec/ds-identify-env}" PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}" PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}" PATH_PROC_1_ENVIRON="${PATH_PROC_1_ENVIRON:-${PATH_ROOT}/proc/1/environ}" @@ -1137,7 +1138,7 @@ has_ovf_cdrom() { is_disabled() { if [ -f /etc/cloud/cloud-init.disabled ]; then - debug 1 "disabled 
by marker file /etc/cloud-init.disabled" + debug 1 "disabled by marker file /etc/cloud/cloud-init.disabled" return 0 fi if [ "${KERNEL_CMDLINE:-}" = "cloud-init=disabled" ]; then @@ -1966,6 +1967,16 @@ set_run_path() { DI_LOG="${DI_LOG:-${PATH_RUN_CI}/ds-identify.log}" } +# set ds-identify internal variables by providing an env file +# testing only - NOT use for production code, it is NOT supported +get_environment() { + if [ -f "$PATH_DI_ENV" ]; then + debug 0 "WARN: loading environment file [${PATH_DI_ENV}]"; + # shellcheck source=/dev/null + . "$PATH_DI_ENV" + fi +} + _main() { local dscheck_fn="" ret_dis=1 ret_en=0 @@ -2092,6 +2103,7 @@ _main() { main() { local ret="" + get_environment ensure_sane_path read_uname_info set_run_path @@ -2121,7 +2133,14 @@ noop() { : } +get_environment case "${DI_MAIN}" in + # builtin DI_MAIN implementations main|print_info|noop) "${DI_MAIN}" "$@";; - *) error "unexpected value for DI_MAIN"; exit 1;; + + # side-load an alternate implementation + # testing only - NOT use for production code, it is NOT supported + *) + debug 0 "WARN: side-loading alternate implementation: [${DI_MAIN}]"; + exec "${DI_MAIN}" "$@";; esac diff --git a/tools/render-template b/tools/render-template index 5ef5a374bc8..c3af642a08f 100755 --- a/tools/render-template +++ b/tools/render-template @@ -15,6 +15,7 @@ def main(): "alpine", "amazon", "arch", + "azurelinux", "benchmark", "centos", "cloudlinux", diff --git a/tools/run-container b/tools/run-container index 8584bbcafd7..01b0fca8786 100755 --- a/tools/run-container +++ b/tools/run-container @@ -41,6 +41,7 @@ Usage: ${0##*/} [ options ] [images:]image-ref -u | --unittest run unit tests --vm use a VM instead of a container --wait-max max time to wait or a container or VM to be ready + --commitish commit to package and run, default: HEAD Example: * ${0##*/} --package --source-package --unittest centos/6 @@ -112,8 +113,8 @@ inject_cloud_init(){ esac # attempt to get branch name. - commitish=$(git rev-parse --abbrev-ref HEAD) || { - errorrc "Failed git rev-parse --abbrev-ref HEAD" + commitish=$(git rev-parse --abbrev-ref "$COMMITISH") || { + errorrc "Failed git rev-parse --abbrev-ref $COMMITISH" return } if [ "$commitish" = "HEAD" ]; then @@ -402,7 +403,7 @@ run_self_inside_as_cd() { main() { local short_opts="a:hknpsuv" - local long_opts="artifacts:,dirty,help,keep,name:,package,source-package,unittest,verbose,vm,wait-max:" + local long_opts="artifacts:,dirty,help,keep,name:,package,source-package,unittest,verbose,vm,wait-max:,commitish:" local getopt_out="" getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && @@ -428,10 +429,12 @@ main() { -v|--verbose) VERBOSITY=$((VERBOSITY+1));; --vm) use_vm=true;; --wait-max) WAIT_MAX="$next"; shift;; + --commitish) COMMITISH="$next"; shift;; --) shift; break;; esac shift; done + COMMITISH=${COMMITISH:-HEAD} [ $# -eq 1 ] || { bad_Usage "Expected 1 arg, got $# ($*)"; return; } local img_ref_in="$1" diff --git a/tox.ini b/tox.ini index cad37a1d746..473e937cb25 100644 --- a/tox.ini +++ b/tox.ini @@ -328,7 +328,7 @@ markers = serial: tests that do not work in parallel, skipped with py3-fast unstable: skip this test because it is flakey user_data: the user data to be passed to the test instance - allow_dns_lookup: disable autochecking for host network configuration + allow_dns_lookup: disable autochecking for host network configuration [coverage:paths] source =