diff --git a/ChangeLog b/ChangeLog index 95b4eb7925a..a175ed7fba8 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,213 @@ +23.4 + - tests: datasourcenone use client.restart to block until done (#4635) + - tests: increase number of retries across reboot to 90 (#4651) + - fix: Add schema for merge types (#4648) + - feat: Allow aliyun ds to fetch data in init-local (#4590) [qidong.ld] + - azure: report failure to eject as error instead of debug (#4643) + [Chris Patterson] + - bug(schema): write network-config if instance dir present (#4635) + - test: fix schema fuzzing test (#4639) + - Update build-on-openbsd dependencies (#4644) [CodeBleu] + - fix(test): Fix expected log for ipv6-only ephemeral network (#4641) + - refactor: Remove metaclass from network_state.py (#4638) + - schema: non-root fallback to default paths on perm errors (#4631) + - fix: Don't loosen the permissions of the log file (#4628) + - Revert "logging: keep current file mode of log file if its stricter + than the new mode (#4250)" + - ephemeral: Handle link up failure for both ipv4 and ipv6 (#4547) + - fix(main): Don't call logging too early (#4595) + - fix: Remove Ubuntu-specific kernel naming convention assertion (#4617) + - fix(log): Do not implement handleError with a self parameter (#4617) + - fix(log): Don't try to reuse stderr logger (#4617) + - feat: Standardize logging output to stderr (#4617) + - chore: Sever unmaintained TODO.rst (#4625) + - test: Skip failing tests + - distros: Add suse + - test: Add default hello package version (#4614) + - fix(net): Improve DHCPv4 SUSE code, add test + - net: Fix DHCPv4 not enabled on SUSE in some cases [bin456789] + - fix(schema): Warn if missing dependency (#4616) + - fix(cli): main source cloud_config for schema validation (#4562) + - feat(schema): annotation path for invalid top-level keys (#4562) + - feat(schema): top-level additionalProperties: false (#4562) + - test: ensure top-level properties tests will pass (#4562) + - fix(schema): Add missing schema definitions (#4562) + - test: Fix snap tests (#4562) + - azure: Check for stale pps data from IMDS (#4596) [Ksenija Stanojevic] + - test: Undo dhcp6 integration test changes (#4612) + - azure: update diagnostic from warning level to debug [Chris Patterson] + - azure/imds: remove limit for connection errors if route present (#4604) + [Chris Patterson] + - [enhancement]: Add shellcheck to CI (#4488) [Aviral Singh] + - chore: add conventional commits template (#4593) + - Revert "net: allow dhcp6 configuration from + generate_fallback_configuration()" (#4607) + - azure: workaround to disable reporting IMDS failures on Azure Stack + [Chris Patterson] + - cc_apt_pipelining: Update docs, deprecate options (#4571) + - test: add gh workflows on push to main, update status badges (#4597) + - util: Remove function abs_join() (#4587) + - url_helper: Remove unused function retry_on_url_exc() (#4587) + - cc_resizefs: Add bcachefs resize support (#4594) + - integration_tests: Support non-Ubuntu distros (#4586) + - fix(cmdline): fix cmdline parsing with MAC containing cc: + - azure/errors: include http code in reason for IMDS failure + [Chris Patterson] + - tests: cloud-init schema --system does not return exit code 2 + - github: allow pull request to specify desired rebase and merge + - tests: fix integration test expectations of exit 2 on schema warning + - tests: fix schema test expected cli output Valid schema + - fix(schema cli): check raw userdata when processed cloud-config empty + - azure: report failure to host if ephemeral
DHCP secondary NIC (#4558) + [Chris Patterson] + - man: Document cloud-init error codes (#4500) + - Add support for cloud-init "degraded" state (#4500) + - status.json: Don't override detail key with error condition (#4500) + - status: Remove duplicated data (#4500) + - refactor: Rename exported_errors in status.json (#4500) + - test: Remove stale status.json value (#4500) + - tools/render-template: Make yaml loading opt-in, fix setup.py (#4564) + - Add summit digest/trip report to docs (#4561) [Sally] + - doc: Fix incorrect statement about `cloud-init analyze` + - azure/imds: ensure new errors are logged immediately when retrying + (#4468) [Chris Patterson] + - Clarify boothook docs (#4543) + - boothook: allow stdout/stderr to emit to cloud-init-output.log + - summit-notes: add 2023 notes for reference in mailinglist/discourse + - fix: added mock to stop leaking journalctl that slows down unit test + (#4556) [Alec Warren] + - tests: maas test for DataSourceMAASLocal get_data + - maas tests: avoid using CiTest case and prefer pytest.tmpdir fixture + - MAAS: Add datasource to init-local timeframe + - Ensure all tests passed and/or are skipped + - Support QEMU in integration tests + - fix(read-dependencies): handle version specifiers containing [~!] + - test: unpin pytest + - schema: network-config optional network key. route uses oneOf (#4482) + - schema: add cloud_init_deepest_matches for best error message (#4482) + - network: warn invalid cfg add /run/cloud-init/network-config (#4482) + - schema: add network-config support to schema subcommand (#4482) + - Update version number and merge ChangeLog from 23.3.3 into main (#4553) + - azure: check for primary interface when performing DHCP (#4465) + [Chris Patterson] + - Fix hypothesis failure + - subp: add a log when skipping a file for execution for lack of exe + permission (#4506) [Ani Sinha] + - azure/imds: refactor max_connection_errors definition (#4467) + [Chris Patterson] + - chore: fix PR template rendering (#4526) + - fix(cc_apt_configure): avoid unneeded call to apt-install (#4519) + - comment difference between sysconfig and NetworkManager renderer (#4517) + [Ani Sinha] + - Set Debian's default locale to be c.UTF-8 (#4503) (LP: #2038945) + - Convert test_debian.py to pytest (#4503) + - doc: fix cloudstack link + - doc: fix development/contributing.html references + - doc: hide duplicated links + - Revert "ds-identify/CloudStack: $DS_MAYBE if vm running on vmware/xen + (#4281)" (#4511) (LP: #2039453) + - Fix the missing mcopy argument [Vladimir Pouzanov] + - tests: Add logging fix (#4499) + - Update upgrade test to account for dhcp6 + - Remove logging of PPID path (#4502) + - Make Python 3.12 CI test non-experimental (#4498) + - ds-identify: exit 2 on disabled state from marker or cmdline (#4399) + - cloud-init-generator: Various performance optimizations (#4399) + - systemd: Standardize cloud-init systemd enablement (#4399) + - benchmark: benchmark cloud-init-generator independent of ds-identify + (#4399) + - tests/integration_tests: add cloud-init disablement coverage (#4399) + - doc: Describe disabling cloud-init using an environment variable (#4399) + - fix: cloud-init status --wait broken with KERNEL_CMDLINE (#4399) + - azure/imds: retry on 429 errors for reprovisiondata (#4470) + [Chris Patterson] + - cmd: Don't write json status files for non-boot stages (#4478) + - ds-identify: Allow disable service and override environment (#4485) + [Mina Galić] + - Update DataSourceNWCS.py (#4496) [shell-skrimp] + - Add r00ta to CLA 
signers file + - Fix override of systemd_locale_conf in rhel [Jacopo Rota] + - ci(linkcheck): minor fixes (#4495) + - integration test fix for deb822 URI format (#4492) + - test: use a mantic-compatible tz in t/i/m/test_combined.py (#4494) + - ua: shift CLI command from ua to pro for all interactions + - pro: avoid double-dash when enabling individual services on CLI + - net: allow dhcp6 configuration from generate_fallback_configuration() + (#4474) [Ani Sinha] + - tests: apt re.search to match alternative ordering of installed pkgs + - apt: doc apt_pkg performance improvement over subp apt-config dump + - Tidy up contributing docs (#4469) [Sally] + - [enhancement]: Automatically linkcheck in CI (#4479) [Aviral Singh] + - Revert allowing pro service warnings (#4483) + - Export warning logs to status.json (#4455) + - Fix regression in package installation (#4466) + - schema: cloud-init schema in early boot or in dev environ (#4448) + - schema: annotation of nested dicts lists in schema marks (#4448) + - feat(apport): collect ubuntu-pro logs if ubuntu-advantage.log present + (#4443) + - apt_configure: add deb822 support for default sources file (#4437) + - net: remove the word "on instance boot" from cloud-init generated config + (#4457) [Ani Sinha] + - style: Make cloudinit.log functions use snake case (#4449) + - Don't recommend using cloud-init as a library (#4459) + - vmware: Fall back to vmtoolsd if vmware-rpctool errs (#4444) + [Andrew Kutz] + - azure: add option to enable/disable secondary ip config (#4432) + [Ksenija Stanojevic] + - Allow installing snaps via package_update_upgrade_install module (#4202) + - docs: Add cloud-init overview/introduction (#4440) [Sally] + - apt: install software-properties-common when absent but needed (#4441) + - sources/Azure: Ignore system volume information folder while scanning + for files in the ntfs resource disk (#4446) [Anh Vo] + - refactor: Remove unnecessary __main__.py file + - style: Drop vi format comments + - cloudinit.log: Use more appropriate exception (#4435) + - cloudinit.log: Don't configure NullHandler (#4435) + - commit 6bbbfbbb030831c72b5aa2bba9cb8492f19d56f4 + - cloudinit.log: Remove unnecessary module function and variables (#4435) + - cloudinit.log: Remove unused getLogger wrapper (#4435) + - cloudinit.log: Standardize use of cloudinit's logging module (#4435) + - Remove unnecessary logging wrapper in Cloud class (#4435) + - integration test: allow pro service warnings (#4447) + - integration tests: fix mount indentation (#4445) + - sources/Azure: fix for conflicting reports to platform (#4434) + [Chris Patterson] + - docs: link the cloud-config validation service (#4442) + - Fix pip-managed ansible on pip < 23.0.1 (#4403) + - Install gnupg if gpg not found (#4431) + - Add "phsm" as contributor (#4429) [Phsm Qwerty] + - cc_ubuntu_advantage: do not rely on uaclient.messages module (#4397) + [Grant Orndorff] + - tools/ds-identify: match Azure datasource's ds_detect() behavior (#4430) + [Chris Patterson] + - Refactor test_apt_source_v1.py to use pytest (#4427) + - sources: do not override datasource detection if None is in list (#4426) + [Chris Patterson] + - feat: check for create_hostname_file key before writing /etc/hostname + (SC-1588) (#4330) [Cat Red] + - Pytestify apt config test modules (#4424) + - upstream gentoo patch (#4422) + - Work around no instance ip (#4419) + - Fix typing issues in subp module (#4401) + - net: fix ipv6_dhcpv6_stateful/stateless/slaac configuration for rhel + (#4395) [Ani Sinha] + - Release 23.3.1 +
- apt: kill dirmngr/gpg-agent without gpgconf dependency (LP: #2034273) + - integration tests: fix mount indentation (#4405) + - Use grep for faster parsing of cloud config in ds-identify (#4327) + [Scott Moser] (LP: #2030729) + - doc: fix instructions on how to disable cloud-init from kernel command + line (#4406) [Ani Sinha] + - doc/vmware: Update contents relevant to disable_vmware_customization + [PengpengSun] + - Bring back flake8 for python 3.6 (#4394) + - integration tests: Fix cgroup parsing (#4402) + - summary: Update template parameter descriptions in docs [MJ Moshiri] + - Log PPID for better debugging (#4398) + - integration tests: don't clean when KEEP_* flags true (#4400) + - clean: add a new option to clean generated config files [Ani Sinha] + - pep-594: drop deprecated pipes module import + 23.3.3 - Fix pip-managed ansible on pip < 23.0.1 (#4403) diff --git a/TODO.rst b/TODO.rst deleted file mode 100644 index 7d12686426c..00000000000 --- a/TODO.rst +++ /dev/null @@ -1,43 +0,0 @@ -============================================== -Things that cloud-init may do (better) someday -============================================== - -- Consider making ``failsafe`` ``DataSource`` - - sets the user password, writing it to console - -- Consider a ``previous`` ``DataSource``, if no other data source is - found, fall back to the ``previous`` one that worked. -- Rewrite ``cloud-init-query`` (currently not implemented) -- Possibly have a ``DataSource`` expose explicit fields: - - - instance-id - - hostname - - mirror - - release - - ssh public keys - -- Remove the conversion of the ubuntu network interface format conversion - to a RH/fedora format and replace it with a top level format that uses - the netcf libraries format instead (which itself knows how to translate - into the specific formats). See for example `netcf`_ which seems to be - an active project that has this capability. -- Replace the ``apt*`` modules with variants that now use the distro classes - to perform distro independent packaging commands (wherever possible). -- Replace some the LOG.debug calls with a LOG.info where appropriate instead - of how right now there is really only 2 levels (``WARN`` and ``DEBUG``) -- Remove the ``cc_`` prefix for config modules, either have them fully - specified (ie ``cloudinit.config.resizefs``) or by default only look in - the ``cloudinit.config`` namespace for these modules (or have a combination - of the above), this avoids having to understand where your modules are - coming from (which can be altered by the current python inclusion path) -- Instead of just warning when a module is being ran on a ``unknown`` - distribution perhaps we should not run that module in that case? Or we might - want to start reworking those modules so they will run on all - distributions? Or if that is not the case, then maybe we want to allow - fully specified python paths for modules and start encouraging - packages of ``ubuntu`` modules, packages of ``rhel`` specific modules that - people can add instead of having them all under the cloud-init ``root`` - tree? This might encourage more development of other modules instead of - having to go edit the cloud-init code to accomplish this. - -.. 
_netcf: https://fedorahosted.org/netcf/ diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py index 1b4e1ce7efb..cd218bce146 100644 --- a/cloudinit/cmd/devel/__init__.py +++ b/cloudinit/cmd/devel/__init__.py @@ -2,21 +2,10 @@ """Common cloud-init devel commandline utility functions.""" - -import logging - -from cloudinit import log from cloudinit.helpers import Paths from cloudinit.stages import Init -def addLogHandlerCLI(logger, log_level): - """Add a commandline logging handler to emit messages to stderr.""" - formatter = logging.Formatter("%(levelname)s: %(message)s") - log.setup_basic_logging(log_level, formatter=formatter) - return logger - - def read_cfg_paths(fetch_existing_datasource: str = "") -> Paths: """Return a Paths object based on the system configuration on disk. diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py index 277d33746f2..8b2c68ccd49 100755 --- a/cloudinit/cmd/devel/make_mime.py +++ b/cloudinit/cmd/devel/make_mime.py @@ -10,7 +10,6 @@ from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText -from cloudinit.cmd.devel import addLogHandlerCLI from cloudinit.handlers import INCLUSION_TYPES_MAP NAME = "make-mime" @@ -116,7 +115,6 @@ def handle_args(name, args): @return 0 on success, 1 on failure. """ - addLogHandlerCLI(LOG, logging.DEBUG if args.debug else logging.WARNING) if args.list_types: print("\n".join(get_content_types(strip_prefix=True))) return 0 diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py index 4fc7081af79..71d27552dcf 100755 --- a/cloudinit/cmd/devel/render.py +++ b/cloudinit/cmd/devel/render.py @@ -9,7 +9,7 @@ import os import sys -from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths +from cloudinit.cmd.devel import read_cfg_paths from cloudinit.handlers.jinja_template import ( JinjaLoadError, NotJinjaError, @@ -61,7 +61,6 @@ def render_template(user_data_path, instance_data_path=None, debug=False): @return 0 on success, 1 on failure. 
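# A hedged sketch of the centralized stderr logging that replaces the
# removed addLogHandlerCLI helper (simplified from setup_basic_logging in
# cloudinit/log.py later in this diff; setup_cli_logging is an illustrative
# name, not cloud-init API):
import logging
import sys

def setup_cli_logging(level: int = logging.WARNING) -> None:
    """Attach a single stderr handler to the root logger."""
    console = logging.StreamHandler(sys.stderr)
    console.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    console.setLevel(level)
    logging.getLogger().addHandler(console)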
""" - addLogHandlerCLI(LOG, logging.DEBUG if debug else logging.WARNING) if instance_data_path: instance_data_fn = instance_data_path else: diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py old mode 100755 new mode 100644 index e918cce72b2..9e9454ee646 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -41,8 +41,10 @@ setup_logging, reset_logging, configure_root_logger, + DEPRECATED, ) from cloudinit.reporting import events +from cloudinit.safeyaml import load from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG # Welcome message template @@ -220,11 +222,17 @@ def attempt_cmdline_url(path, network=True, cmdline=None) -> Tuple[int, str]: is_cloud_cfg = False if is_cloud_cfg: if cmdline_name == "url": - util.deprecate( - deprecated="The kernel command line key `url`", - deprecated_version="22.3", - extra_message=" Please use `cloud-config-url` " - "kernel command line parameter instead", + return ( + DEPRECATED, + str( + util.deprecate( + deprecated="The kernel command line key `url`", + deprecated_version="22.3", + extra_message=" Please use `cloud-config-url` " + "kernel command line parameter instead", + return_log=True, + ), + ), ) else: if cmdline_name == "cloud-config-url": @@ -477,9 +485,10 @@ def main_init(name, args): return (init.datasource, ["Consuming user data failed!"]) # Validate user-data adheres to schema definition - if os.path.exists(init.paths.get_ipath_cur("userdata_raw")): + cloud_cfg_path = init.paths.get_ipath_cur("cloud_config") + if os.path.exists(cloud_cfg_path) and os.stat(cloud_cfg_path).st_size != 0: validate_cloudconfig_schema( - config=init.cfg, + config=load(util.load_file(cloud_cfg_path)), strict=False, log_details=False, log_deprecations=True, @@ -1046,9 +1055,14 @@ def main(sysv_args=None): # Subparsers.required = True and each subparser sets action=(name, functor) (name, functor) = args.action - # Setup basic logging to start (until reinitialized) - # iff in debug mode. 
- if args.debug: + # Setup basic logging for cloud-init: + # - for cloud-init stages if --debug + # - for all other subcommands: + # - if --debug is passed, logging.DEBUG + # - if --debug is not passed, logging.WARNING + if name not in ("init", "modules"): + setup_basic_logging(logging.DEBUG if args.debug else logging.WARNING) + elif args.debug: setup_basic_logging() # Setup signal handlers before running diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py index c795e1e7c04..a8178cf8a86 100644 --- a/cloudinit/cmd/query.py +++ b/cloudinit/cmd/query.py @@ -22,7 +22,7 @@ from errno import EACCES from cloudinit import atomic_helper, util -from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths +from cloudinit.cmd.devel import read_cfg_paths from cloudinit.handlers.jinja_template import ( convert_jinja_instance_data, get_jinja_variable_alias, @@ -262,7 +262,6 @@ def _find_instance_data_leaf_by_varname_path( def handle_args(name, args): """Handle calls to 'cloud-init query' as a subcommand.""" - addLogHandlerCLI(LOG, logging.DEBUG if args.debug else logging.WARNING) if not any([args.list_keys, args.varname, args.format, args.dump_all]): LOG.error( "Expected one of the options: --all, --format," diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index b2d57ff7c94..89966d7062c 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -10,9 +10,19 @@ from collections import defaultdict from collections.abc import Iterable from copy import deepcopy +from errno import EACCES from functools import partial from itertools import chain -from typing import TYPE_CHECKING, List, NamedTuple, Optional, Type, Union, cast +from typing import ( + TYPE_CHECKING, + DefaultDict, + List, + NamedTuple, + Optional, + Type, + Union, + cast, +) import yaml @@ -575,6 +585,17 @@ def validate_cloudconfig_schema( validator.iter_errors(config), key=lambda e: e.path ): path = ".".join([str(p) for p in schema_error.path]) + if ( + not path + and schema_error.validator == "additionalProperties" + and schema_error.schema == schema + ): + # an issue with invalid top-level property + prop_match = re.match( + r".*\('(?P<name>.*)' was unexpected\)", schema_error.message + ) + if prop_match: + path = prop_match["name"] problem = (SchemaProblem(path, schema_error.message),) if isinstance( schema_error, SchemaDeprecationError @@ -608,6 +629,7 @@ validate_cloudconfig_schema( "see the schema errors." ) LOG.warning(details) + return True class _Annotator: @@ -627,7 +649,7 @@ def _build_footer(title: str, content: List[str]) -> str: return f"# {title}: -------------\n{body}\n\n" def _build_errors_by_line(self, schema_problems: SchemaProblems): - errors_by_line = defaultdict(list) + errors_by_line: DefaultDict[Union[str, int], List] = defaultdict(list) for path, msg in schema_problems: match = re.match(r"format-l(?P<line>\d+)\.c(?P<col>\d+).*", path) if match: @@ -937,9 +959,14 @@ def validate_cloudconfig_file( ) return False try: - validate_cloudconfig_schema( + if not validate_cloudconfig_schema( cloudconfig, schema, strict=True, log_deprecations=False - ) + ): + print( + f"Skipping {schema_type} schema validation." + " Jsonschema dependency missing.
" + ) + return False except SchemaValidationError as e: if e.has_errors(): errors += e.schema_errors @@ -1293,14 +1320,15 @@ def get_meta_doc(meta: MetaSchema, schema: Optional[dict] = None) -> str: if defs.get(meta["id"]): schema = defs.get(meta["id"], {}) schema = cast(dict, schema) - try: - meta_copy["property_doc"] = _get_property_doc( - schema, defs=defs, prefix=" " - ) - except AttributeError: - LOG.warning("Unable to render property_doc due to invalid schema") - meta_copy["property_doc"] = "" - if not meta_copy["property_doc"]: + if any(schema["properties"].values()): + try: + meta_copy["property_doc"] = _get_property_doc( + schema, defs=defs, prefix=" " + ) + except AttributeError: + LOG.warning("Unable to render property_doc due to invalid schema") + meta_copy["property_doc"] = "" + if not meta_copy.get("property_doc", ""): meta_copy[ "property_doc" ] = " No schema definitions for this module" @@ -1463,10 +1491,18 @@ def handle_schema_args(name, args): return try: paths = read_cfg_paths(fetch_existing_datasource="trust") + except (IOError, OSError) as e: + if e.errno == EACCES: + LOG.debug( + "Using default instance-data/user-data paths for non-root user" + ) + paths = read_cfg_paths() + else: + raise except DataSourceNotFoundException: paths = read_cfg_paths() - print( - "WARNING: datasource not detected, using default" + LOG.warning( + "datasource not detected, using default" " instance-data/user-data paths." ) if args.instance_data: diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index 06861526f20..a553c52cada 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -421,6 +421,52 @@ ] } }, + "merge_definition": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "minItems": 1, + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "name", + "settings" + ], + "properties": { + "name": { + "type": "string", + "enum": [ + "list", + "dict", + "str" + ] + }, + "settings": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "allow_delete", + "no_replace", + "replace", + "append", + "prepend", + "recurse_dict", + "recurse_list", + "recurse_array", + "recurse_str" + ] + } + } + } + } + } + ] + }, "base_config": { "type": "object", "properties": { @@ -432,6 +478,16 @@ }, "cloud_final_modules": { "$ref": "#/$defs/modules_definition" + }, + "launch-index": { + "type": "integer", + "description": "The launch index for the specified cloud-config." + }, + "merge_how": { + "$ref": "#/$defs/merge_definition" + }, + "merge_type": { + "$ref": "#/$defs/merge_definition" } } }, @@ -3382,6 +3438,62 @@ } } }, + "output_log_operator": { + "oneOf": [ + { + "type": "string", + "description": "A filepath operation configuration. This is a string containing a filepath and an optional leading operator: '>', '>>' or '|'. Operators '>' and '>>' indicate whether to overwrite or append to the file. The operator '|' redirects content to the command arguments specified." + }, + { + "type": "array", + "description": "A list specifying filepath operation configuration for stdout and stderr", + "items": { + "type": [ + "string" + ] + }, + "minItems": 2, + "maxItems": 2 + }, + { + "type": "object", + "additionalProperties": false, + "properties": { + "output": { + "type": "string", + "description": "A filepath operation configuration.
This is a string containing a filepath and an optional leading operator: '>', '>>' or '|'. Operators '>' and '>>' indicate whether to overwrite or append to the file. The operator '|' redirects content to the command arguments specified." + }, + "error": { + "type": "string", + "description": "A filepath operation configuration. A string containing a filepath and an optional leading operator: '>', '>>' or '|'. Operators '>' and '>>' indicate whether to overwrite or append to the file. The operator '|' redirects content to the command arguments specified." + } + } + } + ] + }, + "output_config": { + "type": "object", + "properties": { + "output": { + "type": "object", + "additionalProperties": false, + "properties": { + "all": { + "$ref": "#/$defs/output_log_operator" + }, + "init": { + "$ref": "#/$defs/output_log_operator" + }, + "config": { + "$ref": "#/$defs/output_log_operator" + }, + "final": { + "$ref": "#/$defs/output_log_operator" + } + } + } + } + }, "reporting_config": { "type": "object", "properties": { @@ -3678,6 +3790,108 @@ }, { "$ref": "#/$defs/reporting_config" + }, + { + "$ref": "#/$defs/output_config" } - ] + ], + "properties": { + "allow_public_ssh_keys": {}, + "ansible": {}, + "apk_repos": {}, + "apt": {}, + "apt_pipelining": {}, + "apt_reboot_if_required": {}, + "apt_update": {}, + "apt_upgrade": {}, + "authkey_hash": {}, + "autoinstall": {}, + "bootcmd": {}, + "byobu_by_default": {}, + "ca-certs": {}, + "ca_certs": {}, + "chef": {}, + "chpasswd": {}, + "cloud_config_modules": {}, + "cloud_final_modules": {}, + "cloud_init_modules": {}, + "create_hostname_file": {}, + "device_aliases": {}, + "disable_ec2_metadata": {}, + "disable_root": {}, + "disable_root_opts": {}, + "disk_setup": {}, + "drivers": {}, + "fan": {}, + "final_message": {}, + "fqdn": {}, + "fs_setup": {}, + "groups": {}, + "growpart": {}, + "grub-dpkg": {}, + "grub_dpkg": {}, + "hostname": {}, + "keyboard": {}, + "landscape": {}, + "launch-index": {}, + "locale": {}, + "locale_configfile": {}, + "lxd": {}, + "manage_etc_hosts": {}, + "manage_resolv_conf": {}, + "mcollective": {}, + "merge_how": {}, + "merge_type": {}, + "migrate": {}, + "mount_default_fields": {}, + "mounts": {}, + "no_ssh_fingerprints": {}, + "ntp": {}, + "output": {}, + "package_reboot_if_required": {}, + "package_update": {}, + "package_upgrade": {}, + "packages": {}, + "password": {}, + "phone_home": {}, + "power_state": {}, + "prefer_fqdn_over_hostname": {}, + "preserve_hostname": {}, + "puppet": {}, + "random_seed": {}, + "reporting": {}, + "resize_rootfs": {}, + "resolv_conf": {}, + "rh_subscription": {}, + "rsyslog": {}, + "runcmd": {}, + "salt_minion": {}, + "snap": {}, + "spacewalk": {}, + "ssh": {}, + "ssh_authorized_keys": {}, + "ssh_deletekeys": {}, + "ssh_fp_console_blacklist": {}, + "ssh_genkeytypes": {}, + "ssh_import_id": {}, + "ssh_key_console_blacklist": {}, + "ssh_keys": {}, + "ssh_publish_hostkeys": {}, + "ssh_pwauth": {}, + "ssh_quiet_keygen": {}, + "swap": {}, + "timezone": {}, + "ubuntu_advantage": {}, + "updates": {}, + "user": {}, + "users": {}, + "vendor_data": {}, + "version": {}, + "wireguard": {}, + "write_files": {}, + "yum_repo_dir": {}, + "yum_repos": {}, + "zypper": {} + }, + "additionalProperties": false } diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 87390f634b4..79e2623562f 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -87,6 +87,7 @@ "sle_hpc", "sle-micro", "sles", + "suse", ], "openeuler": ["openeuler"], "OpenCloudOS": 
["OpenCloudOS", "TencentOS"], diff --git a/cloudinit/distros/suse.py b/cloudinit/distros/suse.py new file mode 100644 index 00000000000..a57abff82ee --- /dev/null +++ b/cloudinit/distros/suse.py @@ -0,0 +1,5 @@ +from cloudinit.distros import opensuse + + +class Distro(opensuse.Distro): + pass diff --git a/cloudinit/log.py b/cloudinit/log.py index 0579b61c773..fcfc5ef8b24 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -22,17 +22,12 @@ from typing import DefaultDict DEFAULT_LOG_FORMAT = "%(asctime)s - %(filename)s[%(levelname)s]: %(message)s" +DEPRECATED = 35 def setup_basic_logging(level=logging.DEBUG, formatter=None): formatter = formatter or logging.Formatter(DEFAULT_LOG_FORMAT) root = logging.getLogger() - for handler in root.handlers: - if hasattr(handler, "stream") and hasattr(handler.stream, "name"): - if handler.stream.name == "<stderr>": - handler.setLevel(level) - return - # Didn't have an existing stderr handler; create a new handler console = logging.StreamHandler(sys.stderr) console.setFormatter(formatter) console.setLevel(level) @@ -50,7 +45,7 @@ def flush_loggers(root): flush_loggers(root.parent) -def define_deprecation_logger(lvl=35): +def define_deprecation_logger(lvl=DEPRECATED): logging.addLevelName(lvl, "DEPRECATED") def deprecated(self, message, *args, **kwargs): @@ -155,7 +150,7 @@ def setup_backup_logging(): which may ease debugging. """ fallback_handler = logging.StreamHandler(sys.stderr) - fallback_handler.handleError = lambda self, record: None + fallback_handler.handleError = lambda record: None fallback_handler.setFormatter( logging.Formatter( "FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: %(message)s" diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py index 3b5f9b96a2e..28c851cd706 100644 --- a/cloudinit/net/ephemeral.py +++ b/cloudinit/net/ephemeral.py @@ -8,12 +8,12 @@ from typing import Any, Callable, Dict, List, Optional import cloudinit.net as net -from cloudinit import subp from cloudinit.net.dhcp import ( IscDhclient, NoDHCPLeaseError, maybe_perform_dhcp_discovery, ) +from cloudinit.subp import ProcessExecutionError LOG = logging.getLogger(__name__) @@ -106,11 +106,7 @@ def __enter__(self): self._bringup_static_routes() elif self.router: self._bringup_router() - except subp.ProcessExecutionError: - LOG.error( - "Error bringing up EphemeralIPv4Network. " - "Datasource setup cannot continue" - ) + except ProcessExecutionError: self.__exit__(None, None, None) raise @@ -130,7 +126,7 @@ def _bringup_device(self): ) try: self.distro.net_ops.add_addr(self.interface, cidr, self.broadcast) - except subp.ProcessExecutionError as e: + except ProcessExecutionError as e: if "File exists" not in str(e.stderr): raise LOG.debug( @@ -274,11 +270,9 @@ def __exit__(self, excp_type, excp_value, excp_traceback): def clean_network(self): """Exit _ephipv4 context to teardown of ip configuration performed.""" - if self.lease: - self.lease = None - if not self._ephipv4: - return - self._ephipv4.__exit__(None, None, None) + self.lease = None + if self._ephipv4: + self._ephipv4.__exit__(None, None, None) def obtain_lease(self): """Perform dhcp discovery in a sandboxed environment if possible. @@ -350,7 +344,13 @@ def get_first_option_value( class EphemeralIPNetwork: - """Marries together IPv4 and IPv6 ephemeral context managers""" + """Combined ephemeral context manager for IPv4 and IPv6 + + Either ipv4 or ipv6 ephemeral network may fail to initialize, but if either + succeeds, then this context manager will not raise an exception.
This allows + either ipv4 or ipv6 ephemeral network to succeed, but requires that error + handling for unavailable networks be done within the context. + """ def __init__( self, @@ -367,24 +367,47 @@ def __init__( self.distro = distro def __enter__(self): - # ipv6 dualstack might succeed when dhcp4 fails - # therefore catch exception unless only v4 is used - try: - if self.ipv4: + if not (self.ipv4 or self.ipv6): + # no ephemeral network requested, but this object still needs to + # function as a context manager + return self + exceptions = [] + ephemeral_obtained = False + if self.ipv4: + try: self.stack.enter_context( - EphemeralDHCPv4(self.distro, self.interface) + EphemeralDHCPv4( + self.distro, + self.interface, + ) ) - if self.ipv6: + ephemeral_obtained = True + except (ProcessExecutionError, NoDHCPLeaseError) as e: + LOG.info("Failed to bring up %s for ipv4.", self) + exceptions.append(e) + + if self.ipv6: + try: self.stack.enter_context( - EphemeralIPv6Network(self.distro, self.interface) + EphemeralIPv6Network( + self.distro, + self.interface, + ) ) - # v6 link local might be usable - # caller may want to log network state - except NoDHCPLeaseError as e: - if self.ipv6: - self.state_msg = "using link-local ipv6" - else: - raise e + ephemeral_obtained = True + if exceptions or not self.ipv4: + self.state_msg = "using link-local ipv6" + except ProcessExecutionError as e: + LOG.info("Failed to bring up %s for ipv6.", self) + exceptions.append(e) + if not ephemeral_obtained: + # Ephemeral network setup failed during link-up for both ipv4 and + # ipv6. Raise only the first exception found. + LOG.error( + "Failed to bring up EphemeralIPNetwork. " + "Datasource setup cannot continue" + ) + raise exceptions[0] return self def __exit__(self, *_args): diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 00bcff7c09b..14c57cdcc5f 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -123,25 +123,6 @@ def decorator(self, command, *args, **kwargs): return wrapper -class CommandHandlerMeta(type): - """Metaclass that dynamically creates a 'command_handlers' attribute. - - This will scan the to-be-created class for methods that start with - 'handle_' and on finding those will populate a class attribute mapping - so that those methods can be quickly located and called.
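# The CommandHandlerMeta metaclass removed below built command_handlers
# implicitly from handle_* method names; NetworkStateInterpreter now
# registers bound methods in an explicit dict (shown further down). A
# minimal sketch of that dispatch pattern, with illustrative names:
class Interpreter:
    def __init__(self):
        self.command_handlers = {"physical": self.handle_physical}

    def handle_physical(self, command: dict) -> None:
        print("configuring", command.get("name"))

    def parse(self, commands: list) -> None:
        for command in commands:
            handler = self.command_handlers[command["type"]]
            handler(command)  # bound method: no explicit self argument

Interpreter().parse([{"type": "physical", "name": "eth0"}])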
- """ - - def __new__(cls, name, parents, dct): - command_handlers = {} - for attr_name, attr in dct.items(): - if callable(attr) and attr_name.startswith("handle_"): - handles_what = attr_name[len("handle_") :] - if handles_what: - command_handlers[handles_what] = attr - dct["command_handlers"] = command_handlers - return super(CommandHandlerMeta, cls).__new__(cls, name, parents, dct) - - class NetworkState: def __init__( self, network_state: dict, version: int = NETWORK_STATE_VERSION @@ -228,7 +209,7 @@ def to_passthrough(cls, network_state: dict) -> "NetworkState": return cls({"config": network_state}, **kwargs) -class NetworkStateInterpreter(metaclass=CommandHandlerMeta): +class NetworkStateInterpreter: initial_network_state = { "interfaces": {}, "routes": [], @@ -253,6 +234,21 @@ def __init__( self._parsed = False self._interface_dns_map: dict = {} self._renderer = renderer + self.command_handlers = { + "bond": self.handle_bond, + "bonds": self.handle_bonds, + "bridge": self.handle_bridge, + "bridges": self.handle_bridges, + "ethernets": self.handle_ethernets, + "infiniband": self.handle_infiniband, + "loopback": self.handle_loopback, + "nameserver": self.handle_nameserver, + "physical": self.handle_physical, + "route": self.handle_route, + "vlan": self.handle_vlan, + "vlans": self.handle_vlans, + "wifis": self.handle_wifis, + } @property def network_state(self) -> NetworkState: @@ -319,7 +315,7 @@ def parse_config_v1(self, skip_broken=True): "No handler found for command '%s'" % command_type ) from e try: - handler(self, command) + handler(command) except InvalidCommand: if not skip_broken: raise @@ -361,7 +357,7 @@ def parse_config_v2(self, skip_broken=True): "No handler found for command '%s'" % command_type ) from e try: - handler(self, command) + handler(command) self._v2_common(command) except InvalidCommand: if not skip_broken: diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index a30cff08121..7570a5e39c2 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -429,7 +429,7 @@ def _render_subnets(cls, iface_cfg, subnets, has_default_route, flavor): if subnet_type == "dhcp6" or subnet_type == "ipv6_dhcpv6-stateful": if flavor == "suse": # User wants dhcp for both protocols - if iface_cfg["BOOTPROTO"] == "dhcp4": + if iface_cfg["BOOTPROTO"] in ("dhcp4", "dhcp"): iface_cfg["BOOTPROTO"] = "dhcp" else: # Only IPv6 is DHCP, IPv4 may be static @@ -450,7 +450,7 @@ def _render_subnets(cls, iface_cfg, subnets, has_default_route, flavor): elif subnet_type == "ipv6_dhcpv6-stateless": if flavor == "suse": # User wants dhcp for both protocols - if iface_cfg["BOOTPROTO"] == "dhcp4": + if iface_cfg["BOOTPROTO"] in ("dhcp4", "dhcp"): iface_cfg["BOOTPROTO"] = "dhcp" else: # Only IPv6 is DHCP, IPv4 may be static @@ -468,7 +468,7 @@ def _render_subnets(cls, iface_cfg, subnets, has_default_route, flavor): elif subnet_type == "ipv6_slaac": if flavor == "suse": # User wants dhcp for both protocols - if iface_cfg["BOOTPROTO"] == "dhcp4": + if iface_cfg["BOOTPROTO"] in ("dhcp4", "dhcp"): iface_cfg["BOOTPROTO"] = "dhcp" else: # Only IPv6 is DHCP, IPv4 may be static @@ -481,10 +481,10 @@ def _render_subnets(cls, iface_cfg, subnets, has_default_route, flavor): elif subnet_type in ["dhcp4", "dhcp"]: bootproto_in = iface_cfg["BOOTPROTO"] iface_cfg["BOOTPROTO"] = "dhcp" - if flavor == "suse" and subnet_type == "dhcp4": + if flavor == "suse": # If dhcp6 is already specified the user wants dhcp # for both protocols - if bootproto_in != "dhcp6": + if bootproto_in not in 
("dhcp6", "dhcp"): # Only IPv4 is DHCP, IPv6 may be static iface_cfg["BOOTPROTO"] = "dhcp4" elif subnet_type in ["static", "static6"]: diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index b69d7ffa0fb..606ade165f4 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -1,11 +1,15 @@ # This file is part of cloud-init. See LICENSE file for license information. +import logging from typing import List from cloudinit import dmi, sources +from cloudinit.event import EventScope, EventType from cloudinit.sources import DataSourceEc2 as EC2 from cloudinit.sources import DataSourceHostname +LOG = logging.getLogger(__name__) + ALIYUN_PRODUCT = "Alibaba Cloud ECS" @@ -23,6 +27,10 @@ class DataSourceAliYun(EC2.DataSourceEc2): def imdsv2_token_put_header(self): return "X-aliyun-ecs-metadata-token" + def __init__(self, sys_cfg, distro, paths): + super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths) + self.default_update_events[EventScope.NETWORK].add(EventType.BOOT) + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): hostname = self.metadata.get("hostname") is_default = False @@ -61,12 +69,24 @@ def parse_public_keys(public_keys): return keys +class DataSourceAliYunLocal(DataSourceAliYun): + """Datasource run at init-local which sets up network to query metadata. + + In init-local, no network is available. This subclass sets up minimal + networking with dhclient on a viable nic so that it can talk to the + metadata service. If the metadata service provides network configuration + then render the network configuration for that instance based on metadata. + """ + + perform_dhcp_setup = True + + # Used to match classes to dependencies datasources = [ + (DataSourceAliYunLocal, (sources.DEP_FILESYSTEM,)), # Run at init-local (DataSourceAliYun, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] - # Return a list of data sources that match this set of dependencies def get_datasource_list(depends): return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index b0e36193bad..11c14e2001f 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -676,6 +676,15 @@ def crawl_metadata(self): # fetch metadata again as it has changed after reprovisioning imds_md = self.get_metadata_from_imds(report_failure=True) + # validate imds pps metadata + imds_ppstype = self._ppstype_from_imds(imds_md) + if imds_ppstype not in (None, PPSType.NONE.value): + self._report_failure( + errors.ReportableErrorImdsInvalidMetadata( + key="extended.compute.ppsType", value=imds_ppstype + ) + ) + # Report errors if IMDS network configuration is missing data. self.validate_imds_network_metadata(imds_md=imds_md) @@ -769,11 +778,19 @@ def get_metadata_from_imds(self, report_failure: bool) -> Dict: start_time = time() retry_deadline = start_time + 300 + # As a temporary workaround to support Azure Stack implementations + # which may not enable IMDS, limit connection errors to 11. 
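# Hedged sketch of the capped connection-error policy described in the
# comment above: retries stop once the error budget is spent, and only when
# a cap is set (RetryPolicy is an illustrative stand-in for the
# ReadUrlRetryHandler changes later in this diff):
from typing import Optional

import requests

class RetryPolicy:
    def __init__(self, max_connection_errors: Optional[int] = None):
        self.max_connection_errors = max_connection_errors

    def should_retry(self, exception: Exception) -> bool:
        if self.max_connection_errors is not None and isinstance(
            exception, requests.ConnectionError
        ):
            self.max_connection_errors -= 1
            if self.max_connection_errors <= 0:
                return False  # budget exhausted: stop retrying
        return True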
+ if not self._route_configured_for_imds: + max_connection_errors = 11 + else: + max_connection_errors = None + error_string: Optional[str] = None error_report: Optional[errors.ReportableError] = None try: return imds.fetch_metadata_with_api_fallback( - retry_deadline=retry_deadline + max_connection_errors=max_connection_errors, + retry_deadline=retry_deadline, ) except UrlError as error: error_string = str(error) @@ -1012,7 +1029,7 @@ def _wait_for_nic_detach(self, nl_sock): else: report_diagnostic_event( "The preprovisioned nic %s is detached" % ifname, - logger_func=LOG.warning, + logger_func=LOG.debug, ) except AssertionError as error: report_diagnostic_event(str(error), logger_func=LOG.error) diff --git a/cloudinit/sources/azure/errors.py b/cloudinit/sources/azure/errors.py index 876ed657172..f7ac45fc45d 100644 --- a/cloudinit/sources/azure/errors.py +++ b/cloudinit/sources/azure/errors.py @@ -170,3 +170,11 @@ def __init__(self, exception: Exception) -> None: self.supporting_data["exception"] = repr(exception) self.supporting_data["traceback_base64"] = trace_base64 + + +class ReportableErrorImdsInvalidMetadata(ReportableError): + def __init__(self, *, key: str, value: Any) -> None: + super().__init__(f"invalid IMDS metadata for key={key}") + + self.supporting_data["key"] = key + self.supporting_data["value"] = repr(value) diff --git a/cloudinit/sources/azure/imds.py b/cloudinit/sources/azure/imds.py index 7a831f85247..a9b2c455d04 100644 --- a/cloudinit/sources/azure/imds.py +++ b/cloudinit/sources/azure/imds.py @@ -30,7 +30,7 @@ def __init__( self, *, logging_backoff: float = 1.0, - max_connection_errors: int = 11, + max_connection_errors: Optional[int] = None, retry_codes=( 404, # not found (yet) 410, # gone / unavailable (yet) @@ -66,7 +66,9 @@ def exception_callback(self, req_args, exception) -> bool: # Check for connection errors which may occur early boot, but # are otherwise indicative that we are not connecting with the # primary NIC. - if isinstance(exception.cause, requests.ConnectionError): + if self.max_connection_errors is not None and isinstance( + exception.cause, requests.ConnectionError + ): self.max_connection_errors -= 1 if self.max_connection_errors <= 0: retry = False @@ -110,7 +112,7 @@ def exception_callback(self, req_args, exception) -> bool: def _fetch_url( url: str, *, - retry_deadline: float, + retry_handler: ReadUrlRetryHandler, log_response: bool = True, timeout: int = 30, ) -> bytes: @@ -123,12 +125,10 @@ def _fetch_url( :raises UrlError: on error fetching metadata. """ - handler = ReadUrlRetryHandler(retry_deadline=retry_deadline) - try: response = readurl( url, - exception_cb=handler.exception_callback, + exception_cb=retry_handler.exception_callback, headers={"Metadata": "true"}, infinite=True, log_req_resp=log_response, @@ -146,7 +146,8 @@ def _fetch_url( def _fetch_metadata( url: str, - retry_deadline: float, + *, + retry_handler: ReadUrlRetryHandler, ) -> Dict: """Fetch IMDS metadata. @@ -156,7 +157,7 @@ def _fetch_metadata( :raises UrlError: on error fetching metadata. :raises ValueError: on error parsing metadata. 
""" - metadata = _fetch_url(url, retry_deadline=retry_deadline) + metadata = _fetch_url(url, retry_handler=retry_handler) try: return util.load_json(metadata) @@ -168,7 +169,9 @@ def _fetch_metadata( raise -def fetch_metadata_with_api_fallback(retry_deadline: float) -> Dict: +def fetch_metadata_with_api_fallback( + retry_deadline: float, max_connection_errors: Optional[int] = None +) -> Dict: """Fetch extended metadata, falling back to non-extended as required. :param retry_deadline: time()-based deadline to retry until. @@ -176,17 +179,25 @@ def fetch_metadata_with_api_fallback(retry_deadline: float) -> Dict: :raises UrlError: on error fetching metadata. :raises ValueError: on error parsing metadata. """ + retry_handler = ReadUrlRetryHandler( + max_connection_errors=max_connection_errors, + retry_deadline=retry_deadline, + ) try: url = IMDS_URL + "/instance?api-version=2021-08-01&extended=true" - return _fetch_metadata(url, retry_deadline=retry_deadline) + return _fetch_metadata(url, retry_handler=retry_handler) except UrlError as error: if error.code == 400: report_diagnostic_event( "Falling back to IMDS api-version: 2019-06-01", logger_func=LOG.warning, ) + retry_handler = ReadUrlRetryHandler( + max_connection_errors=max_connection_errors, + retry_deadline=retry_deadline, + ) url = IMDS_URL + "/instance?api-version=2019-06-01" - return _fetch_metadata(url, retry_deadline=retry_deadline) + return _fetch_metadata(url, retry_handler=retry_handler) raise diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 47f25afd065..9d85c6b4e40 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -794,7 +794,7 @@ def eject_iso(self, iso_dev) -> None: except Exception as e: report_diagnostic_event( "Failed ejecting the provisioning iso: %s" % e, - logger_func=LOG.debug, + logger_func=LOG.error, ) @azure_ds_telemetry_reporter diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 891dfa73e14..3b6405f54db 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -10,6 +10,7 @@ import os import sys from collections import namedtuple +from contextlib import suppress from typing import Dict, Iterable, List, Optional, Set from cloudinit import ( @@ -219,25 +220,24 @@ def purge_cache(self, rm_instance_lnk=False): def initialize(self): self._initialize_filesystem() + @staticmethod + def _get_strictest_mode(mode_1: int, mode_2: int) -> int: + return mode_1 & mode_2 + def _initialize_filesystem(self): mode = 0o640 - fmode = None util.ensure_dirs(self._initial_subdirs()) log_file = util.get_cfg_option_str(self.cfg, "def_log_file") if log_file: # At this point the log file should have already been created - # in the setup_logging function of log.py - - try: - fmode = util.get_permissions(log_file) - except OSError: - pass - - # if existing file mode fmode is stricter, do not change it. 
- if fmode and util.compare_permission(fmode, mode) < 0: - mode = fmode + # in the setup_logging function of log.py + with suppress(OSError): + mode = self._get_strictest_mode( + 0o640, util.get_permissions(log_file) + ) + # set file mode to the strictest of 0o640 and the current mode util.ensure_file(log_file, mode, preserve_mode=False) perms = self.cfg.get("syslog_fix_perms") if not perms: @@ -388,6 +388,30 @@ def _get_ipath(self, subname=None): ) return instance_dir + def _write_network_config_json(self, netcfg: dict): + """Create /var/lib/cloud/instance/network-config.json + + Only attempt once /var/lib/cloud/instance exists which is created + by Init.instancify once a datasource is detected. + """ + + if not os.path.islink(self.paths.instance_link): + # Datasource hasn't been detected yet, so we may not + # have visibility to datasource applicable network-config + return + ncfg_instance_path = self.paths.get_ipath_cur("network_config") + network_link = self.paths.get_runpath("network_config") + if os.path.exists(ncfg_instance_path): + # Compare and only write on delta of current network-config + if netcfg != util.load_json(util.load_file(ncfg_instance_path)): + atomic_helper.write_json( + ncfg_instance_path, netcfg, mode=0o600 + ) + else: + atomic_helper.write_json(ncfg_instance_path, netcfg, mode=0o600) + if not os.path.islink(network_link): + util.sym_link(ncfg_instance_path, network_link) + def _reflect_cur_instance(self): # Remove the old symlink and attach a new one so # that further reads/writes connect into the right location @@ -934,22 +958,6 @@ def _find_networking_config(self): ) def _apply_netcfg_names(self, netcfg): - ncfg_instance_path = self.paths.get_ipath_cur("network_config") - network_link = self.paths.get_runpath("network_config") - if not self._network_already_configured(): - if os.path.exists(ncfg_instance_path): - if netcfg != util.load_json( - util.load_file(ncfg_instance_path) - ): - atomic_helper.write_json( - ncfg_instance_path, netcfg, mode=0o600 - ) - else: - atomic_helper.write_json( - ncfg_instance_path, netcfg, mode=0o600 - ) - if not os.path.islink(network_link): - util.sym_link(ncfg_instance_path, network_link) try: LOG.debug("applying net config names for %s", netcfg) self.distro.networking.apply_network_config_names(netcfg) @@ -1009,6 +1017,7 @@ def should_run_on_boot_event(): # refresh netcfg after update netcfg, src = self._find_networking_config() + self._write_network_config_json(netcfg) if netcfg and netcfg.get("version") == 1: validate_cloudconfig_schema( diff --git a/cloudinit/util.py b/cloudinit/util.py index 253c80d6a2d..3295735ceea 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2143,29 +2143,6 @@ def safe_int(possible_int): return None -def compare_permission(mode1, mode2): - """Compare two file modes in octal. - - If mode1 is less restrictive than mode2 return 1 - If mode1 is more restrictive than mode2 return -1 - If mode1 is same as mode2, return 0 - - The comparison starts from the permission of the - set of users in "others" and then works up to the - permission of "user" set.
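# Hedged sketch of the write-on-delta behavior of _write_network_config_json
# above (write_if_changed is an illustrative helper, not cloud-init API; the
# real code uses atomic_helper.write_json):
import json
import os

def write_if_changed(path: str, netcfg: dict) -> None:
    if os.path.exists(path):
        with open(path) as f:
            if json.load(f) == netcfg:
                return  # config unchanged: skip the rewrite
    with open(path, "w") as f:
        json.dump(netcfg, f)
    os.chmod(path, 0o600)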
- """ - # Convert modes to octal and reverse the last 3 digits - # so 0o640 would be become 0o046 - mode1_oct = oct(mode1)[2:].rjust(3, "0") - mode2_oct = oct(mode2)[2:].rjust(3, "0") - m1 = int(mode1_oct[:-3] + mode1_oct[-3:][::-1], 8) - m2 = int(mode2_oct[:-3] + mode2_oct[-3:][::-1], 8) - - # Then do a traditional cmp() - # https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons - return (m1 > m2) - (m1 < m2) - - def chmod(path, mode): real_mode = safe_int(mode) if path and real_mode: @@ -3233,6 +3210,7 @@ def deprecate( deprecated_version: str, extra_message: Optional[str] = None, schedule: int = 5, + return_log: bool = False, ): """Mark a "thing" as deprecated. Deduplicated deprecations are logged. @@ -3249,6 +3227,8 @@ def deprecate( @param schedule: Manually set the deprecation schedule. Defaults to 5 years. Leave a comment explaining your reason for deviation if setting this value. + @param return_log: Return log text rather than logging it. Useful for + running prior to logging setup. Note: uses keyword-only arguments to improve legibility """ @@ -3258,13 +3238,15 @@ def deprecate( dedup = hash(deprecated + message + deprecated_version + str(schedule)) version = Version.from_str(deprecated_version) version_removed = Version(version.major + schedule, version.minor) + deprecate_msg = ( + f"{deprecated} is deprecated in " + f"{deprecated_version} and scheduled to be removed in " + f"{version_removed}. {message}" + ).rstrip() + if return_log: + return deprecate_msg if dedup not in deprecate._log: # type: ignore deprecate._log.add(dedup) # type: ignore - deprecate_msg = ( - f"{deprecated} is deprecated in " - f"{deprecated_version} and scheduled to be removed in " - f"{version_removed}. {message}" - ).rstrip() if hasattr(LOG, "deprecated"): LOG.deprecated(deprecate_msg) # type: ignore else: diff --git a/cloudinit/version.py b/cloudinit/version.py index a7a1d552864..2a789e24abe 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "23.3.3" +__VERSION__ = "23.4" _PACKAGED_VERSION = "@@PACKAGED_VERSION@@" FEATURES = [ diff --git a/debian/changelog b/debian/changelog index 0cd07b320be..5c8a7c7f2fb 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,10 +1,35 @@ -cloud-init (23.3-0ubuntu0~23.04.1) UNRELEASED; urgency=medium +cloud-init (23.4-0ubuntu1~23.04.1) lunar; urgency=medium * d/p/status-do-not-remove-duplicated-data.patch: - Revert behavior downstream, leave duplicate data + * d/control: add python3-apt as Recommends to read APT config from apt_pkg + * Upstream snapshot based on 23.4. (LP: #2045582). + List of changes from upstream can be found at + https://raw.githubusercontent.com/canonical/cloud-init/23.4/ChangeLog + + -- Alberto Contreras Tue, 05 Dec 2023 12:34:11 +0100 + +cloud-init (23.3.3-0ubuntu0~23.04.1) lunar; urgency=medium + + * Upstream snapshot based on 23.3.3. (LP: #2040291). + List of changes from upstream can be found at + https://raw.githubusercontent.com/canonical/cloud-init/23.3.3/ChangeLog + + -- James Falcon Tue, 24 Oct 2023 10:47:46 -0500 + +cloud-init (23.3.2-0ubuntu0~23.04.1) lunar; urgency=medium + * d/p/do-not-block-user-login.patch: - Revert behavior, allow user login after cloud-init stage (LP: #2039505) - * d/control: add python3-apt as Recommends to read APT config from apt_pkg + * Upstream snapshot based on 23.3.2. (LP: #2039453). 
+ List of changes from upstream can be found at + https://raw.githubusercontent.com/canonical/cloud-init/23.3.2/ChangeLog + + -- Brett Holman Mon, 17 Oct 2023 15:54:22 -0600 + +cloud-init (23.3.1-0ubuntu1~23.04.1) lunar; urgency=medium + + * Upstream snapshot based on upstream/main at ee9078a7. * d/cloud-init.maintscript: Remove the unused hook-network-manager conffile. (LP: #2027861) * d/patches/retain-old-groups.patch: @@ -13,12 +38,11 @@ cloud-init (23.3-0ubuntu0~23.04.1) UNRELEASED; urgency=medium * d/cloud-init.templates: enable Akamai by default. Add Akamai to the default templates to allow datasource discovery. * d/po/templates.pot: refresh with debconf-updatepo - * Upstream snapshot based on 23.3. (LP: #2033310). + * Upstream snapshot based on 23.3.1. (LP: #2033310). List of changes from upstream can be found at - https://raw.githubusercontent.com/canonical/cloud-init/23.3/ChangeLog - * Upstream snapshot based on upstream/main at 0b90fbf5. + https://raw.githubusercontent.com/canonical/cloud-init/23.3.1/ChangeLog - -- James Falcon Thu, 16 Nov 2023 17:56:16 -0600 + -- James Falcon Wed, 06 Sep 2023 11:28:36 -0500 cloud-init (23.2.2-0ubuntu0~23.04.1) lunar; urgency=medium diff --git a/doc/examples/cloud-config-chef-oneiric.txt b/doc/examples/cloud-config-chef-oneiric.txt index 241fbf9b460..c152046a488 100644 --- a/doc/examples/cloud-config-chef-oneiric.txt +++ b/doc/examples/cloud-config-chef-oneiric.txt @@ -1,6 +1,6 @@ #cloud-config # -# This is an example file to automatically install chef-client and run a +# This is an example file to automatically install chef-client and run a # list of recipes when the instance boots for the first time. # Make sure that this file is valid yaml before starting instances. # It should be passed as user-data when starting the instance. @@ -8,12 +8,12 @@ # This example assumes the instance is 11.10 (oneiric) -# The default is to install from packages. +# The default is to install from packages. # Key from http://apt.opscode.com/packages@opscode.com.gpg.key apt: sources: - source1: + source1: source: "deb http://apt.opscode.com/ $RELEASE-0.10 main" key: | -----BEGIN PGP PUBLIC KEY BLOCK----- @@ -85,8 +85,3 @@ chef: prefork: maxclients: 100 keepalive: "off" - - -# Capture all subprocess output into a logfile -# Useful for troubleshooting cloud-init issues -output: {all: '| tee -a /var/log/cloud-init-output.log'} diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt index 9bb3c150901..b4e22c2fbfb 100644 --- a/doc/examples/cloud-config-chef.txt +++ b/doc/examples/cloud-config-chef.txt @@ -102,7 +102,3 @@ chef: # If encrypted data bags are used, the client needs to have a secrets file # configured to decrypt them encrypted_data_bag_secret: "/etc/chef/encrypted_data_bag_secret" - -# Capture all subprocess output into a logfile -# Useful for troubleshooting cloud-init issues -output: {all: '| tee -a /var/log/cloud-init-output.log'} diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index 5806cee3206..f1a76bec624 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -184,14 +184,14 @@ ssh_import_id: [smoser] # # Default: none # -debconf_selections: - # Force debconf priority to critical. - set1: debconf debconf/priority select critical - - # Override default frontend to readline, but allow user to select. - set2: | - debconf debconf/frontend select readline - debconf debconf/frontend seen false +apt: + debconf_selections: + # Force debconf priority to critical. 
+ set1: debconf debconf/priority select critical + # Override default frontend to readline, but allow user to select. + set2: | + debconf debconf/frontend select readline + debconf debconf/frontend seen false # manage byobu defaults # byobu_by_default: @@ -330,25 +330,6 @@ resize_rootfs: True # want jinja, you have to start the line with '## template:jinja\n' final_message: "The system is finally up, after $UPTIME seconds" -# configure where output will go -# 'output' entry is a dict with 'init', 'config', 'final' or 'all' -# entries. Each one defines where -# cloud-init, cloud-config, cloud-config-final or all output will go -# each entry in the dict can be a string, list or dict. -# if it is a string, it refers to stdout and stderr -# if it is a list, entry 0 is stdout, entry 1 is stderr -# if it is a dict, it is expected to have 'output' and 'error' fields -# default is to write to console only -# the special entry "&1" for an error means "same location as stdout" -# (Note, that '&1' has meaning in yaml, so it must be quoted) -output: - init: "> /var/log/my-cloud-init.log" - config: [ ">> /tmp/foo.out", "> /tmp/foo.err" ] - final: - output: "| tee /tmp/final.stdout | tee /tmp/bar.stdout" - error: "&1" - - # phone_home: if this dictionary is present, then the phone_home # cloud-config module will post specified data back to the given # url @@ -366,26 +347,6 @@ phone_home: # the value of 'timezone' must exist in /usr/share/zoneinfo timezone: US/Eastern -# def_log_file and syslog_fix_perms work together -# if -# - logging is set to go to a log file 'L' both with and without syslog -# - and 'L' does not exist -# - and syslog is configured to write to 'L' -# then 'L' will be initially created with root:root ownership (during -# cloud-init), and then at cloud-config time (when syslog is available) -# the syslog daemon will be unable to write to the file. -# -# to remedy this situation, 'def_log_file' can be set to a filename -# and syslog_fix_perms to a string containing "<user>:<group>" -# if syslog_fix_perms is a list, it will iterate through and use the -# first pair that does not raise error. -# -# the default values are '/var/log/cloud-init.log' and 'syslog:adm' -# the value of 'def_log_file' should match what is configured in logging -# if either is empty, then no change of ownership will be done -def_log_file: /var/log/my-logging-file.log -syslog_fix_perms: syslog:root - # you can set passwords for a user or multiple users # this is off by default. # to set the default user's password, use the 'password' option. @@ -435,16 +396,6 @@ password: passw0rd chpasswd: { expire: False } ssh_pwauth: True -# manual cache clean. -# By default, the link from /var/lib/cloud/instance to -# the specific instance in /var/lib/cloud/instances/ is removed on every -# boot. The cloud-init code then searches for a DataSource on every boot -# if your DataSource will not be present on every boot, then you can set -# this option to 'True', and maintain (remove) that link before the image -# will be booted as a new instance. -# default is False -manual_cache_clean: False - ## configure interaction with ssh server # ssh_svcname: ssh # set the name of the option to 'service restart' diff --git a/doc/rtd/reference/base_config_reference.rst b/doc/rtd/reference/base_config_reference.rst index 953fc2188e1..85a474f5e53 100644 --- a/doc/rtd/reference/base_config_reference.rst +++ b/doc/rtd/reference/base_config_reference.rst @@ -258,6 +258,13 @@ Format is a dict with ``enabled`` and ``prefix`` keys: ``vendor_data``.
diff --git a/doc/rtd/reference/base_config_reference.rst b/doc/rtd/reference/base_config_reference.rst
index 953fc2188e1..85a474f5e53 100644
--- a/doc/rtd/reference/base_config_reference.rst
+++ b/doc/rtd/reference/base_config_reference.rst
@@ -258,6 +258,13 @@ Format is a dict with ``enabled`` and ``prefix`` keys:
   ``vendor_data``.
 * ``prefix``: A path to prepend to any ``vendor_data``-provided script.
 
+``manual_cache_clean``
+^^^^^^^^^^^^^^^^^^^^^^
+
+By default, cloud-init searches for a datasource on every boot. Setting
+this to ``true`` disables that search. This is useful if your datasource
+information will not be present on every boot. Default: ``false``.
+
 Example
 =======
 
@@ -400,6 +407,32 @@ On an Ubuntu system, :file:`/etc/cloud/cloud.cfg` should look similar to:
      security: http://ports.ubuntu.com/ubuntu-ports
    ssh_svcname: ssh
 
+   # configure where output will go
+   output:
+     init: "> /var/log/my-cloud-init.log"
+     config: [ ">> /tmp/foo.out", "> /tmp/foo.err" ]
+     final:
+       output: "| tee /tmp/final.stdout | tee /tmp/bar.stdout"
+       error: "&1"
+
+   # Set `true` to stop searching for a datasource on every boot.
+   manual_cache_clean: False
+
+   # def_log_file and syslog_fix_perms work together
+   # if
+   #  - logging is set to go to a log file 'L' both with and without syslog
+   #  - and 'L' does not exist
+   #  - and syslog is configured to write to 'L'
+   # then 'L' will be initially created with root:root ownership (during
+   # cloud-init), and then at cloud-config time (when syslog is available)
+   # the syslog daemon will be unable to write to the file.
+   #
+   # to remedy this situation, 'def_log_file' can be set to a filename
+   # and syslog_fix_perms to a string containing ":"
+   def_log_file: /var/log/my-logging-file.log
+   syslog_fix_perms: syslog:root
+
+
 .. _configuration is templated: https://github.com/canonical/cloud-init/blob/main/config/cloud.cfg.tmpl
 .. _cc_final_message.py: https://github.com/canonical/cloud-init/blob/main/cloudinit/config/cc_final_message.py
diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py
index ec92aeb6254..631285955d7 100644
--- a/tests/integration_tests/bugs/test_lp1898997.py
+++ b/tests/integration_tests/bugs/test_lp1898997.py
@@ -54,6 +54,9 @@
     CURRENT_RELEASE < FOCAL, reason="Tested on Focal and above"
 )
 @pytest.mark.lxd_use_exec
+@pytest.mark.skip(
+    reason="Network online race. GH: #4350, GH: #4451, LP: #2036968"
+)
 class TestInterfaceListingWithOpenvSwitch:
     def test_ovs_member_interfaces_not_excluded(self, client):
         # We need to install openvswitch for our provided network configuration
diff --git a/tests/integration_tests/datasources/test_none.py b/tests/integration_tests/datasources/test_none.py
new file mode 100644
index 00000000000..6d7216e3dca
--- /dev/null
+++ b/tests/integration_tests/datasources/test_none.py
@@ -0,0 +1,67 @@
+"""DataSourceNone integration tests on LXD."""
+import json
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+DS_NONE_BASE_CFG = """\
+datasource_list: [None]
+datasource:
+  None:
+    metadata:
+      instance-id: my-iid-uuid
+    userdata_raw: |
+      #cloud-config
+      runcmd:
+        - touch /var/tmp/success-with-datasource-none
+"""
+
+
+def test_datasource_none_discovery(client: IntegrationInstance):
+    """Integration test for #4635.
+
+    Test that DataSourceNone detection (used by live installers) doesn't
+    generate errors or warnings.
+    """
+    log = client.read_from_file("/var/log/cloud-init.log")
+    verify_clean_log(log)
+    # Limit datasource detection to DataSourceNone.
+    client.write_to_file(
+        "/etc/cloud/cloud.cfg.d/99-force-dsnone.cfg", DS_NONE_BASE_CFG
+    )
+    if client.settings.PLATFORM in ["lxd_container"]:
+        # DataSourceNone provides no network_config.
+        # To avoid changing the network config from the platform's desired
+        # net cfg to fallback config, copy the rendered network config out
+        # to /etc/cloud/cloud.cfg.d/99-orig-net.cfg so it is also set up
+        # in the DataSourceNone case. Otherwise (on LXD specifically) the
+        # network is torn down because virtual NICs are present, which
+        # results in no network being brought up when we emit fallback
+        # config that attempts to match on PermanentMACAddress. LP: #2022947
+        client.execute(
+            "cp /etc/netplan/50-cloud-init.yaml"
+            " /etc/cloud/cloud.cfg.d/99-orig-net.cfg"
+        )
+    client.execute("cloud-init clean --logs")
+    client.restart()
+    status = json.loads(client.execute("cloud-init status --format=json"))
+    assert [] == status["errors"]
+    expected_warnings = [
+        "Used fallback datasource",
+        "Falling back to a hard restart of systemd-networkd.service",
+    ]
+    unexpected_warnings = []
+    for current_warning in status["recoverable_errors"].get("WARNING", []):
+        if [w for w in expected_warnings if w in current_warning]:
+            # Found a matching expected_warning substring in current_warning
+            continue
+        unexpected_warnings.append(current_warning)
+
+    if unexpected_warnings:
+        raise AssertionError(
+            f"Unexpected recoverable errors: {list(unexpected_warnings)}"
+        )
+    log = client.read_from_file("/var/log/cloud-init.log")
+    verify_clean_log(log)
+    assert client.execute("test -f /var/tmp/success-with-datasource-none").ok
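For readers reproducing the check above outside the integration-test
harness, the same status inspection can be driven directly on a booted
machine. A minimal sketch that mirrors the test's filtering logic (assumes
cloud-init is installed on the host; this helper is not a cloud-init API):

    import json
    import subprocess

    def unexpected_warnings(expected_substrings):
        """Return recoverable WARNING entries not matching any expected substring."""
        out = subprocess.check_output(
            ["cloud-init", "status", "--format=json"], text=True
        )
        status = json.loads(out)
        # Hard errors should always be empty on a clean boot.
        assert status["errors"] == [], status["errors"]
        return [
            warning
            for warning in status.get("recoverable_errors", {}).get("WARNING", [])
            if not any(expected in warning for expected in expected_substrings)
        ]

    # Tolerate only the two warnings a DataSourceNone boot is known to emit.
    print(unexpected_warnings([
        "Used fallback datasource",
        "Falling back to a hard restart of systemd-networkd.service",
    ]))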
diff --git a/tests/integration_tests/modules/test_ansible.py b/tests/integration_tests/modules/test_ansible.py
index aea29b7dede..ab7139c3485 100644
--- a/tests/integration_tests/modules/test_ansible.py
+++ b/tests/integration_tests/modules/test_ansible.py
@@ -312,6 +312,7 @@ def test_ansible_pull_distro(client):
     CURRENT_RELEASE < FOCAL,
     reason="Pip install is not supported for Ansible on release",
 )
+@pytest.mark.skip(reason="Need proxy support first. 
GH: #4527") def test_ansible_controller(client): log = client.read_from_file("/var/log/cloud-init.log") verify_clean_log(log) diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index 72385971043..609d5ae0692 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -308,7 +308,6 @@ def _check_common_metadata(self, data): assert data["sys_info"]["dist"][0] == CURRENT_RELEASE.os v1_data = data["v1"] - assert re.match(r"\d\.\d+\.\d+-\d+", v1_data["kernel_release"]) assert v1_data["variant"] == CURRENT_RELEASE.os assert v1_data["distro"] == CURRENT_RELEASE.os assert v1_data["distro_release"] == CURRENT_RELEASE.series diff --git a/tests/integration_tests/modules/test_package_update_upgrade_install.py b/tests/integration_tests/modules/test_package_update_upgrade_install.py index 69d8416e535..98e6ef7815a 100644 --- a/tests/integration_tests/modules/test_package_update_upgrade_install.py +++ b/tests/integration_tests/modules/test_package_update_upgrade_install.py @@ -103,9 +103,16 @@ def test_snap_packages_are_installed(self, class_client): @pytest.mark.skipif(not IS_UBUNTU, reason="Uses Apt") def test_versioned_packages_are_installed(session_cloud: IntegrationCloud): - pkg_version = HELLO_VERSIONS_BY_RELEASE[CURRENT_RELEASE.series] + pkg_version = HELLO_VERSIONS_BY_RELEASE.get( + CURRENT_RELEASE.series, "2.10-3" + ) with session_cloud.launch( user_data=VERSIONED_USER_DATA.format(pkg_version=pkg_version) ) as client: verify_clean_log(client.read_from_file("/var/log/cloud-init.log")) - assert f"hello {pkg_version}" == client.execute("dpkg-query -W hello") + assert f"hello {pkg_version}" == client.execute( + "dpkg-query -W hello" + ), ( + "If this is failing for a new release, add it to " + "HELLO_VERSIONS_BY_RELEASE" + ) diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index 83b87065923..38efb772b1b 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -3,7 +3,6 @@ import os import pytest -import yaml from tests.integration_tests.clouds import IntegrationCloud from tests.integration_tests.conftest import get_validated_source @@ -138,18 +137,7 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): assert post_json["v1"]["datasource"].startswith( "DataSourceAzure" ) - if PLATFORM in ["gce", "qemu"]: - # GCE regenerates network config per boot AND - # GCE uses fallback config AND - # #4474 changed fallback configuration. 
- # Once the baseline includes #4474, this can be removed - pre_network = yaml.load(pre_network, Loader=yaml.Loader) - post_network = yaml.load(post_network, Loader=yaml.Loader) - for values in post_network["network"]["ethernets"].values(): - values.pop("dhcp6") - assert yaml.dump(pre_network) == yaml.dump(post_network) - else: - assert pre_network == post_network + assert pre_network == post_network # Calculate and log all the boot numbers pre_analyze_totals = [ diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index e50bfd472b2..0a15203cbdd 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -55,7 +55,7 @@ def verify_clean_log(log: str, ignore_deprecations: bool = True): "Found unexpected errors: %s" % "\n".join(error_logs) ) - warning_count = log.count("WARN") + warning_count = log.count("[WARNING]") expected_warnings = 0 traceback_count = log.count("Traceback") expected_tracebacks = 0 @@ -75,6 +75,11 @@ def verify_clean_log(log: str, ignore_deprecations: bool = True): warning_texts.append( "canonical-livepatch returned error when checking status" ) + if "found network data from DataSourceNone" in log: + warning_texts.append("Used fallback datasource") + warning_texts.append( + "Falling back to a hard restart of systemd-networkd.service" + ) if "oracle" in log: # LP: #1842752 lease_exists_text = "Stderr: RTNETLINK answers: File exists" diff --git a/tests/unittests/cmd/test_query.py b/tests/unittests/cmd/test_query.py index d95bbc70b64..0a5efe7fc20 100644 --- a/tests/unittests/cmd/test_query.py +++ b/tests/unittests/cmd/test_query.py @@ -33,7 +33,6 @@ def setup_mocks(mocker): mocker.patch("cloudinit.cmd.query.read_cfg_paths", return_value=Paths({})) -@mock.patch(M_PATH + "addLogHandlerCLI", lambda *args: "") class TestQuery: Args = namedtuple( "Args", @@ -84,10 +83,7 @@ def test_handle_args_error_on_missing_param(self, caplog, capsys): vendor_data=None, varname=None, ) - with mock.patch( - M_PATH + "addLogHandlerCLI", return_value="" - ) as m_cli_log: - assert 1 == query.handle_args("anyname", args) + assert 1 == query.handle_args("anyname", args) expected_error = ( "Expected one of the options: --all, --format, --list-keys" " or varname\n" @@ -95,7 +91,6 @@ def test_handle_args_error_on_missing_param(self, caplog, capsys): assert expected_error in caplog.text out, _err = capsys.readouterr() assert "usage: query" in out - assert 1 == m_cli_log.call_count @pytest.mark.parametrize( "inst_data,varname,expected_error", @@ -131,10 +126,9 @@ def test_handle_args_error_on_invalid_varname_paths( paths, _, _, _ = self._setup_paths(tmpdir) with mock.patch(M_PATH + "read_cfg_paths") as m_paths: m_paths.return_value = paths - with mock.patch(M_PATH + "addLogHandlerCLI", return_value=""): - with mock.patch(M_PATH + "load_userdata") as m_lud: - m_lud.return_value = "ud" - assert 1 == query.handle_args("anyname", args) + with mock.patch(M_PATH + "load_userdata") as m_lud: + m_lud.return_value = "ud" + assert 1 == query.handle_args("anyname", args) assert expected_error in caplog.text def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir): diff --git a/tests/unittests/config/test_cc_snap.py b/tests/unittests/config/test_cc_snap.py index af320d24a47..65088dd51d4 100644 --- a/tests/unittests/config/test_cc_snap.py +++ b/tests/unittests/config/test_cc_snap.py @@ -165,7 +165,6 @@ def test_add_assertions_adds_assertions_as_dict( class TestRunCommands(CiTestCase): - with_logs = True allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] @@ 
-266,13 +265,23 @@ class TestSnapSchema: ({"snap": {"commands": {"01": "also valid"}}}, None), ({"snap": {"assertions": ["valid"]}}, None), ({"snap": {"assertions": {"01": "also valid"}}}, None), - ({"commands": [["echo", "bye"], ["echo", "bye"]]}, None), - ({"commands": ["echo bye", "echo bye"]}, None), + ({"snap": {"commands": [["echo", "bye"], ["echo", "bye"]]}}, None), + ({"snap": {"commands": ["echo bye", "echo bye"]}}, None), + ( + { + "snap": { + "commands": { + "00": ["echo", "bye"], + "01": ["echo", "bye"], + } + } + }, + None, + ), ( - {"commands": {"00": ["echo", "bye"], "01": ["echo", "bye"]}}, + {"snap": {"commands": {"00": "echo bye", "01": "echo bye"}}}, None, ), - ({"commands": {"00": "echo bye", "01": "echo bye"}}, None), # Invalid ({"snap": "wrong type"}, "'wrong type' is not of type 'object'"), ( diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py index b658d26b0a2..28f0b39d090 100644 --- a/tests/unittests/config/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -1,6 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. - import importlib import inspect import itertools @@ -12,6 +11,7 @@ import unittest from collections import namedtuple from copy import deepcopy +from errno import EACCES from pathlib import Path from textwrap import dedent from types import ModuleType @@ -39,6 +39,7 @@ from cloudinit.distros import OSFAMILIES from cloudinit.safeyaml import load, load_with_marks from cloudinit.settings import FREQUENCIES +from cloudinit.sources import DataSourceNotFoundException from cloudinit.util import load_file, write_file from tests.hypothesis import given from tests.hypothesis_jsonschema import from_schema @@ -212,7 +213,13 @@ def test_get_schema_coalesces_known_schema(self): [meta["id"] for meta in get_metas().values() if meta is not None] ) assert "http://json-schema.org/draft-04/schema#" == schema["$schema"] - assert ["$defs", "$schema", "allOf"] == sorted(list(schema.keys())) + assert [ + "$defs", + "$schema", + "additionalProperties", + "allOf", + "properties", + ] == sorted(list(schema.keys())) # New style schema should be defined in static schema file in $defs expected_subschema_defs = [ {"$ref": "#/$defs/base_config"}, @@ -271,6 +278,7 @@ def test_get_schema_coalesces_known_schema(self): {"$ref": "#/$defs/cc_yum_add_repo"}, {"$ref": "#/$defs/cc_zypper_add_repo"}, {"$ref": "#/$defs/reporting_config"}, + {"$ref": "#/$defs/output_config"}, ] found_subschema_defs = [] legacy_schema_keys = [] @@ -1626,6 +1634,29 @@ def test_annotated_cloudconfig_file_annotates_separate_line_items(self): schema_errors=schema_errors, ) + @skipUnlessJsonSchema() + def test_annotated_invalid_top_level_key(self, tmp_path: Path, capsys): + expected_err = dedent( + """\ + #cloud-config + invalid_key: value # E1 + + # Errors: ------------- + # E1: Additional properties are not allowed ('invalid_key' was unexpected) + """ # noqa: E501 + ) + config_file = tmp_path / "my.yaml" + config_file.write_text("#cloud-config\ninvalid_key: value\n") + with pytest.raises( + SchemaValidationError, + match="errors: invalid_key: Additional properties are not allowed", + ): + validate_cloudconfig_file( + str(config_file), get_schema(), annotate=True + ) + out, _err = capsys.readouterr() + assert out.strip() == expected_err.strip() + @mock.patch(M_PATH + "read_cfg_paths") # called by parse_args help docs class TestMain: @@ -1784,7 +1815,7 @@ def test_main_validates_system_userdata_vendordata_and_network_config( vd_file = 
paths.get_ipath_cur("vendor_cloud_config") write_file(vd_file, b"#cloud-config\nssh_import_id: [me]") vd2_file = paths.get_ipath_cur("vendor2_cloud_config") - write_file(vd2_file, b"#cloud-config\nssh_pw_auth: true") + write_file(vd2_file, b"#cloud-config\nssh_pwauth: true") network_file = paths.get_ipath_cur("network_config") write_file(network_file, net_config) myargs = ["mycmd", "--system"] @@ -1832,9 +1863,12 @@ def test_main_system_userdata_requires_root( assert expected == err -def _get_meta_doc_examples( - file_glob="cloud-config*.txt", exclusion_match=r"^cloud-config-archive.*" -): +def _get_meta_doc_examples(file_glob="cloud-config*.txt"): + exlusion_patterns = [ + "^cloud-config-archive.*", + "cloud-config-datasources.txt", + ] + exclusion_match = f"({'|'.join(exlusion_patterns)})" examples_dir = Path(cloud_init_project_dir("doc/examples")) assert examples_dir.is_dir() return ( @@ -2106,6 +2140,8 @@ def clean_schema( remove_modules(schema, set(modules)) if defs: remove_defs(schema, set(defs)) + del schema["properties"] + del schema["additionalProperties"] return schema @@ -2119,7 +2155,12 @@ class TestSchemaFuzz: @skipUnlessHypothesisJsonSchema() @given(from_schema(SCHEMA)) - def test_validate_full_schema(self, config): + def test_validate_full_schema(self, orig_config): + config = deepcopy(orig_config) + valid_props = get_schema()["properties"].keys() + for key in orig_config.keys(): + if key not in valid_props: + del config[key] try: validate_cloudconfig_schema(config, strict=True) except SchemaValidationError as ex: @@ -2128,11 +2169,60 @@ def test_validate_full_schema(self, config): class TestHandleSchemaArgs: - Args = namedtuple( "Args", "config_file schema_type docs system annotate instance_data" ) + @pytest.mark.parametrize( + "failure, expected_logs", + ( + ( + IOError("No permissions on /var/lib/cloud/instance"), + ["Using default instance-data/user-data paths for non-root"], + ), + ( + DataSourceNotFoundException("No cached datasource found yet"), + ["datasource not detected"], + ), + ), + ) + @mock.patch(M_PATH + "read_cfg_paths") + def test_handle_schema_unable_to_read_cfg_paths( + self, + read_cfg_paths, + failure, + expected_logs, + paths, + capsys, + caplog, + tmpdir, + ): + if isinstance(failure, IOError): + failure.errno = EACCES + read_cfg_paths.side_effect = [failure, paths] + user_data_fn = tmpdir.join("user-data") + with open(user_data_fn, "w") as f: + f.write( + dedent( + """\ + #cloud-config + packages: [sl] + """ + ) + ) + args = self.Args( + config_file=str(user_data_fn), + schema_type="cloud-config", + annotate=False, + docs=None, + system=None, + instance_data=None, + ) + handle_schema_args("unused", args) + assert "Valid schema" in capsys.readouterr().out + for expected_log in expected_logs: + assert expected_log in caplog.text + @pytest.mark.parametrize( "annotate, expected_output", [ diff --git a/tests/unittests/conftest.py b/tests/unittests/conftest.py index 6747867aa24..91dbd06f16c 100644 --- a/tests/unittests/conftest.py +++ b/tests/unittests/conftest.py @@ -78,6 +78,13 @@ def side_effect(args, *other_args, **kwargs): log.configure_root_logger() + +@pytest.fixture(autouse=True) +def disable_root_logger_setup(request): + with mock.patch("cloudinit.cmd.main.configure_root_logger", autospec=True): + yield + + PYTEST_VERSION_TUPLE = tuple(map(int, pytest.__version__.split("."))) if PYTEST_VERSION_TUPLE < (3, 9, 0): diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 36d184262d1..96f407f2c9d 100644 --- 
a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -21,7 +21,7 @@ import responses import cloudinit -from cloudinit import cloud, distros +from cloudinit import atomic_helper, cloud, distros from cloudinit import helpers as ch from cloudinit import subp, util from cloudinit.config.schema import ( @@ -293,6 +293,9 @@ def patchUtils(self, new_root): ("sym_link", -1), ("copy", -1), ], + atomic_helper: [ + ("write_json", 1), + ], } for (mod, funcs) in patch_funcs.items(): for (f, am) in funcs: diff --git a/tests/unittests/net/test_ephemeral.py b/tests/unittests/net/test_ephemeral.py index 99fbcb0bf6b..77ac95c912f 100644 --- a/tests/unittests/net/test_ephemeral.py +++ b/tests/unittests/net/test_ephemeral.py @@ -5,6 +5,8 @@ import pytest from cloudinit.net.ephemeral import EphemeralIPNetwork +from cloudinit.subp import ProcessExecutionError +from tests.unittests.helpers import does_not_raise from tests.unittests.util import MockDistro M_PATH = "cloudinit.net.ephemeral." @@ -51,3 +53,52 @@ def test_stack_order( expected_call_args_list == m_exit_stack.return_value.enter_context.call_args_list ) + + @pytest.mark.parametrize( + "m_v4, m_v6, m_context, m_side_effects", + [ + pytest.param( + False, True, does_not_raise(), [None, None], id="v6_only" + ), + pytest.param( + True, False, does_not_raise(), [None, None], id="v4_only" + ), + pytest.param( + True, + True, + does_not_raise(), + [ProcessExecutionError, None], + id="v4_error", + ), + pytest.param( + True, + True, + does_not_raise(), + [None, ProcessExecutionError], + id="v6_error", + ), + pytest.param( + True, + True, + pytest.raises(ProcessExecutionError), + [ + ProcessExecutionError, + ProcessExecutionError, + ], + id="v4_v6_error", + ), + ], + ) + def test_interface_init_failures( + self, m_v4, m_v6, m_context, m_side_effects, mocker + ): + mocker.patch( + "cloudinit.net.ephemeral.EphemeralDHCPv4" + ).return_value.__enter__.side_effect = m_side_effects[0] + mocker.patch( + "cloudinit.net.ephemeral.EphemeralIPv6Network" + ).return_value.__enter__.side_effect = m_side_effects[1] + distro = MockDistro() + with m_context: + with EphemeralIPNetwork(distro, "eth0", ipv4=m_v4, ipv6=m_v6): + pass diff --git a/tests/unittests/runs/test_simple_run.py b/tests/unittests/runs/test_simple_run.py index d9c6495ce5e..0a2b142d68b 100644 --- a/tests/unittests/runs/test_simple_run.py +++ b/tests/unittests/runs/test_simple_run.py @@ -3,9 +3,10 @@ import copy import os -from cloudinit import safeyaml, stages, util +from cloudinit import atomic_helper, safeyaml, stages, util from cloudinit.config.modules import Modules from cloudinit.settings import PER_INSTANCE +from cloudinit.sources import NetworkConfigSource from tests.unittests import helpers @@ -47,6 +48,15 @@ def setUp(self): def test_none_ds_populates_var_lib_cloud(self): """Init and run_section default behavior creates appropriate dirs.""" # Now start verifying whats created + netcfg = { + "version": 1, + "config": [{"type": "physical", "name": "eth9"}], + } + + def fake_network_config(): + return netcfg, NetworkConfigSource.FALLBACK + + self.assertFalse(os.path.exists("/var/lib/cloud")) initer = stages.Init() initer.read_cfg() initer.initialize() @@ -55,10 +65,20 @@ def test_none_ds_populates_var_lib_cloud(self): self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d))) initer.fetch() + self.assertFalse(os.path.islink("var/lib/cloud/instance")) iid = initer.instancify() self.assertEqual(iid, "iid-datasource-none") initer.update() 
self.assertTrue(os.path.islink("var/lib/cloud/instance")) + initer._find_networking_config = fake_network_config + self.assertFalse( + os.path.exists("/var/lib/cloud/instance/network-config.json") + ) + initer.apply_network_config(False) + self.assertEqual( + f"{atomic_helper.json_dumps(netcfg)}\n", + util.load_file("/var/lib/cloud/instance/network-config.json"), + ) def test_none_ds_runs_modules_which_do_not_define_distros(self): """Any modules which do not define a distros attribute are run.""" diff --git a/tests/unittests/sources/azure/test_errors.py b/tests/unittests/sources/azure/test_errors.py index da794e32bbf..f310be72e32 100644 --- a/tests/unittests/sources/azure/test_errors.py +++ b/tests/unittests/sources/azure/test_errors.py @@ -225,3 +225,13 @@ def test_unhandled_exception(): quoted_value = quote_csv_value(f"exception={source_error!r}") assert f"|{quoted_value}|" in error.as_encoded_report() + + +def test_imds_invalid_metadata(): + key = "compute" + value = "Running" + error = errors.ReportableErrorImdsInvalidMetadata(key=key, value=value) + + assert error.reason == "invalid IMDS metadata for key=compute" + assert error.supporting_data["key"] == key + assert error.supporting_data["value"] == repr(value) diff --git a/tests/unittests/sources/azure/test_imds.py b/tests/unittests/sources/azure/test_imds.py index cb87a39dc6a..1fe44b483d0 100644 --- a/tests/unittests/sources/azure/test_imds.py +++ b/tests/unittests/sources/azure/test_imds.py @@ -437,10 +437,12 @@ def test_will_retry_errors_on_fallback( @pytest.mark.parametrize( "error_count,retry_deadline", [(1, 0.0), (2, 1.0), (301, 300.0)] ) + @pytest.mark.parametrize("max_connection_errors", [None, 1, 11]) def test_retry_until_failure( self, error, error_count, + max_connection_errors, retry_deadline, caplog, mock_requests, @@ -452,25 +454,25 @@ def test_retry_until_failure( with pytest.raises(UrlError) as exc_info: imds.fetch_metadata_with_api_fallback( - retry_deadline=retry_deadline + max_connection_errors=max_connection_errors, + retry_deadline=retry_deadline, ) error_regex = regex_for_http_error(error) assert re.search(error_regex, str(exc_info.value.cause)) - # Connection errors max out at 11 attempts. - if error == REQUESTS_CONNECTION_ERROR and error_count > 11: - error_count = ( - 11 - if error == REQUESTS_CONNECTION_ERROR and error_count > 11 - else error_count - ) + max_attempts = ( + min(max_connection_errors, int(retry_deadline) + 1) + if isinstance(error, requests.ConnectionError) + and isinstance(max_connection_errors, int) + else error_count + ) - # mock_requests will assert since not all calls were made. 
+ if max_attempts < error_count: mock_requests.assert_all_requests_are_fired = False assert mock_url_helper_time_sleep.mock_calls == [mock.call(1)] * ( - error_count - 1 + max_attempts - 1 ) logs = [x for x in caplog.record_tuples if x[0] == LOG_PATH] @@ -483,7 +485,7 @@ def test_retry_until_failure( f"{error_regex}" ), ) - for i in range(1, error_count + 1) + for i in range(1, max_attempts + 1) ] + [ ( LOG_PATH, diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py index e1d5dd27d9e..3d65462cbb1 100644 --- a/tests/unittests/sources/test_aliyun.py +++ b/tests/unittests/sources/test_aliyun.py @@ -179,6 +179,53 @@ def test_with_mock_server(self, m_is_aliyun, m_resolv): "metadata (http://100.100.100.200)", self.ds.subplatform ) + @mock.patch("cloudinit.net.ephemeral.EphemeralIPv6Network") + @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network") + @mock.patch("cloudinit.sources.DataSourceEc2.util.is_resolvable") + @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") + @mock.patch("cloudinit.net.find_fallback_nic") + @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") + @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") + def test_aliyun_local_with_mock_server( + self, + m_is_bsd, + m_dhcp, + m_fallback_nic, + m_is_aliyun, + m_resolva, + m_net4, + m_net6, + ): + m_is_aliyun.return_value = True + m_fallback_nic.return_value = "eth9" + m_dhcp.return_value = [ + { + "interface": "eth9", + "fixed-address": "192.168.2.9", + "routers": "192.168.2.1", + "subnet-mask": "255.255.255.0", + "broadcast-address": "192.168.2.255", + } + ] + m_is_bsd.return_value = False + cfg = {"datasource": {"AliYun": {"timeout": "1", "max_wait": "1"}}} + distro = mock.MagicMock() + paths = helpers.Paths({"run_dir": self.tmp_dir()}) + self.ds = ay.DataSourceAliYunLocal(cfg, distro, paths) + self.regist_default_server() + ret = self.ds.get_data() + self.assertEqual(True, ret) + self.assertEqual(1, m_is_aliyun.call_count) + self._test_get_data() + self._test_get_sshkey() + self._test_get_iid() + self._test_host_name() + self.assertEqual("aliyun", self.ds.cloud_name) + self.assertEqual("ec2", self.ds.platform) + self.assertEqual( + "metadata (http://100.100.100.200)", self.ds.subplatform + ) + @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): """If is_aliyun returns false, then get_data should return False.""" diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index c2acca5f86f..2a477f80239 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import copy +import datetime import json import os import stat @@ -273,6 +274,14 @@ def mock_subp_subp(): yield m +@pytest.fixture +def mock_timestamp(): + timestamp = datetime.datetime.utcnow() + with mock.patch.object(errors, "datetime", autospec=True) as m: + m.utcnow.return_value = timestamp + yield timestamp + + @pytest.fixture def mock_util_ensure_dir(): with mock.patch( @@ -3629,6 +3638,7 @@ def provisioning_setup( mock_netlink, mock_readurl, mock_subp_subp, + mock_timestamp, mock_util_ensure_dir, mock_util_find_devs_with, mock_util_load_file, @@ -3657,6 +3667,7 @@ def provisioning_setup( self.mock_netlink = mock_netlink self.mock_readurl = mock_readurl self.mock_subp_subp = mock_subp_subp + self.mock_timestmp = mock_timestamp self.mock_util_ensure_dir = mock_util_ensure_dir self.mock_util_find_devs_with = mock_util_find_devs_with self.mock_util_load_file = mock_util_load_file @@ -3760,13 +3771,78 @@ def test_no_pps(self): assert len(self.mock_kvp_report_failure_to_host.mock_calls) == 0 assert len(self.mock_kvp_report_success_to_host.mock_calls) == 1 + @pytest.mark.parametrize("pps_type", ["Savable", "Running"]) + def test_stale_pps(self, pps_type): + imds_md_source = copy.deepcopy(self.imds_md) + imds_md_source["extended"]["compute"]["ppsType"] = pps_type + + nl_sock = mock.MagicMock() + self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock + self.mock_readurl.side_effect = [ + mock.MagicMock(contents=json.dumps(imds_md_source).encode()), + mock.MagicMock(contents=construct_ovf_env().encode()), + mock.MagicMock(contents=json.dumps(imds_md_source).encode()), + ] + self.mock_azure_get_metadata_from_fabric.return_value = [] + + self.azure_ds._check_and_get_data() + + assert self.mock_readurl.mock_calls == [ + mock.call( + "http://169.254.169.254/metadata/instance?" + "api-version=2021-08-01&extended=true", + exception_cb=mock.ANY, + headers={"Metadata": "true"}, + infinite=True, + log_req_resp=True, + timeout=30, + ), + mock.call( + "http://169.254.169.254/metadata/reprovisiondata?" + "api-version=2019-06-01", + exception_cb=mock.ANY, + headers={"Metadata": "true"}, + log_req_resp=False, + infinite=True, + timeout=30, + ), + mock.call( + "http://169.254.169.254/metadata/instance?" + "api-version=2021-08-01&extended=true", + exception_cb=mock.ANY, + headers={"Metadata": "true"}, + infinite=True, + log_req_resp=True, + timeout=30, + ), + ] + + # Verify DMI usage. + assert self.mock_dmi_read_dmi_data.mock_calls == [ + mock.call("chassis-asset-tag"), + mock.call("system-uuid"), + mock.call("system-uuid"), + ] + + # Verify reports via KVP. 
+ assert len(self.mock_kvp_report_success_to_host.mock_calls) == 1 + + assert self.mock_kvp_report_failure_to_host.mock_calls == [ + mock.call( + errors.ReportableErrorImdsInvalidMetadata( + key="extended.compute.ppsType", value=pps_type + ), + ), + ] + def test_running_pps(self): - self.imds_md["extended"]["compute"]["ppsType"] = "Running" + imds_md_source = copy.deepcopy(self.imds_md) + imds_md_source["extended"]["compute"]["ppsType"] = "Running" nl_sock = mock.MagicMock() self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock self.mock_readurl.side_effect = [ - mock.MagicMock(contents=json.dumps(self.imds_md).encode()), + mock.MagicMock(contents=json.dumps(imds_md_source).encode()), mock.MagicMock(contents=construct_ovf_env().encode()), mock.MagicMock(contents=json.dumps(self.imds_md).encode()), ] @@ -3869,7 +3945,8 @@ def test_running_pps(self): assert len(self.mock_kvp_report_success_to_host.mock_calls) == 2 def test_savable_pps(self): - self.imds_md["extended"]["compute"]["ppsType"] = "Savable" + imds_md_source = copy.deepcopy(self.imds_md) + imds_md_source["extended"]["compute"]["ppsType"] = "Savable" nl_sock = mock.MagicMock() self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock @@ -3878,7 +3955,7 @@ def test_savable_pps(self): "ethAttached1" ) self.mock_readurl.side_effect = [ - mock.MagicMock(contents=json.dumps(self.imds_md).encode()), + mock.MagicMock(contents=json.dumps(imds_md_source).encode()), mock.MagicMock(contents=construct_ovf_env().encode()), mock.MagicMock(contents=json.dumps(self.imds_md).encode()), ] @@ -4007,7 +4084,8 @@ def test_savable_pps(self): ], ) def test_savable_pps_early_unplug(self, fabric_side_effect): - self.imds_md["extended"]["compute"]["ppsType"] = "Savable" + imds_md_source = copy.deepcopy(self.imds_md) + imds_md_source["extended"]["compute"]["ppsType"] = "Savable" nl_sock = mock.MagicMock() self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock @@ -4016,7 +4094,7 @@ def test_savable_pps_early_unplug(self, fabric_side_effect): "ethAttached1" ) self.mock_readurl.side_effect = [ - mock.MagicMock(contents=json.dumps(self.imds_md).encode()), + mock.MagicMock(contents=json.dumps(imds_md_source).encode()), mock.MagicMock(contents=construct_ovf_env().encode()), mock.MagicMock(contents=json.dumps(self.imds_md).encode()), ] @@ -4136,10 +4214,11 @@ def test_savable_pps_early_unplug(self, fabric_side_effect): @pytest.mark.parametrize("pps_type", ["Savable", "Running", "None"]) def test_recovery_pps(self, pps_type): self.patched_reported_ready_marker_path.write_text("") - self.imds_md["extended"]["compute"]["ppsType"] = pps_type + imds_md_source = copy.deepcopy(self.imds_md) + imds_md_source["extended"]["compute"]["ppsType"] = pps_type self.mock_readurl.side_effect = [ - mock.MagicMock(contents=json.dumps(self.imds_md).encode()), + mock.MagicMock(contents=json.dumps(imds_md_source).encode()), mock.MagicMock(contents=construct_ovf_env().encode()), mock.MagicMock(contents=json.dumps(self.imds_md).encode()), ] @@ -4401,13 +4480,17 @@ def test_errors( azure_ds._route_configured_for_imds = route_configured_for_imds mock_imds_fetch_metadata_with_api_fallback.side_effect = exception mock_time.return_value = 0.0 + max_connection_errors = None if route_configured_for_imds else 11 assert ( azure_ds.get_metadata_from_imds(report_failure=report_failure) == {} ) assert mock_imds_fetch_metadata_with_api_fallback.mock_calls == [ - mock.call(retry_deadline=mock.ANY) + mock.call( + max_connection_errors=max_connection_errors, + 
retry_deadline=mock.ANY, + ) ] expected_duration = 300 @@ -4425,11 +4508,11 @@ def test_errors( mock.call(reported_error) ] - if report_failure and ( - route_configured_for_imds - or not isinstance(exception, url_helper.UrlError) - or not isinstance(exception.cause, requests.ConnectionError) - ): + connection_error = isinstance( + exception, url_helper.UrlError + ) and isinstance(exception.cause, requests.ConnectionError) + report_skipped = not route_configured_for_imds and connection_error + if report_failure and not report_skipped: assert mock_azure_report_failure_to_fabric.mock_calls == [ mock.call(endpoint=mock.ANY, error=reported_error) ] diff --git a/tests/unittests/sources/test_common.py b/tests/unittests/sources/test_common.py index 48db1de2697..9e3481fc37d 100644 --- a/tests/unittests/sources/test_common.py +++ b/tests/unittests/sources/test_common.py @@ -36,6 +36,7 @@ from tests.unittests import helpers as test_helpers DEFAULT_LOCAL = [ + AliYun.DataSourceAliYunLocal, Azure.DataSourceAzure, CloudSigma.DataSourceCloudSigma, ConfigDrive.DataSourceConfigDrive, diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py index a1098228374..8faa3cd2d1a 100644 --- a/tests/unittests/test_log.py +++ b/tests/unittests/test_log.py @@ -86,3 +86,11 @@ def test_log_deduplication(self, caplog): schedule=6, ) assert 2 == len(caplog.records) + + +def test_logger_prints_to_stderr(capsys): + message = "to stdout" + log.setup_basic_logging() + LOG = logging.getLogger() + LOG.warning(message) + assert message in capsys.readouterr().err diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index 83b0f88188f..891031ef02b 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -7,8 +7,15 @@ import re import string +import pytest + from cloudinit import helpers as c_helpers from cloudinit import util +from cloudinit.config.schema import ( + SchemaValidationError, + get_schema, + validate_cloudconfig_schema, +) from cloudinit.handlers import CONTENT_END, CONTENT_START, cloud_config from tests.unittests import helpers @@ -255,3 +262,154 @@ def test_compat_merge_sub_list(self): c = _old_mergedict(a, b) d = util.mergemanydict([a, b]) self.assertEqual(c, d) + + +class TestMergingSchema: + @pytest.mark.parametrize( + "config, error_msg", + [ + ({"merge_how": "list()+dict()+str()"}, None), + ({"merge_type": "list()+dict()+str()"}, None), + ({"merge_how": []}, "\\[\\] is too short"), + ( + {"merge_how": {"name": "list", "settings": ["append"]}}, + "is not of type", + ), + ( + {"merge_how": [{"name": "list", "settings": "append"}]}, + "'append' is not of type 'array'", + ), + ( + { + "merge_how": [ + { + "settings": ["recurse_list"], + } + ] + }, + "'name' is a required property", + ), + ( + { + "merge_how": [ + { + "name": "list", + } + ] + }, + "'settings' is a required property", + ), + ( + { + "merge_how": [ + { + "name": "str", + "settings": ["recurse_list"], + "badkey": "append", + } + ] + }, + ( + "Additional properties are not allowed " + "\\('badkey' was unexpected\\)" + ), + ), + ( + { + "merge_how": [ + { + "name": "str", + "settings": ["badvalue"], + } + ] + }, + "'badvalue' is not one of", + ), + ( + { + "merge_how": [ + { + "name": "badvalue", + "settings": ["append"], + } + ] + }, + re.escape("'badvalue' is not one of ['list', 'dict', 'str']"), + ), + ( + { + "merge_how": [ + { + "name": "str", + "settings": [ + "append", + "recurse_dict", + "recurse_list", + ], + }, + { + "name": "dict", + "settings": [ + "allow_delete", + 
"no_replace", + "replace", + "recurse_array", + ], + }, + { + "name": "list", + "settings": [ + "append", + "prepend", + "no_replace", + "replace", + "recurse_str", + ], + }, + ] + }, + None, + ), + ( + { + "merge_type": [ + { + "name": "str", + "settings": [ + "append", + "recurse_dict", + "recurse_list", + ], + }, + { + "name": "dict", + "settings": [ + "allow_delete", + "no_replace", + "replace", + "recurse_array", + ], + }, + { + "name": "list", + "settings": [ + "append", + "prepend", + "no_replace", + "replace", + "recurse_str", + ], + }, + ] + }, + None, + ), + ], + ) + @helpers.skipUnlessJsonSchema() + def test_schema_validation(self, config, error_msg): + if error_msg is None: + validate_cloudconfig_schema(config, get_schema(), strict=True) + else: + with pytest.raises(SchemaValidationError, match=error_msg): + validate_cloudconfig_schema(config, get_schema(), strict=True) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 3e8f721f098..c5509536a2c 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -988,6 +988,58 @@ """.lstrip() NETWORK_CONFIGS = { + "small_v1_suse_dhcp6": { + "expected_sysconfig_opensuse": { + "ifcfg-eth1": textwrap.dedent( + """\ + BOOTPROTO=static + LLADDR=cf:d6:af:48:e8:80 + STARTMODE=auto""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ + BOOTPROTO=dhcp + DHCLIENT6_MODE=managed + LLADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + STARTMODE=auto""" + ), + }, + "yaml": textwrap.dedent( + """ + version: 1 + config: + # Physical interfaces. + - type: physical + name: eth99 + mac_address: c0:d6:9f:2c:e8:80 + subnets: + - type: dhcp4 + - type: dhcp6 + - type: static + address: 192.168.21.3/24 + dns_nameservers: + - 8.8.8.8 + - 8.8.4.4 + dns_search: barley.maas sach.maas + routes: + - gateway: 65.61.151.37 + netmask: 0.0.0.0 + network: 0.0.0.0 + metric: 10000 + - type: physical + name: eth1 + mac_address: cf:d6:af:48:e8:80 + - type: nameserver + address: + - 1.2.3.4 + - 5.6.7.8 + search: + - wark.maas + """ + ), + }, "small_v1": { "expected_networkd_eth99": textwrap.dedent( """\ @@ -2538,7 +2590,7 @@ ), "ifcfg-eth5": textwrap.dedent( """\ - BOOTPROTO=dhcp + BOOTPROTO=dhcp4 LLADDR=98:bb:9f:2c:e8:8a STARTMODE=manual""" ), @@ -5740,7 +5792,7 @@ def test_config_with_explicit_loopback(self): expected = """\ # Created by cloud-init automatically, do not edit. 
# -BOOTPROTO=dhcp +BOOTPROTO=dhcp4 STARTMODE=auto """ self.assertEqual(expected, found[nspath + "ifcfg-eth0"]) @@ -5797,6 +5849,12 @@ def test_small_config_v1(self): self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_small_config_v1_suse(self): + entry = NETWORK_CONFIGS["small_v1_suse_dhcp6"] + found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + def test_small_config_v2(self): entry = NETWORK_CONFIGS["small_v1"] found = self._render_and_read(network_config=yaml.load(entry["yaml"])) diff --git a/tests/unittests/test_stages.py b/tests/unittests/test_stages.py index 3afee411c13..773060892a8 100644 --- a/tests/unittests/test_stages.py +++ b/tests/unittests/test_stages.py @@ -10,7 +10,7 @@ from cloudinit import sources, stages from cloudinit.event import EventScope, EventType from cloudinit.sources import NetworkConfigSource -from cloudinit.util import write_file +from cloudinit.util import sym_link, write_file from tests.unittests.helpers import mock from tests.unittests.util import TEST_INSTANCE_ID, FakeDataSource @@ -28,7 +28,8 @@ def setup(self, tmpdir): "paths": {"cloud_dir": self.tmpdir, "run_dir": self.tmpdir}, } } - tmpdir.mkdir("instance") + tmpdir.mkdir("instance-uuid") + sym_link(tmpdir.join("instance-uuid"), tmpdir.join("instance")) self.init.datasource = FakeDataSource(paths=self.init.paths) self._real_is_new_instance = self.init.is_new_instance self.init.is_new_instance = mock.Mock(return_value=True) @@ -393,9 +394,12 @@ def fake_network_config(): assert caplog.records[0].levelname == "INFO" assert f"network config is disabled by {disable_file}" in caplog.text + @pytest.mark.parametrize("instance_dir_present", (True, False)) @mock.patch("cloudinit.net.get_interfaces_by_mac") @mock.patch("cloudinit.distros.ubuntu.Distro") - def test_apply_network_on_new_instance(self, m_ubuntu, m_macs): + def test_apply_network_on_new_instance( + self, m_ubuntu, m_macs, instance_dir_present + ): """Call distro apply_network_config methods on is_new_instance.""" net_cfg = { "version": 1, @@ -415,20 +419,26 @@ def fake_network_config(): m_macs.return_value = {"42:42:42:42:42:42": "eth9"} self.init._find_networking_config = fake_network_config - + if not instance_dir_present: + self.tmpdir.join("instance").remove() + self.tmpdir.join("instance-uuid").remove() self.init.apply_network_config(True) networking = self.init.distro.networking networking.apply_network_config_names.assert_called_with(net_cfg) self.init.distro.apply_network_config.assert_called_with( net_cfg, bring_up=True ) - assert net_cfg == json.loads( - self.tmpdir.join("instance/network-config.json").read() - ) - assert net_cfg == json.loads( - self.tmpdir.join("network-config.json").read() - ) - assert os.path.islink(self.tmpdir.join("network-config.json")) + if instance_dir_present: + assert net_cfg == json.loads( + self.tmpdir.join("network-config.json").read() + ) + assert os.path.islink(self.tmpdir.join("network-config.json")) + else: + for path in ( + "instance/network-config.json", + "network-config.json", + ): + assert not self.tmpdir.join(path).exists() @mock.patch("cloudinit.distros.ubuntu.Distro") def test_apply_network_on_same_instance_id(self, m_ubuntu, caplog): @@ -526,12 +536,9 @@ def test_apply_network_disabled_when_no_default_boot( self, m_ubuntu, m_macs, caplog ): """Don't apply network if datasource has no BOOT event.""" - net_cfg = 
self._apply_network_setup(m_macs) + self._apply_network_setup(m_macs) self.init.apply_network_config(True) self.init.distro.apply_network_config.assert_not_called() - assert net_cfg == json.loads( - self.tmpdir.join("network-config.json").read() - ) assert ( "No network config applied. Neither a new instance nor datasource " "network update allowed" in caplog.text @@ -623,31 +630,43 @@ def test_log_files_existence_is_ensured_if_configured(self, init, tmpdir): assert 0o640 == stat.S_IMODE(log_file.stat().mode) @pytest.mark.parametrize( - "set_perms,expected_perms", + "input, expected", [ + (0o777, 0o640), (0o640, 0o640), - (0o606, 0o640), - (0o600, 0o600), + (0o606, 0o600), + (0o501, 0o400), ], ) - def test_existing_file_permissions( - self, init, tmpdir, set_perms, expected_perms - ): + def test_existing_file_permissions(self, init, tmpdir, input, expected): """Test file permissions are set as expected. - CIS Hardening requires 640 permissions. If the file has looser - permissions, then hard code 640. If the file has tighter - permissions, then leave them as they are + CIS Hardening requires file mode 0o640 or stricter. Set the + permissions to the subset of 0o640 and the current + mode. See https://bugs.launchpad.net/cloud-init/+bug/1900837. """ log_file = tmpdir.join("cloud-init.log") log_file.ensure() - # Use a mode that will never be made the default so this test will - # always be valid - log_file.chmod(set_perms) + log_file.chmod(input) init._cfg = {"def_log_file": str(log_file)} - - init._initialize_filesystem() - - assert expected_perms == stat.S_IMODE(log_file.stat().mode) + with mock.patch.object(stages.util, "ensure_file") as ensure: + init._initialize_filesystem() + assert expected == ensure.call_args[0][1] + + +@pytest.mark.parametrize( + "mode_1, mode_2, expected", + [ + (0o777, 0o640, 0o640), + (0o640, 0o777, 0o640), + (0o640, 0o541, 0o440), + (0o111, 0o050, 0o010), + (0o631, 0o640, 0o600), + (0o661, 0o640, 0o640), + (0o453, 0o611, 0o411), + ], +) +def test_strictest_permissions(mode_1, mode_2, expected): + assert expected == stages.Init._get_strictest_mode(mode_1, mode_2) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 4c6ec4e308d..519ef63c135 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -3150,30 +3150,6 @@ def test_file(self, tmp_path): ) -class TestComparePermissions: - @pytest.mark.parametrize( - "perm1,perm2,expected", - [ - (0o777, 0o777, 0), - (0o000, 0o000, 0), - (0o421, 0o421, 0), - (0o1640, 0o1640, 0), - (0o1407, 0o1600, 1), - (0o1600, 0o1407, -1), - (0o407, 0o600, 1), - (0o600, 0o407, -1), - (0o007, 0o700, 1), - (0o700, 0o007, -1), - (0o077, 0o100, 1), - (0o644, 0o640, 1), - (0o640, 0o600, 1), - (0o600, 0o400, 1), - ], - ) - def test_compare_permissions(self, perm1, perm2, expected): - assert util.compare_permission(perm1, perm2) == expected - - class TestMaybeB64Decode: """Test the maybe_b64decode helper function.""" diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index be26ca477b9..dbdb9cfa5ce 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -33,6 +33,7 @@ chrislalos ciprianbadescu citrus-it cjp256 +CodeBleu Conan-Kudo cvstealth dankenigsberg @@ -85,6 +86,7 @@ kallioli klausenbusk KsenijaS landon912 +ld9379435 licebmi linitio lkundrak diff --git a/tools/build-on-openbsd b/tools/build-on-openbsd index 201844f1889..fd038afa2d5 100755 --- a/tools/build-on-openbsd +++ b/tools/build-on-openbsd @@ -10,13 +10,14 @@ pkgs=" py3-configobj py3-jinja2 py3-jsonschema 
+ py3-netifaces py3-oauthlib py3-requests py3-setuptools py3-yaml sudo-- " -[ -f "$depschecked" ] || pkg_add ${pkgs} || fail "install packages" +[ -f "$depschecked" ] || pkg_add "${pkgs}" || fail "install packages" touch $depschecked
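
A closing note on the permissions tests above: the new
test_strictest_permissions cases in tests/unittests/test_stages.py pin down
the behavior of Init._get_strictest_mode. Every parametrized expectation is
consistent with a bitwise AND of the two modes; the sketch below illustrates
that contract and is not the actual implementation:

    # Illustrative only: combine two file modes so a permission bit is
    # granted in the result only when BOTH modes grant it, guaranteeing
    # the combined mode is never looser than either input.
    def get_strictest_mode(mode_1: int, mode_2: int) -> int:
        return mode_1 & mode_2

    # Spot-check against the parametrized cases from test_stages.py.
    assert get_strictest_mode(0o777, 0o640) == 0o640
    assert get_strictest_mode(0o640, 0o541) == 0o440
    assert get_strictest_mode(0o453, 0o611) == 0o411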