From 4454a91fa9c25d27d342de4dbb499213e7b76eed Mon Sep 17 00:00:00 2001
From: Ibrahim Awwad
Date: Wed, 11 Dec 2024 15:48:50 +0200
Subject: [PATCH] Add a charm config to add custom annotations to
 loadbalancers created by traefik (#428)

* add annotations to traefik lb
* fix config name
* mock patch charm._get_loadbalancer_status
* bump KubernetesServicePatch
* Delete LB if annotations are invalid
* move static functions outside main class
* fmt test case
* refactor `self.service_patch`
* fix inclusive warning
* draft: remove k8s_service_patch dep and move needed code into traefik
* fix tests and refactor k8s `_is_patched`
* fix itests
* switch to kubernetes resource manager
* fix tcp tester charm
* increase traefik route test timeout
* trigger _reconcile_lb() on every hook
---
 config.yaml                                   |  13 +
 .../v1/kubernetes_service_patch.py            | 433 ------------------
 requirements.txt                              |   5 +
 src/charm.py                                  | 262 +++++++++--
 tests/integration/conftest.py                 |   1 -
 tests/integration/test_charm_route.py         |   2 +-
 tests/integration/testers/tcp/charmcraft.yaml |   1 -
 .../integration/testers/tcp/requirements.txt  |   1 -
 tests/integration/testers/tcp/src/charm.py    |  12 +-
 tests/interface/conftest.py                   |   2 +-
 tests/scenario/conftest.py                    |  16 +-
 tests/scenario/test_setup.py                  |   5 +-
 tests/scenario/test_status.py                 |   6 +-
 tests/scenario/test_workload_version.py       |   1 -
 tests/unit/conftest.py                        |  15 +
 tests/unit/test_charm.py                      | 191 +++++---
 tests/unit/test_route.py                      |   1 -
 tests/unit/test_tls_certificates.py           |  22 +-
 18 files changed, 422 insertions(+), 567 deletions(-)
 delete mode 100644 lib/charms/observability_libs/v1/kubernetes_service_patch.py
 create mode 100644 tests/unit/conftest.py

diff --git a/config.yaml b/config.yaml
index 545e607b..52372498 100644
--- a/config.yaml
+++ b/config.yaml
@@ -8,6 +8,19 @@ options:
       This feature is experimental and may be unstable.
     type: boolean
     default: False
+  loadbalancer_annotations:
+    description: |
+      A comma-separated list of annotations to apply to the LoadBalancer service.
+      The format should be: `key1=value1,key2=value2,key3=value3`.
+      These annotations are passed directly to the Kubernetes LoadBalancer service,
+      enabling customization for specific cloud provider settings or integrations.
+
+      Example:
+      "external-dns.alpha.kubernetes.io/hostname=example.com,service.beta.kubernetes.io/aws-load-balancer-type=nlb"
+
+      Ensure the annotations are correctly formatted and adhere to the Kubernetes annotation syntax and character set: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set
+      Invalid values will result in the LoadBalancer service being removed, and all previously set annotations will be lost.
+    type: string
   basic_auth_user:
     description: |
       Enables the `basicAuth` middleware for **all** routes on this proxy.
diff --git a/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/lib/charms/observability_libs/v1/kubernetes_service_patch.py
deleted file mode 100644
index e85834be..00000000
--- a/lib/charms/observability_libs/v1/kubernetes_service_patch.py
+++ /dev/null
@@ -1,433 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-# See LICENSE file for licensing details.
-
-"""# KubernetesServicePatch Library.
-
-This library is designed to enable developers to more simply patch the Kubernetes Service created
-by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a
-service named after the application in the namespace (named after the Juju model). This service by
-default contains a "placeholder" port, which is 65535/TCP.
- -When modifying the default set of resources managed by Juju, one must consider the lifecycle of the -charm. In this case, any modifications to the default service (created during deployment), will be -overwritten during a charm upgrade. - -When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm` -events which applies the patch to the cluster. This should ensure that the service ports are -correct throughout the charm's life. - -The constructor simply takes a reference to the parent charm, and a list of -[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the -service. For information regarding the `lightkube` `ServicePort` model, please visit the -`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport). - -Optionally, a name of the service (in case service name needs to be patched as well), labels, -selectors, and annotations can be provided as keyword arguments. - -## Getting Started - -To get started using the library, you just need to fetch the library using `charmcraft`. **Note -that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.** - -```shell -cd some-charm -charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch -cat << EOF >> requirements.txt -lightkube -lightkube-models -EOF -``` - -Then, to initialise the library: - -For `ClusterIP` services: - -```python -# ... -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... - port = ServicePort(443, name=f"{self.app.name}") - self.service_patcher = KubernetesServicePatch(self, [port]) - # ... -``` - -For `LoadBalancer`/`NodePort` services: - -```python -# ... -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... - port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666) - self.service_patcher = KubernetesServicePatch( - self, [port], "LoadBalancer" - ) - # ... -``` - -Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"` - -```python -# ... -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... - tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP") - udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP") - sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP") - self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp]) - # ... -``` - -Bound with custom events by providing `refresh_event` argument: -For example, you would like to have a configurable port in your charm and want to apply -service patch every time charm config is changed. - -```python -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... - port = ServicePort(int(self.config["charm-config-port"]), name=f"{self.app.name}") - self.service_patcher = KubernetesServicePatch( - self, - [port], - refresh_event=self.on.config_changed - ) - # ... 
-``` - -Creating a new k8s lb service instead of patching the one created by juju -Service name is optional. If not provided, it defaults to {app_name}-lb. -If provided and equal to app_name, it also defaults to {app_name}-lb to prevent conflicts with the Juju default service. -```python -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... - port = ServicePort(int(self.config["charm-config-port"]), name=f"{self.app.name}") - self.service_patcher = KubernetesServicePatch( - self, - [port], - service_type="LoadBalancer", - service_name="application-lb" - ) - # ... -``` - -Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library -does not try to make any API calls, or open any files during testing that are unlikely to be -present, and could break your tests. The easiest way to do this is during your test `setUp`: - -```python -# ... - -@patch("charm.KubernetesServicePatch", lambda x, y: None) -def setUp(self, *unused): - self.harness = Harness(SomeCharm) - # ... -``` -""" - -import logging -from types import MethodType -from typing import Any, List, Literal, Optional, Union - -from lightkube import ApiError, Client # pyright: ignore -from lightkube.core import exceptions -from lightkube.models.core_v1 import ServicePort, ServiceSpec -from lightkube.models.meta_v1 import ObjectMeta -from lightkube.resources.core_v1 import Service -from lightkube.types import PatchType -from ops import UpgradeCharmEvent -from ops.charm import CharmBase -from ops.framework import BoundEvent, Object - -logger = logging.getLogger(__name__) - -# The unique Charmhub library identifier, never change it -LIBID = "0042f86d0a874435adef581806cddbbb" - -# Increment this major API version when introducing breaking changes -LIBAPI = 1 - -# Increment this PATCH version before using `charmcraft publish-lib` or reset -# to 0 if you are raising the major API version -LIBPATCH = 12 - -ServiceType = Literal["ClusterIP", "LoadBalancer"] - - -class KubernetesServicePatch(Object): - """A utility for patching the Kubernetes service set up by Juju.""" - - def __init__( - self, - charm: CharmBase, - ports: List[ServicePort], - service_name: Optional[str] = None, - service_type: ServiceType = "ClusterIP", - additional_labels: Optional[dict] = None, - additional_selectors: Optional[dict] = None, - additional_annotations: Optional[dict] = None, - *, - refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, - ): - """Constructor for KubernetesServicePatch. - - Args: - charm: the charm that is instantiating the library. - ports: a list of ServicePorts - service_name: allows setting custom name to the patched service. If none given, - application name will be used. - service_type: desired type of K8s service. Default value is in line with ServiceSpec's - default value. - additional_labels: Labels to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_selectors: Selectors to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_annotations: Annotations to be added to the kubernetes service. - refresh_event: an optional bound event or list of bound events which - will be observed to re-apply the patch (e.g. on port change). - The `install` and `upgrade-charm` events would be observed regardless. 
- """ - super().__init__(charm, "kubernetes-service-patch") - self.charm = charm - self.service_name = service_name or self._app - # To avoid conflicts with the default Juju service, append "-lb" to the service name. - # The Juju application name is retained for the default service created by Juju. - if self.service_name == self._app and service_type == "LoadBalancer": - self.service_name = f"{self._app}-lb" - self.service_type = service_type - self.service = self._service_object( - ports, - self.service_name, - service_type, - additional_labels, - additional_selectors, - additional_annotations, - ) - - # Make mypy type checking happy that self._patch is a method - assert isinstance(self._patch, MethodType) - # Ensure this patch is applied during the 'install' and 'upgrade-charm' events - self.framework.observe(charm.on.install, self._patch) - self.framework.observe(charm.on.upgrade_charm, self._on_upgrade_charm) - self.framework.observe(charm.on.update_status, self._patch) - # Sometimes Juju doesn't clean-up a manually created LB service, - # so we clean it up ourselves just in case. - self.framework.observe(charm.on.remove, self._remove_service) - - # apply user defined events - if refresh_event: - if not isinstance(refresh_event, list): - refresh_event = [refresh_event] - - for evt in refresh_event: - self.framework.observe(evt, self._patch) - - def _service_object( - self, - ports: List[ServicePort], - service_name: Optional[str] = None, - service_type: ServiceType = "ClusterIP", - additional_labels: Optional[dict] = None, - additional_selectors: Optional[dict] = None, - additional_annotations: Optional[dict] = None, - ) -> Service: - """Creates a valid Service representation. - - Args: - ports: a list of ServicePorts - service_name: allows setting custom name to the patched service. If none given, - application name will be used. - service_type: desired type of K8s service. Default value is in line with ServiceSpec's - default value. - additional_labels: Labels to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_selectors: Selectors to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_annotations: Annotations to be added to the kubernetes service. - - Returns: - Service: A valid representation of a Kubernetes Service with the correct ports. - """ - if not service_name: - service_name = self._app - labels = {"app.kubernetes.io/name": self._app} - if additional_labels: - labels.update(additional_labels) - selector = {"app.kubernetes.io/name": self._app} - if additional_selectors: - selector.update(additional_selectors) - return Service( - apiVersion="v1", - kind="Service", - metadata=ObjectMeta( - namespace=self._namespace, - name=service_name, - labels=labels, - annotations=additional_annotations, # type: ignore[arg-type] - ), - spec=ServiceSpec( - selector=selector, - ports=ports, - type=service_type, - ), - ) - - def _patch(self, _) -> None: - """Patch the Kubernetes service created by Juju to map the correct port. - - Raises: - PatchFailed: if patching fails due to lack of permissions, or otherwise. 
- """ - try: - client = Client() # pyright: ignore - except exceptions.ConfigError as e: - logger.warning("Error creating k8s client: %s", e) - return - - try: - if self._is_patched(client): - return - if self.service_name != self._app: - if not self.service_type == "LoadBalancer": - self._delete_and_create_service(client) - else: - self._create_lb_service(client) - client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE) - except ApiError as e: - if e.status.code == 403: - logger.error("Kubernetes service patch failed: `juju trust` this application.") - else: - logger.error("Kubernetes service patch failed: %s", str(e)) - else: - logger.info("Kubernetes service '%s' patched successfully", self._app) - - def _delete_and_create_service(self, client: Client): - service = client.get(Service, self._app, namespace=self._namespace) - service.metadata.name = self.service_name # type: ignore[attr-defined] - service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501 - client.delete(Service, self._app, namespace=self._namespace) - client.create(service) - - def _create_lb_service(self, client: Client): - try: - client.get(Service, self.service_name, namespace=self._namespace) - except ApiError: - client.create(self.service) - - def is_patched(self) -> bool: - """Reports if the service patch has been applied. - - Returns: - bool: A boolean indicating if the service patch has been applied. - """ - client = Client() # pyright: ignore - return self._is_patched(client) - - def _is_patched(self, client: Client) -> bool: - # Get the relevant service from the cluster - try: - service = client.get(Service, name=self.service_name, namespace=self._namespace) - except ApiError as e: - if e.status.code == 404 and self.service_name != self._app: - return False - logger.error("Kubernetes service get failed: %s", str(e)) - raise - - # Construct a list of expected ports, should the patch be applied - expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports] # type: ignore[attr-defined] - # Construct a list in the same manner, using the fetched service - fetched_ports = [ - (p.port, p.targetPort) for p in service.spec.ports # type: ignore[attr-defined] - ] # noqa: E501 - return expected_ports == fetched_ports - - def _on_upgrade_charm(self, event: UpgradeCharmEvent): - """Handle the upgrade charm event.""" - # If a charm author changed the service type from LB to ClusterIP across an upgrade, we need to delete the previous LB. - if self.service_type == "ClusterIP": - - client = Client() # pyright: ignore - - # Define a label selector to find services related to the app - selector: dict[str, Any] = {"app.kubernetes.io/name": self._app} - - # Check if any service of type LoadBalancer exists - services = client.list(Service, namespace=self._namespace, labels=selector) - for service in services: - if ( - not service.metadata - or not service.metadata.name - or not service.spec - or not service.spec.type - ): - logger.warning( - "Service patch: skipping resource with incomplete metadata: %s.", service - ) - continue - if service.spec.type == "LoadBalancer": - client.delete(Service, service.metadata.name, namespace=self._namespace) - logger.info(f"LoadBalancer service {service.metadata.name} deleted.") - - # Continue the upgrade flow normally - self._patch(event) - - def _remove_service(self, _): - """Remove a Kubernetes service associated with this charm. 
- - Specifically designed to delete the load balancer service created by the charm, since Juju only deletes the - default ClusterIP service and not custom services. - - Returns: - None - - Raises: - ApiError: for deletion errors, excluding when the service is not found (404 Not Found). - """ - client = Client() # pyright: ignore - - try: - client.delete(Service, self.service_name, namespace=self._namespace) - logger.info("The patched k8s service '%s' was deleted.", self.service_name) - except ApiError as e: - if e.status.code == 404: - # Service not found, so no action needed - return - # Re-raise for other statuses - raise - - @property - def _app(self) -> str: - """Name of the current Juju application. - - Returns: - str: A string containing the name of the current Juju application. - """ - return self.charm.app.name - - @property - def _namespace(self) -> str: - """The Kubernetes namespace we're running in. - - Returns: - str: A string containing the name of the current Kubernetes namespace. - """ - with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: - return f.read().strip() diff --git a/requirements.txt b/requirements.txt index 7baf081b..1c414e95 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,6 +23,11 @@ jsonschema lightkube >= 0.8.1 lightkube-models >= 1.22.0.4 +# A collection of helpers and shared code for using Lightkube +# Code: https://github.com/canonical/lightkube-extensions +# Deps: charm +lightkube-extensions @ git+https://github.com/canonical/lightkube-extensions.git@main + # Operator Framework # Code: https://github.com/canonical/operator/ # Docs: https://ops.rtfd.io/ diff --git a/src/charm.py b/src/charm.py index cefe522f..0138ce66 100755 --- a/src/charm.py +++ b/src/charm.py @@ -5,7 +5,6 @@ """Charmed traefik operator.""" import contextlib import enum -import functools import itertools import json import logging @@ -34,7 +33,6 @@ ForwardAuthRequirerConfig, ) from charms.observability_libs.v1.cert_handler import CertHandler -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch from charms.prometheus_k8s.v0.prometheus_scrape import MetricsEndpointProvider from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer, charm_tracing_config @@ -52,8 +50,11 @@ from deepmerge import always_merger from lightkube.core.client import Client from lightkube.core.exceptions import ApiError -from lightkube.models.core_v1 import ServicePort +from lightkube.models.core_v1 import ServicePort, ServiceSpec +from lightkube.models.meta_v1 import ObjectMeta from lightkube.resources.core_v1 import Service +from lightkube_extensions.batch import KubernetesResourceManager, create_charm_default_labels +from ops import main from ops.charm import ( ActionEvent, CharmBase, @@ -65,7 +66,6 @@ UpdateStatusEvent, ) from ops.framework import StoredState -from ops.main import main from ops.model import ( ActiveStatus, BlockedStatus, @@ -92,6 +92,38 @@ _TRAEFIK_CONTAINER_NAME = "traefik" + +# Regex for Kubernetes annotation values: +# - Allows alphanumeric characters, dots (.), dashes (-), and underscores (_) +# - Matches the entire string +# - Does not allow empty strings +# - Example valid: "value1", "my-value", "value.name", "value_name" +# - Example invalid: "value@", "value#", "value space" +ANNOTATION_VALUE_PATTERN = re.compile(r"^[\w.\-_]+$") + +# Based on 
https://github.com/kubernetes/apimachinery/blob/v0.31.3/pkg/util/validation/validation.go#L204 +# Regex for DNS1123 subdomains: +# - Starts with a lowercase letter or number ([a-z0-9]) +# - May contain dashes (-), but not consecutively, and must not start or end with them +# - Segments can be separated by dots (.) +# - Example valid: "example.com", "my-app.io", "sub.domain" +# - Example invalid: "-example.com", "example..com", "example-.com" +DNS1123_SUBDOMAIN_PATTERN = re.compile( + r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" +) + +# Based on https://github.com/kubernetes/apimachinery/blob/v0.31.3/pkg/util/validation/validation.go#L32 +# Regex for Kubernetes qualified names: +# - Starts with an alphanumeric character ([A-Za-z0-9]) +# - Can include dashes (-), underscores (_), dots (.), or alphanumeric characters in the middle +# - Ends with an alphanumeric character +# - Must not be empty +# - Example valid: "annotation", "my.annotation", "annotation-name" +# - Example invalid: ".annotation", "annotation.", "-annotation", "annotation@key" +QUALIFIED_NAME_PATTERN = re.compile(r"^[A-Za-z0-9]([-A-Za-z0-9_.]*[A-Za-z0-9])?$") + +LB_LABEL = "traefik-loadbalancer" + PYDANTIC_IS_V1 = int(pydantic.version.VERSION.split(".")[0]) < 2 @@ -117,7 +149,6 @@ class ExternalHostNotReadyError(Exception): IPAv1, IngressPerUnitProvider, TraefikRouteProvider, - KubernetesServicePatch, ), ) class TraefikIngressCharm(CharmBase): @@ -148,6 +179,9 @@ def __init__(self, *args): self.container = self.unit.get_container(_TRAEFIK_CONTAINER_NAME) + self._lightkube_client = None + self._lightkube_field_manager: str = self.app.name + self._lb_name: str = f"{self.app.name}-lb" sans = self.server_cert_sans_dns self.cert = CertHandler( self, @@ -171,8 +205,8 @@ def __init__(self, *args): # FIXME # stored.tcp_entrypoints would be used for this list instead, but it's never accessed. # intentional or can it be used so we don't need to worry about ordering? 
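For a quick sanity check of the three validation patterns introduced at the top of this hunk, here is a standalone sketch (not part of the patch itself); the regexes are copied verbatim from the diff, and the sample strings are taken from its comments:

```python
# Standalone sketch: exercising the validation regexes added to src/charm.py.
# Patterns are copied verbatim from the diff; sample strings come from its comments.
import re

ANNOTATION_VALUE_PATTERN = re.compile(r"^[\w.\-_]+$")
DNS1123_SUBDOMAIN_PATTERN = re.compile(
    r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
)
QUALIFIED_NAME_PATTERN = re.compile(r"^[A-Za-z0-9]([-A-Za-z0-9_.]*[A-Za-z0-9])?$")

assert ANNOTATION_VALUE_PATTERN.match("my-value")           # valid value
assert not ANNOTATION_VALUE_PATTERN.match("value space")    # spaces are rejected
assert DNS1123_SUBDOMAIN_PATTERN.match("example.com")       # valid key prefix
assert not DNS1123_SUBDOMAIN_PATTERN.match("example..com")  # empty label is rejected
assert QUALIFIED_NAME_PATTERN.match("annotation-name")      # valid key name part
assert not QUALIFIED_NAME_PATTERN.match("-annotation")      # must start alphanumeric
```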
- self.ingress_per_appv1 = ipa_v1 = IPAv1(charm=self) - self.ingress_per_appv2 = ipa_v2 = IPAv2(charm=self) + self.ingress_per_appv1 = IPAv1(charm=self) + self.ingress_per_appv2 = IPAv2(charm=self) self.ingress_per_unit = IngressPerUnitProvider(charm=self) @@ -210,22 +244,6 @@ def __init__(self, *args): ), ) - self.service_patch = KubernetesServicePatch( - charm=self, - service_type="LoadBalancer", - ports=self._service_ports, - refresh_event=[ - ipa_v1.on.data_provided, # type: ignore - ipa_v2.on.data_provided, # type: ignore - ipa_v1.on.data_removed, # type: ignore - ipa_v2.on.data_removed, # type: ignore - self.ingress_per_unit.on.data_provided, # type: ignore - self.ingress_per_unit.on.data_removed, # type: ignore - self.traefik_route.on.ready, # type: ignore - self.traefik_route.on.data_removed, # type: ignore - self.on.traefik_pebble_ready, # type: ignore - ], - ) # Observability integrations # Provide grafana dashboards over a relation interface @@ -266,6 +284,7 @@ def __init__(self, *args): observe(self.on.traefik_pebble_ready, self._on_traefik_pebble_ready) # type: ignore observe(self.on.start, self._on_start) observe(self.on.stop, self._on_stop) + observe(self.on.remove, self._on_remove) observe(self.on.update_status, self._on_update_status) observe(self.on.config_changed, self._on_config_changed) observe( @@ -340,6 +359,93 @@ def _basic_auth_user(self) -> Optional[str]: """ return cast(Optional[str], self.config.get("basic_auth_user", None)) + @property + def _loadbalancer_annotations(self) -> Optional[Dict[str, str]]: + """Parses and returns annotations to apply to the LoadBalancer service. + + The annotations are expected as a string in the configuration, + formatted as: "key1=value1,key2=value2,key3=value3". This string is + parsed into a dictionary where each key-value pair corresponds to an annotation. + + Returns: + Optional[Dict[str, str]]: A dictionary of annotations if provided in the Juju config and valid, otherwise None. 
+ """ + lb_annotations = cast(Optional[str], self.config.get("loadbalancer_annotations", None)) + return parse_annotations(lb_annotations) + + @property + def lightkube_client(self): + """Returns a lightkube client configured for this charm.""" + if self._lightkube_client is None: + self._lightkube_client = Client( + namespace=self.model.name, field_manager=self._lightkube_field_manager + ) + return self._lightkube_client + + def _get_lb_resource_manager(self): + return KubernetesResourceManager( + labels=create_charm_default_labels(self.app.name, self.model.name, scope=LB_LABEL), + resource_types={Service}, + lightkube_client=self.lightkube_client, + logger=logger, + ) + + def _construct_lb(self) -> Service: + return Service( + metadata=ObjectMeta( + name=f"{self.app.name}-lb", + namespace=self.model.name, + labels={"app.kubernetes.io/name": self.app.name}, + annotations=self._loadbalancer_annotations, + ), + spec=ServiceSpec( + ports=self._service_ports, + selector={"app.kubernetes.io/name": self.app.name}, + type="LoadBalancer", + ), + ) + + def _reconcile_lb(self): + """Reconcile the LoadBalancer's state.""" + klm = self._get_lb_resource_manager() + + resources_list = [] + if self._annotations_valid: + resources_list.append(self._construct_lb()) + klm.reconcile(resources_list) + + @property + def _get_loadbalancer_status(self) -> Optional[str]: + try: + traefik_service = self.lightkube_client.get( + Service, name=self._lb_name, namespace=self.model.name + ) + except ApiError as e: + logger.error(f"Failed to fetch LoadBalancer {self._lb_name}: {e}") + return None + + if not (status := getattr(traefik_service, "status", None)): + return None + if not (load_balancer_status := getattr(status, "loadBalancer", None)): + return None + if not (ingress_addresses := getattr(load_balancer_status, "ingress", None)): + return None + if not (ingress_address := ingress_addresses[0]): + return None + + return ingress_address.hostname or ingress_address.ip + + @property + def _annotations_valid(self) -> bool: + """Check if the annotations are valid. + + :return: True if the annotations are valid, False otherwise. + """ + if self._loadbalancer_annotations is None: + logger.error("Annotations are invalid or could not be parsed.") + return False + return True + def _on_forward_auth_config_changed(self, event: AuthConfigChangedEvent): if self._is_forward_auth_enabled: if self.forward_auth.is_ready(): @@ -357,6 +463,7 @@ def _on_recv_ca_cert_available(self, event: CertificateTransferAvailableEvent): if not self.container.can_connect(): return self._update_received_ca_certs(event) + self._reconcile_lb() def _update_received_ca_certs(self, event: Optional[CertificateTransferAvailableEvent] = None): """Push the cert attached to the event, if it is given; otherwise push all certs. @@ -383,6 +490,7 @@ def _update_received_ca_certs(self, event: Optional[CertificateTransferAvailable def _on_recv_ca_cert_removed(self, event: CertificateTransferRemovedEvent): # Assuming only one cert per relation (this is in line with the original lib design). self.traefik.remove_cas([event.relation_id]) + self._reconcile_lb() def _is_tls_enabled(self) -> bool: """Return True if TLS is enabled.""" @@ -526,6 +634,10 @@ def _on_stop(self, _): # the workload version from before the upgrade. 
self.unit.set_workload_version("") + def _on_remove(self, _): + klm = self._get_lb_resource_manager() + klm.delete() + def _on_update_status(self, _: UpdateStatusEvent): self._process_status_and_configurations() self._set_workload_version() @@ -557,6 +669,7 @@ def _update_config_if_changed(self): # that we're processing a config-changed event, doesn't necessarily mean that our config has changed (duh!) # If the config hash has changed since we last calculated it, we need to # recompute our state from scratch, based on all data sent over the relations and all configs + self._reconcile_lb() new_config_hash = self._config_hash if self._stored.config_hash != new_config_hash: self._stored.config_hash = new_config_hash @@ -570,6 +683,7 @@ def _update_config_if_changed(self): self._process_status_and_configurations() def _process_status_and_configurations(self): + self._reconcile_lb() if ( self.config.get("tls-ca", None) or self.config.get("tls-cert", None) @@ -601,7 +715,9 @@ def _process_status_and_configurations(self): if not hostname: self._wipe_ingress_for_all_relations() - self.unit.status = WaitingStatus("gateway address unavailable") + self.unit.status = BlockedStatus( + "Traefik load balancer is unable to obtain an IP or hostname from the cluster." + ) return if hostname != urlparse(f"scheme://{hostname}").hostname: @@ -682,7 +798,9 @@ def ready(self) -> bool: """Check whether we have an external host set, and traefik is running.""" if not self._external_host: self._wipe_ingress_for_all_relations() # fixme: no side-effects in prop - self.unit.status = WaitingStatus("gateway address unavailable") + self.unit.status = BlockedStatus( + "Traefik load balancer is unable to obtain an IP or hostname from the cluster." + ) return False if not self.traefik.is_ready: self.unit.status = WaitingStatus(f"waiting for service: '{self.traefik.service_name}'") @@ -713,6 +831,7 @@ def _handle_ingress_data_removed(self, event: RelationEvent): # self._process_status_and_configurations(). For this reason, the static config in # traefik.STATIC_CONFIG_PATH will be updated only on update-status. # https://github.com/canonical/operator/issues/888 + self._reconcile_lb() def _handle_traefik_route_ready(self, event: TraefikRouteRequirerReadyEvent): """A traefik_route charm has published some ingress data.""" @@ -744,7 +863,7 @@ def _handle_traefik_route_ready(self, event: TraefikRouteRequirerReadyEvent): "Failed to merge traefik-route static configs. " "Check logs for details." ) return - + self._reconcile_lb() self.unit.status = ActiveStatus(f"Serving at {self._external_host}") def _process_ingress_relation(self, relation: Relation): @@ -1112,9 +1231,7 @@ def _external_host(self) -> Optional[str]: if external_hostname := self.model.config.get("external_hostname"): return cast(str, external_hostname) - return _get_loadbalancer_status( - namespace=self.model.name, service_name=f"{self.app.name}-lb" - ) + return self._get_loadbalancer_status @property def external_host(self) -> str: @@ -1220,25 +1337,82 @@ def is_valid_hostname(hostname: str) -> bool: return all(allowed.match(label) for label in labels) -@functools.lru_cache -def _get_loadbalancer_status(namespace: str, service_name: str) -> Optional[str]: - client = Client() # type: ignore +def validate_annotation_key(key: str) -> bool: + """Validate the annotation key.""" + if len(key) > 253: + logger.error(f"Invalid annotation key: '{key}'. 
Key length exceeds 253 characters.") + return False + + if not is_qualified_name(key.lower()): + logger.error(f"Invalid annotation key: '{key}'. Must follow Kubernetes annotation syntax.") + return False + + if key.startswith(("kubernetes.io/", "k8s.io/")): + logger.error(f"Invalid annotation: Key '{key}' uses a reserved prefix.") + return False + + return True + + +def validate_annotation_value(value: str) -> bool: + """Validate the annotation value.""" + if not ANNOTATION_VALUE_PATTERN.match(value): + logger.error( + f"Invalid annotation value: '{value}'. Must follow Kubernetes annotation syntax." + ) + return False + + return True + + +def parse_annotations(annotations: Optional[str]) -> Optional[Dict[str, str]]: + """Parse and validate annotations from a string. + + logic is based on Kubernetes annotation validation as described here: + https://github.com/kubernetes/apimachinery/blob/v0.31.3/pkg/api/validation/objectmeta.go#L44 + """ + if not annotations: + return {} + + annotations = annotations.strip().rstrip(",") # Trim spaces and trailing commas + try: - traefik_service = client.get(Service, name=service_name, namespace=namespace) - except ApiError as e: - logger.warning(f"Got ApiError when trying to get Loadbalancer status: {e}") + parsed_annotations = { + key.strip(): value.strip() + for key, value in (pair.split("=", 1) for pair in annotations.split(",") if pair) + } + except ValueError: + logger.error( + "Invalid format for 'loadbalancer_annotations'. " + "Expected format: key1=value1,key2=value2." + ) return None - if not (status := traefik_service.status): # type: ignore - return None - if not (load_balancer_status := status.loadBalancer): - return None - if not (ingress_addresses := load_balancer_status.ingress): - return None - if not (ingress_address := ingress_addresses[0]): - return None + # Validate each key-value pair + for key, value in parsed_annotations.items(): + if not validate_annotation_key(key) or not validate_annotation_value(value): + return None + + return parsed_annotations + + +def is_qualified_name(value: str) -> bool: + """Check if a value is a valid Kubernetes qualified name.""" + parts = value.split("/") + if len(parts) > 2: + return False # Invalid if more than one '/' + + if len(parts) == 2: # If prefixed + prefix, name = parts + if not prefix or not DNS1123_SUBDOMAIN_PATTERN.match(prefix): + return False + else: + name = parts[0] # No prefix + + if not name or len(name) > 63 or not QUALIFIED_NAME_PATTERN.match(name): + return False - return ingress_address.hostname or ingress_address.ip + return True def _get_relation_type(relation: Relation) -> _IngressRelationType: diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 6d3e6a3d..a532d495 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -71,7 +71,6 @@ def copy_traefik_library_into_tester_charms(ops_test): libraries = [ "traefik_k8s/v2/ingress.py", "traefik_k8s/v1/ingress_per_unit.py", - "observability_libs/v1/kubernetes_service_patch.py", "traefik_k8s/v0/traefik_route.py", ] for tester in ["forward-auth", "ipa", "ipu", "tcp", "route"]: diff --git a/tests/integration/test_charm_route.py b/tests/integration/test_charm_route.py index f7022a7c..a4342280 100644 --- a/tests/integration/test_charm_route.py +++ b/tests/integration/test_charm_route.py @@ -77,7 +77,7 @@ async def test_added_entrypoint_reachable(ops_test: OpsTest): req = Request(f"http://{traefik_ip}:4545") with pytest.raises(urllib.error.HTTPError, match="404"): - urlopen(req, 
timeout=5) + urlopen(req, timeout=60) @pytest.mark.teardown diff --git a/tests/integration/testers/tcp/charmcraft.yaml b/tests/integration/testers/tcp/charmcraft.yaml index 65dafb1c..b4f11527 100644 --- a/tests/integration/testers/tcp/charmcraft.yaml +++ b/tests/integration/testers/tcp/charmcraft.yaml @@ -12,7 +12,6 @@ parts: charm: charm-binary-python-packages: - ops - - lightkube - pydantic>=2 build-packages: - git diff --git a/tests/integration/testers/tcp/requirements.txt b/tests/integration/testers/tcp/requirements.txt index 72bb598b..2d81d3bb 100644 --- a/tests/integration/testers/tcp/requirements.txt +++ b/tests/integration/testers/tcp/requirements.txt @@ -1,2 +1 @@ ops -lightkube diff --git a/tests/integration/testers/tcp/src/charm.py b/tests/integration/testers/tcp/src/charm.py index 1d4d49d7..20d0378d 100755 --- a/tests/integration/testers/tcp/src/charm.py +++ b/tests/integration/testers/tcp/src/charm.py @@ -3,10 +3,6 @@ # See LICENSE file for licensing details. from pathlib import Path -from charms.observability_libs.v1.kubernetes_service_patch import ( - KubernetesServicePatch, - ServicePort, -) from charms.traefik_k8s.v1.ingress_per_unit import IngressPerUnitRequirer from ops.charm import CharmBase, PebbleReadyEvent from ops.model import ActiveStatus, Container, WaitingStatus @@ -14,16 +10,12 @@ class TCPRequirerMock(CharmBase): - _tcp_port = 9999 # port is hard-coded in workload.py + _tcp_port = 9999 def __init__(self, framework): super().__init__(framework) - self.service_patch = KubernetesServicePatch( - charm=self, - ports=[ServicePort(self._tcp_port, name=f"{self.app.name}")], - ) - + self.unit.open_port("tcp", self._tcp_port) self.unit.status = ActiveStatus("ready") # dummy charm: only create the relation AFTER pebble ready has fired. diff --git a/tests/interface/conftest.py b/tests/interface/conftest.py index 5df2abb2..b3de8f8b 100644 --- a/tests/interface/conftest.py +++ b/tests/interface/conftest.py @@ -18,7 +18,7 @@ # to include the new identifier/location. 
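As seen in the tcp tester diff above, the tester no longer patches the Kubernetes Service and instead asks Juju to open the port. A minimal self-contained sketch of that pattern follows; `Unit.open_port` is the real `ops` API used in the diff, while the class name and status message here are illustrative:

```python
#!/usr/bin/env python3
"""Minimal sketch: expose a TCP port through Juju instead of patching the Service."""
from ops import main
from ops.charm import CharmBase
from ops.model import ActiveStatus


class MinimalTCPCharm(CharmBase):
    _tcp_port = 9999  # must match the port the workload listens on

    def __init__(self, framework):
        super().__init__(framework)
        # Juju exposes opened ports on the application's Kubernetes Service,
        # so no manual Service patching is required.
        self.unit.open_port("tcp", self._tcp_port)
        self.unit.status = ActiveStatus("ready")


if __name__ == "__main__":
    main(MinimalTCPCharm)
```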
@pytest.fixture def interface_tester(interface_tester: InterfaceTester): - with patch("charm.KubernetesServicePatch", lambda **unused: None): + with patch("charm.KubernetesLoadBalancer", lambda **unused: None): interface_tester.configure( charm_type=TraefikIngressCharm, state_template=State( diff --git a/tests/scenario/conftest.py b/tests/scenario/conftest.py index 51fc1db3..befd8afb 100644 --- a/tests/scenario/conftest.py +++ b/tests/scenario/conftest.py @@ -1,4 +1,4 @@ -from unittest.mock import patch +from unittest.mock import PropertyMock, patch import pytest from ops import pebble @@ -11,13 +11,13 @@ @pytest.fixture def traefik_charm(): - with patch("charm.KubernetesServicePatch"): - with patch("lightkube.core.client.GenericSyncClient"): - with patch( - "charm._get_loadbalancer_status", - return_value=MOCK_LB_ADDRESS, - ): - yield TraefikIngressCharm + with patch("lightkube.core.client.GenericSyncClient"): + with patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value=MOCK_LB_ADDRESS, + ): + yield TraefikIngressCharm @pytest.fixture diff --git a/tests/scenario/test_setup.py b/tests/scenario/test_setup.py index 6e85d040..45f67eda 100644 --- a/tests/scenario/test_setup.py +++ b/tests/scenario/test_setup.py @@ -51,7 +51,10 @@ def test_start_traefik_no_hostname(*_, traefik_ctx): containers=[Container(name="traefik", can_connect=False)], ) out = Context(charm_type=TraefikIngressCharm).run("start", state) - assert out.unit_status == ("waiting", "gateway address unavailable") + assert out.unit_status == ( + "blocked", + "Traefik load balancer is unable to obtain an IP or hostname from the cluster.", + ) @patch("charm.TraefikIngressCharm._external_host", PropertyMock(return_value="foo.bar")) diff --git a/tests/scenario/test_status.py b/tests/scenario/test_status.py index f386a8a6..27dfe6c7 100644 --- a/tests/scenario/test_status.py +++ b/tests/scenario/test_status.py @@ -3,7 +3,7 @@ from unittest.mock import PropertyMock, patch -from ops import ActiveStatus, WaitingStatus +from ops import ActiveStatus, BlockedStatus, WaitingStatus from scenario import Container, State @@ -32,7 +32,9 @@ def test_start_traefik_no_hostname(traefik_ctx, *_): out = traefik_ctx.run("start", state) # THEN unit status is `waiting` - assert out.unit_status == WaitingStatus("gateway address unavailable") + assert out.unit_status == BlockedStatus( + "Traefik load balancer is unable to obtain an IP or hostname from the cluster." 
+ ) @patch("charm.TraefikIngressCharm._external_host", PropertyMock(return_value="foo.bar")) diff --git a/tests/scenario/test_workload_version.py b/tests/scenario/test_workload_version.py index 4a10e57c..bb1c3afb 100644 --- a/tests/scenario/test_workload_version.py +++ b/tests/scenario/test_workload_version.py @@ -10,7 +10,6 @@ from charm import TraefikIngressCharm -@patch("charm.KubernetesServicePatch") @patch("lightkube.core.client.GenericSyncClient") @patch("charm.TraefikIngressCharm._static_config_changed", PropertyMock(return_value=False)) @patch("charm.TraefikIngressCharm._external_host", PropertyMock(return_value="foo.bar")) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py new file mode 100644 index 00000000..8fb49a1e --- /dev/null +++ b/tests/unit/conftest.py @@ -0,0 +1,15 @@ +from unittest.mock import patch + +import pytest +from lightkube import Client + + +@pytest.fixture(autouse=True) +def mock_lightkube_client(): + """Global mock for the Lightkube Client to avoid loading kubeconfig in CI.""" + with patch.object(Client, "__init__", lambda self, *args, **kwargs: None): + with patch.object(Client, "_client", create=True): + with patch.object(Client, "get"): + with patch.object(Client, "patch"): + with patch.object(Client, "list"): + yield diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 84089a03..cf4c93a2 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -4,13 +4,13 @@ import json import socket import unittest -from unittest.mock import Mock, patch +from unittest.mock import Mock, PropertyMock, patch import ops.testing import yaml from charms.traefik_k8s.v2.ingress import IngressRequirerAppData, IngressRequirerUnitData from ops.charm import ActionEvent -from ops.model import ActiveStatus, Application, BlockedStatus, Relation, WaitingStatus +from ops.model import ActiveStatus, Application, BlockedStatus, Relation from ops.pebble import PathError from ops.testing import Harness @@ -144,7 +144,6 @@ def setUp(self): self.mock_version = patcher.start() self.addCleanup(patcher.stop) - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) def test_service_get(self): self.harness.update_config({"external_hostname": "testhostname"}) self.harness.set_leader(True) @@ -153,7 +152,6 @@ def test_service_get(self): self.assertTrue(self.harness.charm.traefik.is_ready) - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) def test_bad_routing_mode_config_and_recovery(self): """Test round-trip bootstrap and relation with a consumer.""" self.harness.update_config({"external_hostname": "testhostname"}) @@ -182,15 +180,21 @@ def test_bad_routing_mode_config_and_recovery(self): self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) - @patch("charm._get_loadbalancer_status", lambda **__: None) - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def test_pebble_ready_without_gateway_address(self): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value=None, + ) + def test_pebble_ready_without_gateway_address(self, mock_get_loadbalancer_status): """Test that requirers do not get addresses until the gateway address is available.""" self.harness.set_leader(True) self.harness.begin_with_initial_hooks() self.assertEqual( - self.harness.charm.unit.status, WaitingStatus("gateway address unavailable") + self.harness.charm.unit.status, + BlockedStatus( + "Traefik load balancer is unable to obtain an IP or hostname from the cluster." 
+ ), ) self.harness.container_pebble_ready("traefik") @@ -203,12 +207,18 @@ def test_pebble_ready_without_gateway_address(self): assert not requirer.is_ready() self.assertEqual( - self.harness.charm.unit.status, WaitingStatus("gateway address unavailable") + self.harness.charm.unit.status, + BlockedStatus( + "Traefik load balancer is unable to obtain an IP or hostname from the cluster." + ), ) - @patch("charm._get_loadbalancer_status", lambda **__: "10.0.0.1") - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def test_pebble_ready_with_joined_relations(self): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value="10.0.0.1", + ) + def test_pebble_ready_with_joined_relations(self, mock_get_loadbalancer_status): self.harness.set_leader(True) self.harness.begin_with_initial_hooks() @@ -227,9 +237,12 @@ def test_pebble_ready_with_joined_relations(self): ) self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) - @patch("charm._get_loadbalancer_status", lambda **__: "10.0.0.1") - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def test_gateway_address_change_with_joined_relations(self): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value="10.0.0.1", + ) + def test_gateway_address_change_with_joined_relations(self, mock_get_loadbalancer_status): self.harness.set_leader(True) self.harness.begin_with_initial_hooks() @@ -256,9 +269,14 @@ def test_gateway_address_change_with_joined_relations(self): ) self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) - @patch("charm._get_loadbalancer_status", lambda **__: None) - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def test_gateway_address_becomes_unavailable_after_relation_join(self): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value=None, + ) + def test_gateway_address_becomes_unavailable_after_relation_join( + self, mock_get_loadbalancer_status + ): self.harness.update_config({"external_hostname": "testhostname"}) self.harness.set_leader(True) self.harness.begin_with_initial_hooks() @@ -279,12 +297,14 @@ def test_gateway_address_becomes_unavailable_after_relation_join(self): self.harness.update_config(unset=["external_hostname"]) self.assertEqual( - self.harness.charm.unit.status, WaitingStatus("gateway address unavailable") + self.harness.charm.unit.status, + BlockedStatus( + "Traefik load balancer is unable to obtain an IP or hostname from the cluster." 
+ ), ) self.assertEqual(requirer.urls, {}) - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) def test_relation_broken(self): self.harness.update_config({"external_hostname": "testhostname"}) self.harness.set_leader(True) @@ -310,9 +330,12 @@ def test_relation_broken(self): except (FileNotFoundError, PathError): pass - @patch("charm._get_loadbalancer_status", lambda **__: None) - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def test_show_proxied_endpoints_action_no_relations(self): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value=None, + ) + def test_show_proxied_endpoints_action_no_relations(self, mock_get_loadbalancer_status): self.harness.begin_with_initial_hooks() action_event = Mock(spec=ActionEvent) self.harness.update_config({"external_hostname": "foo"}) @@ -321,9 +344,14 @@ def test_show_proxied_endpoints_action_no_relations(self): {"proxied-endpoints": '{"traefik-k8s": {"url": "http://foo"}}'} ) - @patch("charm._get_loadbalancer_status", lambda **__: None) - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def test_show_proxied_endpoints_action_only_ingress_per_app_relations(self): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value=None, + ) + def test_show_proxied_endpoints_action_only_ingress_per_app_relations( + self, mock_get_loadbalancer_status + ): self.harness.set_leader(True) self.harness.update_config({"external_hostname": "testhostname"}) self.harness.begin_with_initial_hooks() @@ -353,9 +381,14 @@ def test_show_proxied_endpoints_action_only_ingress_per_app_relations(self): } ) - @patch("charm._get_loadbalancer_status", lambda **__: None) - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def test_show_proxied_endpoints_action_only_ingress_per_unit_relations(self): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value=None, + ) + def test_show_proxied_endpoints_action_only_ingress_per_unit_relations( + self, mock_get_loadbalancer_status + ): self.harness.set_leader(True) self.harness.update_config({"external_hostname": "testhostname"}) self.harness.begin_with_initial_hooks() @@ -380,9 +413,12 @@ def test_show_proxied_endpoints_action_only_ingress_per_unit_relations(self): } ) - @patch("charm._get_loadbalancer_status", lambda **__: None) - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def test_base_static_config(self): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value=None, + ) + def test_base_static_config(self, mock_get_loadbalancer_status): """Verify that the static config that should always be there, is in fact there.""" self.harness.set_leader(True) self.harness.update_config({"external_hostname": "testhostname"}) @@ -405,9 +441,12 @@ def test_base_static_config(self): assert cfg["providers"]["file"]["directory"] assert cfg["providers"]["file"]["watch"] is True - @patch("charm._get_loadbalancer_status", lambda **__: None) - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def test_tcp_config(self): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value=None, + ) + def test_tcp_config(self, mock_get_loadbalancer_status): self.harness.set_leader(True) self.harness.update_config({"external_hostname": "testhostname"}) self.harness.begin_with_initial_hooks() @@ -445,7 +484,6 @@ def 
setup_forward_auth_relation(self) -> int: return relation_id - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) def test_forward_auth_relation_databag(self): self.harness.update_config({"enable_experimental_forward_auth": True}) self.harness.set_leader(True) @@ -468,7 +506,6 @@ def test_forward_auth_relation_databag(self): assert expected_provider_info.app_names == provider_info["app_names"] assert expected_provider_info.headers == provider_info["headers"] - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) def test_forward_auth_relation_changed(self): self.harness.update_config({"enable_experimental_forward_auth": True}) self.harness.set_leader(True) @@ -482,7 +519,6 @@ def test_forward_auth_relation_changed(self): _ = self.setup_forward_auth_relation() assert mocked_handle.called - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) def test_forward_auth_relation_removed(self): self.harness.set_leader(True) self.harness.update_config({"external_hostname": "testhostname"}) @@ -509,9 +545,12 @@ def setUp(self): self.container_name = "traefik" @patch("ops.model.Container.exec") - @patch("charm._get_loadbalancer_status", lambda **__: "10.0.0.1") - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def test_transferred_ca_certs_are_updated(self, patch_exec): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value="10.0.0.1", + ) + def test_transferred_ca_certs_are_updated(self, mock_get_loadbalancer_status, patch_exec): # Given container is ready, when receive-ca-cert relation joins, # then ca certs are updated. provider_app = "self-signed-certificates" @@ -531,9 +570,12 @@ def test_transferred_ca_certs_are_updated(self, patch_exec): ] @patch("ops.model.Container.exec") - @patch("charm._get_loadbalancer_status", lambda **__: "10.0.0.1") - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def test_transferred_ca_certs_are_not_updated(self, patch_exec): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value="10.0.0.1", + ) + def test_transferred_ca_certs_are_not_updated(self, mock_get_loadbalancer_status, patch_exec): # Given container is not ready, when receive-ca-cert relation joins, # then not attempting to update ca certs. 
provider_app = "self-signed-certificates" @@ -549,9 +591,12 @@ def test_transferred_ca_certs_are_not_updated(self, patch_exec): class TestConfigOptionsValidation(unittest.TestCase): - @patch("charm._get_loadbalancer_status", lambda **_: "10.0.0.1") - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def setUp(self): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value="10.0.0.1", + ) + def setUp(self, mock_get_loadbalancer_status): self.harness: Harness[TraefikIngressCharm] = Harness(TraefikIngressCharm) self.harness.set_model_name("test-model") self.addCleanup(self.harness.cleanup) @@ -573,19 +618,13 @@ def setUp(self): harness=self.harness, relation=self.relation, host="10.1.10.1", port=9000 ) - @patch("charm._get_loadbalancer_status", lambda **_: "10.0.0.1") - @patch("charm.KubernetesServicePatch", lambda **_: None) def test_when_external_hostname_not_set_use_ip_with_port_80(self): self.assertEqual(requirer.urls, {"remote/0": "http://10.0.0.1/test-model-remote-0"}) - @patch("charm._get_loadbalancer_status", lambda **_: "10.0.0.1") - @patch("charm.KubernetesServicePatch", lambda **_: None) def test_when_external_hostname_is_set_use_it_with_port_80(self): self.harness.update_config({"external_hostname": "testhostname"}) self.assertEqual(requirer.urls, {"remote/0": "http://testhostname/test-model-remote-0"}) - @patch("charm._get_loadbalancer_status", lambda **_: "10.0.0.1") - @patch("charm.KubernetesServicePatch", lambda **_: None) def test_when_external_hostname_is_invalid_go_into_blocked_status(self): for invalid_hostname in [ "testhostname:8080", @@ -598,3 +637,49 @@ def test_when_external_hostname_is_invalid_go_into_blocked_status(self): self.harness.charm.unit.status, BlockedStatus, invalid_hostname ) self.assertEqual(requirer.urls, {}) + + def test_lb_annotations(self): + test_cases = [ + ("key1=value1,key2=value2", {"key1": "value1", "key2": "value2"}), + ("", {}), + ( + "key1=value1,key_2=value2,key-3=value3,", + {"key1": "value1", "key_2": "value2", "key-3": "value3"}, + ), + ( + "key1=value1,key2=value2,key3=value3", + {"key1": "value1", "key2": "value2", "key3": "value3"}, + ), + ("example.com/key=value", {"example.com/key": "value"}), + ( + "prefix1/key=value1,prefix2/another-key=value2", + {"prefix1/key": "value1", "prefix2/another-key": "value2"}, + ), + ( + "key=value,key.sub-key=value-with-hyphen", + {"key": "value", "key.sub-key": "value-with-hyphen"}, + ), + # Invalid cases + ("key1=value1,key2=value2,key=value3,key4=", None), # Missing value for key4 + ( + "kubernetes.io/description=this-is-valid,custom.io/key=value", + None, + ), # Reserved prefix used + ("key1=value1,key2", None), + ("key1=value1,example..com/key2=value2", None), # Invalid domain format (double dot) + ("key1=value1,key=value2,key3=", None), # Trailing equals for key3 + ("key1=value1,=value2", None), # Missing key + ("key1=value1,key=val=ue2", None), # Extra equals in value + ("a" * 256 + "=value", None), # Key exceeds max length (256 characters) + ("key@=value", None), # Invalid character in key + ("key. 
=value", None), # Space in key + ("key,value", None), # Missing '=' delimiter + ("kubernetes/description=", None), # Key with no value + ] + + for annotations, expected_result in test_cases: + with self.subTest(annotations=annotations, expected_result=expected_result): + # Update the config with the test annotation string + self.harness.update_config({"loadbalancer_annotations": annotations}) + # Check if the _loadbalancer_annotations property returns the expected result + self.assertEqual(self.harness.charm._loadbalancer_annotations, expected_result) diff --git a/tests/unit/test_route.py b/tests/unit/test_route.py index efdc843a..0c137aea 100644 --- a/tests/unit/test_route.py +++ b/tests/unit/test_route.py @@ -129,7 +129,6 @@ def topology(harness): return topology -@patch("charm.KubernetesServicePatch", lambda *_, **__: None) def initialize_and_setup_tr_relation(harness): harness.update_config({"external_hostname": "testhostname"}) harness.set_leader(True) diff --git a/tests/unit/test_tls_certificates.py b/tests/unit/test_tls_certificates.py index 166c74e9..d24fc65f 100644 --- a/tests/unit/test_tls_certificates.py +++ b/tests/unit/test_tls_certificates.py @@ -2,7 +2,7 @@ # See LICENSE file for licensing details. import unittest -from unittest.mock import patch +from unittest.mock import PropertyMock, patch import ops.testing from ops.testing import Harness @@ -13,9 +13,12 @@ class TlsWithExternalHostname(unittest.TestCase): - @patch("charm._get_loadbalancer_status", lambda **_: "10.0.0.1") - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def setUp(self): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value="10.0.0.1", + ) + def setUp(self, mock_get_loadbalancer_status): self.harness: Harness[TraefikIngressCharm] = Harness(TraefikIngressCharm) self.harness.set_model_name("test-model") self.addCleanup(self.harness.cleanup) @@ -32,9 +35,12 @@ def setUp(self): self.harness.begin_with_initial_hooks() self.harness.container_pebble_ready("traefik") - @patch("charm._get_loadbalancer_status", lambda **_: "10.0.0.1") - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) - def test_external_hostname_is_set_after_relation_joins(self): + @patch( + "charm.TraefikIngressCharm._get_loadbalancer_status", + new_callable=PropertyMock, + return_value="10.0.0.1", + ) + def test_external_hostname_is_set_after_relation_joins(self, mock_get_loadbalancer_status): # GIVEN an external hostname is not set self.assertFalse(self.harness.charm.config.get("external_hostname")) self.assertEqual(self.harness.charm.external_host, "10.0.0.1") @@ -55,8 +61,6 @@ def test_external_hostname_is_set_after_relation_joins(self): unit_databag = self.harness.get_relation_data(self.rel_id, self.harness.charm.unit.name) self.assertIsNotNone(unit_databag.get("certificate_signing_requests")) - @patch("charm._get_loadbalancer_status", lambda **_: "10.0.0.1") - @patch("charm.KubernetesServicePatch", lambda *_, **__: None) def test_external_hostname_is_set_before_relation_joins(self): # GIVEN an external hostname is set self.harness.update_config({"external_hostname": "testhostname"})