diff --git a/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/lib/charms/observability_libs/v1/kubernetes_service_patch.py
deleted file mode 100644
index e85834be31..0000000000
--- a/lib/charms/observability_libs/v1/kubernetes_service_patch.py
+++ /dev/null
@@ -1,433 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-# See LICENSE file for licensing details.
-
-"""# KubernetesServicePatch Library.
-
-This library is designed to enable developers to more simply patch the Kubernetes Service created
-by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a
-service named after the application in the namespace (named after the Juju model). This service by
-default contains a "placeholder" port, which is 65535/TCP.
-
-When modifying the default set of resources managed by Juju, one must consider the lifecycle of
-the charm. In this case, any modifications to the default service (created during deployment) will
-be overwritten during a charm upgrade.
-
-When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm`
-events, which applies the patch to the cluster. This should ensure that the service ports are
-correct throughout the charm's life.
-
-The constructor simply takes a reference to the parent charm and a list of
-[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts, each of which defines a port
-for the service. For information regarding the `lightkube` `ServicePort` model, please visit the
-`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport).
-
-Optionally, a service name (in case the service name also needs to be patched), labels,
-selectors, and annotations can be provided as keyword arguments.
-
-## Getting Started
-
-To get started using the library, you just need to fetch it using `charmcraft`. **Note
-that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.**
-
-```shell
-cd some-charm
-charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch
-cat << EOF >> requirements.txt
-lightkube
-lightkube-models
-EOF
-```
-
-Then, to initialise the library:
-
-For `ClusterIP` services:
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
-    def __init__(self, *args):
-        # ...
-        port = ServicePort(443, name=f"{self.app.name}")
-        self.service_patcher = KubernetesServicePatch(self, [port])
-        # ...
-```
-
-For `LoadBalancer`/`NodePort` services:
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
-    def __init__(self, *args):
-        # ...
-        port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666)
-        self.service_patcher = KubernetesServicePatch(
-            self, [port], "LoadBalancer"
-        )
-        # ...
-```
-
-Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"`.
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
-    def __init__(self, *args):
-        # ...
-        tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP")
-        udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP")
-        sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP")
-        self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp])
-        # ...
-```
-
-Bind to custom events by providing the `refresh_event` argument. For example, you may want a
-configurable port in your charm and to re-apply the service patch every time the charm
-configuration changes.
-
-```python
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
-    def __init__(self, *args):
-        # ...
-        port = ServicePort(int(self.config["charm-config-port"]), name=f"{self.app.name}")
-        self.service_patcher = KubernetesServicePatch(
-            self,
-            [port],
-            refresh_event=self.on.config_changed
-        )
-        # ...
-```
-
-To create a new Kubernetes LoadBalancer service instead of patching the one created by Juju,
-pass `service_type="LoadBalancer"`. The service name is optional; if not provided, it defaults
-to {app_name}-lb. If provided and equal to the app name, it also defaults to {app_name}-lb to
-prevent conflicts with the default service created by Juju.
-
-```python
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
-    def __init__(self, *args):
-        # ...
-        port = ServicePort(int(self.config["charm-config-port"]), name=f"{self.app.name}")
-        self.service_patcher = KubernetesServicePatch(
-            self,
-            [port],
-            service_type="LoadBalancer",
-            service_name="application-lb"
-        )
-        # ...
-```
-
-Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library
-does not try to make any API calls or open any files during testing that are unlikely to be
-present and could break your tests. The easiest way to do this is during your test `setUp`:
-
-```python
-# ...
-
-@patch("charm.KubernetesServicePatch", lambda x, y: None)
-def setUp(self, *unused):
-    self.harness = Harness(SomeCharm)
-    # ...
-```
-"""
-
-import logging
-from types import MethodType
-from typing import Any, List, Literal, Optional, Union
-
-from lightkube import ApiError, Client  # pyright: ignore
-from lightkube.core import exceptions
-from lightkube.models.core_v1 import ServicePort, ServiceSpec
-from lightkube.models.meta_v1 import ObjectMeta
-from lightkube.resources.core_v1 import Service
-from lightkube.types import PatchType
-from ops import UpgradeCharmEvent
-from ops.charm import CharmBase
-from ops.framework import BoundEvent, Object
-
-logger = logging.getLogger(__name__)
-
-# The unique Charmhub library identifier, never change it
-LIBID = "0042f86d0a874435adef581806cddbbb"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 1
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 12
-
-ServiceType = Literal["ClusterIP", "LoadBalancer"]
-
-
-class KubernetesServicePatch(Object):
-    """A utility for patching the Kubernetes service set up by Juju."""
-
-    def __init__(
-        self,
-        charm: CharmBase,
-        ports: List[ServicePort],
-        service_name: Optional[str] = None,
-        service_type: ServiceType = "ClusterIP",
-        additional_labels: Optional[dict] = None,
-        additional_selectors: Optional[dict] = None,
-        additional_annotations: Optional[dict] = None,
-        *,
-        refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None,
-    ):
-        """Constructor for KubernetesServicePatch.
-
-        Args:
-            charm: the charm that is instantiating the library.
-            ports: a list of ServicePorts.
-            service_name: allows setting a custom name for the patched service. If none is
-                given, the application name will be used.
-            service_type: desired type of K8s service. Default value is in line with ServiceSpec's
-                default value.
-            additional_labels: labels to be added to the Kubernetes service (by default only
-                "app.kubernetes.io/name" is set to the service name).
-            additional_selectors: selectors to be added to the Kubernetes service (by default only
-                "app.kubernetes.io/name" is set to the service name).
-            additional_annotations: annotations to be added to the Kubernetes service.
-            refresh_event: an optional bound event or list of bound events which
-                will be observed to re-apply the patch (e.g. on port change).
-                The `install` and `upgrade-charm` events are observed regardless.
-        """
-        super().__init__(charm, "kubernetes-service-patch")
-        self.charm = charm
-        self.service_name = service_name or self._app
-        # To avoid conflicts with the default Juju service, append "-lb" to the service name.
-        # The Juju application name is retained for the default service created by Juju.
-        if self.service_name == self._app and service_type == "LoadBalancer":
-            self.service_name = f"{self._app}-lb"
-        self.service_type = service_type
-        self.service = self._service_object(
-            ports,
-            self.service_name,
-            service_type,
-            additional_labels,
-            additional_selectors,
-            additional_annotations,
-        )
-
-        # Make mypy type checking happy that self._patch is a method
-        assert isinstance(self._patch, MethodType)
-        # Ensure this patch is applied during the 'install' and 'upgrade-charm' events
-        self.framework.observe(charm.on.install, self._patch)
-        self.framework.observe(charm.on.upgrade_charm, self._on_upgrade_charm)
-        self.framework.observe(charm.on.update_status, self._patch)
-        # Sometimes Juju doesn't clean up a manually created LB service,
-        # so we clean it up ourselves just in case.
-        self.framework.observe(charm.on.remove, self._remove_service)
-
-        # Apply user-defined events
-        if refresh_event:
-            if not isinstance(refresh_event, list):
-                refresh_event = [refresh_event]
-
-            for evt in refresh_event:
-                self.framework.observe(evt, self._patch)
-
-    def _service_object(
-        self,
-        ports: List[ServicePort],
-        service_name: Optional[str] = None,
-        service_type: ServiceType = "ClusterIP",
-        additional_labels: Optional[dict] = None,
-        additional_selectors: Optional[dict] = None,
-        additional_annotations: Optional[dict] = None,
-    ) -> Service:
-        """Creates a valid Service representation.
-
-        Args:
-            ports: a list of ServicePorts.
-            service_name: allows setting a custom name for the patched service. If none is
-                given, the application name will be used.
-            service_type: desired type of K8s service. Default value is in line with ServiceSpec's
-                default value.
-            additional_labels: labels to be added to the Kubernetes service (by default only
-                "app.kubernetes.io/name" is set to the service name).
-            additional_selectors: selectors to be added to the Kubernetes service (by default only
-                "app.kubernetes.io/name" is set to the service name).
-            additional_annotations: annotations to be added to the Kubernetes service.
-
-        Returns:
-            Service: A valid representation of a Kubernetes Service with the correct ports.
-        """
-        if not service_name:
-            service_name = self._app
-        labels = {"app.kubernetes.io/name": self._app}
-        if additional_labels:
-            labels.update(additional_labels)
-        selector = {"app.kubernetes.io/name": self._app}
-        if additional_selectors:
-            selector.update(additional_selectors)
-        return Service(
-            apiVersion="v1",
-            kind="Service",
-            metadata=ObjectMeta(
-                namespace=self._namespace,
-                name=service_name,
-                labels=labels,
-                annotations=additional_annotations,  # type: ignore[arg-type]
-            ),
-            spec=ServiceSpec(
-                selector=selector,
-                ports=ports,
-                type=service_type,
-            ),
-        )
-
-    def _patch(self, _) -> None:
-        """Patch the Kubernetes service created by Juju to map the correct port.
-
-        Raises:
-            PatchFailed: if patching fails due to lack of permissions, or otherwise.
-        """
-        try:
-            client = Client()  # pyright: ignore
-        except exceptions.ConfigError as e:
-            logger.warning("Error creating k8s client: %s", e)
-            return
-
-        try:
-            if self._is_patched(client):
-                return
-            if self.service_name != self._app:
-                if self.service_type != "LoadBalancer":
-                    self._delete_and_create_service(client)
-                else:
-                    self._create_lb_service(client)
-            client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE)
-        except ApiError as e:
-            if e.status.code == 403:
-                logger.error("Kubernetes service patch failed: `juju trust` this application.")
-            else:
-                logger.error("Kubernetes service patch failed: %s", str(e))
-        else:
-            logger.info("Kubernetes service '%s' patched successfully", self._app)
-
-    def _delete_and_create_service(self, client: Client):
-        service = client.get(Service, self._app, namespace=self._namespace)
-        service.metadata.name = self.service_name  # type: ignore[attr-defined]
-        service.metadata.resourceVersion = service.metadata.uid = None  # type: ignore[attr-defined]  # noqa: E501
-        client.delete(Service, self._app, namespace=self._namespace)
-        client.create(service)
-
-    def _create_lb_service(self, client: Client):
-        try:
-            client.get(Service, self.service_name, namespace=self._namespace)
-        except ApiError:
-            client.create(self.service)
-
-    def is_patched(self) -> bool:
-        """Reports if the service patch has been applied.
-
-        Returns:
-            bool: A boolean indicating if the service patch has been applied.
-        """
-        client = Client()  # pyright: ignore
-        return self._is_patched(client)
-
-    def _is_patched(self, client: Client) -> bool:
-        # Get the relevant service from the cluster
-        try:
-            service = client.get(Service, name=self.service_name, namespace=self._namespace)
-        except ApiError as e:
-            if e.status.code == 404 and self.service_name != self._app:
-                return False
-            logger.error("Kubernetes service get failed: %s", str(e))
-            raise
-
-        # Construct a list of expected ports, should the patch be applied
-        expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]  # type: ignore[attr-defined]
-        # Construct a list in the same manner, using the fetched service
-        fetched_ports = [
-            (p.port, p.targetPort) for p in service.spec.ports  # type: ignore[attr-defined]
-        ]  # noqa: E501
-        return expected_ports == fetched_ports
-
-    def _on_upgrade_charm(self, event: UpgradeCharmEvent):
-        """Handle the upgrade charm event."""
-        # If a charm author changed the service type from LB to ClusterIP across an upgrade,
-        # we need to delete the previous LB.
-        if self.service_type == "ClusterIP":
-
-            client = Client()  # pyright: ignore
-
-            # Define a label selector to find services related to the app
-            selector: dict[str, Any] = {"app.kubernetes.io/name": self._app}
-
-            # Check if any service of type LoadBalancer exists
-            services = client.list(Service, namespace=self._namespace, labels=selector)
-            for service in services:
-                if (
-                    not service.metadata
-                    or not service.metadata.name
-                    or not service.spec
-                    or not service.spec.type
-                ):
-                    logger.warning(
-                        "Service patch: skipping resource with incomplete metadata: %s.", service
-                    )
-                    continue
-                if service.spec.type == "LoadBalancer":
-                    client.delete(Service, service.metadata.name, namespace=self._namespace)
-                    logger.info(f"LoadBalancer service {service.metadata.name} deleted.")
-
-        # Continue the upgrade flow normally
-        self._patch(event)
-
-    def _remove_service(self, _):
-        """Remove a Kubernetes service associated with this charm.
-
-        Specifically designed to delete the load balancer service created by the charm, since
-        Juju only deletes the default ClusterIP service and not custom services.
-
-        Returns:
-            None
-
-        Raises:
-            ApiError: for deletion errors, excluding when the service is not found (404 Not Found).
-        """
-        client = Client()  # pyright: ignore
-
-        try:
-            client.delete(Service, self.service_name, namespace=self._namespace)
-            logger.info("The patched k8s service '%s' was deleted.", self.service_name)
-        except ApiError as e:
-            if e.status.code == 404:
-                # Service not found, so no action needed
-                return
-            # Re-raise for other statuses
-            raise
-
-    @property
-    def _app(self) -> str:
-        """Name of the current Juju application.
-
-        Returns:
-            str: A string containing the name of the current Juju application.
-        """
-        return self.charm.app.name
-
-    @property
-    def _namespace(self) -> str:
-        """The Kubernetes namespace we're running in.
-
-        Returns:
-            str: A string containing the name of the current Kubernetes namespace.
-        """
-        with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
-            return f.read().strip()
diff --git a/src/charm.py b/src/charm.py
index 34c62fd57e..ba3a89e529 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -34,7 +34,6 @@
 from charms.data_platform_libs.v0.data_models import TypedCharmBase
 from charms.grafana_k8s.v0.grafana_dashboard import GrafanaDashboardProvider
 from charms.loki_k8s.v1.loki_push_api import LogProxyConsumer
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
 from charms.postgresql_k8s.v0.postgresql import (
     REQUIRED_PLUGINS,
     PostgreSQL,
@@ -242,9 +241,7 @@ def __init__(self, *args):
             relation_name="logging",
         )
 
-        postgresql_db_port = ServicePort(5432, name="database")
-        patroni_api_port = ServicePort(8008, name="api")
-        self.service_patcher = KubernetesServicePatch(self, [postgresql_db_port, patroni_api_port])
+        self.unit.set_ports(5432, 8008)
         self.tracing = TracingEndpointRequirer(
             self, relation_name=TRACING_RELATION_NAME, protocols=[TRACING_PROTOCOL]
         )
diff --git a/tests/unit/test_async_replication.py b/tests/unit/test_async_replication.py
index b8bdde9e42..46ff0fca4d 100644
--- a/tests/unit/test_async_replication.py
+++ b/tests/unit/test_async_replication.py
@@ -19,29 +19,27 @@
 
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        harness = Harness(PostgresqlOperatorCharm)
+    harness = Harness(PostgresqlOperatorCharm)
 
-        # Set up the initial relation and hooks.
-        harness.set_leader(True)
-        harness.begin()
+    # Set up the initial relation and hooks.
+    harness.set_leader(True)
+    harness.begin()
 
-        yield harness
-        harness.cleanup()
+    yield harness
+    harness.cleanup()
 
 
 @pytest.fixture(autouse=True)
 def standby():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        harness = Harness(PostgresqlOperatorCharm)
-        harness.set_model_name("standby")
+    harness = Harness(PostgresqlOperatorCharm)
+    harness.set_model_name("standby")
 
-        # Set up the initial relation and hooks.
-        harness.set_leader(True)
-        harness.begin()
+    # Set up the initial relation and hooks.
+    harness.set_leader(True)
+    harness.begin()
 
-        yield harness
-        harness.cleanup()
+    yield harness
+    harness.cleanup()
 
 
 @pytest.mark.parametrize("relation_name", RELATION_NAMES)
diff --git a/tests/unit/test_backups.py b/tests/unit/test_backups.py
index 484f2855ba..1acbeb4882 100644
--- a/tests/unit/test_backups.py
+++ b/tests/unit/test_backups.py
@@ -26,19 +26,18 @@
 
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        # Mock the generic sync client to avoid searching for ~/.kube/config.
-        patcher = patch("lightkube.core.client.GenericSyncClient")
-        patcher.start()
-
-        harness = Harness(PostgresqlOperatorCharm)
-
-        # Set up the initial relation and hooks.
-        peer_rel_id = harness.add_relation(PEER, "postgresql-k8s")
-        harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0")
-        harness.begin()
-        yield harness
-        harness.cleanup()
+    # Mock the generic sync client to avoid searching for ~/.kube/config.
+    patcher = patch("lightkube.core.client.GenericSyncClient")
+    patcher.start()
+
+    harness = Harness(PostgresqlOperatorCharm)
+
+    # Set up the initial relation and hooks.
+    peer_rel_id = harness.add_relation(PEER, "postgresql-k8s")
+    harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0")
+    harness.begin()
+    yield harness
+    harness.cleanup()
 
 
 def test_stanza_name(harness):
diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py
index d5c1cf16e1..d3265e5e74 100644
--- a/tests/unit/test_charm.py
+++ b/tests/unit/test_charm.py
@@ -41,15 +41,14 @@
 
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        harness = Harness(PostgresqlOperatorCharm)
-        harness.handle_exec("postgresql", ["locale", "-a"], result="C")
-
-        harness.add_relation(PEER, "postgresql-k8s")
-        harness.begin()
-        harness.add_relation("restart", harness.charm.app.name)
-        yield harness
-        harness.cleanup()
+    harness = Harness(PostgresqlOperatorCharm)
+    harness.handle_exec("postgresql", ["locale", "-a"], result="C")
+
+    harness.add_relation(PEER, "postgresql-k8s")
+    harness.begin()
+    harness.add_relation("restart", harness.charm.app.name)
+    yield harness
+    harness.cleanup()
 
 
 def test_on_leader_elected(harness):
diff --git a/tests/unit/test_db.py b/tests/unit/test_db.py
index ddcbec8390..11795397d6 100644
--- a/tests/unit/test_db.py
+++ b/tests/unit/test_db.py
@@ -24,26 +24,25 @@
 
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        harness = Harness(PostgresqlOperatorCharm)
-
-        # Set up the initial relation and hooks.
-        harness.set_leader(True)
-        harness.begin()
-
-        # Define some relations.
-        rel_id = harness.add_relation(RELATION_NAME, "application")
-        harness.add_relation_unit(rel_id, "application/0")
-        peer_rel_id = harness.add_relation(PEER, harness.charm.app.name)
-        harness.add_relation_unit(peer_rel_id, f"{harness.charm.app.name}/1")
-        harness.add_relation_unit(peer_rel_id, harness.charm.unit.name)
-        harness.update_relation_data(
-            peer_rel_id,
-            harness.charm.app.name,
-            {"cluster_initialised": "True"},
-        )
-        yield harness
-        harness.cleanup()
+    harness = Harness(PostgresqlOperatorCharm)
+
+    # Set up the initial relation and hooks.
+    harness.set_leader(True)
+    harness.begin()
+
+    # Define some relations.
+    rel_id = harness.add_relation(RELATION_NAME, "application")
+    harness.add_relation_unit(rel_id, "application/0")
+    peer_rel_id = harness.add_relation(PEER, harness.charm.app.name)
+    harness.add_relation_unit(peer_rel_id, f"{harness.charm.app.name}/1")
+    harness.add_relation_unit(peer_rel_id, harness.charm.unit.name)
+    harness.update_relation_data(
+        peer_rel_id,
+        harness.charm.app.name,
+        {"cluster_initialised": "True"},
+    )
+    yield harness
+    harness.cleanup()
 
 
 def clear_relation_data(_harness):
@@ -132,57 +131,56 @@ def test_on_relation_changed(harness):
 
 
 def test_get_extensions(harness):
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        # Test when there are no extensions in the relation databags.
-        rel_id = harness.model.get_relation(RELATION_NAME).id
-        relation = harness.model.get_relation(RELATION_NAME, rel_id)
-        assert harness.charm.legacy_db_relation._get_extensions(relation) == ([], set())
-
-        # Test when there are extensions in the application relation databag.
-        extensions = ["", "citext:public", "debversion"]
-        with harness.hooks_disabled():
-            harness.update_relation_data(
-                rel_id,
-                "application",
-                {"extensions": ",".join(extensions)},
-            )
-        assert harness.charm.legacy_db_relation._get_extensions(relation) == (
-            [extensions[1], extensions[2]],
-            {extensions[1].split(":")[0], extensions[2]},
-        )
-
-        # Test when there are extensions in the unit relation databag.
-        with harness.hooks_disabled():
-            harness.update_relation_data(
-                rel_id,
-                "application",
-                {"extensions": ""},
-            )
-            harness.update_relation_data(
-                rel_id,
-                "application/0",
-                {"extensions": ",".join(extensions)},
-            )
-        assert harness.charm.legacy_db_relation._get_extensions(relation) == (
-            [extensions[1], extensions[2]],
-            {extensions[1].split(":")[0], extensions[2]},
-        )
-
-        # Test when one of the plugins/extensions is enabled.
-        config = """options:
-  plugin_citext_enable:
-    default: true
-    type: boolean
-  plugin_debversion_enable:
-    default: false
-    type: boolean"""
-        harness = Harness(PostgresqlOperatorCharm, config=config)
-        harness.cleanup()
-        harness.begin()
-        assert harness.charm.legacy_db_relation._get_extensions(relation) == (
-            [extensions[1], extensions[2]],
-            {extensions[2]},
-        )
+    # Test when there are no extensions in the relation databags.
+    rel_id = harness.model.get_relation(RELATION_NAME).id
+    relation = harness.model.get_relation(RELATION_NAME, rel_id)
+    assert harness.charm.legacy_db_relation._get_extensions(relation) == ([], set())
+
+    # Test when there are extensions in the application relation databag.
+    extensions = ["", "citext:public", "debversion"]
+    with harness.hooks_disabled():
+        harness.update_relation_data(
+            rel_id,
+            "application",
+            {"extensions": ",".join(extensions)},
+        )
+    assert harness.charm.legacy_db_relation._get_extensions(relation) == (
+        [extensions[1], extensions[2]],
+        {extensions[1].split(":")[0], extensions[2]},
+    )
+
+    # Test when there are extensions in the unit relation databag.
+    with harness.hooks_disabled():
+        harness.update_relation_data(
+            rel_id,
+            "application",
+            {"extensions": ""},
+        )
+        harness.update_relation_data(
+            rel_id,
+            "application/0",
+            {"extensions": ",".join(extensions)},
+        )
+    assert harness.charm.legacy_db_relation._get_extensions(relation) == (
+        [extensions[1], extensions[2]],
+        {extensions[1].split(":")[0], extensions[2]},
+    )
+
+    # Test when one of the plugins/extensions is enabled.
+    config = """options:
+  plugin_citext_enable:
+    default: true
+    type: boolean
+  plugin_debversion_enable:
+    default: false
+    type: boolean"""
+    harness = Harness(PostgresqlOperatorCharm, config=config)
+    harness.cleanup()
+    harness.begin()
+    assert harness.charm.legacy_db_relation._get_extensions(relation) == (
+        [extensions[1], extensions[2]],
+        {extensions[2]},
+    )
 
 
 def test_set_up_relation(harness):
diff --git a/tests/unit/test_patroni.py b/tests/unit/test_patroni.py
index 211b84fafb..8c7f534ea2 100644
--- a/tests/unit/test_patroni.py
+++ b/tests/unit/test_patroni.py
@@ -19,34 +19,32 @@
 
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        harness = Harness(PostgresqlOperatorCharm)
-        harness.begin()
-        yield harness
-        harness.cleanup()
+    harness = Harness(PostgresqlOperatorCharm)
+    harness.begin()
+    yield harness
+    harness.cleanup()
 
 
 @pytest.fixture(autouse=True)
 def patroni(harness):
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        # Set up the Patroni wrapper.
-        patroni = Patroni(
-            harness.charm,
-            "postgresql-k8s-0",
-            ["postgresql-k8s-0", "postgresql-k8s-1", "postgresql-k8s-2"],
-            "postgresql-k8s-primary.dev.svc.cluster.local",
-            "test-model",
-            STORAGE_PATH,
-            "superuser-password",
-            "replication-password",
-            "rewind-password",
-            False,
-            "patroni-password",
-        )
-        root = harness.get_filesystem_root("postgresql")
-        (root / "var" / "log" / "postgresql").mkdir(parents=True, exist_ok=True)
+    # Set up the Patroni wrapper.
+    patroni = Patroni(
+        harness.charm,
+        "postgresql-k8s-0",
+        ["postgresql-k8s-0", "postgresql-k8s-1", "postgresql-k8s-2"],
+        "postgresql-k8s-primary.dev.svc.cluster.local",
+        "test-model",
+        STORAGE_PATH,
+        "superuser-password",
+        "replication-password",
+        "rewind-password",
+        False,
+        "patroni-password",
+    )
+    root = harness.get_filesystem_root("postgresql")
+    (root / "var" / "log" / "postgresql").mkdir(parents=True, exist_ok=True)
 
-        yield patroni
+    yield patroni
 
 
 # This method will be used by the mock to replace requests.get
diff --git a/tests/unit/test_postgresql.py b/tests/unit/test_postgresql.py
index d08c60b6cb..c9457a2bf6 100644
--- a/tests/unit/test_postgresql.py
+++ b/tests/unit/test_postgresql.py
@@ -17,15 +17,14 @@
 
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        harness = Harness(PostgresqlOperatorCharm)
+    harness = Harness(PostgresqlOperatorCharm)
 
-        # Set up the initial relation and hooks.
-        peer_rel_id = harness.add_relation(PEER, "postgresql-k8s")
-        harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0")
-        harness.begin()
-        yield harness
-        harness.cleanup()
+    # Set up the initial relation and hooks.
+    peer_rel_id = harness.add_relation(PEER, "postgresql-k8s")
+    harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0")
+    harness.begin()
+    yield harness
+    harness.cleanup()
 
 
 def test_create_database(harness):
diff --git a/tests/unit/test_postgresql_provider.py b/tests/unit/test_postgresql_provider.py
index e56b392387..314eb39f9c 100644
--- a/tests/unit/test_postgresql_provider.py
+++ b/tests/unit/test_postgresql_provider.py
@@ -25,25 +25,24 @@
 
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        harness = Harness(PostgresqlOperatorCharm)
+    harness = Harness(PostgresqlOperatorCharm)
 
-        # Set up the initial relation and hooks.
-        harness.set_leader(True)
-        harness.begin()
+    # Set up the initial relation and hooks.
+    harness.set_leader(True)
+    harness.begin()
 
-        # Define some relations.
-        rel_id = harness.add_relation(RELATION_NAME, "application")
-        harness.add_relation_unit(rel_id, "application/0")
-        peer_rel_id = harness.add_relation(PEER, harness.charm.app.name)
-        harness.add_relation_unit(peer_rel_id, harness.charm.unit.name)
-        harness.update_relation_data(
-            peer_rel_id,
-            harness.charm.app.name,
-            {"cluster_initialised": "True"},
-        )
-        yield harness
-        harness.cleanup()
+    # Define some relations.
+    rel_id = harness.add_relation(RELATION_NAME, "application")
+    harness.add_relation_unit(rel_id, "application/0")
+    peer_rel_id = harness.add_relation(PEER, harness.charm.app.name)
+    harness.add_relation_unit(peer_rel_id, harness.charm.unit.name)
+    harness.update_relation_data(
+        peer_rel_id,
+        harness.charm.app.name,
+        {"cluster_initialised": "True"},
+    )
+    yield harness
+    harness.cleanup()
 
 
 def request_database(_harness):
diff --git a/tests/unit/test_postgresql_tls.py b/tests/unit/test_postgresql_tls.py
index 3d407db395..f04f53cd19 100644
--- a/tests/unit/test_postgresql_tls.py
+++ b/tests/unit/test_postgresql_tls.py
@@ -17,15 +17,14 @@
 
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        harness = Harness(PostgresqlOperatorCharm)
+    harness = Harness(PostgresqlOperatorCharm)
 
-        # Set up the initial relation and hooks.
-        peer_rel_id = harness.add_relation(PEER, "postgresql-k8s")
-        harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0")
-        harness.begin()
-        yield harness
-        harness.cleanup()
+    # Set up the initial relation and hooks.
+    peer_rel_id = harness.add_relation(PEER, "postgresql-k8s")
+    harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0")
+    harness.begin()
+    yield harness
+    harness.cleanup()
 
 
 def delete_secrets(_harness):
diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py
index 8035a6dd39..33623048fe 100644
--- a/tests/unit/test_upgrade.py
+++ b/tests/unit/test_upgrade.py
@@ -20,23 +20,20 @@
 
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        """Set up the test."""
-        patcher = patch("lightkube.core.client.GenericSyncClient")
-        patcher.start()
-        harness = Harness(PostgresqlOperatorCharm)
-        harness.begin()
-        upgrade_relation_id = harness.add_relation("upgrade", "postgresql-k8s")
-        peer_relation_id = harness.add_relation("database-peers", "postgresql-k8s")
-        for rel_id in (upgrade_relation_id, peer_relation_id):
-            harness.add_relation_unit(rel_id, "postgresql-k8s/1")
-        harness.add_relation("restart", harness.charm.app.name)
-        with harness.hooks_disabled():
-            harness.update_relation_data(
-                upgrade_relation_id, "postgresql-k8s/1", {"state": "idle"}
-            )
-        yield harness
-        harness.cleanup()
+    """Set up the test."""
+    patcher = patch("lightkube.core.client.GenericSyncClient")
+    patcher.start()
+    harness = Harness(PostgresqlOperatorCharm)
+    harness.begin()
+    upgrade_relation_id = harness.add_relation("upgrade", "postgresql-k8s")
+    peer_relation_id = harness.add_relation("database-peers", "postgresql-k8s")
+    for rel_id in (upgrade_relation_id, peer_relation_id):
+        harness.add_relation_unit(rel_id, "postgresql-k8s/1")
+    harness.add_relation("restart", harness.charm.app.name)
+    with harness.hooks_disabled():
+        harness.update_relation_data(upgrade_relation_id, "postgresql-k8s/1", {"state": "idle"})
+    yield harness
+    harness.cleanup()
 
 
 def test_is_no_sync_member(harness):
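
Reviewer note on the replacement API: `ops` 2.7+ provides `Unit.set_ports()`, which declares the complete set of ports Juju should open on the unit; Juju opens the listed ports and closes any others it had previously opened. On a sufficiently recent Juju (3.1 or later), opened ports are reflected on the application's Kubernetes service, so the charm no longer needs lightkube access or `juju trust` to patch the service. A minimal sketch of the new pattern; the charm class name is illustrative, not from this repository:

```python
# Minimal sketch of the set_ports pattern (assumes ops >= 2.7);
# SomeCharm is an illustrative name, not the charm in this PR.
from ops.charm import CharmBase
from ops.main import main


class SomeCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        # Declare every port the workload exposes; plain integers imply TCP.
        # Juju opens these and closes any previously opened ports that are
        # no longer listed, so the call is safely idempotent.
        self.unit.set_ports(5432, 8008)


if __name__ == "__main__":
    main(SomeCharm)
```

Because `__init__` runs on every event dispatch, the declaration is re-applied automatically, removing the old need to observe `install`/`upgrade-charm` just to re-apply a service patch.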
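
With the `KubernetesServicePatch` mock gone from the fixtures, a test can instead assert the declared ports directly. A hedged sketch (the test name is hypothetical, and it assumes an ops release whose `Unit.opened_ports()` is backed by the testing `Harness`), reusing the `harness` fixture from `tests/unit/test_charm.py`:

```python
# Hypothetical test: verify the ports declared via set_ports() in the
# charm's __init__ are visible once harness.begin() has run.
def test_ports_are_declared(harness):
    opened = {(port.protocol, port.port) for port in harness.charm.unit.opened_ports()}
    assert opened == {("tcp", 5432), ("tcp", 8008)}
```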