requests patch
michaeldmitry committed Aug 26, 2024
1 parent 7a29e97 commit 2283817
Showing 6 changed files with 124 additions and 6 deletions.
15 changes: 13 additions & 2 deletions charmcraft.yaml
@@ -141,8 +141,19 @@ config:
        Note that for a tempo deployment as a whole to be consistent, each role
        (except the optional 'metrics-generator') needs to be assigned to at least one worker node. If this condition
        is not met, the coordinator charm will set blocked status and the deployment will shut down.
    cpu:
      description: |
        K8s cpu resource limit, e.g. "1" or "500m". Default is unset (no limit). This value is used
        for the "limits" portion of the resource requirements.
        See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
      type: string
    memory:
      description: |
        K8s memory resource limit, e.g. "1Gi". Default is unset (no limit). This value is used
        for the "limits" portion of the resource requirements.
        See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
      type: string

# build info

bases:
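The two new options feed only the "limits" half of a Kubernetes resource spec, so leaving either unset means no ceiling for that resource. A minimal sketch of the mapping, using the lightkube models this commit also adds as dependencies (illustrative only, not the charm's actual code; the helper name is made up):

from typing import Optional

from lightkube.models.core_v1 import ResourceRequirements

def limits_from_config(cpu: Optional[str], memory: Optional[str]) -> ResourceRequirements:
    # Hypothetical helper: build only the "limits" portion from the two
    # config options; the "requests" portion is set elsewhere by the charm.
    limits = {}
    if cpu:
        limits["cpu"] = cpu  # K8s quantity string, e.g. "1" or "500m"
    if memory:
        limits["memory"] = memory  # K8s quantity string, e.g. "1Gi"
    return ResourceRequirements(limits=limits or None)  # unset -> no limit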
7 changes: 5 additions & 2 deletions requirements.txt
@@ -1,8 +1,11 @@
ops >= 2.15
cosl>=0.0.20
cosl>=0.0.24

# Charm relation interfaces
pydantic>=2

# lib/charms/tempo_k8s/v1/charm_tracing.py
opentelemetry-exporter-otlp-proto-http==1.21.0

lightkube>=0.15.4
lightkube-models==1.24.1.4
2 changes: 2 additions & 0 deletions src/charm.py
@@ -60,6 +60,8 @@ def __init__(self, *args):
            name="tempo",
            pebble_layer=self.generate_worker_layer,
            endpoints={"cluster": "tempo-cluster"},  # type: ignore
            resources_requests={"cpu": "50m", "memory": "200Mi"},
            container_name=self._name,
        )
        self.framework.observe(self.on.collect_unit_status, self._on_collect_status)

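The hardcoded resources_requests give every worker pod a scheduling floor of 50m CPU and 200Mi of memory, while the new cpu/memory config options cap the limits. Assuming KubernetesComputeResourcesPatch merges the two into one container resource spec (a hypothetical sketch of the outcome, with example config values, not cosl's internals):

from lightkube.models.core_v1 import ResourceRequirements

# Requests come from the charm code; limits would come from juju config,
# e.g. `juju config <app> cpu=500m memory=1Gi`.
combined = ResourceRequirements(
    requests={"cpu": "50m", "memory": "200Mi"},
    limits={"cpu": "500m", "memory": "1Gi"},
)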
17 changes: 15 additions & 2 deletions tests/scenario/conftest.py
@@ -1,6 +1,8 @@
import pytest
from charm import TempoWorkerK8SOperatorCharm
from scenario import Context, ExecOutput
from ops import ActiveStatus
from unittest.mock import patch


@pytest.fixture(autouse=True)
@@ -11,8 +13,19 @@ def patch_all():


@pytest.fixture
def ctx():
    return Context(TempoWorkerK8SOperatorCharm)
def worker_charm():
    with patch.multiple(
        "cosl.coordinated_workers.worker.KubernetesComputeResourcesPatch",
        _namespace="test-namespace",
        _patch=lambda _: None,
        get_status=lambda _: ActiveStatus(""),
    ):
        yield TempoWorkerK8SOperatorCharm


@pytest.fixture
def ctx(worker_charm):
    return Context(charm_type=worker_charm)


TEMPO_VERSION_EXEC_OUTPUT = ExecOutput(stdout="1.31")
82 changes: 82 additions & 0 deletions tests/scenario/test_status.py
@@ -0,0 +1,82 @@
# Copyright 2022 Canonical Ltd.
# See LICENSE file for licensing details.
import json
from unittest.mock import patch, MagicMock

from cosl.coordinated_workers.interface import ClusterRequirer
from scenario import Container, ExecOutput, Relation, State

from tests.scenario.conftest import TEMPO_VERSION_EXEC_OUTPUT
from tests.scenario.helpers import set_role
from ops import BlockedStatus, WaitingStatus


@patch.object(ClusterRequirer, "get_worker_config", MagicMock(return_value={"config": "config"}))
@patch(
    "cosl.coordinated_workers.worker.KubernetesComputeResourcesPatch.get_status",
    MagicMock(return_value=BlockedStatus("`juju trust` this application")),
)
def test_patch_k8s_failed(ctx):
    tempo_container = Container(
        "tempo",
        can_connect=True,
        exec_mock={
            ("/bin/tempo", "-version"): TEMPO_VERSION_EXEC_OUTPUT,
            ("update-ca-certificates", "--fresh"): ExecOutput(),
        },
    )
    state_out = ctx.run(
        "config_changed",
        state=set_role(
            State(
                containers=[tempo_container],
                relations=[
                    Relation(
                        "tempo-cluster",
                        remote_app_data={
                            "tempo_config": json.dumps({"alive": "beef"}),
                        },
                    )
                ],
            ),
            "all",
        ),
    )

    assert state_out.unit_status == BlockedStatus("`juju trust` this application")


@patch.object(ClusterRequirer, "get_worker_config", MagicMock(return_value={"config": "config"}))
@patch(
    "cosl.coordinated_workers.worker.KubernetesComputeResourcesPatch.get_status",
    MagicMock(return_value=WaitingStatus("")),
)
def test_patch_k8s_waiting(ctx):
    tempo_container = Container(
        "tempo",
        can_connect=True,
        exec_mock={
            ("/bin/tempo", "-version"): TEMPO_VERSION_EXEC_OUTPUT,
            ("update-ca-certificates", "--fresh"): ExecOutput(),
        },
    )
    state_out = ctx.run(
        "config_changed",
        state=set_role(
            State(
                containers=[tempo_container],
                relations=[
                    Relation(
                        "tempo-cluster",
                        remote_app_data={
                            "tempo_config": json.dumps({"alive": "beef"}),
                        },
                    )
                ],
            ),
            "all",
        ),
    )

    assert state_out.unit_status == WaitingStatus("")
7 changes: 7 additions & 0 deletions tests/unit/test_charm.py
@@ -12,9 +12,16 @@

ops.testing.SIMULATE_CAN_CONNECT = True

k8s_resource_multipatch = patch.multiple(
    "cosl.coordinated_workers.worker.KubernetesComputeResourcesPatch",
    _namespace="test-namespace",
    _patch=lambda _: None,
)


@patch("cosl.coordinated_workers.worker.Worker.running_version", lambda *_: "1.2.3")
@patch("cosl.coordinated_workers.worker.Worker.restart", lambda *_: True)
@k8s_resource_multipatch
class TestCharm(unittest.TestCase):
    def setUp(self, *unused):
        self.harness = Harness(TempoWorkerK8SOperatorCharm)
