Skip to content

Commit

Permalink
Copy control-plane taint from bootstrap config
Browse files Browse the repository at this point in the history
  • Loading branch information
addyess committed May 16, 2024
1 parent 004575e commit c0dff91
Show file tree
Hide file tree
Showing 3 changed files with 57 additions and 7 deletions.
1 change: 1 addition & 0 deletions src/k8s/pkg/k8sd/types/cluster_config_convert.go
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ func ClusterConfigFromBootstrapConfig(b apiv1.BootstrapConfig) (ClusterConfig, e

// Kubelet
config.Kubelet.CloudProvider = b.ClusterConfig.CloudProvider
config.Kubelet.ControlPlaneTaints = &b.ControlPlaneTaints

return config, nil
}
Expand Down
38 changes: 38 additions & 0 deletions tests/integration/tests/test_control_plane_taints.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
#
# Copyright 2024 Canonical, Ltd.
#
import logging
import time
from typing import List

import pytest
import yaml
from test_util import harness, util

LOG = logging.getLogger(__name__)


@pytest.mark.node_count(1)
@pytest.mark.disable_k8s_bootstrapping()
def test_control_plane_taints(instances: List[harness.Instance]):
    """Bootstrap with a control-plane taint and verify it lands on the node.

    Args:
        instances: single-node harness cluster provided by the fixture
    """
    k8s_instance = instances[0]

    bootstrap_conf = yaml.safe_dump(
        {"control-plane-taints": ["node-role.kubernetes.io/control-plane:NoSchedule"]}
    )

    k8s_instance.exec(
        ["dd", "of=/root/config.yaml"],
        input=str.encode(bootstrap_conf),
    )

    k8s_instance.exec(["k8s", "bootstrap", "--file", "/root/config.yaml"])

    # Bound the wait so a broken bootstrap fails the test instead of
    # hanging the CI job forever.
    deadline = time.monotonic() + 5 * 60
    while not (nodes := util.get_nodes(k8s_instance)):
        assert time.monotonic() < deadline, "Timed out waiting for Nodes to register"
        LOG.info("Waiting for Nodes")
        time.sleep(5)

    # `all(...)` over a filtered comprehension is vacuously true when no
    # taint with the expected key exists; require at least one match.
    taints = nodes[0]["spec"].get("taints", [])
    matching = [
        t for t in taints if t["key"] == "node-role.kubernetes.io/control-plane"
    ]
    assert matching, "control-plane taint was not applied to the node"
    assert all(t["effect"] == "NoSchedule" for t in matching)
25 changes: 18 additions & 7 deletions tests/integration/tests/test_util/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -229,8 +229,8 @@ def get_local_node_status(instance: harness.Instance) -> str:
return resp.stdout.decode().strip()


def get_nodes(control_node: harness.Instance) -> List[Any]:
    """Get a list of existing nodes.

    Args:
        control_node: instance on which to execute check

    Returns:
        list of node objects as reported by ``kubectl get nodes -o json``

    Raises:
        AssertionError: if kubectl fails or does not return a node List
    """
    result = control_node.exec(
        ["k8s", "kubectl", "get", "nodes", "-o", "json"],
        capture_output=True,
    )
    assert result.returncode == 0, "Failed to get nodes with kubectl"
    node_list = json.loads(result.stdout.decode())
    assert node_list["kind"] == "List", "Should have found a list of nodes"
    # Return the items directly -- copying them into a new list adds nothing.
    return node_list["items"]


def ready_nodes(control_node: harness.Instance) -> List[Any]:
    """Get a list of the ready nodes.

    Args:
        control_node: instance on which to execute check

    Returns:
        list of nodes whose ``Ready`` condition is ``"True"`` and whose
        other conditions (pressure conditions etc.) are all ``"False"``
    """

    def _is_ready(node: Any) -> bool:
        # A node is ready only when its Ready condition is "True".  The
        # previous check skipped the Ready condition entirely, so a
        # NotReady node whose pressure conditions were all False was
        # incorrectly reported as ready.
        return all(
            condition["status"]
            == ("True" if condition["type"] == "Ready" else "False")
            for condition in node["status"]["conditions"]
        )

    return [node for node in get_nodes(control_node) if _is_ready(node)]


# Create a token to join a node to an existing cluster
Expand Down

0 comments on commit c0dff91

Please sign in to comment.