From f4fd0e35ac7ee369c9c40213c9cf820b8a2ebe1d Mon Sep 17 00:00:00 2001
From: Adam Dyess
Date: Wed, 22 May 2024 16:33:45 -0500
Subject: [PATCH] Copy control-plane taint from bootstrap config (#425)

* Copy control-plane taint from bootstrap config

* Add tests for ControlPlaneTaints

* Empty control plane taints slices can be ignored
---
 .../pkg/k8sd/types/cluster_config_convert.go  |  3 ++
 .../k8sd/types/cluster_config_convert_test.go | 17 ++++++++
 .../tests/test_control_plane_taints.py        | 42 +++++++++++++++++++
 tests/integration/tests/test_util/util.py     | 23 +++++++---
 4 files changed, 79 insertions(+), 6 deletions(-)
 create mode 100644 tests/integration/tests/test_control_plane_taints.py

diff --git a/src/k8s/pkg/k8sd/types/cluster_config_convert.go b/src/k8s/pkg/k8sd/types/cluster_config_convert.go
index ad99f428f..8202228f7 100644
--- a/src/k8s/pkg/k8sd/types/cluster_config_convert.go
+++ b/src/k8s/pkg/k8sd/types/cluster_config_convert.go
@@ -65,6 +65,9 @@ func ClusterConfigFromBootstrapConfig(b apiv1.BootstrapConfig) (ClusterConfig, e
 
 	// Kubelet
 	config.Kubelet.CloudProvider = b.ClusterConfig.CloudProvider
+	if len(b.ControlPlaneTaints) != 0 {
+		config.Kubelet.ControlPlaneTaints = utils.Pointer(b.ControlPlaneTaints)
+	}
 
 	return config, nil
 }
diff --git a/src/k8s/pkg/k8sd/types/cluster_config_convert_test.go b/src/k8s/pkg/k8sd/types/cluster_config_convert_test.go
index 4ac7d1cf3..4d6abbe18 100644
--- a/src/k8s/pkg/k8sd/types/cluster_config_convert_test.go
+++ b/src/k8s/pkg/k8sd/types/cluster_config_convert_test.go
@@ -159,6 +159,23 @@ func TestClusterConfigFromBootstrapConfig(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "ControlPlaneTaints",
+			bootstrap: apiv1.BootstrapConfig{
+				ControlPlaneTaints: []string{"node-role.kubernetes.io/control-plane:NoSchedule"},
+			},
+			expectConfig: types.ClusterConfig{
+				APIServer: types.APIServer{
+					AuthorizationMode: utils.Pointer("Node,RBAC"),
+				},
+				Datastore: types.Datastore{
+					Type: utils.Pointer("k8s-dqlite"),
+				},
+				Kubelet: types.Kubelet{
+					ControlPlaneTaints: utils.Pointer([]string{"node-role.kubernetes.io/control-plane:NoSchedule"}),
+				},
+			},
+		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			g := NewWithT(t)
diff --git a/tests/integration/tests/test_control_plane_taints.py b/tests/integration/tests/test_control_plane_taints.py
new file mode 100644
index 000000000..38d3caf02
--- /dev/null
+++ b/tests/integration/tests/test_control_plane_taints.py
@@ -0,0 +1,42 @@
+#
+# Copyright 2024 Canonical, Ltd.
+#
+import logging
+import time
+from typing import List
+
+import pytest
+import yaml
+from test_util import harness, util
+
+LOG = logging.getLogger(__name__)
+
+
+@pytest.mark.node_count(1)
+@pytest.mark.disable_k8s_bootstrapping()
+def test_control_plane_taints(instances: List[harness.Instance]):
+    k8s_instance = instances[0]
+
+    bootstrap_conf = yaml.safe_dump(
+        {"control-plane-taints": ["node-role.kubernetes.io/control-plane:NoSchedule"]}
+    )
+
+    k8s_instance.exec(
+        ["dd", "of=/root/config.yaml"],
+        input=str.encode(bootstrap_conf),
+    )
+
+    k8s_instance.exec(["k8s", "bootstrap", "--file", "/root/config.yaml"])
+    retries = 10
+    while retries and not (nodes := util.get_nodes(k8s_instance)):
+        LOG.info("Waiting for Nodes")
+        time.sleep(3)
+        retries -= 1
+    assert len(nodes) == 1, "Should have found one node in 30 sec"
+    assert any(
+        [
+            t["effect"] == "NoSchedule"
+            for t in nodes[0]["spec"]["taints"]
+            if t["key"] == "node-role.kubernetes.io/control-plane"
+        ]
+    )
diff --git a/tests/integration/tests/test_util/util.py b/tests/integration/tests/test_util/util.py
index 62cfe1f90..c11fdb3e4 100644
--- a/tests/integration/tests/test_util/util.py
+++ b/tests/integration/tests/test_util/util.py
@@ -229,8 +229,8 @@ def get_local_node_status(instance: harness.Instance) -> str:
     return resp.stdout.decode().strip()
 
 
-def ready_nodes(control_node: harness.Instance) -> List[Any]:
-    """Get a list of the ready nodes.
+def get_nodes(control_node: harness.Instance) -> List[Any]:
+    """Get a list of existing nodes.
 
     Args:
         control_node: instance on which to execute check
@@ -239,21 +239,32 @@
         list of nodes
     """
     result = control_node.exec(
-        "k8s kubectl get nodes -o json".split(" "), capture_output=True
+        ["k8s", "kubectl", "get", "nodes", "-o", "json"], capture_output=True
     )
     assert result.returncode == 0, "Failed to get nodes with kubectl"
     node_list = json.loads(result.stdout.decode())
     assert node_list["kind"] == "List", "Should have found a list of nodes"
-    nodes = [
+    return [node for node in node_list["items"]]
+
+
+def ready_nodes(control_node: harness.Instance) -> List[Any]:
+    """Get a list of the ready nodes.
+
+    Args:
+        control_node: instance on which to execute check
+
+    Returns:
+        list of nodes
+    """
+    return [
         node
-        for node in node_list["items"]
+        for node in get_nodes(control_node)
         if all(
             condition["status"] == "False"
             for condition in node["status"]["conditions"]
             if condition["type"] != "Ready"
        )
     ]
-    return nodes
 
 
 # Create a token to join a node to an existing cluster