From 0197e12e82d877077bd2c04e1fb56e4278f60de9 Mon Sep 17 00:00:00 2001 From: Aylei Date: Tue, 3 Sep 2019 16:31:53 +0900 Subject: [PATCH] AWS terraform: add zone label and reserved resources for nodes Signed-off-by: Aylei --- deploy/modules/aws/tidb-cluster/local.tf | 34 +++++++++++++++++-- .../tidb-cluster/templates/userdata.sh.tpl | 2 +- deploy/modules/aws/tidb-cluster/variables.tf | 11 ++++++ 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/deploy/modules/aws/tidb-cluster/local.tf b/deploy/modules/aws/tidb-cluster/local.tf index 73c7cd16b2..5437f2d2be 100644 --- a/deploy/modules/aws/tidb-cluster/local.tf +++ b/deploy/modules/aws/tidb-cluster/local.tf @@ -35,6 +35,9 @@ locals { placement_group = "" # The name of the placement group into which to launch the instances, if any. } + # 169.254.169.254 is the authoritative AWS metadata server, see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html + aws_zone_getter = "$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)" + tidb_cluster_worker_groups = [ { name = "${var.cluster_name}-pd" @@ -42,7 +45,14 @@ locals { instance_type = var.pd_instance_type root_volume_size = "50" public_ip = false - kubelet_extra_args = "--register-with-taints=dedicated=${var.cluster_name}-pd:NoSchedule --node-labels=dedicated=${var.cluster_name}-pd,pingcap.com/aws-local-ssd=true" + # the space separator is safe when the extra args is empty or prefixed by spaces (the same hereafter) + kubelet_extra_args = join(" ", + [ + "--register-with-taints=dedicated=${var.cluster_name}-pd:NoSchedule", + "--node-labels=dedicated=${var.cluster_name}-pd,pingcap.com/aws-local-ssd=true,zone=${local.aws_zone_getter}", + lookup(var.group_kubelet_extra_args, "pd", var.kubelet_extra_args) + ] + ) asg_desired_capacity = var.pd_count asg_max_size = var.pd_count + 2 # additional_userdata = file("userdata.sh") @@ -53,7 +63,13 @@ locals { instance_type = var.tikv_instance_type root_volume_size = "50" 
public_ip = false - kubelet_extra_args = "--register-with-taints=dedicated=${var.cluster_name}-tikv:NoSchedule --node-labels=dedicated=${var.cluster_name}-tikv,pingcap.com/aws-local-ssd=true" + kubelet_extra_args = join(" ", + [ + "--register-with-taints=dedicated=${var.cluster_name}-tikv:NoSchedule", + "--node-labels=dedicated=${var.cluster_name}-tikv,pingcap.com/aws-local-ssd=true,zone=${local.aws_zone_getter}", + lookup(var.group_kubelet_extra_args, "tikv", var.kubelet_extra_args) + ] + ) asg_desired_capacity = var.tikv_count asg_max_size = var.tikv_count + 2 pre_userdata = file("${path.module}/pre_userdata") @@ -66,7 +82,13 @@ locals { root_volume_type = "gp2" root_volume_size = "50" public_ip = false - kubelet_extra_args = "--register-with-taints=dedicated=${var.cluster_name}-tidb:NoSchedule --node-labels=dedicated=${var.cluster_name}-tidb" + kubelet_extra_args = join(" ", + [ + "--register-with-taints=dedicated=${var.cluster_name}-tidb:NoSchedule", + "--node-labels=dedicated=${var.cluster_name}-tidb,zone=${local.aws_zone_getter}", + lookup(var.group_kubelet_extra_args, "tidb", var.kubelet_extra_args) + ] + ) asg_desired_capacity = var.tidb_count asg_max_size = var.tidb_count + 2 }, @@ -77,6 +99,12 @@ locals { root_volume_type = "gp2" root_volume_size = "50" public_ip = false + kubelet_extra_args = join(" ", + [ + "--node-labels=zone=${local.aws_zone_getter}", + lookup(var.group_kubelet_extra_args, "monitor", var.kubelet_extra_args) + ] + ) asg_desired_capacity = 1 asg_max_size = 3 } diff --git a/deploy/modules/aws/tidb-cluster/templates/userdata.sh.tpl b/deploy/modules/aws/tidb-cluster/templates/userdata.sh.tpl index db015227aa..b3077d77b0 100644 --- a/deploy/modules/aws/tidb-cluster/templates/userdata.sh.tpl +++ b/deploy/modules/aws/tidb-cluster/templates/userdata.sh.tpl @@ -18,7 +18,7 @@ systemctl daemon-reload systemctl restart docker # Bootstrap and join the cluster -/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint 
'${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args '${kubelet_extra_args}' '${cluster_name}' +/etc/eks/bootstrap.sh --b64-cluster-ca "${cluster_auth_base64}" --apiserver-endpoint "${endpoint}" ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" "${cluster_name}" # Allow user supplied userdata code ${additional_userdata} diff --git a/deploy/modules/aws/tidb-cluster/variables.tf b/deploy/modules/aws/tidb-cluster/variables.tf index 1d5e5c937f..e0766b4bad 100644 --- a/deploy/modules/aws/tidb-cluster/variables.tf +++ b/deploy/modules/aws/tidb-cluster/variables.tf @@ -147,3 +147,14 @@ variable "iam_path" { description = "If provided, all IAM roles will be created on this path." default = "/" } + +variable "kubelet_extra_args" { + description = "Extra arguments passed to kubelet" + default = "--kube-reserved memory=0.3Gi,ephemeral-storage=1Gi --system-reserved memory=0.2Gi,ephemeral-storage=1Gi" +} + +variable "group_kubelet_extra_args" { + description = "If provided, override kubelet_extra_args for the specific node group that matches the key of the map (e.g. tidb, tikv, pd, monitor)" + type = map(string) + default = {}