Skip to content

Commit

Permalink
Automated cherry pick of #1045: tidb lb is empty after terraform apply (#1051)
Browse files Browse the repository at this point in the history

* tidb lb is empty after terraform apply

* update scheduler image for ali and lb key for gcp

* wait for monitoring lb
  • Loading branch information
DanielZhangQD authored Oct 25, 2019
1 parent 59c60ea commit fe76e8c
Show file tree
Hide file tree
Showing 4 changed files with 49 additions and 4 deletions.
4 changes: 2 additions & 2 deletions deploy/modules/aliyun/tidb-operator/operator.tf
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,6 @@ resource "helm_release" "tidb-operator" {

set {
name = "scheduler.kubeSchedulerImageName"
value = "gcr.akscn.io/google_containers/hyperkube"
value = "registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler-amd64"
}
}
}
1 change: 1 addition & 0 deletions deploy/modules/gcp/tidb-cluster/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,7 @@ module "tidb-cluster" {
kubeconfig_filename = var.kubeconfig_path
base_values = file("${path.module}/values/default.yaml")
wait_on_resource = [google_container_node_pool.tidb_pool, var.tidb_operator_id]
service_ingress_key = "ip"
}

resource "null_resource" "wait-lb-ip" {
Expand Down
4 changes: 2 additions & 2 deletions deploy/modules/share/tidb-cluster-release/data.tf
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
# Reads the external address (IP or hostname) assigned to the TiDB service's
# LoadBalancer. Depends on null_resource.wait-lb-ip so the query only runs
# after the load balancer ingress actually exists; otherwise jq returns null
# and the output is empty right after `terraform apply`.
data "external" "tidb_hostname" {
depends_on = [helm_release.tidb-cluster, null_resource.wait-lb-ip]
working_dir = path.cwd
# Emits .status.loadBalancer.ingress[0] ({"ip": ...} or {"hostname": ...})
# as the JSON object the external data source expects.
program = ["bash", "-c", "kubectl --kubeconfig ${var.kubeconfig_filename} get svc -n ${var.cluster_name} ${var.cluster_name}-tidb -o json | jq '.status.loadBalancer.ingress[0]'"]
}

# Reads the external address (IP or hostname) assigned to the Grafana
# monitoring service's LoadBalancer. Depends on null_resource.wait-mlb-ip so
# the query only runs after the monitoring load balancer ingress exists.
data "external" "monitor_hostname" {
depends_on = [helm_release.tidb-cluster, null_resource.wait-mlb-ip]
working_dir = path.cwd
# Emits .status.loadBalancer.ingress[0] ({"ip": ...} or {"hostname": ...})
# as the JSON object the external data source expects.
program = ["bash", "-c", "kubectl --kubeconfig ${var.kubeconfig_filename} get svc -n ${var.cluster_name} ${var.cluster_name}-grafana -o json | jq '.status.loadBalancer.ingress[0]'"]
}
Expand Down
44 changes: 44 additions & 0 deletions deploy/modules/share/tidb-cluster-release/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -139,3 +139,47 @@ EOS
}
}
}

# Blocks until the TiDB service's LoadBalancer has an ingress entry containing
# var.service_ingress_key (e.g. "ip" — the value the GCP module passes), so
# consumers such as data.external.tidb_hostname don't read an empty status.
resource "null_resource" "wait-lb-ip" {
depends_on = [
helm_release.tidb-cluster
]
provisioner "local-exec" {
interpreter = ["bash", "-c"]
working_dir = path.cwd
# Polls every 5s for the selected key in .status.loadBalancer.ingress[0].
# NOTE(review): no timeout — apply hangs here if the LB is never provisioned.
command = <<EOS
set -euo pipefail
until kubectl get svc -n ${var.cluster_name} ${var.cluster_name}-tidb -o json | jq '.status.loadBalancer.ingress[0]' | grep "${var.service_ingress_key}"; do
echo "Wait for TiDB internal loadbalancer IP"
sleep 5
done
EOS

environment = {
# kubectl in the script above carries no --kubeconfig flag; it picks up
# the cluster credentials from this environment variable instead.
KUBECONFIG = var.kubeconfig_filename
}
}
}

# Blocks until the Grafana monitoring service's LoadBalancer has an ingress
# entry containing var.service_ingress_key, so consumers such as
# data.external.monitor_hostname don't read an empty status.
resource "null_resource" "wait-mlb-ip" {
depends_on = [
helm_release.tidb-cluster
]
provisioner "local-exec" {
interpreter = ["bash", "-c"]
working_dir = path.cwd
# Polls every 5s for the selected key in .status.loadBalancer.ingress[0].
# NOTE(review): no timeout — apply hangs here if the LB is never provisioned.
command = <<EOS
set -euo pipefail
until kubectl get svc -n ${var.cluster_name} ${var.cluster_name}-grafana -o json | jq '.status.loadBalancer.ingress[0]' | grep "${var.service_ingress_key}"; do
echo "Wait for TiDB monitoring internal loadbalancer IP"
sleep 5
done
EOS

environment = {
# kubectl in the script above carries no --kubeconfig flag; it picks up
# the cluster credentials from this environment variable instead.
KUBECONFIG = var.kubeconfig_filename
}
}
}

0 comments on commit fe76e8c

Please sign in to comment.