# remote_setup_controller_worker.yml
---
# Control Node Setup
- hosts: control_node # Control plane tasks
  become: true
  vars:
    kubeconfig_path: "/users/{{ k8s_user }}/.kube/config"
  tasks:
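    # Resolve the control node's hostname to an IP and publish it on a pseudo-host
    # named "global" so the worker play can read it via hostvars['global'].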
    - name: Resolve control node hostname to IP
      command: getent ahosts "{{ hostvars['control_node'].ansible_host }}"
      register: resolved_ip_output
    - name: Parse resolved IP from output
      set_fact:
        resolved_control_plane_ip: "{{ resolved_ip_output.stdout_lines[0].split(' ')[0] }}"
    - name: Set resolved_control_plane_ip globally
      add_host:
        name: "global"
        resolved_control_plane_ip: "{{ resolved_control_plane_ip }}"
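    # Bootstrap the control plane with kubeadm, advertising the resolved IP and using
    # the cri-dockerd socket; the creates: guard skips re-initialization on reruns.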
    - name: Initialize Kubernetes control plane
      shell: |
        kubeadm init --pod-network-cidr=10.244.0.0/16 --cri-socket /var/run/cri-dockerd.sock --apiserver-advertise-address={{ resolved_control_plane_ip }}
      args:
        creates: /etc/kubernetes/admin.conf
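    # Make the cluster admin kubeconfig usable by the unprivileged k8s_user: create
    # ~/.kube, relax admin.conf permissions, copy it into place, and fix ownership.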
    - name: Ensure .kube directory exists
      file:
        path: "/users/{{ k8s_user }}/.kube"
        state: directory
        mode: '0755'
        owner: "{{ k8s_user }}"
        # group: "{{ k8s_user }}"
      become: true
    - name: Temporarily set permissions to read admin.conf
      file:
        path: /etc/kubernetes/admin.conf
        mode: '0644'
      become: true
    - name: Set up kube config for kubectl on control plane
      copy:
        src: /etc/kubernetes/admin.conf
        dest: "/users/{{ k8s_user }}/.kube/config"
        mode: '0644'
        remote_src: true
      become: true
      become_method: sudo
    - name: Ensure ownership of kube config for kubectl
      file:
        path: "/users/{{ k8s_user }}/.kube/config"
        owner: "{{ k8s_user }}"
        # group: "{{ k8s_user }}"
        mode: '0644'
      become: true
    - name: Display ansible_user_id
      debug:
        msg: "ansible_user_id is {{ ansible_user_id }}"
    - name: Fetch admin.conf to localhost for kubeconfig
      fetch:
        src: /etc/kubernetes/admin.conf
        dest: ~/.kube/config
        flat: yes
      become: true
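    # Create a fresh bootstrap token, then parse the token and CA cert hash out of the
    # printed join command; the worker play reads them via hostvars['control_node'].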
    - name: Generate kubeadm join command
      shell: kubeadm token create --print-join-command
      register: kubeadm_join_command
    - name: Extract kube_token and cert_hash from join command
      set_fact:
        kube_token: "{{ (kubeadm_join_command.stdout | regex_search('--token\\s+([\\w.]+)', '\\1')).0 }}"
        cert_hash: "{{ (kubeadm_join_command.stdout | regex_search('--discovery-token-ca-cert-hash\\s+sha256:([\\w]+)', '\\1')).0 }}"
    - name: Display kube_token
      debug:
        msg: "kube_token is {{ kube_token }}"
    - name: Display cert_hash
      debug:
        msg: "cert_hash is {{ cert_hash }}"
    - name: Install Flannel network plugin
      shell: |
        kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
      args:
        creates: /etc/kubernetes/kube-flannel.yml
      environment:
        KUBECONFIG: "{{ kubeconfig_path }}"
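    # Remove the control-plane NoSchedule taint so workloads can also run on this node;
    # "|| true" tolerates the taint already having been removed.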
    - name: Untaint the control plane to host pods
      shell: kubectl taint nodes $(hostname) node-role.kubernetes.io/control-plane:NoSchedule- || true
      environment:
        KUBECONFIG: "{{ kubeconfig_path }}"

# Worker Node Setup
- hosts: worker_nodes
  become: true
  tasks:
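    # Join the cluster using the IP, token, and cert hash published by the control-node
    # play; the creates: guard skips nodes that have already joined.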
    - name: Join Kubernetes cluster
      shell: |
        kubeadm join {{ hostvars['global'].resolved_control_plane_ip }}:6443 --token {{ hostvars['control_node'].kube_token }} --discovery-token-ca-cert-hash sha256:{{ hostvars['control_node'].cert_hash }} --cri-socket unix:///var/run/cri-dockerd.sock --v=5
      args:
        creates: /var/lib/kubelet/kubeadm-flags.env
      become: true
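    # Create a ~/.kube directory for the remote user on each worker node.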
    - name: Ensure .kube directory exists
      file:
        path: "/users/{{ ansible_user }}/.kube"
        state: directory
        mode: '0755'
      become_user: "{{ ansible_user }}" # Ensure directory is created under the correct user
    - name: Display ansible_user
      debug:
        msg: "ansible_user is {{ ansible_user }}"