This repository has been archived by the owner on Jun 21, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 10
/
kubernetes.yaml
121 lines (118 loc) · 2.17 KB
/
kubernetes.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
---
# Identity the kubedrainer DaemonSet pods run as; bound to the
# kubedrainer ClusterRole via the ClusterRoleBinding below.
kind: ServiceAccount
apiVersion: v1
metadata:
  name: kubedrainer
  namespace: kube-system
  labels:
    app: kubedrainer
---
# Cluster-wide permissions kubedrainer needs to cordon a node and
# evict its pods (the same set `kubectl drain` requires).
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubedrainer
  labels:
    app: kubedrainer
rules:
  # Cordon/uncordon: mark the node unschedulable and update its status.
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - watch
      - list
      - get
      - update
      - patch
  # Enumerate the pods running on the node being drained.
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - watch
      - list
      - get
  # Resolve pod owners to decide drain eligibility (e.g. skip
  # DaemonSet-managed pods). NOTE(review): the `extensions` group was
  # removed for these resources in Kubernetes 1.16; kept for backward
  # compatibility with older clusters, `apps` covers modern ones.
  - apiGroups:
      - extensions
      - apps
    resources:
      - replicasets
      - daemonsets
      - statefulsets
    verbs:
      - watch
      - list
      - get
  # Evict pods via the Eviction API (honors PodDisruptionBudgets).
  - apiGroups:
      - ""
    resources:
      - pods/eviction
    verbs:
      - create
  # Resolve Job/CronJob owners of pods during drain.
  - apiGroups:
      - batch
    resources:
      - cronjobs
      - jobs
    verbs:
      - get
      - list
      - watch
---
# Grants the kubedrainer ClusterRole to the kubedrainer ServiceAccount
# in kube-system.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubedrainer
  labels:
    app: kubedrainer
roleRef:
  kind: ClusterRole
  name: kubedrainer
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: kubedrainer
    namespace: kube-system
---
# Runs one kubedrainer pod per node so each node can drain itself
# (e.g. on scale-in) before termination.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: kubedrainer
  namespace: kube-system
  labels:
    app: kubedrainer
spec:
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app: kubedrainer
  template:
    metadata:
      labels:
        app: kubedrainer
      annotations:
        # kube2iam/kiam role annotation granting the pod AWS permissions.
        iam.amazonaws.com/role: kubedrainer
    spec:
      serviceAccountName: kubedrainer
      containers:
        - name: kubedrainer
          image: quay.io/virtuslab/kubedrainer:v0.0.10
          env:
            # Tell kubedrainer which node it is responsible for via the
            # downward API (the node this pod was scheduled onto).
            - name: KUBEDRAINER_NODE
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          resources:
            requests:
              cpu: 10m
              memory: 20Mi
            limits:
              cpu: 100m
              memory: 200Mi
      tolerations:
        # Mark the pod as a critical add-on for rescheduling
        - key: "CriticalAddonsOnly"
          operator: "Exists"
        # Stay scheduled on nodes the cluster-autoscaler is removing,
        # so the drain can actually run during scale-in.
        - key: "ToBeDeletedByClusterAutoscaler"
          operator: "Exists"