status.go
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers

import (
	"context"

	"github.com/pkg/errors"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal"
	"sigs.k8s.io/cluster-api/util/collections"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// updateStatus is called after every reconciliation loop in a defer statement to always make sure we have the
// resource's status subresource up-to-date.
func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, controlPlane *internal.ControlPlane) error {
	selector := collections.ControlPlaneSelectorForCluster(controlPlane.Cluster.Name)
	// Copy label selector to its status counterpart in string format.
	// This is necessary for CRDs including scale subresources.
	controlPlane.KCP.Status.Selector = selector.String()
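
	// Surface how many machines are up to date with the current KCP spec.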
	controlPlane.KCP.Status.UpdatedReplicas = int32(len(controlPlane.UpToDateMachines()))

	replicas := int32(len(controlPlane.Machines))
	desiredReplicas := *controlPlane.KCP.Spec.Replicas

	// Set basic data that does not require interacting with the workload cluster.
	controlPlane.KCP.Status.Replicas = replicas
	controlPlane.KCP.Status.ReadyReplicas = 0
	controlPlane.KCP.Status.UnavailableReplicas = replicas

	// Return early if the deletion timestamp is set, because we don't want to try to connect to the workload cluster
	// and we don't want to report the resize condition (it is set to Deleting in reconcileDelete).
	if !controlPlane.KCP.DeletionTimestamp.IsZero() {
		return nil
	}
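
	// Report the lowest Kubernetes version among machines with a healthy API server as the control plane
	// version, so the reported version only moves forward once every healthy machine has been upgraded.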
	machinesWithHealthyAPIServer := controlPlane.Machines.Filter(collections.HealthyAPIServer())
	lowestVersion := machinesWithHealthyAPIServer.LowestVersion()
	if lowestVersion != nil {
		controlPlane.KCP.Status.Version = lowestVersion
	}

	switch {
	// We are scaling up
	case replicas < desiredReplicas:
		conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, controlplanev1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up control plane to %d replicas (actual %d)", desiredReplicas, replicas)
	// We are scaling down
	case replicas > desiredReplicas:
		conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, controlplanev1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down control plane to %d replicas (actual %d)", desiredReplicas, replicas)

		// This means that there was no error in generating the desired number of machine objects.
		conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesCreatedCondition)
	default:
		// Make sure the last resize operation is marked as completed.
		// NOTE: we are checking the number of machines ready so we report resize completed only when the machines
		// are actually provisioned (vs reporting completed immediately after the last machine object is created).
		readyMachines := controlPlane.Machines.Filter(collections.IsReady())
		if int32(len(readyMachines)) == replicas {
			conditions.MarkTrue(controlPlane.KCP, controlplanev1.ResizedCondition)
		}

		// This means that there was no error in generating the desired number of machine objects.
		conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesCreatedCondition)
	}
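
	// Everything above is derived from objects in the management cluster; the fields below require
	// reaching the workload cluster's API server.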
	workloadCluster, err := controlPlane.GetWorkloadCluster(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to create remote cluster client")
	}

	status, err := workloadCluster.ClusterStatus(ctx)
	if err != nil {
		return err
	}
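
	// Recompute ready/unavailable replicas from the nodes actually observed in the workload cluster,
	// replacing the pessimistic defaults set above.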
	controlPlane.KCP.Status.ReadyReplicas = status.ReadyNodes
	controlPlane.KCP.Status.UnavailableReplicas = replicas - status.ReadyNodes

	// This only gets initialized once and does not change if the kubeadm config map goes away.
	if status.HasKubeadmConfig {
		controlPlane.KCP.Status.Initialized = true
		conditions.MarkTrue(controlPlane.KCP, controlplanev1.AvailableCondition)
	}

	if controlPlane.KCP.Status.ReadyReplicas > 0 {
		controlPlane.KCP.Status.Ready = true
	}

	// Surface lastRemediation data in status.
	// LastRemediation is the remediation currently in progress, if any, or the
	// most recent of the remediations we are keeping track of on machines.
	var lastRemediation *RemediationData

	if v, ok := controlPlane.KCP.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok {
		remediationData, err := RemediationDataFromAnnotation(v)
		if err != nil {
			return err
		}
		lastRemediation = remediationData
	} else {
		for _, m := range controlPlane.Machines.UnsortedList() {
			if v, ok := m.Annotations[controlplanev1.RemediationForAnnotation]; ok {
				remediationData, err := RemediationDataFromAnnotation(v)
				if err != nil {
					return err
				}
				if lastRemediation == nil || lastRemediation.Timestamp.Time.Before(remediationData.Timestamp.Time) {
					lastRemediation = remediationData
				}
			}
		}
	}

	if lastRemediation != nil {
		controlPlane.KCP.Status.LastRemediation = lastRemediation.ToStatus()
	}

	return nil
}