-
Notifications
You must be signed in to change notification settings - Fork 4.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
emit warning on no liveness probe defined for pods #10363
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -11,8 +11,9 @@ import ( | |
) | ||
|
||
// Marker keys emitted by the pod-spec analysis below. Each key identifies a
// class of warning so callers can de-duplicate or filter markers.
const (
	UnmountableSecretWarning    = "UnmountableSecret"
	MissingSecretWarning        = "MissingSecret"
	MissingLivenessProbeWarning = "MissingLivenessProbe"
)
|
||
// FindUnmountableSecrets inspects all PodSpecs for any Secret reference that isn't listed as mountable by the referenced ServiceAccount | ||
|
@@ -75,6 +76,58 @@ func FindMissingSecrets(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { | |
return markers | ||
} | ||
|
||
// FindMissingLivenessProbes inspects all PodSpecs for missing liveness probes and generates a list of non-duplicate markers | ||
func FindMissingLivenessProbes(g osgraph.Graph, f osgraph.Namer, setProbeCommand string) []osgraph.Marker { | ||
markers := []osgraph.Marker{} | ||
|
||
for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) { | ||
podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode) | ||
if hasLivenessProbe(podSpecNode) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. this check is cheap, run it first. |
||
continue | ||
} | ||
|
||
topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode) | ||
|
||
// skip any podSpec nodes that are managed by other nodes. | ||
// Liveness probes should only be applied to a controlling | ||
// podSpec node, and not to any of its children. | ||
if hasControllerRefEdge(g, topLevelNode) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. add a comment here explaining the reasoning. |
||
continue | ||
} | ||
|
||
topLevelString := f.ResourceName(topLevelNode) | ||
markers = append(markers, osgraph.Marker{ | ||
Node: podSpecNode, | ||
RelatedNodes: []graph.Node{topLevelNode}, | ||
|
||
Severity: osgraph.WarningSeverity, | ||
Key: MissingLivenessProbeWarning, | ||
Message: fmt.Sprintf("%s has no liveness probe to verify pods are still running.", | ||
topLevelString), | ||
Suggestion: osgraph.Suggestion(fmt.Sprintf("%s %s --liveness ...", setProbeCommand, topLevelString)), | ||
}) | ||
} | ||
|
||
return markers | ||
} | ||
|
||
// hasLivenessProbe iterates through all of the containers in a podSpecNode returning true | ||
// if at least one container has a liveness probe, or false otherwise | ||
func hasLivenessProbe(podSpecNode *kubegraph.PodSpecNode) bool { | ||
for _, container := range podSpecNode.PodSpec.Containers { | ||
if container.LivenessProbe != nil { | ||
return true | ||
} | ||
} | ||
return false | ||
} | ||
|
||
// hasControllerRefEdge returns true if a given node contains one or more "ManagedByController" outbound edges | ||
func hasControllerRefEdge(g osgraph.Graph, node graph.Node) bool { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Don't we have a cleaner way to do this? List outbound edges and check them directly? |
||
managedEdges := g.OutboundEdges(node, kubeedges.ManagedByControllerEdgeKind) | ||
return len(managedEdges) > 0 | ||
} | ||
|
||
// CheckForUnmountableSecrets checks to be sure that all the referenced secrets are mountable (by service account) | ||
func CheckForUnmountableSecrets(g osgraph.Graph, podSpecNode *kubegraph.PodSpecNode) []*kubegraph.SecretNode { | ||
saNodes := g.SuccessorNodesByNodeAndEdgeKind(podSpecNode, kubegraph.ServiceAccountNodeKind, kubeedges.ReferencedServiceAccountEdgeKind) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,21 @@ | ||
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh"
trap os::test::junit::reconcile_output EXIT

# Cleanup cluster resources created by this test
(
  set +e
  oc delete all,templates --all
  exit 0
) &>/dev/null

os::test::junit::declare_suite_start "cmd/set-probe-liveness"
# This test sets a liveness probe and verifies that the missing-probe warning
# is reported for the deployment config but not for the replication
# controllers it manages, and that the warning clears once a probe is set.
os::cmd::expect_success_and_text 'oc create -f pkg/api/graph/test/simple-deployment.yaml' 'deploymentconfig "simple-deployment" created'
os::cmd::expect_success_and_text 'oc status -v' 'dc/simple-deployment has no liveness probe'
os::cmd::expect_success_and_not_text 'oc status -v' 'rc/simple-deployment-1 has no liveness probe'
os::cmd::expect_success_and_text 'oc set probe dc/simple-deployment --liveness --get-url=http://google.com:80' 'deploymentconfig "simple-deployment" updated'
os::cmd::expect_success_and_not_text 'oc status -v' 'dc/simple-deployment has no liveness probe'
echo "set-probe-liveness: ok"
os::test::junit::declare_suite_end
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
in pseudo code: