From 03a0002d20edca80d3b99ec691d8fce8c63980e7 Mon Sep 17 00:00:00 2001
From: Karan Thukral
Date: Tue, 18 Jul 2017 12:34:08 -0400
Subject: [PATCH] Add daemon set resource

---
 .../kubernetes_resource/daemon_set.rb       | 100 ++++++++++++++++++
 lib/kubernetes-deploy/runner.rb             |   1 +
 test/fixtures/hello-cloud/daemon_set.yml    |  16 +++
 test/helpers/fixture_set.rb                 |   9 ++
 test/helpers/fixture_sets/hello_cloud.rb    |   5 +
 test/integration/kubernetes_deploy_test.rb  |  29 ++++-
 6 files changed, 159 insertions(+), 1 deletion(-)
 create mode 100644 lib/kubernetes-deploy/kubernetes_resource/daemon_set.rb
 create mode 100644 test/fixtures/hello-cloud/daemon_set.yml

diff --git a/lib/kubernetes-deploy/kubernetes_resource/daemon_set.rb b/lib/kubernetes-deploy/kubernetes_resource/daemon_set.rb
new file mode 100644
index 000000000..8fdb329b1
--- /dev/null
+++ b/lib/kubernetes-deploy/kubernetes_resource/daemon_set.rb
@@ -0,0 +1,100 @@
+# frozen_string_literal: true
+module KubernetesDeploy
+  class DaemonSet < KubernetesResource
+    TIMEOUT = 5.minutes
+
+    def sync
+      raw_json, _err, st = kubectl.run("get", type, @name, "--output=json")
+      @found = st.success?
+
+      if @found
+        daemonset_data = JSON.parse(raw_json)
+        @desired_number = daemonset_data["status"]["desiredNumberScheduled"]
+        @rollout_data = daemonset_data["status"]
+          .slice("currentNumberScheduled", "desiredNumberScheduled", "numberReady")
+        @status = @rollout_data.map { |state_replicas, num| "#{num} #{state_replicas.chop.pluralize(num)}" }.join(", ")
+        @pods = find_pods(daemonset_data)
+      else # reset
+        @rollout_data = { "replicas" => 0 }
+        @status = nil
+        @pods = []
+      end
+    end
+
+    def deploy_succeeded?
+      @desired_number == @rollout_data["currentNumberScheduled"].to_i &&
+        @desired_number == @rollout_data["numberReady"].to_i
+    end
+
+    def deploy_failed?
+      @pods.present? && @pods.all?(&:deploy_failed?)
+    end
+
+    def failure_message
+      @pods.map(&:failure_message).compact.uniq.join("\n")
+    end
+
+    def timeout_message
+      @pods.map(&:timeout_message).compact.uniq.join("\n")
+    end
+
+    def deploy_timed_out?
+      super || @pods.present? && @pods.all?(&:deploy_timed_out?)
+    end
+
+    def exists?
+      @found
+    end
+
+    def fetch_events
+      own_events = super
+      return own_events unless @pods.present?
+      most_useful_pod = @pods.find(&:deploy_failed?) || @pods.find(&:deploy_timed_out?) || @pods.first
+      own_events.merge(most_useful_pod.fetch_events)
+    end
+
+    def fetch_logs
+      container_names.each_with_object({}) do |container_name, container_logs|
+        out, _err, _st = kubectl.run(
+          "logs",
+          id,
+          "--container=#{container_name}",
+          "--since-time=#{@deploy_started.to_datetime.rfc3339}",
+          "--tail=#{LOG_LINE_COUNT}"
+        )
+        container_logs[container_name] = out.split("\n")
+      end
+    end
+
+    private
+
+    def unmanaged?
+      @parent.blank?
+    end
+
+    def container_names
+      @definition["spec"]["template"]["spec"]["containers"].map { |c| c["name"] }
+    end
+
+    def find_pods(daemonset_data)
+      label_string = daemonset_data["spec"]["selector"]["matchLabels"].map { |k, v| "#{k}=#{v}" }.join(",")
+      raw_json, _err, st = kubectl.run("get", "pods", "-a", "--output=json", "--selector=#{label_string}")
+      return [] unless st.success?
+
+      all_pods = JSON.parse(raw_json)["items"]
+      all_pods.each_with_object([]) do |pod_data, relevant_pods|
+        next unless pod_data["metadata"]["ownerReferences"].any? { |ref| ref["uid"] == daemonset_data["metadata"]["uid"] }
+        pod = Pod.new(
+          namespace: namespace,
+          context: context,
+          definition: pod_data,
+          logger: @logger,
+          parent: "#{@name.capitalize} daemon set",
+          deploy_started: @deploy_started
+        )
+        pod.sync(pod_data)
+        relevant_pods << pod
+      end
+    end
+  end
+end
diff --git a/lib/kubernetes-deploy/runner.rb b/lib/kubernetes-deploy/runner.rb
index d558cb959..bce1eaa25 100644
--- a/lib/kubernetes-deploy/runner.rb
+++ b/lib/kubernetes-deploy/runner.rb
@@ -19,6 +19,7 @@
   bugsnag
   pod_disruption_budget
   replica_set
+  daemon_set
 ).each do |subresource|
   require "kubernetes-deploy/kubernetes_resource/#{subresource}"
 end
diff --git a/test/fixtures/hello-cloud/daemon_set.yml b/test/fixtures/hello-cloud/daemon_set.yml
new file mode 100644
index 000000000..ba2e15534
--- /dev/null
+++ b/test/fixtures/hello-cloud/daemon_set.yml
@@ -0,0 +1,16 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: busybox
+spec:
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - name: busybox
+        image: busybox
+        command: ["sleep", "40"]
+        ports:
+        - containerPort: 80
diff --git a/test/helpers/fixture_set.rb b/test/helpers/fixture_set.rb
index 9d582b88a..bf638742b 100644
--- a/test/helpers/fixture_set.rb
+++ b/test/helpers/fixture_set.rb
@@ -120,6 +120,15 @@ def assert_secret_present(secret_name, expected_data = nil, type: 'Opaque', mana
     assert_equal expected_data, secret_data
   end
 
+  def assert_daemon_set_present(name)
+    found = false
+    daemon_sets = v1beta1_kubeclient.get_daemon_sets(namespace: namespace, label_selector: "app=#{name}")
+    daemon_sets.each do |ds|
+      found = true if ds.metadata.name == name
+    end
+    assert found
+  end
+
   def assert_annotated(obj, annotation)
     annotations = obj.metadata.annotations.to_h.stringify_keys
     assert annotations.key?(annotation), "Expected secret to have annotation #{annotation}, but it did not"
diff --git a/test/helpers/fixture_sets/hello_cloud.rb b/test/helpers/fixture_sets/hello_cloud.rb
index f22167afd..2a8c2b9d6 100644
--- a/test/helpers/fixture_sets/hello_cloud.rb
+++ b/test/helpers/fixture_sets/hello_cloud.rb
@@ -14,6 +14,7 @@ def assert_all_up
       assert_podtemplate_runner_present
       assert_poddisruptionbudget
       assert_bare_replicaset_up
+      assert_daemon_set_up
     end
 
     def assert_unmanaged_pod_statuses(status, count = 1)
@@ -79,5 +80,9 @@ def assert_bare_replicaset_up
       assert_pod_status("bare-replica-set", "Running")
       assert assert_replica_set_up("bare-replica-set", replicas: 1)
     end
+
+    def assert_daemon_set_up
+      assert_daemon_set_present("busybox")
+    end
   end
 end
diff --git a/test/integration/kubernetes_deploy_test.rb b/test/integration/kubernetes_deploy_test.rb
index 55f4f2b74..abcbe000e 100644
--- a/test/integration/kubernetes_deploy_test.rb
+++ b/test/integration/kubernetes_deploy_test.rb
@@ -11,7 +11,7 @@ def test_full_hello_cloud_set_deploy_succeeds
       "Deploying ConfigMap/hello-cloud-configmap-data (timeout: 30s)",
       "Hello from Docker!", # unmanaged pod logs
       "Result: SUCCESS",
-      "Successfully deployed 12 resources"
+      "Successfully deployed 13 resources"
     ], in_order: true)
 
     assert_logs_match_all([
@@ -550,6 +550,33 @@ def test_success_detection_tolerates_out_of_band_deployment_scaling
     assert_equal true, success, "Failed to deploy deployment with dynamic replica count"
   end
 
+  def test_successful_daemon_set_deploy
+    assert deploy_fixtures("hello-cloud", subset: ["daemon_set.yml"])
+
+    assert_logs_match_all([
+      'Successfully deployed 1 resource',
+      '1 currentNumberSchedule, 1 desiredNumberSchedule, 1 numberRead'
+    ])
+  end
+
+  def test_timed_out_daemon_set_deploy
+    forced_timeout = 2 # failure often takes ~8s, so force a timeout well before that
+    KubernetesDeploy::DaemonSet.any_instance.stubs(:timeout).returns(forced_timeout)
+    success = deploy_fixtures("hello-cloud", subset: ["daemon_set.yml"]) do |fixtures|
+      # Give the container a readiness probe that can never pass so the rollout never completes
+      daemon_set = fixtures['daemon_set.yml']['DaemonSet'].first
+      container = daemon_set['spec']['template']['spec']['containers'].first
+      container['readinessProbe'] = { "exec" => { "command" => ['- ls'] } }
+    end
+
+    refute success
+
+    assert_logs_match_all([
+      'Failed to deploy 1 resource',
+      'DaemonSet/busybox: TIMED OUT (limit: 2s)'
+    ])
+  end
+
   private
 
   def count_by_revisions(pods)