diff --git a/cli/cmd/status/status.go b/cli/cmd/status/status.go index d97bebe308..ee6947e8be 100644 --- a/cli/cmd/status/status.go +++ b/cli/cmd/status/status.go @@ -174,7 +174,7 @@ func (c *Command) checkHelmInstallation(settings *helmCLI.EnvSettings, uiLogger } statuser := action.NewStatus(statusConfig) - rel, err := statuser.Run(releaseName) + rel, err := c.helmActionsRunner.GetStatus(statuser, releaseName) if err != nil { return fmt.Errorf("couldn't check for installations: %s", err) } diff --git a/cli/cmd/status/status_test.go b/cli/cmd/status/status_test.go index e9d622136c..b687aac7df 100644 --- a/cli/cmd/status/status_test.go +++ b/cli/cmd/status/status_test.go @@ -1,7 +1,9 @@ package status import ( + "bytes" "context" + "errors" "flag" "fmt" "io" @@ -11,12 +13,18 @@ import ( "github.com/hashicorp/consul-k8s/cli/common" cmnFlag "github.com/hashicorp/consul-k8s/cli/common/flag" "github.com/hashicorp/consul-k8s/cli/common/terminal" + "github.com/hashicorp/consul-k8s/cli/helm" "github.com/hashicorp/go-hclog" "github.com/posener/complete" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart" + helmRelease "helm.sh/helm/v3/pkg/release" + helmTime "helm.sh/helm/v3/pkg/time" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" ) @@ -33,22 +41,7 @@ func TestCheckConsulServers(t *testing.T) { // Next create a stateful set with 3 desired replicas and 3 ready replicas. 
var replicas int32 = 3 - ss := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "consul-server-test1", - Namespace: "default", - Labels: map[string]string{"app": "consul", "chart": "consul-helm", "component": "server"}, - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: &replicas, - }, - Status: appsv1.StatefulSetStatus{ - Replicas: replicas, - ReadyReplicas: replicas, - }, - } - - c.kubernetes.AppsV1().StatefulSets("default").Create(context.Background(), ss, metav1.CreateOptions{}) + createStatefulSet("consul-server-test1", "default", replicas, replicas, c.kubernetes) // Now we run the checkConsulServers() function and it should succeed. s, err := c.checkConsulServers("default") @@ -56,44 +49,14 @@ func TestCheckConsulServers(t *testing.T) { require.Equal(t, "Consul servers healthy (3/3)", s) // If you then create another stateful set it should error. - ss2 := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "consul-server-test2", - Namespace: "default", - Labels: map[string]string{"app": "consul", "chart": "consul-helm", "component": "server"}, - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: &replicas, - }, - Status: appsv1.StatefulSetStatus{ - Replicas: replicas, - ReadyReplicas: replicas, - }, - } - c.kubernetes.AppsV1().StatefulSets("default").Create(context.Background(), ss2, metav1.CreateOptions{}) - + createStatefulSet("consul-server-test2", "default", replicas, replicas, c.kubernetes) _, err = c.checkConsulServers("default") require.Error(t, err) require.Contains(t, err.Error(), "found multiple server stateful sets") // Clear out the client and now run a test where the stateful set isn't ready. 
c.kubernetes = fake.NewSimpleClientset() - - ss3 := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "consul-server-test3", - Namespace: "default", - Labels: map[string]string{"app": "consul", "chart": "consul-helm", "component": "server"}, - }, - Spec: appsv1.StatefulSetSpec{ - Replicas: &replicas, - }, - Status: appsv1.StatefulSetStatus{ - Replicas: replicas, - ReadyReplicas: replicas - 1, // Let's just set one of the servers to unhealthy - }, - } - c.kubernetes.AppsV1().StatefulSets("default").Create(context.Background(), ss3, metav1.CreateOptions{}) + createStatefulSet("consul-server-test2", "default", replicas, replicas-1, c.kubernetes) _, err = c.checkConsulServers("default") require.Error(t, err) @@ -171,6 +134,202 @@ func TestCheckConsulClients(t *testing.T) { require.Contains(t, err.Error(), fmt.Sprintf("%d/%d Consul clients unhealthy", 1, desired)) } +// TestStatus creates a fake stateful set and tests the checkConsulServers function. +func TestStatus(t *testing.T) { + nowTime := helmTime.Now() + timezone, _ := nowTime.Zone() + notImeStr := nowTime.Format("2006/01/02 15:04:05") + " " + timezone + cases := map[string]struct { + input []string + messages []string + preProcessingFunc func(k8s kubernetes.Interface) + helmActionsRunner *helm.MockActionRunner + expectedReturnCode int + }{ + "status with clients and servers returns success": { + input: []string{}, + messages: []string{ + fmt.Sprintf("\n==> Consul Status Summary\nName\tNamespace\tStatus\tChart Version\tAppVersion\tRevision\tLast Updated \n \t \tREADY \t1.0.0 \t \t0 \t%s\t\n", notImeStr), + "\n==> Config:\n {}\n \n ✓ Consul servers healthy (3/3)\n ✓ Consul clients healthy (3/3)\n", + }, + preProcessingFunc: func(k8s kubernetes.Interface) { + createDaemonset("consul-client-test1", "consul", 3, 3, k8s) + createStatefulSet("consul-server-test1", "consul", 3, 3, k8s) + }, + + helmActionsRunner: &helm.MockActionRunner{ + GetStatusFunc: func(status *action.Status, name string) 
(*helmRelease.Release, error) { + return &helmRelease.Release{ + Name: "consul", Namespace: "consul", + Info: &helmRelease.Info{LastDeployed: nowTime, Status: "READY"}, + Chart: &chart.Chart{ + Metadata: &chart.Metadata{ + Version: "1.0.0", + }, + }, + Config: make(map[string]interface{})}, nil + }, + }, + expectedReturnCode: 0, + }, + "status with no servers returns error": { + input: []string{}, + messages: []string{ + fmt.Sprintf("\n==> Consul Status Summary\nName\tNamespace\tStatus\tChart Version\tAppVersion\tRevision\tLast Updated \n \t \tREADY \t1.0.0 \t \t0 \t%s\t\n", notImeStr), + "\n==> Config:\n {}\n \n ! no server stateful set found\n", + }, + preProcessingFunc: func(k8s kubernetes.Interface) { + createDaemonset("consul-client-test1", "consul", 3, 3, k8s) + }, + helmActionsRunner: &helm.MockActionRunner{ + GetStatusFunc: func(status *action.Status, name string) (*helmRelease.Release, error) { + return &helmRelease.Release{ + Name: "consul", Namespace: "consul", + Info: &helmRelease.Info{LastDeployed: nowTime, Status: "READY"}, + Chart: &chart.Chart{ + Metadata: &chart.Metadata{ + Version: "1.0.0", + }, + }, + Config: make(map[string]interface{})}, nil + }, + }, + expectedReturnCode: 1, + }, + "status with no clients returns error": { + input: []string{}, + messages: []string{ + fmt.Sprintf("\n==> Consul Status Summary\nName\tNamespace\tStatus\tChart Version\tAppVersion\tRevision\tLast Updated \n \t \tREADY \t1.0.0 \t \t0 \t%s\t\n", notImeStr), + "\n==> Config:\n {}\n \n ✓ Consul servers healthy (3/3)\n ! 
no client daemon set found\n", + }, + preProcessingFunc: func(k8s kubernetes.Interface) { + createStatefulSet("consul-server-test1", "consul", 3, 3, k8s) + }, + helmActionsRunner: &helm.MockActionRunner{ + GetStatusFunc: func(status *action.Status, name string) (*helmRelease.Release, error) { + return &helmRelease.Release{ + Name: "consul", Namespace: "consul", + Info: &helmRelease.Info{LastDeployed: nowTime, Status: "READY"}, + Chart: &chart.Chart{ + Metadata: &chart.Metadata{ + Version: "1.0.0", + }, + }, + Config: make(map[string]interface{})}, nil + }, + }, + expectedReturnCode: 1, + }, + "status with pre-install and pre-upgrade hooks returns success and outputs hook status": { + input: []string{}, + messages: []string{ + fmt.Sprintf("\n==> Consul Status Summary\nName\tNamespace\tStatus\tChart Version\tAppVersion\tRevision\tLast Updated \n \t \tREADY \t1.0.0 \t \t0 \t%s\t\n", notImeStr), + "\n==> Config:\n {}\n \n", + "\n==> Status Of Helm Hooks:\npre-install-hook pre-install: Succeeded\npre-upgrade-hook pre-upgrade: Succeeded\n ✓ Consul servers healthy (3/3)\n ✓ Consul clients healthy (3/3)\n", + }, + preProcessingFunc: func(k8s kubernetes.Interface) { + createDaemonset("consul-client-test1", "consul", 3, 3, k8s) + createStatefulSet("consul-server-test1", "consul", 3, 3, k8s) + }, + + helmActionsRunner: &helm.MockActionRunner{ + GetStatusFunc: func(status *action.Status, name string) (*helmRelease.Release, error) { + return &helmRelease.Release{ + Name: "consul", Namespace: "consul", + Info: &helmRelease.Info{LastDeployed: nowTime, Status: "READY"}, + Chart: &chart.Chart{ + Metadata: &chart.Metadata{ + Version: "1.0.0", + }, + }, + Config: make(map[string]interface{}), + Hooks: []*helmRelease.Hook{ + { + Name: "pre-install-hook", + Kind: "pre-install", LastRun: helmRelease.HookExecution{ + Phase: helmRelease.HookPhaseSucceeded, + }, + Events: []helmRelease.HookEvent{ + "pre-install", + }, + }, + { + Name: "pre-upgrade-hook", + Kind: "pre-upgrade", LastRun: 
helmRelease.HookExecution{ + Phase: helmRelease.HookPhaseSucceeded, + }, + Events: []helmRelease.HookEvent{ + "pre-install", + }, + }, + { + Name: "post-delete-hook", + Kind: "post-delete", LastRun: helmRelease.HookExecution{ + Phase: helmRelease.HookPhaseSucceeded, + }, + Events: []helmRelease.HookEvent{ + "post-delete", + }, + }, + }}, nil + }, + }, + expectedReturnCode: 0, + }, + "status with CheckForInstallations error returns error": { + input: []string{}, + messages: []string{ + "\n==> Consul Status Summary\n ! kaboom!\n", + }, + preProcessingFunc: func(k8s kubernetes.Interface) { + createDaemonset("consul-client-test1", "consul", 3, 3, k8s) + createStatefulSet("consul-server-test1", "consul", 3, 3, k8s) + }, + + helmActionsRunner: &helm.MockActionRunner{ + CheckForInstallationsFunc: func(options *helm.CheckForInstallationsOptions) (bool, string, string, error) { + return false, "", "", errors.New("kaboom!") + }, + }, + expectedReturnCode: 1, + }, + "status with GetStatus error returns error": { + input: []string{}, + messages: []string{ + "\n==> Consul Status Summary\n ! 
couldn't check for installations: kaboom!\n", + }, + preProcessingFunc: func(k8s kubernetes.Interface) { + createDaemonset("consul-client-test1", "consul", 3, 3, k8s) + createStatefulSet("consul-server-test1", "consul", 3, 3, k8s) + }, + + helmActionsRunner: &helm.MockActionRunner{ + GetStatusFunc: func(status *action.Status, name string) (*helmRelease.Release, error) { + return nil, errors.New("kaboom!") + }, + }, + expectedReturnCode: 1, + }, + } + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + buf := new(bytes.Buffer) + c := getInitializedCommand(t, buf) + c.kubernetes = fake.NewSimpleClientset() + c.helmActionsRunner = tc.helmActionsRunner + if tc.preProcessingFunc != nil { + tc.preProcessingFunc(c.kubernetes) + } + returnCode := c.Run(tc.input) + require.Equal(t, tc.expectedReturnCode, returnCode) + output := buf.String() + for _, msg := range tc.messages { + require.Contains(t, output, msg) + } + }) + } +} + + // getInitializedCommand sets up a command struct for tests. 
func getInitializedCommand(t *testing.T, buf io.Writer) *Command { t.Helper() @@ -226,3 +385,38 @@ func TestTaskCreateCommand_AutocompleteArgs(t *testing.T) { c := cmd.AutocompleteArgs() assert.Equal(t, complete.PredictNothing, c) } + +func createStatefulSet(name, namespace string, replicas, readyReplicas int32, k8s kubernetes.Interface) { + ss := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{"app": "consul", "chart": "consul-helm", "component": "server"}, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: replicas, + ReadyReplicas: readyReplicas, + }, + } + + k8s.AppsV1().StatefulSets(namespace).Create(context.Background(), ss, metav1.CreateOptions{}) +} + +func createDaemonset(name, namespace string, replicas, readyReplicas int32, k8s kubernetes.Interface) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{"app": "consul", "chart": "consul-helm"}, + }, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: replicas, + NumberReady: readyReplicas, + }, + } + + k8s.AppsV1().DaemonSets(namespace).Create(context.Background(), ds, metav1.CreateOptions{}) +}