diff --git a/pkg/reconciler/v1alpha1/pipelinerun/resources/passedconstraint_test.go b/pkg/reconciler/v1alpha1/pipelinerun/resources/passedconstraint_test.go new file mode 100644 index 00000000000..cd6ba52fb36 --- /dev/null +++ b/pkg/reconciler/v1alpha1/pipelinerun/resources/passedconstraint_test.go @@ -0,0 +1,174 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var mytask1 = &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "mytask1", + }, + Spec: v1alpha1.TaskSpec{ + Inputs: &v1alpha1.Inputs{ + Resources: []v1alpha1.TaskResource{ + v1alpha1.TaskResource{ + Name: "myresource1", + Type: v1alpha1.PipelineResourceTypeGit, + }, + }, + }, + }, +} + +var mytask2 = &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "mytask2", + }, + Spec: v1alpha1.TaskSpec{ + Inputs: &v1alpha1.Inputs{ + Resources: []v1alpha1.TaskResource{ + v1alpha1.TaskResource{ + Name: "myresource1", + Type: v1alpha1.PipelineResourceTypeGit, + }, + }, + }, + }, +} + +var mypipelinetasks = []v1alpha1.PipelineTask{{ + Name: "mypipelinetask1", + TaskRef: v1alpha1.TaskRef{Name: "mytask1"}, + InputSourceBindings: []v1alpha1.SourceBinding{{ + Name: "some-name-1", + Key: "myresource1", + ResourceRef: 
v1alpha1.PipelineResourceRef{ + Name: "myresource1", + }, + }}, +}, { + Name: "mypipelinetask2", + TaskRef: v1alpha1.TaskRef{Name: "mytask2"}, + InputSourceBindings: []v1alpha1.SourceBinding{{ + Name: "some-name-2", + Key: "myresource1", + ResourceRef: v1alpha1.PipelineResourceRef{ + Name: "myresource1", + }, + PassedConstraints: []string{"mytask1"}, + }}, +}} + +var mytaskruns = []v1alpha1.TaskRun{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "pipelinerun-mytask1", + }, + Spec: v1alpha1.TaskRunSpec{}, +}, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "pipelinerun-mytask2", + }, + Spec: v1alpha1.TaskRunSpec{}, +}} + +func TestCanTaskRun(t *testing.T) { + tcs := []struct { + name string + state []*PipelineRunTaskRun + canSecondTaskRun bool + }{ + { + name: "first-task-not-started", + state: []*PipelineRunTaskRun{{ + Task: mytask1, + PipelineTask: &mypipelinetasks[0], + TaskRunName: "pipelinerun-mytask1", + TaskRun: nil, + }, { + Task: mytask2, + PipelineTask: &mypipelinetasks[1], + TaskRunName: "pipelinerun-mytask2", + TaskRun: nil, + }}, + canSecondTaskRun: false, + }, + { + name: "first-task-running", + state: []*PipelineRunTaskRun{{ + Task: mytask1, + PipelineTask: &mypipelinetasks[0], + TaskRunName: "pipelinerun-mytask1", + TaskRun: makeStarted(mytaskruns[0]), + }, { + Task: mytask2, + PipelineTask: &mypipelinetasks[1], + TaskRunName: "pipelinerun-mytask2", + TaskRun: nil, + }}, + canSecondTaskRun: false, + }, + { + name: "first-task-failed", + state: []*PipelineRunTaskRun{{ + Task: mytask1, + PipelineTask: &mypipelinetasks[0], + TaskRunName: "pipelinerun-mytask1", + TaskRun: makeFailed(mytaskruns[0]), + }, { + Task: mytask2, + PipelineTask: &mypipelinetasks[1], + TaskRunName: "pipelinerun-mytask2", + TaskRun: nil, + }}, + canSecondTaskRun: false, + }, + { + name: "first-task-finished", + state: []*PipelineRunTaskRun{{ + Task: mytask1, + PipelineTask: &mypipelinetasks[0], + TaskRunName: "pipelinerun-mytask1", + 
TaskRun: makeSucceeded(mytaskruns[0]), + }, { + Task: mytask2, + PipelineTask: &mypipelinetasks[1], + TaskRunName: "pipelinerun-mytask2", + TaskRun: nil, + }}, + canSecondTaskRun: true, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + cantaskrun := canTaskRun(&mypipelinetasks[1], tc.state) + if d := cmp.Diff(cantaskrun, tc.canSecondTaskRun); d != "" { + t.Fatalf("Expected second task availability to run should be %t, but different state returned: %s", tc.canSecondTaskRun, d) + } + }) + } +} diff --git a/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinestate.go b/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinestate.go index 107a8330c92..97ba67ce3e2 100644 --- a/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinestate.go +++ b/pkg/reconciler/v1alpha1/pipelinerun/resources/pipelinestate.go @@ -45,7 +45,7 @@ func GetNextTask(prName string, state []*PipelineRunTaskRun, logger *zap.Sugared logger.Infof("TaskRun %s is still running so we shouldn't start more for PipelineRun %s", prtr.TaskRunName, prName) return nil } - } else if canTaskRun(prtr.PipelineTask) { + } else if canTaskRun(prtr.PipelineTask, state) { logger.Infof("TaskRun %s should be started for PipelineRun %s", prtr.TaskRunName, prName) return prtr } @@ -54,8 +54,32 @@ func GetNextTask(prName string, state []*PipelineRunTaskRun, logger *zap.Sugared return nil } -func canTaskRun(pt *v1alpha1.PipelineTask) bool { +func canTaskRun(pt *v1alpha1.PipelineTask, state []*PipelineRunTaskRun) bool { // Check if Task can run now. 
Go through all the input constraints + for _, input := range pt.InputSourceBindings { + if len(input.PassedConstraints) > 0 { + for _, constrainingTaskName := range input.PassedConstraints { + for _, prtr := range state { + // the constraining task must have a successful task run to allow this task to run + if prtr.Task.Name == constrainingTaskName { + if prtr.TaskRun == nil { + return false + } + c := prtr.TaskRun.Status.GetCondition(duckv1alpha1.ConditionSucceeded) + if c == nil { + return false + } + switch c.Status { + case corev1.ConditionFalse: + return false + case corev1.ConditionUnknown: + return false + } + } + } + } + } + } return true } diff --git a/test/crd_checks.go b/test/crd_checks.go index 5dc6784abad..56bea537c2d 100644 --- a/test/crd_checks.go +++ b/test/crd_checks.go @@ -86,3 +86,21 @@ func WaitForPipelineRunState(c *clients, name string, inState func(r *v1alpha1.P return inState(r) }) } + +// WaitForServiceExternalIPState polls the status of the a k8s Service called name from client every +// interval until an external ip is assigned indicating it is done, returns an +// error or timeout. desc will be used to name the metric that is emitted to +// track how long it took for name to get into the state checked by inState. 
+func WaitForServiceExternalIPState(c *clients, namespace, name string, inState func(s *corev1.Service) (bool, error), desc string) error { + metricName := fmt.Sprintf("WaitForServiceExternalIPState/%s/%s", name, desc) + _, span := trace.StartSpan(context.Background(), metricName) + defer span.End() + + return wait.PollImmediate(interval, timeout, func() (bool, error) { + r, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(r) + }) +} diff --git a/test/helm_task_test.go b/test/helm_task_test.go index e729fe4c4a4..95fe2d8699d 100644 --- a/test/helm_task_test.go +++ b/test/helm_task_test.go @@ -94,26 +94,36 @@ func TestHelmDeployPipelineRun(t *testing.T) { t.Errorf("Error waiting for PipelineRun %s to finish: %s", helmDeployPipelineRunName, err) } + logger.Info("Waiting for service to get external IP") var serviceIp string - k8sService, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(helmDeployServiceName, metav1.GetOptions{}) - if err != nil { - t.Errorf("Error getting service at %s %s", helmDeployServiceName, err) - } - if k8sService != nil { - ingress := k8sService.Status.LoadBalancer.Ingress - if len(ingress) > 0 { - serviceIp = ingress[0].IP - t.Logf("Service IP is %s", serviceIp) + if err := WaitForServiceExternalIPState(c, namespace, helmDeployServiceName, func(svc *corev1.Service) (bool, error) { + ingress := svc.Status.LoadBalancer.Ingress + if ingress != nil { + if len(ingress) > 0 { + serviceIp = ingress[0].IP + return true, nil + } } + return false, nil + }, "ServiceExternalIPisReady"); err != nil { + t.Errorf("Error waiting for Service %s to get an external IP: %s", helmDeployServiceName, err) } - resp, err := http.Get(fmt.Sprintf("http://%s:8080", serviceIp)) - if err != nil { - t.Errorf("Error reaching service at http://%s:8080 %s", serviceIp, err) - } - if resp != nil && resp.StatusCode != http.StatusOK { - t.Errorf("Error from service at 
http://%s:8080 %s", serviceIp, err) + if serviceIp != "" { + resp, err := http.Get(fmt.Sprintf("http://%s:8080", serviceIp)) + if err != nil { + t.Errorf("Error reaching service at http://%s:8080 %s", serviceIp, err) + } + if resp != nil && resp.StatusCode != http.StatusOK { + t.Errorf("Error from service at http://%s:8080 %s", serviceIp, err) + } + + } else { + t.Errorf("Service IP is empty.") } + + // cleanup task to remove helm from cluster, will not fail the test if it fails, just log + removeHelmFromCluster(c, t, namespace, logger) } func getGoHelloworldGitResource(namespace string) *v1alpha1.PipelineResource { @@ -144,7 +154,7 @@ func getCreateImageTask(namespace string, t *testing.T) *v1alpha1.Task { } imageName = fmt.Sprintf("%s/%s", dockerRepo, AppendRandomString(sourceImageName)) - t.Log("Image to be pusblished: %s", imageName) + t.Logf("Image to be pusblished: %s", imageName) return &v1alpha1.Task{ ObjectMeta: metav1.ObjectMeta{ @@ -203,7 +213,10 @@ func getHelmDeployTask(namespace string) *v1alpha1.Task { }, { Name: "helm-cleanup", //for local clusters, clean up from previous runs Image: "alpine/helm", - Command: []string{"/bin/sh", + Command: []string{ + "/bin/sh", + }, + Args: []string{ "-c", "helm ls --short --all | xargs -n1 helm del --purge", }, @@ -255,6 +268,7 @@ func getHelmDeployPipeline(namespace string) *v1alpha1.Pipeline { ResourceRef: v1alpha1.PipelineResourceRef{ Name: sourceResourceName, }, + PassedConstraints: []string{createImageTaskName}, }}, Params: []v1alpha1.Param{{ Name: "pathToHelmCharts", @@ -306,7 +320,92 @@ func setupClusterBindingForHelm(c *clients, t *testing.T, namespace string) { }}, } + t.Logf("Creating Cluster Role binding in kube-system for helm in namespace %s", namespace) if _, err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Create(defaultClusterRoleBinding); err != nil { t.Fatalf("Failed to create default Service account for Helm in namespace: %s - %s", namespace, err) } + + kubesystemClusterRoleBinding 
:= &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: AppendRandomString("default-tiller"), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{{ + Kind: "ServiceAccount", + Name: "default", + Namespace: "kube-system", + }}, + } + + t.Logf("Creating Cluster Role binding in kube-system for helm") + if _, err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Create(kubesystemClusterRoleBinding); err != nil { + t.Fatalf("Failed to create default Service account for Helm in kube-system - %s", err) + } +} + +func removeHelmFromCluster(c *clients, t *testing.T, namespace string, logger *logging.BaseLogger) { + helmResetTaskName := "helm-reset-task" + helmResetTask := &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: helmResetTaskName, + }, + Spec: v1alpha1.TaskSpec{ + BuildSpec: &buildv1alpha1.BuildSpec{ + Steps: []corev1.Container{{ + Name: "helm-reset", + Image: "alpine/helm", + Args: []string{"reset", "--force"}, + }, + }, + }, + }, + } + + helmResetTaskRunName := "helm-reset-taskrun" + helmResetTaskRun := &v1alpha1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: helmResetTaskRunName, + }, + Spec: v1alpha1.TaskRunSpec{ + TaskRef: v1alpha1.TaskRef{ + Name: helmResetTaskName, + }, + Trigger: v1alpha1.TaskTrigger{ + TriggerRef: v1alpha1.TaskTriggerRef{ + Type: v1alpha1.TaskTriggerTypeManual, + }, + }, + }, + } + + logger.Infof("Creating Task %s", helmResetTaskName) + if _, err := c.TaskClient.Create(helmResetTask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", helmResetTaskName, err) + } + + logger.Infof("Creating TaskRun %s", helmResetTaskRunName) + if _, err := c.TaskRunClient.Create(helmResetTaskRun); err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", helmResetTaskRunName, err) + } + + logger.Infof("Waiting for TaskRun %s in namespace %s to complete", 
helmResetTaskRunName, namespace) +	if err := WaitForTaskRunState(c, helmResetTaskRunName, func(tr *v1alpha1.TaskRun) (bool, error) { +		c := tr.Status.GetCondition(duckv1alpha1.ConditionSucceeded) +		if c != nil { +			if c.Status == corev1.ConditionTrue { +				return true, nil +			} else if c.Status == corev1.ConditionFalse { +				return true, fmt.Errorf("task run %s failed", helmResetTaskRunName) +			} +		} +		return false, nil +	}, "TaskRunSuccess"); err != nil { +		logger.Infof("TaskRun %s failed to finish: %s", helmResetTaskRunName, err) +	} }