diff --git a/docs/events.md b/docs/events.md index add381754cc..7e0d9e0321d 100644 --- a/docs/events.md +++ b/docs/events.md @@ -27,13 +27,21 @@ No events are emitted for `Conditions` today (https://github.com/tektoncd/pipeli successfully, including post-steps injected by Tekton. - `Failed`: this is triggered if the `TaskRun` is completed, but not successfully. Causes of failure may be: one the steps failed, the `TaskRun` was cancelled or - the `TaskRun` timed out. + the `TaskRun` timed out. `Failed` events are also triggered in case the `TaskRun` + cannot be executed at all because of validation issues. ## PipelineRuns `PipelineRun` events are generated for the following `Reasons`: +- `Started`: this is triggered the first time the `PipelineRun` is picked by the + reconciler from its work queue, so it only happens if web-hook validation was + successful. Note that this event does not imply that a step started executing, + as pipeline, task and bound resource validation must be successful first. +- `Running`: this is triggered when the `PipelineRun` passes validation and + actually starts running. - `Succeeded`: this is triggered once all `Tasks` reachable via the DAG are executed successfully. - `Failed`: this is triggered if the `PipelineRun` is completed, but not successfully. Causes of failure may be: one the `Tasks` failed or the - `PipelineRun` was cancelled. + `PipelineRun` was cancelled or timed out. `Failed` events are also triggered + in case the `PipelineRun` cannot be executed at all because of validation issues. diff --git a/pkg/reconciler/pipelinerun/controller.go b/pkg/reconciler/pipelinerun/controller.go index b8c161856c8..9313de70a64 100644 --- a/pkg/reconciler/pipelinerun/controller.go +++ b/pkg/reconciler/pipelinerun/controller.go @@ -67,6 +67,7 @@ func NewController(namespace string, images pipeline.Images) func(context.Contex KubeClientSet: kubeclientset, PipelineClientSet: pipelineclientset, Logger: logger, + Recorder: controller.GetEventRecorder(ctx), } c := &Reconciler{ diff --git a/pkg/reconciler/pipelinerun/pipelinerun.go b/pkg/reconciler/pipelinerun/pipelinerun.go index 6a7d9fb3202..943cd10a228 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun.go +++ b/pkg/reconciler/pipelinerun/pipelinerun.go @@ -18,7 +18,6 @@ package pipelinerun import ( "context" - "encoding/json" "fmt" "path/filepath" "reflect" @@ -50,7 +49,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" "knative.dev/pkg/apis" "knative.dev/pkg/configmap" @@ -97,10 +95,6 @@ const ( // pipelineRunAgentName defines logging agent name for PipelineRun Controller pipelineRunAgentName = "pipeline-controller" - - // Event reasons - eventReasonFailed = "PipelineRunFailed" - eventReasonSucceeded = "PipelineRunSucceeded" ) type configStore interface { @@ -141,7 +135,7 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { c.Logger.Errorf("invalid resource key: %s", key) - return nil + return controller.NewPermanentError(err) } ctx = c.configStore.ToContext(ctx) @@ -151,192 +145,144 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { if errors.IsNotFound(err) { // The resource no longer exists, in which case we stop processing. 
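As background for the Reasons documented above: both reconcilers derive events from the transition of the run's `Succeeded` condition between the start and the end of a reconcile pass. A minimal, illustrative sketch of that mapping (this is not the actual `pkg/reconciler/events` implementation; the helper name and message handling here are assumptions):

```go
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/record"
	"knative.dev/pkg/apis"
)

// emitForTransition sketches how a before/after pair of Succeeded conditions
// could map to the Started/Running/Succeeded/Failed events described above.
func emitForTransition(r record.EventRecorder, obj runtime.Object, before, after *apis.Condition) {
	if after == nil || (before != nil && before.Status == after.Status) {
		return // no transition: nothing to emit
	}
	switch after.Status {
	case corev1.ConditionUnknown:
		reason := "Started" // first transition into Unknown
		if before != nil {
			reason = "Running" // already Started on an earlier pass
		}
		r.Event(obj, corev1.EventTypeNormal, reason, after.Message)
	case corev1.ConditionTrue:
		r.Event(obj, corev1.EventTypeNormal, "Succeeded", after.Message)
	case corev1.ConditionFalse:
		r.Event(obj, corev1.EventTypeWarning, "Failed", after.Message)
	}
}
```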
c.Logger.Errorf("pipeline run %q in work queue no longer exists", key) - return nil + return controller.NewPermanentError(err) } else if err != nil { return err } // Don't modify the informer's copy. pr := original.DeepCopy() + if !pr.HasStarted() { + // HasStarted is made true by InitializeConditions so that this is only + // executed the first time reconcile happens for a key pr.Status.InitializeConditions() // In case node time was not synchronized, when controller has been scheduled to other nodes. if pr.Status.StartTime.Sub(pr.CreationTimestamp.Time) < 0 { c.Logger.Warnf("PipelineRun %s createTimestamp %s is after the pipelineRun started %s", pr.GetRunKey(), pr.CreationTimestamp, pr.Status.StartTime) pr.Status.StartTime = &pr.CreationTimestamp } - // start goroutine to track pipelinerun timeout only startTime is not set + // start goroutine to track pipelinerun timeout only if startTime is not set go c.timeoutHandler.WaitPipelineRun(pr, pr.Status.StartTime) - } else { - pr.Status.InitializeConditions() + // Emit events. During the first reconcile the status of the PipelineRun may change twice + // from not Started to Started and then to Running, so we need to sent the event here + // and at the end of 'Reconcile' again. + // We also want to send the "Started" event as soon as possible for anyone who may be waiting + // on the event to perform user facing initialisations, such has reset a CI check status + afterCondition := pr.Status.GetCondition(apis.ConditionSucceeded) + events.Emit(c.Recorder, nil, afterCondition, pr) } - // In case of reconcile errors, we store the error in a multierror, attempt - // to update, and return the original error combined with any update error - var merr *multierror.Error + if pr.IsDone() { + var merr *multierror.Error - switch { - case pr.IsDone(): // We may be reading a version of the object that was stored at an older version // and may not have had all of the assumed default specified. 
pr.SetDefaults(contexts.WithUpgradeViaDefaulting(ctx)) - c.updatePipelineResults(ctx, pr) - if err := artifacts.CleanupArtifactStorage(pr, c.KubeClientSet, c.Logger); err != nil { - c.Logger.Errorf("Failed to delete PVC for PipelineRun %s: %v", pr.Name, err) - return err + // Completion time is set, so we don't need the timeout handler anymore + c.timeoutHandler.Release(pr) + + // Get the latest status from the TaskRuns, since we won't get it from "reconcile" + if err := c.updateTaskRunsStatusDirectly(pr); err != nil { + merr = multierror.Append(err, merr) + c.Logger.Errorf("Failed to update TaskRun status for PipelineRun %s: %v", pr.Name, err) } + updateErr := c.updateStatusLabelsAndAnnotations(pr, original) + events.EmitError(c.Recorder, updateErr, pr) + if err := c.cleanupAffinityAssistants(pr); err != nil { + merr = multierror.Append(err, merr) c.Logger.Errorf("Failed to delete StatefulSet for PipelineRun %s: %v", pr.Name, err) - return err } - c.timeoutHandler.Release(pr) - if err := c.updateTaskRunsStatusDirectly(pr); err != nil { - c.Logger.Errorf("Failed to update TaskRun status for PipelineRun %s: %v", pr.Name, err) - return err + c.updatePipelineResults(ctx, pr) + if err := artifacts.CleanupArtifactStorage(pr, c.KubeClientSet, c.Logger); err != nil { + merr = multierror.Append(err, merr) + c.Logger.Errorf("Failed to delete PVC for PipelineRun %s: %v", pr.Name, err) } + go func(metrics *Recorder) { err := metrics.DurationAndCount(pr) if err != nil { c.Logger.Warnf("Failed to log the metrics : %v", err) } }(c.metrics) - case pr.IsCancelled(): - // If the pipelinerun is cancelled, cancel tasks and update status - before := pr.Status.GetCondition(apis.ConditionSucceeded) - merr = multierror.Append(merr, cancelPipelineRun(c.Logger, pr, c.PipelineClientSet)) - after := pr.Status.GetCondition(apis.ConditionSucceeded) - events.Emit(c.Recorder, before, after, pr) - default: - if err := c.tracker.Track(pr.GetTaskRunRef(), pr); err != nil { - c.Logger.Errorf("Failed to create tracker for TaskRuns for PipelineRun %s: %v", pr.Name, err) - c.Recorder.Event(pr, corev1.EventTypeWarning, eventReasonFailed, "Failed to create tracker for TaskRuns for PipelineRun") - return err - } - - // Make sure that the PipelineRun status is in sync with the actual TaskRuns - err = c.updatePipelineRunStatusFromInformer(pr) - if err != nil { - // This should not fail. Return the error so we can re-try later. - c.Logger.Errorf("Error while syncing the pipelinerun status: %v", err.Error()) - return err - } - // Reconcile this copy of the pipelinerun and then write back any status or label - // updates regardless of whether the reconciliation errored out. - if err = c.reconcile(ctx, pr); err != nil { - c.Logger.Errorf("Reconcile error: %v", err.Error()) - merr = multierror.Append(merr, err) - } + return merr.ErrorOrNil() } - var updated bool - if !equality.Semantic.DeepEqual(original.Status, pr.Status) { - if _, err := c.updateStatus(pr); err != nil { - c.Logger.Warn("Failed to update PipelineRun status", zap.Error(err)) - c.Recorder.Event(pr, corev1.EventTypeWarning, eventReasonFailed, "PipelineRun failed to update") - return multierror.Append(merr, err) - } - updated = true - } + // Store the condition before reconcile + before := pr.Status.GetCondition(apis.ConditionSucceeded) - // When we update the status only, we use updateStatus to minimize the chances of - // racing any clients updating other parts of the Run, e.g. the spec or the labels. 
-	// If we need to update the labels or annotations, we need to call Update with these
-	// changes explicitly.
-	if !reflect.DeepEqual(original.ObjectMeta.Labels, pr.ObjectMeta.Labels) || !reflect.DeepEqual(original.ObjectMeta.Annotations, pr.ObjectMeta.Annotations) {
-		if _, err := c.updateLabelsAndAnnotations(pr); err != nil {
-			c.Logger.Warn("Failed to update PipelineRun labels/annotations", zap.Error(err))
-			c.Recorder.Event(pr, corev1.EventTypeWarning, eventReasonFailed, "PipelineRun failed to update labels/annotations")
-			return multierror.Append(merr, err)
-		}
-		updated = true
+	// Make sure that the PipelineRun status is in sync with the actual TaskRuns
+	err = c.updatePipelineRunStatusFromInformer(pr)
+	if err != nil {
+		// This should not fail. Return the error so we can re-try later.
+		c.Logger.Errorf("Error while syncing the pipelinerun status: %v", err.Error())
+		return err
 	}

-	if updated {
-		go func(metrics *Recorder) {
-			err := metrics.RunningPipelineRuns(c.pipelineRunLister)
-			if err != nil {
-				c.Logger.Warnf("Failed to log the metrics : %v", err)
-			}
-		}(c.metrics)
+	// If the pipelinerun is cancelled, cancel tasks and update status
+	if pr.IsCancelled() {
+		err := cancelPipelineRun(c.Logger, pr, c.PipelineClientSet)
+		return c.finishReconcileUpdateEmitEvents(pr, original, before, err)
 	}

-	return merr.ErrorOrNil()
-}
-
-func (c *Reconciler) updatePipelineResults(ctx context.Context, pr *v1beta1.PipelineRun) {
-
-	// TODO: Use factory func instead of hard-coding this once OCI images are supported.
-	resolver := &resources.LocalPipelineRefResolver{
-		Namespace:    pr.Namespace,
-		Tektonclient: c.PipelineClientSet,
+	// PipelineRun is not done, let's track matching TaskRuns
+	if err := c.tracker.Track(pr.GetTaskRunRef(), pr); err != nil {
+		c.Logger.Errorf("Failed to create tracker for TaskRuns for PipelineRun %s: %v", pr.Name, err)
+		events.EmitError(c.Recorder, fmt.Errorf("Failed to create tracker for TaskRuns for PipelineRun %s", pr.Name), pr)
+		return err
 	}
-	pipelineMeta, pipelineSpec, err := resources.GetPipelineData(ctx, pr, resolver.GetPipeline)
+
+	// prepare fetches all required resources, runs validations and conversions.
+	// Errors that come out of prepare are permanent ones, so in case of error we
+	// update the status, emit events and return.
+	pipelineState, dag, pipelineSpec, err := c.prepare(ctx, pr)
 	if err != nil {
-		if ce, ok := err.(*v1beta1.CannotConvertError); ok {
-			pr.Status.MarkResourceNotConvertible(ce)
-			return
-		}
-		c.Logger.Errorf("Failed to determine Pipeline spec to use for pipelinerun %s: %v", pr.Name, err)
-		pr.Status.MarkFailed(ReasonCouldntGetPipeline,
-			"Error retrieving pipeline for pipelinerun %s/%s: %s",
-			pr.Namespace, pr.Name, err)
-		return
+		// In case of error, we log a warning, emit an event, sync the status back
+		// and finally return the error.
+		// If the error is a permanent one, this key won't be re-queued, which is
+		// normally the case for errors raised in the prepare phase.
+		c.Logger.Warnf("PipelineRun prepare error: %v", err.Error())
+		return c.finishReconcileUpdateEmitEvents(pr, original, nil, err)
 	}
-	if pipelineSpec.Results != nil && len(pipelineSpec.Results) != 0 {
-		providedResources, err := resources.GetResourcesFromBindings(
-			pr, c.resourceLister.PipelineResources(pr.Namespace).Get)
-		if err != nil {
-			// This Run has failed, so we need to mark it as failed and stop reconciling it
-			pr.Status.MarkFailed(ReasonCouldntGetResource,
-				"PipelineRun %s/%s can't be Run; it tries to bind Resources that don't exist: %s",
-				pipelineMeta.Namespace, pr.Name, err)
-			return
-		}
-		pipelineState, err := resources.ResolvePipelineRun(ctx,
-			*pr,
-			func(name string) (v1beta1.TaskInterface, error) {
-				return c.taskLister.Tasks(pr.Namespace).Get(name)
-			},
-			func(name string) (*v1beta1.TaskRun, error) {
-				return c.taskRunLister.TaskRuns(pr.Namespace).Get(name)
-			},
-			func(name string) (v1beta1.TaskInterface, error) {
-				return c.clusterTaskLister.Get(name)
-			},
-			func(name string) (*v1alpha1.Condition, error) {
-				return c.conditionLister.Conditions(pr.Namespace).Get(name)
-			},
-			pipelineSpec.Tasks, providedResources,
-		)
-		if err != nil {
-			// This Run has failed, so we need to mark it as failed and stop reconciling it
-			switch err := err.(type) {
-			case *resources.TaskNotFoundError:
-				pr.Status.MarkFailed(ReasonCouldntGetTask,
-					"Pipeline %s/%s can't be Run; it contains Tasks that don't exist: %s",
-					pipelineMeta.Namespace, pipelineMeta.Name, err)
-			case *resources.ConditionNotFoundError:
-				pr.Status.MarkFailed(ReasonCouldntGetCondition,
-					"PipelineRun %s/%s can't be Run; it contains Conditions that don't exist: %s",
-					pipelineMeta.Namespace, pr.Name, err)
-			default:
-				pr.Status.MarkFailed(ReasonFailedValidation,
-					"PipelineRun %s/%s can't be Run; couldn't resolve all references: %s",
-					pipelineMeta.Namespace, pr.Name, err)
-			}
-		}
-		resolvedResultRefs := resources.ResolvePipelineResultRefs(pipelineState, pipelineSpec.Results)
-		pr.Status.PipelineResults = getPipelineRunResults(pipelineSpec, resolvedResultRefs)
+
+	// Now that we updated the state, if the pr is marked as done and all taskruns are too,
+	// we can now release the timeout handler and stop here
+	if pipelineState.IsDone() && pr.IsDone() {
+		c.timeoutHandler.Release(pr)
+		return c.finishReconcileUpdateEmitEvents(pr, original, before, err)
 	}
+
+	// Reconcile this copy of the pipelinerun and then write back any status or label
+	// updates regardless of whether the reconciliation errored out.
+	if err = c.reconcile(ctx, pr, pipelineState, dag, pipelineSpec); err != nil {
+		c.Logger.Warnf("Reconcile error: %v", err.Error())
+	}
+
+	// After reconcile, sync back any updates we may have
+	return c.finishReconcileUpdateEmitEvents(pr, original, before, err)
 }

-func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) error {
+
+// prepare fetches resources the pipelinerun depends on, and runs validation and conversion.
+//
+// prepare may report errors back to Reconcile; it updates the pipelinerun status in case of
+// error, but it does not sync updates back to etcd and it does not emit events.
+//
+// All errors returned by prepare are always handled by `Reconcile`, so they don't cause
+// the key to be re-queued directly.
+// Once we start using `PermanentErrors`, code in `prepare` will be able to control
+// which errors are handled by `Reconcile` and which are not.
+// See https://github.com/tektoncd/pipeline/issues/2474 for details.
+// `prepare` returns spec and resources. In future we might store
+// them in the PipelineRun.Status so we don't need to re-run `prepare` at every
+// reconcile (see https://github.com/tektoncd/pipeline/issues/2473).
+func (c *Reconciler) prepare(ctx context.Context, pr *v1beta1.PipelineRun) (resources.PipelineRunState, *dag.Graph, *v1beta1.PipelineSpec, error) {
 	// We may be reading a version of the object that was stored at an older version
 	// and may not have had all of the assumed default specified.
 	pr.SetDefaults(contexts.WithUpgradeViaDefaulting(ctx))

 	// TODO: Use factory func instead of hard-coding this once OCI images are supported.
+	// Reference issue: https://github.com/tektoncd/pipeline/issues/1839
 	resolver := &resources.LocalPipelineRefResolver{
 		Namespace:    pr.Namespace,
 		Tektonclient: c.PipelineClientSet,
@@ -345,13 +291,12 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 	if err != nil {
 		if ce, ok := err.(*v1beta1.CannotConvertError); ok {
 			pr.Status.MarkResourceNotConvertible(ce)
-			return nil
+			return nil, nil, nil, err
 		}
-		c.Logger.Errorf("Failed to determine Pipeline spec to use for pipelinerun %s: %v", pr.Name, err)
 		pr.Status.MarkFailed(ReasonCouldntGetPipeline,
 			"Error retrieving pipeline for pipelinerun %s/%s: %s",
 			pr.Namespace, pr.Name, err)
-		return nil
+		return nil, nil, nil, controller.NewPermanentError(err)
 	}

 	// Store the fetched PipelineSpec on the PipelineRun for auditing
@@ -382,7 +327,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 		pr.Status.MarkFailed(ReasonInvalidGraph,
 			"PipelineRun %s/%s's Pipeline DAG is invalid: %s",
 			pr.Namespace, pr.Name, err)
-		return nil
+		return nil, nil, nil, controller.NewPermanentError(err)
 	}

 	if err := pipelineSpec.Validate(ctx); err != nil {
@@ -390,7 +335,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 		pr.Status.MarkFailed(ReasonFailedValidation,
 			"Pipeline %s/%s can't be Run; it has an invalid spec: %s",
 			pipelineMeta.Namespace, pipelineMeta.Name, err)
-		return nil
+		return nil, nil, nil, controller.NewPermanentError(err)
 	}

 	if err := resources.ValidateResourceBindings(pipelineSpec, pr); err != nil {
@@ -398,15 +343,16 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 		pr.Status.MarkFailed(ReasonInvalidBindings,
 			"PipelineRun %s/%s doesn't bind Pipeline %s/%s's PipelineResources correctly: %s",
 			pr.Namespace, pr.Name, pr.Namespace, pipelineMeta.Name, err)
-		return nil
+		return nil, nil, nil, controller.NewPermanentError(err)
 	}
+
 	providedResources, err := resources.GetResourcesFromBindings(pr, c.resourceLister.PipelineResources(pr.Namespace).Get)
 	if err != nil {
 		// This Run has failed, so we need to mark it as failed and stop reconciling it
 		pr.Status.MarkFailed(ReasonCouldntGetResource,
 			"PipelineRun %s/%s can't be Run; it tries to bind Resources that don't exist: %s",
 			pipelineMeta.Namespace, pr.Name, err)
-		return nil
+		return nil, nil, nil, controller.NewPermanentError(err)
 	}

 	// Ensure that the parameters from the PipelineRun are overriding Pipeline parameters with the same type.
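Each validation failure in `prepare` follows the same two-step pattern: mark the run `Failed` on its status, then wrap the error with `controller.NewPermanentError` so the knative/pkg work queue drops the key instead of retrying it with backoff. A condensed illustration of the pattern, and how a caller can classify the result (both calls are from knative.dev/pkg/controller):

```go
// A validation failure can never be fixed by requeueing, so the error
// is marked permanent after the status has been updated.
if err := pipelineSpec.Validate(ctx); err != nil {
	pr.Status.MarkFailed(ReasonFailedValidation,
		"Pipeline %s/%s can't be Run; it has an invalid spec: %s",
		pipelineMeta.Namespace, pipelineMeta.Name, err)
	return nil, nil, nil, controller.NewPermanentError(err)
}
// Callers (and the tests below) classify errors with:
//   controller.IsPermanentError(err) // true => do not requeue
```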
@@ -417,7 +363,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 		pr.Status.MarkFailed(ReasonParameterTypeMismatch,
 			"PipelineRun %s/%s parameters have mismatching types with Pipeline %s/%s's parameters: %s",
 			pr.Namespace, pr.Name, pr.Namespace, pipelineMeta.Name, err)
-		return nil
+		return nil, nil, nil, controller.NewPermanentError(err)
 	}

 	// Ensure that the workspaces expected by the Pipeline are provided by the PipelineRun.
@@ -425,7 +371,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 		pr.Status.MarkFailed(ReasonInvalidWorkspaceBinding,
 			"PipelineRun %s/%s doesn't bind Pipeline %s/%s's Workspaces correctly: %s",
 			pr.Namespace, pr.Name, pr.Namespace, pipelineMeta.Name, err)
-		return nil
+		return nil, nil, nil, controller.NewPermanentError(err)
 	}

 	// Ensure that the ServiceAccountNames defined correct.
@@ -433,12 +379,11 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 		pr.Status.MarkFailed(ReasonInvalidServiceAccountMapping,
 			"PipelineRun %s/%s doesn't define ServiceAccountNames correctly: %s",
 			pr.Namespace, pr.Name, err)
-		return nil
+		return nil, nil, nil, controller.NewPermanentError(err)
 	}

 	// Apply parameter substitution from the PipelineRun
 	pipelineSpec = resources.ApplyParameters(pipelineSpec, pr)
-
 	pipelineState, err := resources.ResolvePipelineRun(ctx,
 		*pr,
 		func(name string) (v1beta1.TaskInterface, error) {
@@ -458,6 +403,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 	if err != nil {
 		// This Run has failed, so we need to mark it as failed and stop reconciling it
 		switch err := err.(type) {
 		case *resources.TaskNotFoundError:
 			pr.Status.MarkFailed(ReasonCouldntGetTask,
@@ -472,28 +418,161 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 				"PipelineRun %s/%s can't be Run; couldn't resolve all references: %s",
 				pipelineMeta.Namespace, pr.Name, err)
 		}
-		return nil
-	}
-
-	if pipelineState.IsDone() && pr.IsDone() {
-		c.timeoutHandler.Release(pr)
-		c.Recorder.Event(pr, corev1.EventTypeNormal, eventReasonSucceeded, "PipelineRun completed successfully.")
-		return nil
+		return nil, nil, nil, controller.NewPermanentError(err)
 	}

 	for _, rprt := range pipelineState {
 		err := taskrun.ValidateResolvedTaskResources(rprt.PipelineTask.Params, rprt.ResolvedTaskResources)
 		if err != nil {
-			c.Logger.Errorf("Failed to validate pipelinerun %q with error %v", pr.Name, err)
 			pr.Status.MarkFailed(ReasonFailedValidation, err.Error())
-			return nil
+			return nil, nil, nil, controller.NewPermanentError(err)
+		}
+	}
+
+	return pipelineState, d, pipelineSpec, nil
+}
+
+// Push changes (if any) to the PipelineRun status, labels and annotations to
+// the PipelineRun definition in etcd
+func (c *Reconciler) updateStatusLabelsAndAnnotations(pr, original *v1beta1.PipelineRun) error {
+
+	var updated bool
+	if !equality.Semantic.DeepEqual(original.Status, pr.Status) {
+		if _, err := c.updateStatus(pr); err != nil {
+			c.Logger.Warn("Failed to update PipelineRun status", zap.Error(err))
+			return fmt.Errorf("failed to update PipelineRun status: %s", err.Error())
 		}
+		updated = true
+	}
+
+	// When we update the status only, we use updateStatus to minimize the chances of
+	// racing any clients updating other parts of the Run, e.g. the spec or the labels.
+	// If we need to update the labels or annotations, we need to call Update with these
+	// changes explicitly.
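The comment above is the key constraint on `updateStatusLabelsAndAnnotations`: status changes go through the status subresource, while label/annotation changes need a full `Update`. A minimal sketch of the status half, mirroring the `updateStatus` helper shown later in this diff (not new behaviour, just the pattern in isolation):

```go
// Fetch the latest stored object, copy only the in-memory status onto it,
// and write through the status subresource so concurrent writers of
// spec/labels are not clobbered.
newPr, err := c.PipelineClientSet.TektonV1beta1().PipelineRuns(pr.Namespace).Get(pr.Name, metav1.GetOptions{})
if err != nil {
	return nil, fmt.Errorf("error getting PipelineRun %s when updating status: %w", pr.Name, err)
}
if !reflect.DeepEqual(pr.Status, newPr.Status) {
	newPr.Status = pr.Status
	return c.PipelineClientSet.TektonV1beta1().PipelineRuns(pr.Namespace).UpdateStatus(newPr)
}
return newPr, nil
```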
+ if !reflect.DeepEqual(original.ObjectMeta.Labels, pr.ObjectMeta.Labels) || !reflect.DeepEqual(original.ObjectMeta.Annotations, pr.ObjectMeta.Annotations) { + if _, err := c.updateLabelsAndAnnotations(pr); err != nil { + c.Logger.Warn("Failed to update PipelineRun labels/annotations", zap.Error(err)) + return fmt.Errorf("failed to update PipelineRun labels/annotations: %s", err.Error()) + } + updated = true + } + + if updated { + go func(metrics *Recorder) { + err := metrics.RunningPipelineRuns(c.pipelineRunLister) + if err != nil { + c.Logger.Warnf("Failed to log the metrics : %v", err) + } + }(c.metrics) } + return nil +} + +func (c *Reconciler) finishReconcileUpdateEmitEvents(pr, original *v1beta1.PipelineRun, beforeCondition *apis.Condition, previousError error) error { + afterCondition := pr.Status.GetCondition(apis.ConditionSucceeded) + events.Emit(c.Recorder, beforeCondition, afterCondition, pr) + err := c.updateStatusLabelsAndAnnotations(pr, original) + events.EmitError(c.Recorder, err, pr) + // If there was an error on update, we want to re-queue to re-try and update the status + // otherwise we return the original error, so we preserve the type (permanent or not) + if err != nil { + return multierror.Append(previousError, err).ErrorOrNil() + } + return previousError +} + +func (c *Reconciler) updatePipelineResults(ctx context.Context, pr *v1beta1.PipelineRun) { + + // TODO: Use factory func instead of hard-coding this once OCI images are supported. + resolver := &resources.LocalPipelineRefResolver{ + Namespace: pr.Namespace, + Tektonclient: c.PipelineClientSet, + } + pipelineMeta, pipelineSpec, err := resources.GetPipelineData(ctx, pr, resolver.GetPipeline) + if err != nil { + if ce, ok := err.(*v1beta1.CannotConvertError); ok { + pr.Status.MarkResourceNotConvertible(ce) + return + } + c.Logger.Errorf("Failed to determine Pipeline spec to use for pipelinerun %s: %v", pr.Name, err) + pr.Status.MarkFailed(ReasonCouldntGetPipeline, + "Error retrieving pipeline for pipelinerun %s/%s: %s", + pr.Namespace, pr.Name, err) + return + } + if pipelineSpec.Results != nil && len(pipelineSpec.Results) != 0 { + providedResources, err := resources.GetResourcesFromBindings( + pr, c.resourceLister.PipelineResources(pr.Namespace).Get) + if err != nil { + // This Run has failed, so we need to mark it as failed and stop reconciling it + pr.Status.MarkFailed(ReasonCouldntGetResource, + "PipelineRun %s/%s can't be Run; it tries to bind Resources that don't exist: %s", + pipelineMeta.Namespace, pr.Name, err) + return + } + pipelineState, err := resources.ResolvePipelineRun(ctx, + *pr, + func(name string) (v1beta1.TaskInterface, error) { + return c.taskLister.Tasks(pr.Namespace).Get(name) + }, + func(name string) (*v1beta1.TaskRun, error) { + return c.taskRunLister.TaskRuns(pr.Namespace).Get(name) + }, + func(name string) (v1beta1.TaskInterface, error) { + return c.clusterTaskLister.Get(name) + }, + func(name string) (*v1alpha1.Condition, error) { + return c.conditionLister.Conditions(pr.Namespace).Get(name) + }, + pipelineSpec.Tasks, providedResources, + ) + if err != nil { + // This Run has failed, so we need to mark it as failed and stop reconciling it + switch err := err.(type) { + case *resources.TaskNotFoundError: + pr.Status.MarkFailed(ReasonCouldntGetTask, + "Pipeline %s/%s can't be Run; it contains Tasks that don't exist: %s", + pipelineMeta.Namespace, pipelineMeta.Name, err) + case *resources.ConditionNotFoundError: + pr.Status.MarkFailed(ReasonCouldntGetCondition, + "PipelineRun %s/%s can't 
be Run; it contains Conditions that don't exist: %s",
+					pipelineMeta.Namespace, pr.Name, err)
+			default:
+				pr.Status.MarkFailed(ReasonFailedValidation,
+					"PipelineRun %s/%s can't be Run; couldn't resolve all references: %s",
+					pipelineMeta.Namespace, pr.Name, err)
+			}
+		}
+		resolvedResultRefs := resources.ResolvePipelineResultRefs(pipelineState, pipelineSpec.Results)
+		pr.Status.PipelineResults = getPipelineRunResults(pipelineSpec, resolvedResultRefs)
+	}
+}
+
+// reconcile is executed once the PipelineRun and its dependencies are valid and compatible.
+// Results, since they are dynamic, may still cause validation issues.
+//
+// reconcile takes the following inputs:
+// - ctx: the context
+// - pr: the resource we are reconciling
+// - pipelineState: the up-to-date state of all TaskRuns and Conditions executed or running in the PipelineRun
+// - prDag: the DAG calculated from the pipeline spec
+// - pipelineSpec: the pipeline spec
+//
+// The structure of reconcile is:
+// - Create any PVC that may be needed - when templates are used (only before the first task)
+// - Find next Tasks from the DAG
+// - Resolve results that may be inputs of Tasks to be started
+// - Initialize (only when needed) the artifact storage
+// - Loop through the tasks obtained from the DAG and run each of them
+// - Update the overall PipelineRun status from pipelineState
+// - Update the TaskRun status in PipelineRun status from the TaskRuns
+func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun, pipelineState resources.PipelineRunState, prDag *dag.Graph, pipelineSpec *v1beta1.PipelineSpec) error {
+	if pipelineState.IsBeforeFirstTaskRun() {
 		if pr.HasVolumeClaimTemplate() {
 			// create workspace PVC from template
-			if err = c.pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(pr.Spec.Workspaces, pr.GetOwnerReference(), pr.Namespace); err != nil {
+			if err := c.pvcHandler.CreatePersistentVolumeClaimsForWorkspaces(pr.Spec.Workspaces, pr.GetOwnerReference(), pr.Namespace); err != nil {
 				c.Logger.Errorf("Failed to create PVC for PipelineRun %s: %v", pr.Name, err)
 				pr.Status.MarkFailed(volumeclaim.ReasonCouldntCreateWorkspacePVC,
 					"Failed to create PVC for PipelineRun %s/%s Workspaces correctly: %s",
@@ -504,7 +583,7 @@ if !c.isAffinityAssistantDisabled() {

 		if !c.isAffinityAssistantDisabled() {
 			// create Affinity Assistant (StatefulSet) so that taskRun pods that share workspace PVC achieve Node Affinity
-			if err = c.createAffinityAssistants(pr.Spec.Workspaces, pr, pr.Namespace); err != nil {
+			if err := c.createAffinityAssistants(pr.Spec.Workspaces, pr, pr.Namespace); err != nil {
 				c.Logger.Errorf("Failed to create affinity assistant StatefulSet for PipelineRun %s: %v", pr.Name, err)
 				pr.Status.MarkFailed(ReasonCouldntCreateAffinityAssistantStatefulSet,
 					"Failed to create StatefulSet for PipelineRun %s/%s correctly: %s",
@@ -514,7 +593,7 @@ }
 		}
 	}

-	candidateTasks, err := dag.GetSchedulable(d, pipelineState.SuccessfulPipelineTaskNames()...)
+	candidateTasks, err := dag.GetSchedulable(prDag, pipelineState.SuccessfulPipelineTaskNames()...)
 	if err != nil {
 		c.Logger.Errorf("Error getting potential next tasks for valid pipelinerun %s: %v", pr.Name, err)
 	}
@@ -522,9 +601,10 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 	nextRprts := pipelineState.GetNextTasks(candidateTasks)
 	resolvedResultRefs, err := resources.ResolveResultRefs(pipelineState, nextRprts)
 	if err != nil {
-		c.Logger.Infof("Failed to resolve all task params for %q with error %v", pr.Name, err)
+		// We can still encounter validation errors during reconcile, because results are
+		// dynamic and we cannot check them statically until we decide which tasks to run next
 		pr.Status.MarkFailed(ReasonFailedValidation, err.Error())
-		return nil
+		return controller.NewPermanentError(err)
 	}
 	resources.ApplyTaskResults(nextRprts, resolvedResultRefs)
@@ -535,6 +615,8 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 		return err
 	}

+	// Loop through the Tasks to be started, and attempt to run them or run their conditions.
+	// Stop at the first failure.
 	for _, rprt := range nextRprts {
 		if rprt == nil {
 			continue
@@ -555,8 +637,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 			}
 		}
 	}
-	before := pr.Status.GetCondition(apis.ConditionSucceeded)
-	after := resources.GetPipelineConditionStatus(pr, pipelineState, c.Logger, d)
+	after := resources.GetPipelineConditionStatus(pr, pipelineState, c.Logger, prDag)
 	switch after.Status {
 	case corev1.ConditionTrue:
 		pr.Status.MarkSucceeded(after.Reason, after.Message)
@@ -565,8 +646,6 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err
 	case corev1.ConditionUnknown:
 		pr.Status.MarkRunning(after.Reason, after.Message)
 	}
-	events.Emit(c.Recorder, before, after, pr)
-
 	pr.Status.TaskRuns = getTaskRunsStatus(pr, pipelineState)
 	c.Logger.Infof("PipelineRun %s status is being set to %s", pr.Name, after)
 	return nil
@@ -834,7 +913,6 @@ func (c *Reconciler) updateStatus(pr *v1beta1.PipelineRun) (*v1beta1.PipelineRun
 		return nil, fmt.Errorf("error getting PipelineRun %s when updating status: %w", pr.Name, err)
 	}
 	if !reflect.DeepEqual(pr.Status, newPr.Status) {
-		newPr = newPr.DeepCopy() // Don't modify the informer's copy
 		newPr.Status = pr.Status
 		return c.PipelineClientSet.TektonV1beta1().PipelineRuns(pr.Namespace).UpdateStatus(newPr)
 	}
@@ -847,17 +925,9 @@ func (c *Reconciler) updateLabelsAndAnnotations(pr *v1beta1.PipelineRun) (*v1bet
 		return nil, fmt.Errorf("error getting PipelineRun %s when updating labels/annotations: %w", pr.Name, err)
 	}
 	if !reflect.DeepEqual(pr.ObjectMeta.Labels, newPr.ObjectMeta.Labels) || !reflect.DeepEqual(pr.ObjectMeta.Annotations, newPr.ObjectMeta.Annotations) {
-		mergePatch := map[string]interface{}{
-			"metadata": map[string]interface{}{
-				"labels":      pr.ObjectMeta.Labels,
-				"annotations": pr.ObjectMeta.Annotations,
-			},
-		}
-		patch, err := json.Marshal(mergePatch)
-		if err != nil {
-			return nil, err
-		}
-		return c.PipelineClientSet.TektonV1beta1().PipelineRuns(pr.Namespace).Patch(pr.Name, types.MergePatchType, patch)
+		newPr.ObjectMeta.Labels = pr.ObjectMeta.Labels
+		newPr.ObjectMeta.Annotations = pr.ObjectMeta.Annotations
+		return c.PipelineClientSet.TektonV1beta1().PipelineRuns(pr.Namespace).Update(newPr)
 	}
 	return newPr, nil
 }
diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go
index b80aefe1d47..d1981b8af66 100644
--- a/pkg/reconciler/pipelinerun/pipelinerun_test.go
+++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go
@@ -47,9 +47,11 @@
import ( "k8s.io/apimachinery/pkg/runtime" k8stesting "k8s.io/client-go/testing" ktesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/record" "knative.dev/pkg/apis" duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" ) var ( @@ -94,7 +96,33 @@ func conditionCheckFromTaskRun(tr *v1beta1.TaskRun) *v1beta1.ConditionCheck { return &cc } +func checkEvents(fr *record.FakeRecorder, testName string, wantEvents []string) error { + timer := time.NewTimer(1 * time.Second) + foundEvents := []string{} + for ii := 0; ii < len(wantEvents)+1; ii++ { + select { + case event := <-fr.Events: + foundEvents = append(foundEvents, event) + if ii > len(wantEvents)-1 { + return fmt.Errorf("Received event \"%s\" for %s but not more expected", event, testName) + } + wantEvent := wantEvents[ii] + if !(strings.HasPrefix(event, wantEvent)) { + return fmt.Errorf("Expected event \"%s\" but got \"%s\" instead for %s", wantEvent, event, testName) + } + case <-timer.C: + if len(foundEvents) == len(wantEvents) { + return nil + } + return fmt.Errorf("Received %d events for %s but %d expected. Found events: %#v", len(foundEvents), testName, len(wantEvents), foundEvents) + } + } + return nil +} + func TestReconcile(t *testing.T) { + // TestReconcile runs "Reconcile" on a PipelineRun with one Task that has not been started yet. + // It verifies that the TaskRun is created, it checks the resulting API actions, status and events. names.TestingSeed() prs := []*v1beta1.PipelineRun{ @@ -208,8 +236,10 @@ func TestReconcile(t *testing.T) { defer cancel() c := testAssets.Controller clients := testAssets.Clients + reconciler := c.Reconciler.(*Reconciler) + fr := reconciler.Recorder.(*record.FakeRecorder) - if err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-success"); err != nil { + if err := reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-success"); err != nil { t.Fatalf("Error reconciling: %s", err) } @@ -293,9 +323,20 @@ func TestReconcile(t *testing.T) { // A PVC should have been created to deal with output -> input linking ensurePVCCreated(t, clients, expectedTaskRun.GetPipelineRunPVCName(), "foo") + + wantEvents := []string{ + "Normal Started", + "Normal Running Tasks Completed: 0", + } + err = checkEvents(fr, "test-pipeline-run-success", wantEvents) + if !(err == nil) { + t.Errorf(err.Error()) + } } func TestReconcile_PipelineSpecTaskSpec(t *testing.T) { + // TestReconcile_PipelineSpecTaskSpec runs "Reconcile" on a PipelineRun that has an embedded PipelineSpec that has an embedded TaskSpec. + // It verifies that a TaskRun is created, it checks the resulting API actions, status and events. 
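The `checkEvents` helper above drains a `record.FakeRecorder` with a timeout. For reference, a self-contained example of the FakeRecorder API it depends on (the buffer size and object here are arbitrary):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	// NewFakeRecorder buffers each event as a formatted string on the
	// Events channel, which is what checkEvents receives from.
	fr := record.NewFakeRecorder(10)
	fr.Event(&corev1.Pod{}, corev1.EventTypeNormal, "Started", "")
	fmt.Println(<-fr.Events) // prints: Normal Started
}
```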
names.TestingSeed() prs := []*v1beta1.PipelineRun{ @@ -329,8 +370,10 @@ func TestReconcile_PipelineSpecTaskSpec(t *testing.T) { defer cancel() c := testAssets.Controller clients := testAssets.Clients + reconciler := c.Reconciler.(*Reconciler) + fr := reconciler.Recorder.(*record.FakeRecorder) - if err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-success"); err != nil { + if err := reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-success"); err != nil { t.Fatalf("Error reconciling: %s", err) } @@ -377,9 +420,20 @@ func TestReconcile_PipelineSpecTaskSpec(t *testing.T) { if _, exists := reconciledRun.Status.TaskRuns["test-pipeline-run-success-unit-test-task-spec-9l9zj"]; !exists { t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.TaskRuns) } + + wantEvents := []string{ + "Normal Started", + "Normal Running Tasks Completed: 0", + } + err = checkEvents(fr, "test-pipeline-run-success", wantEvents) + if !(err == nil) { + t.Errorf(err.Error()) + } } func TestReconcile_InvalidPipelineRuns(t *testing.T) { + // TestReconcile_InvalidPipelineRuns runs "Reconcile" on several PipelineRuns that are invalid in different ways. + // It verifies that reconcile fails, how it fails and which events are triggered. ts := []*v1beta1.Task{ tb.Task("a-task-that-exists", tb.TaskNamespace("foo")), tb.Task("a-task-that-needs-params", tb.TaskSpec( @@ -443,52 +497,109 @@ func TestReconcile_InvalidPipelineRuns(t *testing.T) { pipelineRun *v1beta1.PipelineRun reason string hasNoDefaultLabels bool + permanentError bool + wantEvents []string }{ { name: "invalid-pipeline-shd-be-stop-reconciling", pipelineRun: prs[0], reason: ReasonCouldntGetPipeline, hasNoDefaultLabels: true, + permanentError: true, + wantEvents: []string{ + "Normal Started", + "Warning Failed Failed to determine Pipeline spec to use", + }, }, { - name: "invalid-pipeline-run-missing-tasks-shd-stop-reconciling", - pipelineRun: prs[1], - reason: ReasonCouldntGetTask, + name: "invalid-pipeline-run-missing-tasks-shd-stop-reconciling", + pipelineRun: prs[1], + reason: ReasonCouldntGetTask, + permanentError: true, + wantEvents: []string{ + "Normal Started", + "Warning Failed Pipeline foo/pipeline-missing-tasks can't be Run", + }, }, { - name: "invalid-pipeline-run-params-dont-exist-shd-stop-reconciling", - pipelineRun: prs[2], - reason: ReasonFailedValidation, + name: "invalid-pipeline-run-params-dont-exist-shd-stop-reconciling", + pipelineRun: prs[2], + reason: ReasonFailedValidation, + permanentError: true, + wantEvents: []string{ + "Normal Started", + "Warning Failed Failed to validate pipelinerun \"pipeline-params-dont-exist\"", + }, }, { - name: "invalid-pipeline-run-resources-not-bound-shd-stop-reconciling", - pipelineRun: prs[3], - reason: ReasonInvalidBindings, + name: "invalid-pipeline-run-resources-not-bound-shd-stop-reconciling", + pipelineRun: prs[3], + reason: ReasonInvalidBindings, + permanentError: true, + wantEvents: []string{ + "Normal Started", + "Warning Failed PipelineRun foo/pipeline-resources-not-bound doesn't bind Pipeline", + }, }, { - name: "invalid-pipeline-run-missing-resource-shd-stop-reconciling", - pipelineRun: prs[4], - reason: ReasonCouldntGetResource, + name: "invalid-pipeline-run-missing-resource-shd-stop-reconciling", + pipelineRun: prs[4], + reason: ReasonCouldntGetResource, + permanentError: true, + wantEvents: []string{ + "Normal Started", + "Warning Failed PipelineRun foo/pipeline-resources-dont-exist can't be Run; it tries to bind 
Resources", + }, }, { - name: "invalid-pipeline-missing-declared-resource-shd-stop-reconciling", - pipelineRun: prs[5], - reason: ReasonFailedValidation, + name: "invalid-pipeline-missing-declared-resource-shd-stop-reconciling", + pipelineRun: prs[5], + reason: ReasonFailedValidation, + permanentError: true, + wantEvents: []string{ + "Normal Started", + "Warning Failed Pipeline foo/a-pipeline-that-should-be-caught-by-admission-control can't be Run; it has an invalid spec", + }, }, { - name: "invalid-pipeline-mismatching-parameter-types", - pipelineRun: prs[6], - reason: ReasonParameterTypeMismatch, + name: "invalid-pipeline-mismatching-parameter-types", + pipelineRun: prs[6], + reason: ReasonParameterTypeMismatch, + permanentError: true, + wantEvents: []string{ + "Normal Started", + "Warning Failed PipelineRun foo/pipeline-mismatching-param-type parameters have mismatching types", + }, }, { - name: "invalid-pipeline-missing-conditions-shd-stop-reconciling", - pipelineRun: prs[7], - reason: ReasonCouldntGetCondition, + name: "invalid-pipeline-missing-conditions-shd-stop-reconciling", + pipelineRun: prs[7], + reason: ReasonCouldntGetCondition, + permanentError: true, + wantEvents: []string{ + "Normal Started", + "Warning Failed PipelineRun foo/pipeline-conditions-missing can't be Run; it contains Conditions", + }, }, { - name: "invalid-embedded-pipeline-resources-bot-bound-shd-stop-reconciling", - pipelineRun: prs[8], - reason: ReasonInvalidBindings, + name: "invalid-embedded-pipeline-resources-bot-bound-shd-stop-reconciling", + pipelineRun: prs[8], + reason: ReasonInvalidBindings, + permanentError: true, + wantEvents: []string{ + "Normal Started", + "Warning Failed PipelineRun foo/embedded-pipeline-resources-not-bound doesn't bind Pipeline", + }, }, { - name: "invalid-embedded-pipeline-bad-name-shd-stop-reconciling", - pipelineRun: prs[9], - reason: ReasonFailedValidation, + name: "invalid-embedded-pipeline-bad-name-shd-stop-reconciling", + pipelineRun: prs[9], + reason: ReasonFailedValidation, + permanentError: true, + wantEvents: []string{ + "Normal Started", + "Warning Failed Pipeline foo/embedded-pipeline-invalid can't be Run; it has an invalid spec: invalid value \"bad-t@$k\"", + }, }, { - name: "invalid-embedded-pipeline-mismatching-parameter-types", - pipelineRun: prs[10], - reason: ReasonParameterTypeMismatch, + name: "invalid-embedded-pipeline-mismatching-parameter-types", + pipelineRun: prs[10], + reason: ReasonParameterTypeMismatch, + permanentError: true, + wantEvents: []string{ + "Normal Started", + "Warning Failed PipelineRun foo/embedded-pipeline-mismatching-param-type parameters have mismatching types", + }, }, } @@ -497,9 +608,16 @@ func TestReconcile_InvalidPipelineRuns(t *testing.T) { testAssets, cancel := getPipelineRunController(t, d) defer cancel() c := testAssets.Controller + reconciler := c.Reconciler.(*Reconciler) + fr := reconciler.Recorder.(*record.FakeRecorder) + + reconcileError := reconciler.Reconcile(context.Background(), getRunName(tc.pipelineRun)) + if reconcileError == nil { + t.Fatalf("Expected an error to be returned by Reconcile, got nil instead") + } - if err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.pipelineRun)); err != nil { - t.Fatalf("Error reconciling: %s", err) + if controller.IsPermanentError(reconcileError) != tc.permanentError { + t.Fatalf("Expected the error to be permanent: %v but got permanent: %v", tc.permanentError, controller.IsPermanentError(reconcileError)) } // When a PipelineRun is invalid and can't run, we don't 
want to return an error because // an error will tell the Reconciler to keep trying to reconcile; instead we want to stop @@ -542,25 +660,39 @@ func TestReconcile_InvalidPipelineRuns(t *testing.T) { } } } + + // Check generated events match what's expected + err := checkEvents(fr, tc.pipelineRun.Name, tc.wantEvents) + if !(err == nil) { + t.Errorf(err.Error()) + } }) } } func TestReconcile_InvalidPipelineRunNames(t *testing.T) { + // TestReconcile_InvalidPipelineRunNames runs "Reconcile" on several PipelineRuns that have invalid names. + // It verifies that reconcile fails, how it fails and which events are triggered. invalidNames := []string{ "foo/test-pipeline-run-doesnot-exist", "test/invalidformat/t", } tcs := []struct { - name string - pipelineRun string + name string + pipelineRun string + permanentError bool + wantEvents []string }{ { - name: "invalid-pipeline-run-shd-stop-reconciling", - pipelineRun: invalidNames[0], + name: "invalid-pipeline-run-shd-stop-reconciling", + pipelineRun: invalidNames[0], + permanentError: true, + wantEvents: []string{}, }, { - name: "invalid-pipeline-run-name-shd-stop-reconciling", - pipelineRun: invalidNames[1], + name: "invalid-pipeline-run-name-shd-stop-reconciling", + pipelineRun: invalidNames[1], + permanentError: true, + wantEvents: []string{}, }, } @@ -569,17 +701,32 @@ func TestReconcile_InvalidPipelineRunNames(t *testing.T) { testAssets, cancel := getPipelineRunController(t, test.Data{}) defer cancel() c := testAssets.Controller + reconciler := c.Reconciler.(*Reconciler) + fr := reconciler.Recorder.(*record.FakeRecorder) - err := c.Reconciler.Reconcile(context.Background(), tc.pipelineRun) + reconcileError := reconciler.Reconcile(context.Background(), tc.pipelineRun) // No reason to keep reconciling something that doesnt or can't exist - if err != nil { - t.Errorf("Did not expect to see error when reconciling invalid PipelineRun but saw %q", err) + if reconcileError == nil { + t.Fatalf("Expected an error to be returned by Reconcile, got nil instead") + } + + if controller.IsPermanentError(reconcileError) != tc.permanentError { + t.Fatalf("Expected the error to be permanent: %v but got permanent: %v", tc.permanentError, controller.IsPermanentError(reconcileError)) + } + + // In case of "resource not found", we don't generate any event. + // The object associated to the event would have to be nil otherwise. 
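Both invalid names above now fail before any object is fetched: `foo/test-pipeline-run-doesnot-exist` is well-formed but missing, while `test/invalidformat/t` is rejected by the key split itself. The split behaviour, in isolation:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	ns, name, err := cache.SplitMetaNamespaceKey("foo/test-pipeline-run")
	fmt.Println(ns, name, err) // foo test-pipeline-run <nil>

	// More than one "/" is malformed; Reconcile now wraps this in a
	// permanent error so the key is dropped rather than requeued.
	_, _, err = cache.SplitMetaNamespaceKey("test/invalidformat/t")
	fmt.Println(err) // unexpected key format: "test/invalidformat/t"
}
```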
+			err := checkEvents(fr, tc.pipelineRun, tc.wantEvents)
+			if !(err == nil) {
+				t.Errorf(err.Error())
+			}
 		})
 	}
 }

 func TestUpdateTaskRunsState(t *testing.T) {
+	// TestUpdateTaskRunsState runs "getTaskRunsStatus" and verifies how it updates a PipelineRun status
+	// from a TaskRun associated with the PipelineRun
 	pr := tb.PipelineRun("test-pipeline-run", tb.PipelineRunNamespace("foo"), tb.PipelineRunSpec("test-pipeline"))
 	pipelineTask := v1beta1.PipelineTask{
 		Name: "unit-test-1",
@@ -636,6 +783,8 @@
 }

 func TestUpdateTaskRunStateWithConditionChecks(t *testing.T) {
+	// TestUpdateTaskRunStateWithConditionChecks runs "getTaskRunsStatus" and verifies how it updates a PipelineRun status
+	// from several different TaskRuns with Conditions associated with the PipelineRun
 	taskrunName := "task-run"
 	successConditionCheckName := "success-condition"
 	failingConditionCheckName := "fail-condition"
@@ -777,6 +926,10 @@
 }

 func TestReconcileOnCompletedPipelineRun(t *testing.T) {
+	// TestReconcileOnCompletedPipelineRun runs "Reconcile" on a PipelineRun that already reached completion
+	// and that does not have the latest status from TaskRuns yet. It checks that the TaskRun status is updated
+	// in the PipelineRun status, that the completion status is not altered, that no error is returned and
+	// a successful event is triggered
 	taskRunName := "test-pipeline-run-completed-hello-world"
 	prs := []*v1beta1.PipelineRun{tb.PipelineRun("test-pipeline-run-completed", tb.PipelineRunNamespace("foo"),
@@ -821,30 +974,42 @@
 	defer cancel()
 	c := testAssets.Controller
 	clients := testAssets.Clients
+	reconciler := c.Reconciler.(*Reconciler)
+	fr := reconciler.Recorder.(*record.FakeRecorder)

-	if err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-completed"); err != nil {
+	if err := reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-completed"); err != nil {
 		t.Fatalf("Error reconciling: %s", err)
 	}

-	if len(clients.Pipeline.Actions()) != 2 {
+	actions := clients.Pipeline.Actions()
+	if len(actions) != 2 {
+		t.Errorf("# Actions: %d, Actions: %#v", len(actions), actions)
 		t.Fatalf("Expected client to have updated the TaskRun status for a completed PipelineRun, but it did not")
 	}

-	_, ok := clients.Pipeline.Actions()[1].(ktesting.UpdateAction).GetObject().(*v1beta1.PipelineRun)
-	if !ok {
-		t.Errorf("Expected a PipelineRun to be updated, but it wasn't.")
-	}
-	t.Log(clients.Pipeline.Actions())
-	actions := clients.Pipeline.Actions()
+	pipelineUpdates := 0
 	for _, action := range actions {
 		if action != nil {
-			resource := action.GetResource().Resource
-			if resource == "taskruns" {
-				t.Fatalf("Expected client to not have created a TaskRun for the completed PipelineRun, but it did")
+			switch {
+			case action.Matches("create", "taskruns"):
+				t.Errorf("Expected client to not have created a TaskRun, but it did")
+			case action.Matches("update", "pipelineruns"):
+				pipelineUpdates++
+			case action.Matches("get", "pipeline"):
+				// We expect one pipelinerun update and one pipeline get.
+				// We need to check the pipeline spec in case there are results to be calculated
+				fallthrough
+			default:
+				continue
+			}
 		}
 	}
+	if pipelineUpdates != 1 {
+		// If only the pipelinerun status changed, we expect one update
+		t.Fatalf("Expected client to have updated the pipelinerun once, but it did %d times", pipelineUpdates)
+	}
+
 	// Check that the PipelineRun was reconciled correctly
 	reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").Get("test-pipeline-run-completed", metav1.GetOptions{})
 	if err != nil {
@@ -869,14 +1034,25 @@
 	if d := cmp.Diff(reconciledRun.Status.TaskRuns, expectedTaskRunsStatus); d != "" {
 		t.Fatalf("Expected PipelineRun status to match TaskRun(s) status, but got a mismatch %s", diff.PrintWantGot(d))
 	}
+
+	wantEvents := []string{
+		"Normal Succeeded All Tasks have completed executing",
+	}
+	err = checkEvents(fr, "test-pipeline-run-completed", wantEvents)
+	if !(err == nil) {
+		t.Errorf(err.Error())
+	}
 }

 func TestReconcileOnCancelledPipelineRun(t *testing.T) {
+	// TestReconcileOnCancelledPipelineRun runs "Reconcile" on a PipelineRun that has been cancelled.
+	// It verifies that reconcile is successful, the pipeline status is updated and events are generated.
 	prs := []*v1beta1.PipelineRun{tb.PipelineRun("test-pipeline-run-cancelled", tb.PipelineRunNamespace("foo"),
 		tb.PipelineRunSpec("test-pipeline", tb.PipelineRunServiceAccountName("test-sa"),
 			tb.PipelineRunCancelled,
 		),
+		tb.PipelineRunStatus(tb.PipelineRunStartTime(time.Now())),
 	)}
 	ps := []*v1beta1.Pipeline{tb.Pipeline("test-pipeline", tb.PipelineNamespace("foo"), tb.PipelineSpec(
 		tb.PipelineTask("hello-world-1", "hello-world"),
@@ -905,8 +1081,10 @@
 	defer cancel()
 	c := testAssets.Controller
 	clients := testAssets.Clients
+	reconciler := c.Reconciler.(*Reconciler)
+	fr := reconciler.Recorder.(*record.FakeRecorder)

-	err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-cancelled")
+	err := reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-cancelled")
 	if err != nil {
 		t.Errorf("Did not expect to see error when reconciling completed PipelineRun but saw %s", err)
 	}
@@ -925,9 +1103,19 @@
 	if !reconciledRun.Status.GetCondition(apis.ConditionSucceeded).IsFalse() {
 		t.Errorf("Expected PipelineRun status to be complete and false, but was %v", reconciledRun.Status.GetCondition(apis.ConditionSucceeded))
 	}
+
+	wantEvents := []string{
+		"Warning Failed PipelineRun \"test-pipeline-run-cancelled\" was cancelled",
+	}
+	err = checkEvents(fr, "test-pipeline-run-cancelled", wantEvents)
+	if !(err == nil) {
+		t.Errorf(err.Error())
+	}
 }

 func TestReconcileWithTimeout(t *testing.T) {
+	// TestReconcileWithTimeout runs "Reconcile" on a PipelineRun that has timed out.
+	// It verifies that reconcile is successful, the pipeline status is updated and events are generated.
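The timed-out run in this test is expected to be marked `Failed` once its `StartTime` plus the configured `Timeout` is in the past. A simplified, self-contained stand-in for that check (this is not the actual Tekton helper):

```go
package main

import (
	"fmt"
	"time"
)

// hasTimedOut is a simplified stand-in for the controller's timeout logic:
// a started run times out once startTime+timeout has elapsed.
func hasTimedOut(startTime time.Time, timeout time.Duration, now time.Time) bool {
	if startTime.IsZero() || timeout == 0 {
		return false
	}
	return now.After(startTime.Add(timeout))
}

func main() {
	start := time.Now().Add(-13 * time.Hour)
	fmt.Println(hasTimedOut(start, 12*time.Hour, time.Now())) // true
}
```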
ps := []*v1beta1.Pipeline{tb.Pipeline("test-pipeline", tb.PipelineNamespace("foo"), tb.PipelineSpec( tb.PipelineTask("hello-world-1", "hello-world"), ))} @@ -952,8 +1140,10 @@ func TestReconcileWithTimeout(t *testing.T) { defer cancel() c := testAssets.Controller clients := testAssets.Clients + reconciler := c.Reconciler.(*Reconciler) + fr := reconciler.Recorder.(*record.FakeRecorder) - err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-with-timeout") + err := reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-with-timeout") if err != nil { t.Errorf("Did not expect to see error when reconciling completed PipelineRun but saw %s", err) } @@ -983,9 +1173,19 @@ func TestReconcileWithTimeout(t *testing.T) { if actual.Spec.Timeout.Duration > prs[0].Spec.Timeout.Duration { t.Errorf("TaskRun timeout %s should be less than or equal to PipelineRun timeout %s", actual.Spec.Timeout.Duration.String(), prs[0].Spec.Timeout.Duration.String()) } + + wantEvents := []string{ + "Warning Failed PipelineRun \"test-pipeline-run-with-timeout\" failed to finish within \"12h0m0s\"", + } + err = checkEvents(fr, "test-pipeline-run-with-timeout", wantEvents) + if !(err == nil) { + t.Errorf(err.Error()) + } } func TestReconcileWithoutPVC(t *testing.T) { + // TestReconcileWithoutPVC runs "Reconcile" on a PipelineRun that has two unrelated tasks. + // It verifies that reconcile is successful and that no PVC is created ps := []*v1beta1.Pipeline{tb.Pipeline("test-pipeline", tb.PipelineNamespace("foo"), tb.PipelineSpec( tb.PipelineTask("hello-world-1", "hello-world"), tb.PipelineTask("hello-world-2", "hello-world"), @@ -1034,9 +1234,12 @@ func TestReconcileWithoutPVC(t *testing.T) { } func TestReconcileCancelledFailsTaskRunCancellation(t *testing.T) { + // TestReconcileCancelledFailsTaskRunCancellation runs "Reconcile" on a PipelineRun with a single TaskRun. + // The TaskRun cannot be cancelled. Check that the pipelinerun cancel fails, that reconcile fails and + // an event is generated names.TestingSeed() ptName := "hello-world-1" - prName := "test-pipeline-run-with-timeout" + prName := "test-pipeline-fails-to-cancel" prs := []*v1beta1.PipelineRun{tb.PipelineRun(prName, tb.PipelineRunNamespace("foo"), tb.PipelineRunSpec("test-pipeline", tb.PipelineRunCancelled, @@ -1044,10 +1247,17 @@ func TestReconcileCancelledFailsTaskRunCancellation(t *testing.T) { // The reconciler uses the presence of this TaskRun in the status to determine that a TaskRun // is already running. The TaskRun will not be retrieved at all so we do not need to seed one. tb.PipelineRunStatus( + tb.PipelineRunStatusCondition(apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: resources.ReasonRunning, + Message: "running...", + }), tb.PipelineRunTaskRunsStatus(prName+ptName, &v1alpha1.PipelineRunTaskRunStatus{ PipelineTaskName: ptName, Status: &v1alpha1.TaskRunStatus{}, }), + tb.PipelineRunStartTime(time.Now()), ), )} @@ -1059,19 +1269,21 @@ func TestReconcileCancelledFailsTaskRunCancellation(t *testing.T) { defer cancel() c := testAssets.Controller clients := testAssets.Clients + reconciler := c.Reconciler.(*Reconciler) + fr := reconciler.Recorder.(*record.FakeRecorder) // Make the patch call fail, i.e. 
make it so that the controller fails to cancel the TaskRun clients.Pipeline.PrependReactor("patch", "taskruns", func(action k8stesting.Action) (bool, runtime.Object, error) { return true, nil, fmt.Errorf("i'm sorry Dave, i'm afraid i can't do that") }) - err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-with-timeout") + err := reconciler.Reconcile(context.Background(), "foo/test-pipeline-fails-to-cancel") if err == nil { t.Errorf("Expected to see error returned from reconcile after failing to cancel TaskRun but saw none!") } // Check that the PipelineRun is still running with correct error message - reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").Get("test-pipeline-run-with-timeout", metav1.GetOptions{}) + reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").Get("test-pipeline-fails-to-cancel", metav1.GetOptions{}) if err != nil { t.Fatalf("Somehow had error getting reconciled run out of fake client: %s", err) } @@ -1084,16 +1296,30 @@ func TestReconcileCancelledFailsTaskRunCancellation(t *testing.T) { if condition.Reason != ReasonCouldntCancel { t.Errorf("Expected PipelineRun condition to indicate the cancellation failed but reason was %s", condition.Reason) } + // The event here is "Normal" because in case we fail to cancel we leave the condition to unknown + // Further reconcile might converge then the status of the pipeline. + // See https://github.com/tektoncd/pipeline/issues/2647 for further details. + wantEvents := []string{ + "Normal PipelineRunCouldntCancel PipelineRun \"test-pipeline-fails-to-cancel\" was cancelled but had errors trying to cancel TaskRuns", + } + err = checkEvents(fr, prName, wantEvents) + if !(err == nil) { + t.Errorf(err.Error()) + } } func TestReconcileCancelledPipelineRun(t *testing.T) { + // TestReconcileCancelledPipelineRun runs "Reconcile" on a PipelineRun that has been cancelled. + // The PipelineRun had no TaskRun associated yet, and no TaskRun should have been created. + // It verifies that reconcile is successful, the pipeline status updated and events generated. 
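The failed-cancellation test above injects its fault with `PrependReactor`. The same mechanism in isolation, using the plain Kubernetes fake clientset and the pre-context client-go method signatures used throughout this file:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	ktesting "k8s.io/client-go/testing"
)

func main() {
	client := fake.NewSimpleClientset()
	// Reactors run in order; prepending ours makes every matching
	// verb/resource call fail before the default tracker handles it.
	client.PrependReactor("create", "pods", func(action ktesting.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("injected failure")
	})
	_, err := client.CoreV1().Pods("default").Create(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}})
	fmt.Println(err) // injected failure
}
```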
ps := []*v1beta1.Pipeline{tb.Pipeline("test-pipeline", tb.PipelineNamespace("foo"), tb.PipelineSpec( tb.PipelineTask("hello-world-1", "hello-world", tb.Retries(1)), ))} - prs := []*v1beta1.PipelineRun{tb.PipelineRun("test-pipeline-run-with-timeout", tb.PipelineRunNamespace("foo"), + prs := []*v1beta1.PipelineRun{tb.PipelineRun("test-pipeline-run-cancelled", tb.PipelineRunNamespace("foo"), tb.PipelineRunSpec("test-pipeline", tb.PipelineRunCancelled, ), + tb.PipelineRunStatus(tb.PipelineRunStartTime(time.Now())), )} ts := []*v1beta1.Task{tb.Task("hello-world", tb.TaskNamespace("foo"))} @@ -1107,14 +1333,16 @@ func TestReconcileCancelledPipelineRun(t *testing.T) { defer cancel() c := testAssets.Controller clients := testAssets.Clients + reconciler := c.Reconciler.(*Reconciler) + fr := reconciler.Recorder.(*record.FakeRecorder) - err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-with-timeout") + err := reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-cancelled") if err != nil { t.Errorf("Did not expect to see error when reconciling completed PipelineRun but saw %s", err) } // Check that the PipelineRun was reconciled correctly - reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").Get("test-pipeline-run-with-timeout", metav1.GetOptions{}) + reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").Get("test-pipeline-run-cancelled", metav1.GetOptions{}) if err != nil { t.Fatalf("Somehow had error getting completed reconciled run out of fake client: %s", err) } @@ -1132,6 +1360,14 @@ func TestReconcileCancelledPipelineRun(t *testing.T) { t.Errorf("Expected a TaskRun to be get/updated, but it was %s", actionType) } } + + wantEvents := []string{ + "Warning Failed PipelineRun \"test-pipeline-run-cancelled\" was cancelled", + } + err = checkEvents(fr, "test-pipeline-run-cancelled", wantEvents) + if !(err == nil) { + t.Errorf(err.Error()) + } } func TestReconcilePropagateLabels(t *testing.T) { @@ -1285,22 +1521,30 @@ func TestReconcileWithDifferentServiceAccounts(t *testing.T) { } func TestReconcileWithTimeoutAndRetry(t *testing.T) { - + // TestReconcileWithTimeoutAndRetry runs "Reconcile" against pipelines with retries and timeout settings, + // and status that represents different number of retries already performed. 
+	// It verifies the reconciled status and the events generated.
 	tcs := []struct {
 		name               string
 		retries            int
 		conditionSucceeded corev1.ConditionStatus
+		wantEvents         []string
 	}{
 		{
 			name:               "One try has to be done",
 			retries:            1,
 			conditionSucceeded: corev1.ConditionFalse,
+			wantEvents: []string{
+				"Warning Failed PipelineRun \"test-pipeline-retry-run-with-timeout\" failed to finish within",
+			},
 		},
 		{
 			name:               "No more retries are needed",
 			retries:            2,
 			conditionSucceeded: corev1.ConditionUnknown,
-		},
+			wantEvents: []string{
+				"Warning Failed PipelineRun \"test-pipeline-retry-run-with-timeout\" failed to finish within",
+			}},
 	}

 	for _, tc := range tcs {
@@ -1359,8 +1603,10 @@ func TestReconcileWithTimeoutAndRetry(t *testing.T) {
 			defer cancel()
 			c := testAssets.Controller
 			clients := testAssets.Clients
+			reconciler := c.Reconciler.(*Reconciler)
+			fr := reconciler.Recorder.(*record.FakeRecorder)

-			err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-retry-run-with-timeout")
+			err := reconciler.Reconcile(context.Background(), "foo/test-pipeline-retry-run-with-timeout")
 			if err != nil {
 				t.Errorf("Did not expect to see error when reconciling completed PipelineRun but saw %s", err)
 			}
@@ -1379,6 +1625,10 @@ func TestReconcileWithTimeoutAndRetry(t *testing.T) {
 				t.Fatalf("Succeeded expected to be %s but is %s", tc.conditionSucceeded, status)
 			}

+			err = checkEvents(fr, prs[0].Name, tc.wantEvents)
+			if err != nil {
+				t.Error(err.Error())
+			}
 		})
 	}
 }
@@ -1616,6 +1866,9 @@ func TestReconcileAndPropagateCustomPipelineTaskRunSpec(t *testing.T) {
 }

 func TestReconcileWithConditionChecks(t *testing.T) {
+	// TestReconcileWithConditionChecks runs "Reconcile" on a PipelineRun that has a task with
+	// multiple conditions. It verifies that reconcile is successful, that TaskRuns are created
+	// and that the status is updated. It checks that the correct events are sent.
 	names.TestingSeed()
 	prName := "test-pipeline-run"
 	conditions := []*v1alpha1.Condition{
@@ -1668,8 +1921,10 @@ func TestReconcileWithConditionChecks(t *testing.T) {
 	defer cancel()
 	c := testAssets.Controller
 	clients := testAssets.Clients
+	reconciler := c.Reconciler.(*Reconciler)
+	fr := reconciler.Recorder.(*record.FakeRecorder)

-	err := c.Reconciler.Reconcile(context.Background(), "foo/"+prName)
+	err := reconciler.Reconcile(context.Background(), "foo/"+prName)
 	if err != nil {
 		t.Errorf("Did not expect to see error when reconciling completed PipelineRun but saw %s", err)
 	}
@@ -1700,9 +1955,21 @@ func TestReconcileWithConditionChecks(t *testing.T) {
 	if d := cmp.Diff(actual, expectedConditionChecks); d != "" {
 		t.Errorf("expected to see 2 ConditionCheck TaskRuns created. Diff %s", diff.PrintWantGot(d))
 	}
+
+	wantEvents := []string{
+		"Normal Started",
+		"Normal Running Tasks Completed: 0, Incomplete: 1, Skipped: 0",
+	}
+	err = checkEvents(fr, prName, wantEvents)
+	if err != nil {
+		t.Error(err.Error())
+	}
 }

 func TestReconcileWithFailingConditionChecks(t *testing.T) {
+	// TestReconcileWithFailingConditionChecks runs "Reconcile" on a PipelineRun that has a task with
+	// multiple conditions, some of which fail. It verifies that reconcile is successful, that TaskRuns
+	// are created and that the status is updated. It checks that the correct events are sent.
 	names.TestingSeed()
 	conditions := []*v1alpha1.Condition{
 		tbv1alpha1.Condition("always-false", tbv1alpha1.ConditionNamespace("foo"), tbv1alpha1.ConditionSpec(
@@ -1784,8 +2051,10 @@ func TestReconcileWithFailingConditionChecks(t *testing.T) {
 	defer cancel()
 	c := testAssets.Controller
 	clients := testAssets.Clients
+	reconciler := c.Reconciler.(*Reconciler)
+	fr := reconciler.Recorder.(*record.FakeRecorder)

-	err := c.Reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-with-conditions")
+	err := reconciler.Reconcile(context.Background(), "foo/test-pipeline-run-with-conditions")
 	if err != nil {
 		t.Errorf("Did not expect to see error when reconciling completed PipelineRun but saw %s", err)
 	}
@@ -1820,6 +2089,15 @@ func TestReconcileWithFailingConditionChecks(t *testing.T) {
 	if d := cmp.Diff(actual, expectedTaskRun); d != "" {
 		t.Errorf("expected to see ConditionCheck TaskRun %v created. Diff %s", expectedTaskRun, diff.PrintWantGot(d))
 	}
+
+	wantEvents := []string{
+		"Normal Started",
+		"Normal Running Tasks Completed: 1, Incomplete: 1, Skipped: 1",
+	}
+	err = checkEvents(fr, "test-pipeline-run-with-conditions", wantEvents)
+	if err != nil {
+		t.Error(err.Error())
+	}
 }

 func makeExpectedTr(condName, ccName string, labels, annotations map[string]string) *v1beta1.TaskRun {
@@ -3020,3 +3298,155 @@ func TestUpdatePipelineRunStatusFromTaskRuns(t *testing.T) {
 		})
 	}
 }
+
+func TestReconcileOnCompletedTaskRuns(t *testing.T) {
+	// This test aims to verify the behaviour of the controller when a PipelineRun transitions
+	// to a successful completion state. Before the reconcile begins, all of the TaskRuns have
+	// reached a successful completion state, so the list of candidate TaskRuns is empty.
+	// The PipelineRun is expected to sync the status of the TaskRuns to its own status, update
+	// its own status, set a completion time and emit events.
+	taskRunName := "test-pipeline-run-to-complete-hello-world"
+	pipelineRunName := "test-pipeline-run-to-complete"
+	prs := []*v1beta1.PipelineRun{tb.PipelineRun(pipelineRunName,
+		tb.PipelineRunNamespace("foo"),
+		tb.PipelineRunSpec("test-pipeline", tb.PipelineRunServiceAccountName("test-sa")),
+		tb.PipelineRunStatus(tb.PipelineRunStatusCondition(apis.Condition{
+			Type:    apis.ConditionSucceeded,
+			Status:  corev1.ConditionUnknown,
+			Reason:  resources.ReasonRunning,
+			Message: "Not all Tasks have completed executing",
+		}),
+			tb.PipelineRunTaskRunsStatus(taskRunName, &v1beta1.PipelineRunTaskRunStatus{
+				PipelineTaskName: "hello-world-1",
+				Status:           &v1beta1.TaskRunStatus{},
+			}),
+			tb.PipelineRunStartTime(time.Now()),
+		),
+	)}
+	ps := []*v1beta1.Pipeline{tb.Pipeline("test-pipeline", tb.PipelineNamespace("foo"), tb.PipelineSpec(
+		tb.PipelineTask("hello-world-1", "hello-world")))}
+	ts := []*v1beta1.Task{tb.Task("hello-world", tb.TaskNamespace("foo"))}
+	trs := []*v1beta1.TaskRun{
+		tb.TaskRun(taskRunName,
+			tb.TaskRunNamespace("foo"),
+			tb.TaskRunOwnerReference("kind", "name"),
+			tb.TaskRunLabel(pipeline.GroupName+pipeline.PipelineLabelKey, pipelineRunName),
+			tb.TaskRunLabel(pipeline.GroupName+pipeline.PipelineRunLabelKey, "test-pipeline"),
+			tb.TaskRunSpec(tb.TaskRunTaskRef("hello-world")),
+			tb.TaskRunStatus(
+				tb.StatusCondition(apis.Condition{
+					Type:   apis.ConditionSucceeded,
+					Status: corev1.ConditionTrue,
+				}),
+			),
+		),
+	}
+	d := test.Data{
+		PipelineRuns: prs,
+		Pipelines:    ps,
+		Tasks:        ts,
+		TaskRuns:     trs,
+	}
+
+	testAssets, cancel := getPipelineRunController(t, d)
+	defer cancel()
+	c := testAssets.Controller
+	clients := testAssets.Clients
+	reconciler := c.Reconciler.(*Reconciler)
+	fr := reconciler.Recorder.(*record.FakeRecorder)
+
+	if err := reconciler.Reconcile(context.Background(), "foo/"+pipelineRunName); err != nil {
+		t.Fatalf("Error reconciling: %s", err)
+	}
+
+	// Expected 3 actions:
+	// - fetch the pipeline spec
+	// - update pipelinerun status
+	// - update pipelinerun labels
+	actions := clients.Pipeline.Actions()
+	if len(actions) != 3 {
+		t.Fatalf("Expected client to have performed three actions to reconcile the PipelineRun, but it performed %d", len(actions))
+	}
+
+	actual := clients.Pipeline.Actions()[1].(ktesting.UpdateAction).GetObject().(*v1beta1.PipelineRun)
+	if actual == nil {
+		t.Errorf("Expected a PipelineRun to be updated, but it wasn't.")
+	}
+	t.Log(actions)
+	for _, action := range actions {
+		if action != nil {
+			if action.Matches("create", "taskruns") {
+				t.Fatalf("Expected client to not have created a TaskRun for the completed PipelineRun, but it did")
+			}
+		}
+	}
+
+	// Check that the PipelineRun was reconciled correctly
+	reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").Get(pipelineRunName, metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Somehow had error getting completed reconciled run out of fake client: %s", err)
+	}
+
+	// This PipelineRun should still be complete and the status should reflect that
+	if reconciledRun.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() {
+		t.Errorf("Expected PipelineRun status to be complete, but was %v", reconciledRun.Status.GetCondition(apis.ConditionSucceeded))
+	}
+
+	expectedTaskRunsStatus := make(map[string]*v1beta1.PipelineRunTaskRunStatus)
+	expectedTaskRunsStatus[taskRunName] = &v1beta1.PipelineRunTaskRunStatus{
+		PipelineTaskName: "hello-world-1",
+		Status: &v1beta1.TaskRunStatus{
+			Status: duckv1beta1.Status{
+				Conditions: []apis.Condition{{
+					Type:   apis.ConditionSucceeded,
+					Status: corev1.ConditionTrue,
+				}},
+			},
+		},
+	}
+
+	expectedPipelineRunStatus := v1beta1.PipelineRunStatus{
+		Status: duckv1beta1.Status{
+			Conditions: []apis.Condition{
+				{
+					Type:    apis.ConditionSucceeded,
+					Status:  corev1.ConditionTrue,
+					Reason:  resources.ReasonSucceeded,
+					Message: "Tasks Completed: 1, Skipped: 0",
+				},
+			},
+		},
+		PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{
+			StartTime: prs[0].Status.PipelineRunStatusFields.StartTime,
+			TaskRuns:  expectedTaskRunsStatus,
+			PipelineSpec: &v1beta1.PipelineSpec{
+				Tasks: []v1beta1.PipelineTask{{Name: "hello-world-1", TaskRef: &v1beta1.TaskRef{Name: "hello-world"}}},
+			},
+		},
+	}
+
+	// Let's check the timestamps first so we can set them to match for a full diff
+	if (reconciledRun.Status.Conditions[0].LastTransitionTime == apis.VolatileTime{}) {
+		t.Fatalf("Expected the PipelineRun LastTransitionTime to be set, got apis.VolatileTime{} instead")
+	}
+	reconciledRun.Status.Conditions[0].LastTransitionTime = apis.VolatileTime{}
+	if *reconciledRun.Status.PipelineRunStatusFields.StartTime != *expectedPipelineRunStatus.PipelineRunStatusFields.StartTime {
+		t.Fatalf("Expected the PipelineRun StartTime to stay the same, but it changed")
+	}
+	if reconciledRun.Status.PipelineRunStatusFields.CompletionTime == nil {
+		t.Fatalf("Expected the PipelineRun CompletionTime not to be nil")
+	}
+	reconciledRun.Status.PipelineRunStatusFields.CompletionTime = nil
+	// Check that the status matches - except for timestamps
+	if d := cmp.Diff(reconciledRun.Status, expectedPipelineRunStatus); d != "" {
+		t.Fatalf("Expected PipelineRun status to match TaskRun(s) status, but got a mismatch %s", diff.PrintWantGot(d))
+	}
+
+	wantEvents := []string{
+		"Normal Succeeded Tasks Completed: 1, Skipped: 0",
+	}
+	err = checkEvents(fr, pipelineRunName, wantEvents)
+	if err != nil {
+		t.Error(err.Error())
+	}
+}
diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go
index 888ff21da0c..77c8a79a001 100644
--- a/pkg/reconciler/taskrun/taskrun.go
+++ b/pkg/reconciler/taskrun/taskrun.go
@@ -384,7 +384,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun,
 		go c.timeoutHandler.WaitTaskRun(tr, tr.Status.StartTime)
 	}
 	if err := c.tracker.Track(tr.GetBuildPodRef(), tr); err != nil {
-		c.Logger.Errorf("Failed to create tracker for build pod %q for taskrun %q: %v", tr.Name, tr.Name, err)
+		c.Logger.Errorf("Failed to create tracker for pod %q for taskrun %q: %v", tr.Name, tr.Name, err)
 		return err
 	}

@@ -420,8 +420,8 @@ func (c *Reconciler) updateStatusLabelsAndAnnotations(tr, original *v1beta1.Task
 		// cache may be stale and we don't want to overwrite a prior update
 		// to status with this stale state.
 		if _, err := c.updateStatus(tr); err != nil {
-			c.Logger.Warn("Failed to update taskRun status", zap.Error(err))
-			return err
+			c.Logger.Warn("Failed to update TaskRun status", zap.Error(err))
+			return fmt.Errorf("failed to update TaskRun status: %s", err.Error())
 		}
 		updated = true
 	}
@@ -433,7 +433,7 @@ func (c *Reconciler) updateStatusLabelsAndAnnotations(tr, original *v1beta1.Task
 	if !reflect.DeepEqual(original.ObjectMeta.Labels, tr.ObjectMeta.Labels) || !reflect.DeepEqual(original.ObjectMeta.Annotations, tr.ObjectMeta.Annotations) {
 		if _, err := c.updateLabelsAndAnnotations(tr); err != nil {
 			c.Logger.Warn("Failed to update TaskRun labels/annotations", zap.Error(err))
-			return err
+			return fmt.Errorf("failed to update TaskRun labels/annotations: %s", err.Error())
 		}
 		updated = true
 	}
diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go
index b90b449bd58..6ee4569d616 100644
--- a/pkg/reconciler/taskrun/taskrun_test.go
+++ b/pkg/reconciler/taskrun/taskrun_test.go
@@ -297,9 +297,10 @@ func checkEvents(fr *record.FakeRecorder, testName string, wantEvents []string)
 			return fmt.Errorf("Expected event \"%s\" but got \"%s\" instead for %s", wantEvent, event, testName)
 		}
 	case <-timer.C:
-		if len(foundEvents) > len(wantEvents) {
-			return fmt.Errorf("Received %d events for %s but %d expected. Found events: %#v", len(foundEvents), testName, len(wantEvents), foundEvents)
+		if len(foundEvents) == len(wantEvents) {
+			return nil
 		}
+		return fmt.Errorf("Received %d events for %s but %d expected. Found events: %#v", len(foundEvents), testName, len(wantEvents), foundEvents)
 	}
 	return nil
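
For readers unfamiliar with the event-checking pattern these tests rely on: client-go's record.FakeRecorder buffers each emitted event as a plain string of the form "<type> <reason> <message>" on its Events channel, which is why the tests above can match on prefixes such as "Normal Started" or "Warning Failed PipelineRun ... failed to finish within" while the tail of the message carries run-dependent data like the timeout duration. The standalone sketch below illustrates that mechanism outside the patch; the helper name expectEvents and the 100ms timeout are illustrative choices, not part of this change.

package main

import (
	"fmt"
	"strings"
	"time"

	"k8s.io/client-go/tools/record"
)

// expectEvents drains the recorder's channel until the timer fires, checking
// each received event against the corresponding expected prefix, in order.
func expectEvents(fr *record.FakeRecorder, want []string) error {
	var found []string
	timer := time.NewTimer(100 * time.Millisecond)
	defer timer.Stop()
	for {
		select {
		case event := <-fr.Events:
			found = append(found, event)
			if len(found) > len(want) {
				return fmt.Errorf("unexpected extra event: %q", event)
			}
			if !strings.HasPrefix(event, want[len(found)-1]) {
				return fmt.Errorf("expected event %q but got %q", want[len(found)-1], event)
			}
		case <-timer.C:
			// Timeout reached: succeed only if exactly the expected events arrived.
			if len(found) == len(want) {
				return nil
			}
			return fmt.Errorf("received %d events, expected %d: %#v", len(found), len(want), found)
		}
	}
}

func main() {
	fr := record.NewFakeRecorder(10)
	// FakeRecorder ignores the object argument and writes
	// "<type> <reason> <message>" to its Events channel.
	fr.Eventf(nil, "Normal", "Started", "")
	fr.Eventf(nil, "Warning", "Failed", "PipelineRun %q was cancelled", "my-run")
	if err := expectEvents(fr, []string{"Normal Started", "Warning Failed"}); err != nil {
		fmt.Println("mismatch:", err)
		return
	}
	fmt.Println("events matched")
}

Prefix matching keeps the assertions stable even though event messages embed durations, object names and counts that vary from run to run.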