diff --git a/pkg/build/api/types.go b/pkg/build/api/types.go index 5910cc19669f..cfea99533248 100644 --- a/pkg/build/api/types.go +++ b/pkg/build/api/types.go @@ -286,6 +286,46 @@ const ( // secret for pushing the output image. // The build will stay in the pending state until the secret is created, or the build times out. StatusReasonMissingPushSecret StatusReason = "MissingPushSecret" + + // StatusReasonPostCommitHookFailed indicates the post-commit hook failed. + StatusReasonPostCommitHookFailed StatusReason = "PostCommitHookFailed" + + // StatusReasonPushImageToRegistryFailed indicates that an image failed to be + // pushed to the registry. + StatusReasonPushImageToRegistryFailed StatusReason = "PushImageToRegistryFailed" + + // StatusReasonPullBuilderImageFailed indicates that we failed to pull the + // builder image. + StatusReasonPullBuilderImageFailed StatusReason = "PullBuilderImageFailed" + + // StatusReasonFetchSourceFailed indicates that fetching the source of the + // build has failed. + StatusReasonFetchSourceFailed StatusReason = "FetchSourceFailed" + + // StatusReasonCancelledBuild indicates that the build was cancelled by the + // user. + StatusReasonCancelledBuild StatusReason = "CancelledBuild" + + // StatusReasonDockerBuildFailed indicates that the docker build strategy has + // failed. + StatusReasonDockerBuildFailed StatusReason = "DockerBuildFailed" +) + +// NOTE: These messages might change. 
+const ( + StatusMessageCannotCreateBuildPodSpec = "Failed to create pod spec" + StatusMessageCannotCreateBuildPod = "Failed creating build pod" + StatusMessageInvalidOutputRef = "Output image could not be resolved" + StatusMessageCancelBuildFailed = "Failed to cancel build" + StatusMessageBuildPodDeleted = "The pod for this build was deleted before the build completed" + StatusMessageExceededRetryTimeout = "Build did not complete and retrying timed out" + StatusMessageMissingPushSecret = "Missing push secret" + StatusMessagePostCommitHookFailed = "Build failed because of post commit hook" + StatusMessagePushImageToRegistryFailed = "Failed to push the image to the registry" + StatusMessagePullBuilderImageFailed = "Failed pulling builder image" + StatusMessageFetchSourceFailed = "Failed to fetch the input source" + StatusMessageCancelledBuild = "The build was cancelled by the user" + StatusMessageDockerBuildFailed = "Docker build strategy has failed" ) // BuildSource is the input used for the build. diff --git a/pkg/build/builder/common.go b/pkg/build/builder/common.go index 94341d853ef1..d9d54ebadd92 100644 --- a/pkg/build/builder/common.go +++ b/pkg/build/builder/common.go @@ -10,6 +10,8 @@ import ( "github.com/docker/distribution/reference" "github.com/fsouza/go-dockerclient" + kclient "k8s.io/kubernetes/pkg/client/unversioned" + "github.com/openshift/origin/pkg/build/api" "github.com/openshift/origin/pkg/client" "github.com/openshift/origin/pkg/generate/git" @@ -20,7 +22,14 @@ import ( // client facing libraries should not be using glog var glog = utilglog.ToFile(os.Stderr, 2) -const OriginalSourceURLAnnotationKey = "openshift.io/original-source-url" +const ( + OriginalSourceURLAnnotationKey = "openshift.io/original-source-url" + + // containerNamePrefix prefixes the name of containers launched by a build. + // We cannot reuse the prefix "k8s" because we don't want the containers to + // be managed by a kubelet. 
+ containerNamePrefix = "openshift" +) // KeyValue can be used to build ordered lists of key-value pairs. type KeyValue struct { @@ -66,35 +75,6 @@ func buildInfo(build *api.Build) []KeyValue { return kv } -func updateBuildRevision(c client.BuildInterface, build *api.Build, sourceInfo *git.SourceInfo) { - if build.Spec.Revision != nil { - return - } - build.Spec.Revision = &api.SourceRevision{ - Git: &api.GitSourceRevision{ - Commit: sourceInfo.CommitID, - Message: sourceInfo.Message, - Author: api.SourceControlUser{ - Name: sourceInfo.AuthorName, - Email: sourceInfo.AuthorEmail, - }, - Committer: api.SourceControlUser{ - Name: sourceInfo.CommitterName, - Email: sourceInfo.CommitterEmail, - }, - }, - } - - // Reset ResourceVersion to avoid a conflict with other updates to the build - build.ResourceVersion = "" - - glog.V(4).Infof("Setting build revision to %#v", build.Spec.Revision.Git) - _, err := c.UpdateDetails(build) - if err != nil { - glog.V(0).Infof("error: An error occurred saving build revision: %v", err) - } -} - // randomBuildTag generates a random tag used for building images in such a way // that the built image can be referred to unambiguously even in the face of // concurrent builds with the same name in the same namespace. @@ -108,11 +88,6 @@ func randomBuildTag(namespace, name string) string { return fmt.Sprintf("%s:%s", repo, randomTag) } -// containerNamePrefix prefixes the name of containers launched by a build. We -// cannot reuse the prefix "k8s" because we don't want the containers to be -// managed by a kubelet. -const containerNamePrefix = "openshift" - // containerName creates names for Docker containers launched by a build. It is // meant to resemble Kubernetes' pkg/kubelet/dockertools.BuildDockerName. 
func containerName(strategyName, buildName, namespace, containerPurpose string) string { @@ -180,3 +155,47 @@ func execPostCommitHook(client DockerClient, postCommitSpec api.BuildPostCommitS Stderr: true, }) } + +func updateBuildRevision(build *api.Build, sourceInfo *git.SourceInfo) *api.SourceRevision { + if build.Spec.Revision != nil { + return build.Spec.Revision + } + return &api.SourceRevision{ + Git: &api.GitSourceRevision{ + Commit: sourceInfo.CommitID, + Message: sourceInfo.Message, + Author: api.SourceControlUser{ + Name: sourceInfo.AuthorName, + Email: sourceInfo.AuthorEmail, + }, + Committer: api.SourceControlUser{ + Name: sourceInfo.CommitterName, + Email: sourceInfo.CommitterEmail, + }, + }, + } +} + +func retryBuildStatusUpdate(build *api.Build, client client.BuildInterface, sourceRev *api.SourceRevision) error { + return kclient.RetryOnConflict(kclient.DefaultBackoff, func() error { + // before updating, make sure we are using the latest version of the build + latestBuild, err := client.Get(build.Name) + if err != nil { + // usually this means we failed to get resources due to the missing + // privileges + return err + } + if sourceRev != nil { + latestBuild.Spec.Revision = sourceRev + latestBuild.ResourceVersion = "" + } + + latestBuild.Status.Reason = build.Status.Reason + latestBuild.Status.Message = build.Status.Message + + if _, err := client.UpdateDetails(latestBuild); err != nil { + return err + } + return nil + }) +} diff --git a/pkg/build/builder/docker.go b/pkg/build/builder/docker.go index 27e362174104..f686e40d0bce 100644 --- a/pkg/build/builder/docker.go +++ b/pkg/build/builder/docker.go @@ -12,7 +12,9 @@ import ( dockercmd "github.com/docker/docker/builder/dockerfile/command" "github.com/docker/docker/builder/dockerfile/parser" docker "github.com/fsouza/go-dockerclient" + kapi "k8s.io/kubernetes/pkg/api" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" s2iapi "github.com/openshift/source-to-image/pkg/api" 
"github.com/openshift/source-to-image/pkg/tar" @@ -69,12 +71,22 @@ func (d *DockerBuilder) Build() error { } sourceInfo, err := fetchSource(d.dockerClient, buildDir, d.build, d.urlTimeout, os.Stdin, d.gitClient) if err != nil { + d.build.Status.Reason = api.StatusReasonFetchSourceFailed + d.build.Status.Message = api.StatusMessageFetchSourceFailed + if updateErr := retryBuildStatusUpdate(d.build, d.client, nil); updateErr != nil { + utilruntime.HandleError(fmt.Errorf("error: An error occured while updating the build status: %v", updateErr)) + } return err } + if sourceInfo != nil { - updateBuildRevision(d.client, d.build, sourceInfo) + glog.V(4).Infof("Setting build revision with details %#v", sourceInfo) + revision := updateBuildRevision(d.build, sourceInfo) + if updateErr := retryBuildStatusUpdate(d.build, d.client, revision); updateErr != nil { + utilruntime.HandleError(fmt.Errorf("error: An error occured while updating the build status: %v", updateErr)) + } } - if err := d.addBuildParameters(buildDir); err != nil { + if err = d.addBuildParameters(buildDir); err != nil { return err } @@ -91,14 +103,14 @@ func (d *DockerBuilder) Build() error { dockerfilePath := d.getDockerfilePath(buildDir) imageNames := getDockerfileFrom(dockerfilePath) if len(imageNames) == 0 { - return fmt.Errorf("no from image in dockerfile.") + return fmt.Errorf("no FROM image in Dockerfile") } for _, imageName := range imageNames { if imageName == "scratch" { glog.V(4).Infof("\nSkipping image \"scratch\"") continue } - var imageExists bool = true + imageExists := true _, err = d.dockerClient.InspectImage(imageName) if err != nil { if err != docker.ErrNoSuchImage { @@ -113,18 +125,33 @@ func (d *DockerBuilder) Build() error { dockercfg.PullAuthType, ) glog.V(0).Infof("\nPulling image %s ...", imageName) - if err := pullImage(d.dockerClient, imageName, pullAuthConfig); err != nil { + if err = pullImage(d.dockerClient, imageName, pullAuthConfig); err != nil { + d.build.Status.Reason = 
api.StatusReasonPullBuilderImageFailed + d.build.Status.Message = api.StatusMessagePullBuilderImageFailed + if updateErr := retryBuildStatusUpdate(d.build, d.client, nil); updateErr != nil { + utilruntime.HandleError(fmt.Errorf("error: An error occured while updating the build status: %v", updateErr)) + } return fmt.Errorf("failed to pull image: %v", err) } } } - if err := d.dockerBuild(buildDir, buildTag, d.build.Spec.Source.Secrets); err != nil { + if err = d.dockerBuild(buildDir, buildTag, d.build.Spec.Source.Secrets); err != nil { + d.build.Status.Reason = api.StatusReasonDockerBuildFailed + d.build.Status.Message = api.StatusMessageDockerBuildFailed + if updateErr := retryBuildStatusUpdate(d.build, d.client, nil); updateErr != nil { + utilruntime.HandleError(fmt.Errorf("error: An error occured while updating the build status: %v", updateErr)) + } return err } cname := containerName("docker", d.build.Name, d.build.Namespace, "post-commit") if err := execPostCommitHook(d.dockerClient, d.build.Spec.PostCommit, buildTag, cname); err != nil { + d.build.Status.Reason = api.StatusReasonPostCommitHookFailed + d.build.Status.Message = api.StatusMessagePostCommitHookFailed + if updateErr := retryBuildStatusUpdate(d.build, d.client, nil); updateErr != nil { + utilruntime.HandleError(fmt.Errorf("error: An error occured while updating the build status: %v", updateErr)) + } return err } @@ -149,6 +176,11 @@ func (d *DockerBuilder) Build() error { } glog.V(0).Infof("\nPushing image %s ...", pushTag) if err := pushImage(d.dockerClient, pushTag, pushAuthConfig); err != nil { + d.build.Status.Reason = api.StatusReasonPushImageToRegistryFailed + d.build.Status.Message = api.StatusMessagePushImageToRegistryFailed + if updateErr := retryBuildStatusUpdate(d.build, d.client, nil); updateErr != nil { + utilruntime.HandleError(fmt.Errorf("error: An error occured while updating the build status: %v", updateErr)) + } return reportPushFailure(err, authPresent, pushAuthConfig) } 
glog.V(0).Infof("Push successful") diff --git a/pkg/build/builder/sti.go b/pkg/build/builder/sti.go index 03c28eee3b0d..1eafcd854556 100644 --- a/pkg/build/builder/sti.go +++ b/pkg/build/builder/sti.go @@ -12,6 +12,8 @@ import ( "strings" "time" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" + s2iapi "github.com/openshift/source-to-image/pkg/api" "github.com/openshift/source-to-image/pkg/api/describe" "github.com/openshift/source-to-image/pkg/api/validation" @@ -27,7 +29,7 @@ import ( // builderFactory is the internal interface to decouple S2I-specific code from Origin builder code type builderFactory interface { // Create S2I Builder based on S2I configuration - Builder(config *s2iapi.Config, overrides s2ibuild.Overrides) (s2ibuild.Builder, error) + Builder(config *s2iapi.Config, overrides s2ibuild.Overrides) (s2ibuild.Builder, s2iapi.BuildInfo, error) } // validator is the interval interface to decouple S2I-specific code from Origin builder code @@ -40,9 +42,9 @@ type validator interface { type runtimeBuilderFactory struct{} // Builder delegates execution to S2I-specific code -func (_ runtimeBuilderFactory) Builder(config *s2iapi.Config, overrides s2ibuild.Overrides) (s2ibuild.Builder, error) { - builder, _, err := s2i.Strategy(config, overrides) - return builder, err +func (_ runtimeBuilderFactory) Builder(config *s2iapi.Config, overrides s2ibuild.Overrides) (s2ibuild.Builder, s2iapi.BuildInfo, error) { + builder, buildInfo, err := s2i.Strategy(config, overrides) + return builder, buildInfo, err } // runtimeConfigValidator is the default implementation of stiConfigValidator @@ -67,7 +69,8 @@ type S2IBuilder struct { } // NewS2IBuilder creates a new STIBuilder instance -func NewS2IBuilder(dockerClient DockerClient, dockerSocket string, buildsClient client.BuildInterface, build *api.Build, gitClient GitClient, cgLimits *s2iapi.CGroupLimits) *S2IBuilder { +func NewS2IBuilder(dockerClient DockerClient, dockerSocket string, buildsClient client.BuildInterface, 
build *api.Build, + gitClient GitClient, cgLimits *s2iapi.CGroupLimits) *S2IBuilder { // delegate to internal implementation passing default implementation of builderFactory and validator return newS2IBuilder(dockerClient, dockerSocket, buildsClient, build, gitClient, runtimeBuilderFactory{}, runtimeConfigValidator{}, cgLimits) } @@ -88,7 +91,8 @@ func newS2IBuilder(dockerClient DockerClient, dockerSocket string, buildsClient } } -// Build executes STI build based on configured builder, S2I builder factory and S2I config validator +// Build executes STI build based on configured builder, S2I builder factory +// and S2I config validator func (s *S2IBuilder) Build() error { if s.build.Spec.Strategy.SourceStrategy == nil { return errors.New("the source to image builder must be used with the source strategy") @@ -103,11 +107,11 @@ func (s *S2IBuilder) Build() error { return err } srcDir := filepath.Join(buildDir, s2iapi.Source) - if err := os.MkdirAll(srcDir, os.ModePerm); err != nil { + if err = os.MkdirAll(srcDir, os.ModePerm); err != nil { return err } tmpDir := filepath.Join(buildDir, "tmp") - if err := os.MkdirAll(tmpDir, os.ModePerm); err != nil { + if err = os.MkdirAll(tmpDir, os.ModePerm); err != nil { return err } @@ -210,7 +214,7 @@ func (s *S2IBuilder) Build() error { allowedUIDs := os.Getenv(api.AllowedUIDs) glog.V(4).Infof("The value of %s is [%s]", api.AllowedUIDs, allowedUIDs) if len(allowedUIDs) > 0 { - err := config.AllowedUIDs.Set(allowedUIDs) + err = config.AllowedUIDs.Set(allowedUIDs) if err != nil { return err } @@ -245,29 +249,43 @@ func (s *S2IBuilder) Build() error { } glog.V(4).Infof("Creating a new S2I builder with build config: %#v\n", describe.Config(config)) - builder, err := s.builder.Builder(config, s2ibuild.Overrides{Downloader: download}) + builder, buildInfo, err := s.builder.Builder(config, s2ibuild.Overrides{Downloader: download}) if err != nil { + s.build.Status.Reason, s.build.Status.Message = 
convertS2IFailureType(buildInfo.FailureReason.Reason, buildInfo.FailureReason.Message) + if updateErr := retryBuildStatusUpdate(s.build, s.client, nil); updateErr != nil { + utilruntime.HandleError(fmt.Errorf("error: An error occured while updating the build status: %v", updateErr)) + } return err } glog.V(4).Infof("Starting S2I build from %s/%s BuildConfig ...", s.build.Namespace, s.build.Name) + result, err := builder.Build(config) + if err != nil { + s.build.Status.Reason, s.build.Status.Message = convertS2IFailureType(result.BuildInfo.FailureReason.Reason, result.BuildInfo.FailureReason.Message) - if _, err = builder.Build(config); err != nil { + if updateErr := retryBuildStatusUpdate(s.build, s.client, nil); updateErr != nil { + utilruntime.HandleError(fmt.Errorf("error: An error occured while updating the build status: %v", updateErr)) + } return err } - cname := containerName("s2i", s.build.Name, s.build.Namespace, "post-commit") - if err := execPostCommitHook(s.dockerClient, s.build.Spec.PostCommit, buildTag, cname); err != nil { + cName := containerName("s2i", s.build.Name, s.build.Namespace, "post-commit") + if err = execPostCommitHook(s.dockerClient, s.build.Spec.PostCommit, buildTag, cName); err != nil { + s.build.Status.Reason = api.StatusReasonPostCommitHookFailed + s.build.Status.Message = api.StatusMessagePostCommitHookFailed + if updateErr := retryBuildStatusUpdate(s.build, s.client, nil); updateErr != nil { + utilruntime.HandleError(fmt.Errorf("error: An error occured while updating the build status: %v", updateErr)) + } return err } if push { - if err := tagImage(s.dockerClient, buildTag, pushTag); err != nil { + if err = tagImage(s.dockerClient, buildTag, pushTag); err != nil { return err } } - if err := removeImage(s.dockerClient, buildTag); err != nil { + if err = removeImage(s.dockerClient, buildTag); err != nil { glog.V(0).Infof("warning: Failed to remove temporary build tag %v: %v", buildTag, err) } @@ -283,7 +301,12 @@ func (s *S2IBuilder) 
Build() error { glog.V(3).Infof("No push secret provided") } glog.V(0).Infof("\nPushing image %s ...", pushTag) - if err := pushImage(s.dockerClient, pushTag, pushAuthConfig); err != nil { + if err = pushImage(s.dockerClient, pushTag, pushAuthConfig); err != nil { + s.build.Status.Reason = api.StatusReasonPushImageToRegistryFailed + s.build.Status.Message = api.StatusMessagePushImageToRegistryFailed + if updateErr := retryBuildStatusUpdate(s.build, s.client, nil); updateErr != nil { + utilruntime.HandleError(fmt.Errorf("error: An error occured while updating the build status: %v", updateErr)) + } return reportPushFailure(err, authPresent, pushAuthConfig) } glog.V(0).Infof("Push successful") @@ -312,10 +335,18 @@ func (d *downloader) Download(config *s2iapi.Config) (*s2iapi.SourceInfo, error) // fetch source sourceInfo, err := fetchSource(d.s.dockerClient, targetDir, d.s.build, d.timeout, d.in, d.s.gitClient) if err != nil { + d.s.build.Status.Reason = api.StatusReasonFetchSourceFailed + d.s.build.Status.Message = api.StatusMessageFetchSourceFailed + if updateErr := retryBuildStatusUpdate(d.s.build, d.s.client, nil); updateErr != nil { + utilruntime.HandleError(fmt.Errorf("error: An error occured while updating the build status: %v", updateErr)) + } return nil, err } if sourceInfo != nil { - updateBuildRevision(d.s.client, d.s.build, sourceInfo) + revision := updateBuildRevision(d.s.build, sourceInfo) + if updateErr := retryBuildStatusUpdate(d.s.build, d.s.client, revision); updateErr != nil { + utilruntime.HandleError(fmt.Errorf("error: An error occured while updating the build status: %v", updateErr)) + } } if sourceInfo != nil { sourceInfo.ContextDir = config.ContextDir @@ -409,3 +440,7 @@ func copyToVolumeList(artifactsMapping []api.ImageSourcePath) (volumeList s2iapi } return } + +func convertS2IFailureType(reason s2iapi.StepFailureReason, message s2iapi.StepFailureMessage) (api.StatusReason, string) { + return api.StatusReason(reason), fmt.Sprintf("%s", 
message) +} diff --git a/pkg/build/builder/sti_test.go b/pkg/build/builder/sti_test.go index 869edb5c9673..403c1ad205de 100644 --- a/pkg/build/builder/sti_test.go +++ b/pkg/build/builder/sti_test.go @@ -23,12 +23,12 @@ type testStiBuilderFactory struct { // Builder implements builderFactory. It returns a mock S2IBuilder instance that // returns specific errors. -func (factory testStiBuilderFactory) Builder(config *s2iapi.Config, overrides s2ibuild.Overrides) (s2ibuild.Builder, error) { +func (factory testStiBuilderFactory) Builder(config *s2iapi.Config, overrides s2ibuild.Overrides) (s2ibuild.Builder, s2iapi.BuildInfo, error) { // Return a strategy error if non-nil. if factory.getStrategyErr != nil { - return nil, factory.getStrategyErr + return nil, s2iapi.BuildInfo{}, factory.getStrategyErr } - return testBuilder{buildError: factory.buildError}, nil + return testBuilder{buildError: factory.buildError}, s2iapi.BuildInfo{}, nil } // testBuilder is a mock implementation of s2iapi.Builder. @@ -38,7 +38,9 @@ type testBuilder struct { // Build implements s2iapi.Builder. It always returns a mocked build error. 
func (builder testBuilder) Build(config *s2iapi.Config) (*s2iapi.Result, error) { - return nil, builder.buildError + return &s2iapi.Result{ + BuildInfo: s2iapi.BuildInfo{}, + }, builder.buildError } type testS2IBuilderConfig struct { diff --git a/pkg/build/controller/controller.go b/pkg/build/controller/controller.go index 61c6234611a1..d1919b8bbdbe 100644 --- a/pkg/build/controller/controller.go +++ b/pkg/build/controller/controller.go @@ -11,6 +11,7 @@ import ( "k8s.io/kubernetes/pkg/client/cache" kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/record" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" builddefaults "github.com/openshift/origin/pkg/build/admission/defaults" buildoverrides "github.com/openshift/origin/pkg/build/admission/overrides" @@ -72,10 +73,12 @@ func (bc *BuildController) CancelBuild(build *buildapi.Build) error { } build.Status.Phase = buildapi.BuildPhaseCancelled - build.Status.Reason = "" - build.Status.Message = "" now := unversioned.Now() build.Status.CompletionTimestamp = &now + // set the status details for the cancelled build before updating the build + // object. 
+ build.Status.Reason = buildapi.StatusReasonCancelledBuild + build.Status.Message = buildapi.StatusMessageCancelledBuild if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil { return fmt.Errorf("Failed to update build %s/%s: %v", build.Namespace, build.Name, err) } @@ -111,6 +114,10 @@ func (bc *BuildController) HandleBuild(build *buildapi.Build) error { glog.V(5).Infof("Marking build %s/%s as cancelled", build.Namespace, build.Name) if err := bc.CancelBuild(build); err != nil { build.Status.Reason = buildapi.StatusReasonCancelBuildFailed + build.Status.Message = buildapi.StatusMessageCancelBuildFailed + if err = bc.BuildUpdater.Update(build.Namespace, build); err != nil { + utilruntime.HandleError(fmt.Errorf("Failed to update build %s/%s: %v", build.Namespace, build.Name, err)) + } return fmt.Errorf("Failed to cancel build %s/%s: %v, will retry", build.Namespace, build.Name, err) } } @@ -147,8 +154,6 @@ func (bc *BuildController) nextBuildPhase(build *buildapi.Build) error { if build.Status.Cancelled { glog.V(4).Infof("Cancelling build %s/%s.", build.Namespace, build.Name) build.Status.Phase = buildapi.BuildPhaseCancelled - build.Status.Reason = "" - build.Status.Message = "" return nil } @@ -156,6 +161,7 @@ func (bc *BuildController) nextBuildPhase(build *buildapi.Build) error { ref, err := bc.resolveOutputDockerImageReference(build) if err != nil { build.Status.Reason = buildapi.StatusReasonInvalidOutputReference + build.Status.Message = buildapi.StatusMessageInvalidOutputRef return err } build.Status.OutputDockerImageReference = ref @@ -185,6 +191,7 @@ func (bc *BuildController) nextBuildPhase(build *buildapi.Build) error { podSpec, err := bc.BuildStrategy.CreateBuildPod(buildCopy) if err != nil { build.Status.Reason = buildapi.StatusReasonCannotCreateBuildPodSpec + build.Status.Message = buildapi.StatusMessageCannotCreateBuildPodSpec if strategy.IsFatal(err) { return strategy.FatalError(fmt.Sprintf("failed to create a build pod spec for build 
%s/%s: %v", build.Namespace, build.Name, err)) } @@ -201,13 +208,14 @@ func (bc *BuildController) nextBuildPhase(build *buildapi.Build) error { if _, err := bc.PodManager.CreatePod(build.Namespace, podSpec); err != nil { if errors.IsAlreadyExists(err) { - bc.Recorder.Eventf(build, kapi.EventTypeWarning, "failedCreate", "Pod already exists: %s/%s", podSpec.Namespace, podSpec.Name) + bc.Recorder.Eventf(build, kapi.EventTypeWarning, "FailedCreate", "Pod already exists: %s/%s", podSpec.Namespace, podSpec.Name) glog.V(4).Infof("Build pod already existed: %#v", podSpec) return nil } // Log an event if the pod is not created (most likely due to quota denial). bc.Recorder.Eventf(build, kapi.EventTypeWarning, "FailedCreate", "Error creating: %v", err) build.Status.Reason = buildapi.StatusReasonCannotCreateBuildPod + build.Status.Message = buildapi.StatusMessageCannotCreateBuildPod return fmt.Errorf("failed to create build pod: %v", err) } setBuildPodNameAnnotation(build, podSpec.Name) @@ -294,20 +302,24 @@ func (bc *BuildPodController) HandlePod(pod *kapi.Pod) error { case kapi.PodRunning: // The pod's still running build.Status.Reason = "" + build.Status.Message = "" nextStatus = buildapi.BuildPhaseRunning case kapi.PodPending: build.Status.Reason = "" + build.Status.Message = "" nextStatus = buildapi.BuildPhasePending if secret := build.Spec.Output.PushSecret; secret != nil && currentReason != buildapi.StatusReasonMissingPushSecret { if _, err := bc.SecretClient.Secrets(build.Namespace).Get(secret.Name); err != nil && errors.IsNotFound(err) { build.Status.Reason = buildapi.StatusReasonMissingPushSecret + build.Status.Message = buildapi.StatusMessageMissingPushSecret glog.V(4).Infof("Setting reason for pending build to %q due to missing secret %s/%s", build.Status.Reason, build.Namespace, secret.Name) } } case kapi.PodSucceeded: + build.Status.Reason = "" + build.Status.Message = "" // Check the exit codes of all the containers in the pod nextStatus = 
buildapi.BuildPhaseComplete - build.Status.Reason = "" if len(pod.Status.ContainerStatuses) == 0 { // no containers in the pod means something went badly wrong, so the build // should be failed. @@ -322,8 +334,10 @@ func (bc *BuildPodController) HandlePod(pod *kapi.Pod) error { } } case kapi.PodFailed: - build.Status.Reason = "" nextStatus = buildapi.BuildPhaseFailed + default: + build.Status.Reason = "" + build.Status.Message = "" } // Update the build object when it progress to a next state or the reason for @@ -336,7 +350,7 @@ func (bc *BuildPodController) HandlePod(pod *kapi.Pod) error { } glog.V(4).Infof("Updating build %s/%s status %s -> %s%s", build.Namespace, build.Name, build.Status.Phase, nextStatus, reason) build.Status.Phase = nextStatus - build.Status.Message = "" + if buildutil.IsBuildComplete(build) { now := unversioned.Now() build.Status.CompletionTimestamp = &now @@ -398,7 +412,7 @@ func (bc *BuildPodDeleteController) HandleBuildPodDeletion(pod *kapi.Pod) error glog.V(4).Infof("Updating build %s/%s status %s -> %s", build.Namespace, build.Name, build.Status.Phase, nextStatus) build.Status.Phase = nextStatus build.Status.Reason = buildapi.StatusReasonBuildPodDeleted - build.Status.Message = "The pod for this build was deleted before the build completed." 
+ build.Status.Message = buildapi.StatusMessageBuildPodDeleted now := unversioned.Now() build.Status.CompletionTimestamp = &now if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil { diff --git a/pkg/build/controller/factory/factory.go b/pkg/build/controller/factory/factory.go index 82da488ba2e5..79a6d40bbcfb 100644 --- a/pkg/build/controller/factory/factory.go +++ b/pkg/build/controller/factory/factory.go @@ -53,6 +53,7 @@ func limitedLogAndRetry(buildupdater buildclient.BuildUpdater, maxTimeout time.D build.Status.Phase = buildapi.BuildPhaseFailed if !isFatal { build.Status.Reason = buildapi.StatusReasonExceededRetryTimeout + build.Status.Message = buildapi.StatusMessageExceededRetryTimeout } build.Status.Message = errors.ErrorToSentence(err) now := unversioned.Now() diff --git a/pkg/build/registry/build/strategy.go b/pkg/build/registry/build/strategy.go index 1dfe06f1d1d9..986f7e704187 100644 --- a/pkg/build/registry/build/strategy.go +++ b/pkg/build/registry/build/strategy.go @@ -2,6 +2,7 @@ package build import ( "fmt" + "reflect" kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" @@ -109,26 +110,34 @@ type detailsStrategy struct { } // Prepares a build for update by only allowing an update to build details. 
-// For now, this is the Spec.Revision field +// Build details currently consists of: Spec.Revision, Status.Reason, and +// Status.Message, all of which are updated from within the build pod func (detailsStrategy) PrepareForUpdate(ctx kapi.Context, obj, old runtime.Object) { newBuild := obj.(*api.Build) oldBuild := old.(*api.Build) revision := newBuild.Spec.Revision + message := newBuild.Status.Message + reason := newBuild.Status.Reason *newBuild = *oldBuild newBuild.Spec.Revision = revision + newBuild.Status.Reason = reason + newBuild.Status.Message = message } // Validates that an update is valid by ensuring that no Revision exists and that it's not getting updated to blank func (detailsStrategy) ValidateUpdate(ctx kapi.Context, obj, old runtime.Object) field.ErrorList { newBuild := obj.(*api.Build) oldBuild := old.(*api.Build) + oldRevision := oldBuild.Spec.Revision + newRevision := newBuild.Spec.Revision errors := field.ErrorList{} - if oldBuild.Spec.Revision != nil { - // If there was already a revision, then return an error - errors = append(errors, field.Duplicate(field.NewPath("status", "revision"), oldBuild.Spec.Revision)) + + if newRevision == nil && oldRevision != nil { + errors = append(errors, field.Invalid(field.NewPath("spec", "revision"), nil, "cannot set an empty revision in build spec")) } - if newBuild.Spec.Revision == nil { - errors = append(errors, field.Invalid(field.NewPath("status", "revision"), nil, "cannot set an empty revision in build status")) + if !reflect.DeepEqual(oldRevision, newRevision) && oldRevision != nil { + // If there was already a revision, then return an error + errors = append(errors, field.Duplicate(field.NewPath("spec", "revision"), oldBuild.Spec.Revision)) } return errors } diff --git a/pkg/client/builds.go b/pkg/client/builds.go index 953c2bee4bae..15f3f4a2539a 100644 --- a/pkg/client/builds.go +++ b/pkg/client/builds.go @@ -39,42 +39,41 @@ func newBuilds(c *Client, namespace string) *builds { } // List returns a list 
of builds that match the label and field selectors. -func (c *builds) List(opts kapi.ListOptions) (result *buildapi.BuildList, err error) { - result = &buildapi.BuildList{} - err = c.r.Get(). +func (c *builds) List(opts kapi.ListOptions) (*buildapi.BuildList, error) { + result := &buildapi.BuildList{} + err := c.r.Get(). Namespace(c.ns). Resource("builds"). VersionedParams(&opts, kapi.ParameterCodec). Do(). Into(result) - return + return result, err } // Get returns information about a particular build and error if one occurs. -func (c *builds) Get(name string) (result *buildapi.Build, err error) { - result = &buildapi.Build{} - err = c.r.Get().Namespace(c.ns).Resource("builds").Name(name).Do().Into(result) - return +func (c *builds) Get(name string) (*buildapi.Build, error) { + result := &buildapi.Build{} + err := c.r.Get().Namespace(c.ns).Resource("builds").Name(name).Do().Into(result) + return result, err } // Create creates new build. Returns the server's representation of the build and error if one occurs. -func (c *builds) Create(build *buildapi.Build) (result *buildapi.Build, err error) { - result = &buildapi.Build{} - err = c.r.Post().Namespace(c.ns).Resource("builds").Body(build).Do().Into(result) - return +func (c *builds) Create(build *buildapi.Build) (*buildapi.Build, error) { + result := &buildapi.Build{} + err := c.r.Post().Namespace(c.ns).Resource("builds").Body(build).Do().Into(result) + return result, err } // Update updates the build on server. Returns the server's representation of the build and error if one occurs. 
-func (c *builds) Update(build *buildapi.Build) (result *buildapi.Build, err error) { - result = &buildapi.Build{} - err = c.r.Put().Namespace(c.ns).Resource("builds").Name(build.Name).Body(build).Do().Into(result) - return +func (c *builds) Update(build *buildapi.Build) (*buildapi.Build, error) { + result := &buildapi.Build{} + err := c.r.Put().Namespace(c.ns).Resource("builds").Name(build.Name).Body(build).Do().Into(result) + return result, err } // Delete deletes a build, returns error if one occurs. -func (c *builds) Delete(name string) (err error) { - err = c.r.Delete().Namespace(c.ns).Resource("builds").Name(name).Do().Error() - return +func (c *builds) Delete(name string) error { + return c.r.Delete().Namespace(c.ns).Resource("builds").Name(name).Do().Error() } // Watch returns a watch.Interface that watches the requested builds @@ -88,17 +87,17 @@ func (c *builds) Watch(opts kapi.ListOptions) (watch.Interface, error) { } // Clone creates a clone of a build returning new object or an error -func (c *builds) Clone(request *buildapi.BuildRequest) (result *buildapi.Build, err error) { - result = &buildapi.Build{} - err = c.r.Post().Namespace(c.ns).Resource("builds").Name(request.Name).SubResource("clone").Body(request).Do().Into(result) - return +func (c *builds) Clone(request *buildapi.BuildRequest) (*buildapi.Build, error) { + result := &buildapi.Build{} + err := c.r.Post().Namespace(c.ns).Resource("builds").Name(request.Name).SubResource("clone").Body(request).Do().Into(result) + return result, err } // UpdateDetails updates the build details for a given build. // Currently only the Spec.Revision is allowed to be updated. // Returns the server's representation of the build and error if one occurs. 
-func (c *builds) UpdateDetails(build *buildapi.Build) (result *buildapi.Build, err error) { - result = &buildapi.Build{} - err = c.r.Put().Namespace(c.ns).Resource("builds").Name(build.Name).SubResource("details").Body(build).Do().Into(result) - return +func (c *builds) UpdateDetails(build *buildapi.Build) (*buildapi.Build, error) { + result := &buildapi.Build{} + err := c.r.Put().Namespace(c.ns).Resource("builds").Name(build.Name).SubResource("details").Body(build).Do().Into(result) + return result, err } diff --git a/pkg/cmd/server/bootstrappolicy/policy.go b/pkg/cmd/server/bootstrappolicy/policy.go index 77a268d5761f..4022f4e731eb 100644 --- a/pkg/cmd/server/bootstrappolicy/policy.go +++ b/pkg/cmd/server/bootstrappolicy/policy.go @@ -475,6 +475,7 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole { // allow auto-provisioning when pushing an image that doesn't have an imagestream yet authorizationapi.NewRule("create").Groups(imageGroup).Resources("imagestreams").RuleOrDie(), authorizationapi.NewRule("update").Groups(buildGroup).Resources("builds/details").RuleOrDie(), + authorizationapi.NewRule("get").Groups(buildGroup).Resources("builds").RuleOrDie(), }, }, { diff --git a/test/extended/builds/failure_status.go b/test/extended/builds/failure_status.go new file mode 100644 index 000000000000..9aeed773bb0e --- /dev/null +++ b/test/extended/builds/failure_status.go @@ -0,0 +1,114 @@ +package builds + +import ( + g "github.com/onsi/ginkgo" + o "github.com/onsi/gomega" + + s2istatus "github.com/openshift/source-to-image/pkg/util/status" + + buildapi "github.com/openshift/origin/pkg/build/api" + exutil "github.com/openshift/origin/test/extended/util" +) + +var _ = g.Describe("[builds][Slow] update failure status", func() { + defer g.GinkgoRecover() + + var ( + // convert the s2i failure cases to our own StatusReason + reasonAssembleFailed = buildapi.StatusReason(s2istatus.ReasonAssembleFailed) + messageAssembleFailed = 
string(s2istatus.ReasonMessageAssembleFailed) + + oc = exutil.NewCLI("update-buildstatus", exutil.KubeConfigPath()) + postCommitHookFixture = exutil.FixturePath("testdata", "statusfail-postcommithook.yaml") + gitCloneFixture = exutil.FixturePath("testdata", "statusfail-fetchsource.yaml") + builderImageFixture = exutil.FixturePath("testdata", "statusfail-fetchbuilderimage.yaml") + pushToRegistryFixture = exutil.FixturePath("testdata", "statusfail-pushtoregistry.yaml") + failedAssembleFixture = exutil.FixturePath("testdata", "statusfail-failedassemble.yaml") + ) + + g.JustBeforeEach(func() { + g.By("waiting for the builder service account") + err := exutil.WaitForBuilderAccount(oc.KubeClient().ServiceAccounts(oc.Namespace())) + o.Expect(err).NotTo(o.HaveOccurred()) + }) + + g.Describe("Build status postcommit hook failure", func() { + g.It("should contain the post commit hook failure reason and message", func() { + err := oc.Run("create").Args("-f", postCommitHookFixture).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + br, err := exutil.StartBuildAndWait(oc, "failstatus-postcommithook") + o.Expect(err).NotTo(o.HaveOccurred()) + br.AssertFailure() + + build, err := oc.Client().Builds(oc.Namespace()).Get(br.Build.Name) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(build.Status.Reason).To(o.Equal(buildapi.StatusReasonPostCommitHookFailed)) + o.Expect(build.Status.Message).To(o.Equal(buildapi.StatusMessagePostCommitHookFailed)) + }) + }) + + g.Describe("Build status fetch source failure", func() { + g.It("should contain the fetch source failure reason and message", func() { + err := oc.Run("create").Args("-f", gitCloneFixture).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + br, err := exutil.StartBuildAndWait(oc, "failstatus-fetchsource") + o.Expect(err).NotTo(o.HaveOccurred()) + br.AssertFailure() + + build, err := oc.Client().Builds(oc.Namespace()).Get(br.Build.Name) + o.Expect(err).NotTo(o.HaveOccurred()) + 
o.Expect(build.Status.Reason).To(o.Equal(buildapi.StatusReasonFetchSourceFailed)) + o.Expect(build.Status.Message).To(o.Equal(buildapi.StatusMessageFetchSourceFailed)) + }) + }) + + g.Describe("Build status fetch builder image failure", func() { + g.It("should contain the fetch builder image failure reason and message", func() { + err := oc.Run("create").Args("-f", builderImageFixture).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + br, err := exutil.StartBuildAndWait(oc, "failstatus-builderimage") + o.Expect(err).NotTo(o.HaveOccurred()) + br.AssertFailure() + + build, err := oc.Client().Builds(oc.Namespace()).Get(br.Build.Name) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(build.Status.Reason).To(o.Equal(buildapi.StatusReasonPullBuilderImageFailed)) + o.Expect(build.Status.Message).To(o.Equal(buildapi.StatusMessagePullBuilderImageFailed)) + }) + }) + + g.Describe("Build status push image to registry failure", func() { + g.It("should contain the image push to registry failure reason and message", func() { + err := oc.Run("create").Args("-f", pushToRegistryFixture).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + br, err := exutil.StartBuildAndWait(oc, "failstatus-pushtoregistry") + o.Expect(err).NotTo(o.HaveOccurred()) + br.AssertFailure() + + build, err := oc.Client().Builds(oc.Namespace()).Get(br.Build.Name) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(build.Status.Reason).To(o.Equal(buildapi.StatusReasonPushImageToRegistryFailed)) + o.Expect(build.Status.Message).To(o.Equal(buildapi.StatusMessagePushImageToRegistryFailed)) + }) + }) + + g.Describe("Build status failed assemble container", func() { + g.It("should contain the failure reason related to an assemble script failing in s2i", func() { + err := oc.Run("create").Args("-f", failedAssembleFixture).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + br, err := exutil.StartBuildAndWait(oc, "failstatus-assemblescript") + o.Expect(err).NotTo(o.HaveOccurred()) + br.AssertFailure() + + 
build, err := oc.Client().Builds(oc.Namespace()).Get(br.Build.Name) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(build.Status.Reason).To(o.Equal(reasonAssembleFailed)) + o.Expect(build.Status.Message).To(o.Equal(messageAssembleFailed)) + }) + }) +}) diff --git a/test/extended/testdata/statusfail-failedassemble.yaml b/test/extended/testdata/statusfail-failedassemble.yaml new file mode 100644 index 000000000000..3ac8e28b072f --- /dev/null +++ b/test/extended/testdata/statusfail-failedassemble.yaml @@ -0,0 +1,13 @@ +kind: BuildConfig +apiVersion: v1 +metadata: + name: failstatus-assemblescript +spec: + source: + type: Binary + strategy: + sourceStrategy: + from: + kind: DockerImage + name: centos/ruby-23-centos7 + type: Source diff --git a/test/extended/testdata/statusfail-fetchbuilderimage.yaml b/test/extended/testdata/statusfail-fetchbuilderimage.yaml new file mode 100644 index 000000000000..c60d06e86056 --- /dev/null +++ b/test/extended/testdata/statusfail-fetchbuilderimage.yaml @@ -0,0 +1,14 @@ +kind: BuildConfig +apiVersion: v1 +metadata: + name: failstatus-builderimage +spec: + source: + git: + uri: "https://github.com/openshift/ruby-hello-world.git" + strategy: + dockerStrategy: + from: + kind: DockerImage + name: fail/me:latest + type: Docker diff --git a/test/extended/testdata/statusfail-fetchsource.yaml b/test/extended/testdata/statusfail-fetchsource.yaml new file mode 100644 index 000000000000..1df5da84de45 --- /dev/null +++ b/test/extended/testdata/statusfail-fetchsource.yaml @@ -0,0 +1,14 @@ +kind: BuildConfig +apiVersion: v1 +metadata: + name: failstatus-fetchsource +spec: + source: + git: + uri: "https://failure/s2i/status" + strategy: + dockerStrategy: + from: + kind: DockerImage + name: ruby:latest + type: Docker diff --git a/test/extended/testdata/statusfail-postcommithook.yaml b/test/extended/testdata/statusfail-postcommithook.yaml new file mode 100644 index 000000000000..062fe334eba4 --- /dev/null +++ 
b/test/extended/testdata/statusfail-postcommithook.yaml @@ -0,0 +1,17 @@ +kind: BuildConfig +apiVersion: v1 +metadata: + name: failstatus-postcommithook +spec: + source: + git: + uri: "https://github.com/openshift/ruby-hello-world.git" + postCommit: + args: + - failme + strategy: + sourceStrategy: + from: + kind: DockerImage + name: centos/ruby-23-centos7:latest + type: Source diff --git a/test/extended/testdata/statusfail-pushtoregistry.yaml b/test/extended/testdata/statusfail-pushtoregistry.yaml new file mode 100644 index 000000000000..6025b88a2514 --- /dev/null +++ b/test/extended/testdata/statusfail-pushtoregistry.yaml @@ -0,0 +1,18 @@ +kind: BuildConfig +apiVersion: v1 +metadata: + name: failstatus-pushtoregistry +spec: + source: + git: + uri: "https://github.com/openshift/ruby-hello-world.git" + output: + to: + kind: DockerImage + name: bogus.registry/image:latest + strategy: + sourceStrategy: + from: + kind: DockerImage + name: centos/ruby-23-centos7:latest + type: Source diff --git a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml index 50b93cb64bf9..cc6a581530d2 100644 --- a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml +++ b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml @@ -1637,6 +1637,13 @@ items: - builds/details verbs: - update + - apiGroups: + - "" + attributeRestrictions: null + resources: + - builds + verbs: + - get - apiVersion: v1 kind: ClusterRole metadata: