diff --git a/README.md b/README.md index e04d26fc53..18e022e8b8 100644 --- a/README.md +++ b/README.md @@ -157,7 +157,7 @@ To find out more on what's the best strategy or what else can Shipwright do for | Dependency | Supported versions | | -------------------------------------| ---------------------------- | | [Kubernetes](https://kubernetes.io/) | v1.22,\*, v1.23.\*, v1.24.\* | -| [Tekton](https://tekton.dev) | v0.34.\*, v0.35.\*, v0.36.\*, v0.37.\*, v0.38.\* | +| [Tekton](https://tekton.dev) | v0.36.\*, v0.37.\*, v0.38.\* | ### Platform support diff --git a/go.mod b/go.mod index 0bd8b45ca2..d26f23dfc0 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/prometheus/client_model v0.2.0 github.com/spf13/cobra v1.5.0 github.com/spf13/pflag v1.0.5 - github.com/tektoncd/pipeline v0.35.1 + github.com/tektoncd/pipeline v0.38.3 go.uber.org/zap v1.22.0 k8s.io/api v0.23.5 k8s.io/apimachinery v0.23.5 diff --git a/go.sum b/go.sum index f171a068d4..e909dbf166 100644 --- a/go.sum +++ b/go.sum @@ -145,7 +145,7 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudevents/sdk-go/v2 v2.5.0 h1:Ts6aLHbBUJfcNcZ4ouAfJ4+Np7SE1Yf2w4ADKRCd7Fo= +github.com/cloudevents/sdk-go/v2 v2.10.1 h1:qNFovJ18fWOd8Q9ydWJPk1oiFudXyv1GxJIP7MwPjuM= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -690,8 +690,8 @@ 
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tektoncd/pipeline v0.35.1 h1:cgEVV+DqXQRyduFDnqLjuK6uMcau3betM0iV+F1vjPQ= -github.com/tektoncd/pipeline v0.35.1/go.mod h1:+Jc1ESROXrzosXmpAKMWY8CJhUu52mK+wigjpVMgOio= +github.com/tektoncd/pipeline v0.38.3 h1:yMmEW5HRlCDJhepfPzaKhjbhZfAEf9GR34bJMZE5wQY= +github.com/tektoncd/pipeline v0.38.3/go.mod h1:9uQZ6PdOZXPtoceupLMyChXUR6elsTuHpVNlEGAIJXU= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= diff --git a/pkg/config/config.go b/pkg/config/config.go index 315ab8b3ab..74e856ca85 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -12,6 +12,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + pipeline "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" "k8s.io/utils/pointer" ) @@ -92,10 +93,10 @@ var ( // can be set to use on the Build controllers type Config struct { CtxTimeOut time.Duration - GitContainerTemplate corev1.Container - MutateImageContainerTemplate corev1.Container - BundleContainerTemplate corev1.Container - WaiterContainerTemplate corev1.Container + GitContainerTemplate pipeline.Step + MutateImageContainerTemplate pipeline.Step + BundleContainerTemplate pipeline.Step + WaiterContainerTemplate pipeline.Step RemoteArtifactsContainerImage string TerminationLogPath string Prometheus PrometheusConfig @@ -144,7 +145,7 @@ type KubeAPIOptions struct { func 
NewDefaultConfig() *Config { return &Config{ CtxTimeOut: contextTimeout, - GitContainerTemplate: corev1.Container{ + GitContainerTemplate: pipeline.Step{ Image: gitDefaultImage, Command: []string{ "/ko-app/git", @@ -154,7 +155,7 @@ func NewDefaultConfig() *Config { RunAsGroup: nonRoot, }, }, - BundleContainerTemplate: corev1.Container{ + BundleContainerTemplate: pipeline.Step{ Image: bundleDefaultImage, Command: []string{ "/ko-app/bundle", @@ -165,7 +166,7 @@ func NewDefaultConfig() *Config { }, }, RemoteArtifactsContainerImage: remoteArtifactsDefaultImage, - MutateImageContainerTemplate: corev1.Container{ + MutateImageContainerTemplate: pipeline.Step{ Image: mutateImageDefaultImage, Command: []string{ "/ko-app/mutate-image", @@ -192,7 +193,7 @@ func NewDefaultConfig() *Config { }, }, }, - WaiterContainerTemplate: corev1.Container{ + WaiterContainerTemplate: pipeline.Step{ Image: waiterDefaultImage, Command: []string{ "/ko-app/waiter", @@ -247,7 +248,7 @@ func (c *Config) SetConfigFromEnv() error { } if gitContainerTemplate := os.Getenv(gitContainerTemplateEnvVar); gitContainerTemplate != "" { - c.GitContainerTemplate = corev1.Container{} + c.GitContainerTemplate = pipeline.Step{} if err := json.Unmarshal([]byte(gitContainerTemplate), &c.GitContainerTemplate); err != nil { return err } @@ -262,7 +263,7 @@ func (c *Config) SetConfigFromEnv() error { } if mutateImageContainerTemplate := os.Getenv(mutateImageContainerTemplateEnvVar); mutateImageContainerTemplate != "" { - c.MutateImageContainerTemplate = corev1.Container{} + c.MutateImageContainerTemplate = pipeline.Step{} if err := json.Unmarshal([]byte(mutateImageContainerTemplate), &c.MutateImageContainerTemplate); err != nil { return err } @@ -283,7 +284,7 @@ func (c *Config) SetConfigFromEnv() error { } if bundleContainerTemplate := os.Getenv(bundleContainerTemplateEnvVar); bundleContainerTemplate != "" { - c.BundleContainerTemplate = corev1.Container{} + c.BundleContainerTemplate = pipeline.Step{} if err := 
json.Unmarshal([]byte(bundleContainerTemplate), &c.BundleContainerTemplate); err != nil { return err } @@ -298,7 +299,7 @@ func (c *Config) SetConfigFromEnv() error { } if waiterContainerTemplate := os.Getenv(waiterContainerTemplateEnvVar); waiterContainerTemplate != "" { - c.WaiterContainerTemplate = corev1.Container{} + c.WaiterContainerTemplate = pipeline.Step{} if err := json.Unmarshal([]byte(waiterContainerTemplate), &c.WaiterContainerTemplate); err != nil { return err } diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index ea67f09101..b4ab76d215 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -10,6 +10,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + pipeline "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/utils/pointer" @@ -107,7 +108,7 @@ var _ = Describe("Config", func() { } configWithEnvVariableOverrides(overrides, func(config *Config) { - Expect(config.GitContainerTemplate).To(Equal(corev1.Container{ + Expect(config.GitContainerTemplate).To(Equal(pipeline.Step{ Image: "myregistry/custom/git-image", Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -126,7 +127,7 @@ var _ = Describe("Config", func() { configWithEnvVariableOverrides(overrides, func(config *Config) { nonRoot := pointer.Int64(1000) - Expect(config.GitContainerTemplate).To(Equal(corev1.Container{ + Expect(config.GitContainerTemplate).To(Equal(pipeline.Step{ Image: "myregistry/custom/git-image", Command: []string{ "/ko-app/git", @@ -146,7 +147,7 @@ var _ = Describe("Config", func() { } configWithEnvVariableOverrides(overrides, func(config *Config) { - Expect(config.GitContainerTemplate).To(Equal(corev1.Container{ + Expect(config.GitContainerTemplate).To(Equal(pipeline.Step{ Image: "myregistry/custom/git-image:override", Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -164,7 +165,7 @@ var _ = 
Describe("Config", func() { } configWithEnvVariableOverrides(overrides, func(config *Config) { - Expect(config.MutateImageContainerTemplate).To(Equal(corev1.Container{ + Expect(config.MutateImageContainerTemplate).To(Equal(pipeline.Step{ Image: "myregistry/custom/mutate-image", Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -182,7 +183,7 @@ var _ = Describe("Config", func() { } configWithEnvVariableOverrides(overrides, func(config *Config) { - Expect(config.WaiterContainerTemplate).To(Equal(corev1.Container{ + Expect(config.WaiterContainerTemplate).To(Equal(pipeline.Step{ Image: "myregistry/custom/image", Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -201,7 +202,7 @@ var _ = Describe("Config", func() { } configWithEnvVariableOverrides(overrides, func(config *Config) { - Expect(config.MutateImageContainerTemplate).To(Equal(corev1.Container{ + Expect(config.MutateImageContainerTemplate).To(Equal(pipeline.Step{ Image: "myregistry/custom/mutate-image:override", Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -221,7 +222,7 @@ var _ = Describe("Config", func() { configWithEnvVariableOverrides(overrides, func(config *Config) { nonRoot := pointer.Int64(1000) - Expect(config.WaiterContainerTemplate).To(Equal(corev1.Container{ + Expect(config.WaiterContainerTemplate).To(Equal(pipeline.Step{ Image: "myregistry/custom/image", Command: []string{"/ko-app/waiter"}, Args: []string{"start"}, @@ -240,7 +241,7 @@ var _ = Describe("Config", func() { } configWithEnvVariableOverrides(overrides, func(config *Config) { - Expect(config.WaiterContainerTemplate).To(Equal(corev1.Container{ + Expect(config.WaiterContainerTemplate).To(Equal(pipeline.Step{ Image: "myregistry/custom/image:override", Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ diff --git a/pkg/reconciler/buildrun/resources/mutate.go b/pkg/reconciler/buildrun/resources/mutate.go index 558b598e0f..790dc1ee06 100644 --- 
a/pkg/reconciler/buildrun/resources/mutate.go +++ b/pkg/reconciler/buildrun/resources/mutate.go @@ -21,17 +21,15 @@ func amendTaskSpecWithImageMutate( buildOutput, buildRunOutput buildv1alpha1.Image, ) { // initialize the step from the template - mutateStep := tektonv1beta1.Step{ - Container: *cfg.MutateImageContainerTemplate.DeepCopy(), - } + mutateStep := *cfg.MutateImageContainerTemplate.DeepCopy() - mutateStep.Container.Name = imageMutateContainerName + mutateStep.Name = imageMutateContainerName // if labels or annotations are specified in buildRun then merge them with build's labels := mergeMaps(buildOutput.Labels, buildRunOutput.Labels) annotations := mergeMaps(buildOutput.Annotations, buildRunOutput.Annotations) - mutateStep.Container.Args = mutateArgs(annotations, labels) + mutateStep.Args = mutateArgs(annotations, labels) // append the mutate step taskSpec.Steps = append(taskSpec.Steps, mutateStep) diff --git a/pkg/reconciler/buildrun/resources/params_test.go b/pkg/reconciler/buildrun/resources/params_test.go index 87228f38fe..89583affb8 100644 --- a/pkg/reconciler/buildrun/resources/params_test.go +++ b/pkg/reconciler/buildrun/resources/params_test.go @@ -309,20 +309,18 @@ var _ = Describe("isStepReferencingParameter", func() { Context("for a Step referencing parameters in different ways", func() { step := &pipeline.Step{ - Container: corev1.Container{ - Command: []string{ - "some-command", - "$(params.first-param)", - }, - Args: []string{ - "--flag=$(params['dot.param'])", - "$(params.array-param[*])", - }, - Env: []corev1.EnvVar{{ - Name: "MY_ENV_VAR", - Value: "hohe $(params[\"another.dot.param\"])", - }}, + Command: []string{ + "some-command", + "$(params.first-param)", + }, + Args: []string{ + "--flag=$(params['dot.param'])", + "$(params.array-param[*])", }, + Env: []corev1.EnvVar{{ + Name: "MY_ENV_VAR", + Value: "hohe $(params[\"another.dot.param\"])", + }}, } It("returns true for a classical referenced parameter in the command", func() { @@ -357,19 
+355,15 @@ var _ = Describe("HandleTaskRunParam", func() { TaskSpec: &pipeline.TaskSpec{ Steps: []pipeline.Step{ { - Container: corev1.Container{ - Name: "first-container", - Args: []string{ - "--an-argument=$(params.string-parameter)", - }, + Name: "first-container", + Args: []string{ + "--an-argument=$(params.string-parameter)", }, }, { - Container: corev1.Container{ - Name: "second-container", - Args: []string{ - "$(params.array-parameter[*])", - }, + Name: "second-container", + Args: []string{ + "$(params.array-parameter[*])", }, }, }, diff --git a/pkg/reconciler/buildrun/resources/results.go b/pkg/reconciler/buildrun/resources/results.go index ecd1260905..4eb12de686 100644 --- a/pkg/reconciler/buildrun/resources/results.go +++ b/pkg/reconciler/buildrun/resources/results.go @@ -43,10 +43,10 @@ func updateBuildRunStatusWithOutputResult(ctx context.Context, buildRun *build.B for _, result := range taskRunResult { switch result.Name { case generateOutputResultName(imageDigestResult): - buildRun.Status.Output.Digest = result.Value + buildRun.Status.Output.Digest = result.Value.StringVal case generateOutputResultName(imageSizeResult): - if size, err := strconv.ParseInt(result.Value, 10, 64); err != nil { + if size, err := strconv.ParseInt(result.Value.StringVal, 10, 64); err != nil { ctxlog.Info(ctx, "invalid value for output image size from taskRun result", namespace, request.Namespace, name, request.Name, "error", err) } else { buildRun.Status.Output.Size = size diff --git a/pkg/reconciler/buildrun/resources/results_test.go b/pkg/reconciler/buildrun/resources/results_test.go index da26cb5a3c..6249457ea2 100644 --- a/pkg/reconciler/buildrun/resources/results_test.go +++ b/pkg/reconciler/buildrun/resources/results_test.go @@ -54,12 +54,18 @@ var _ = Describe("TaskRun results to BuildRun", func() { tr.Status.TaskRunResults = append(tr.Status.TaskRunResults, pipelinev1beta1.TaskRunResult{ - Name: "shp-source-default-commit-sha", - Value: commitSha, + Name: 
"shp-source-default-commit-sha", + Value: pipelinev1beta1.ArrayOrString{ + Type: pipelinev1beta1.ParamTypeString, + StringVal: commitSha, + }, }, pipelinev1beta1.TaskRunResult{ - Name: "shp-source-default-commit-author", - Value: "foo bar", + Name: "shp-source-default-commit-author", + Value: pipelinev1beta1.ArrayOrString{ + Type: pipelinev1beta1.ParamTypeString, + StringVal: "foo bar", + }, }) resources.UpdateBuildRunUsingTaskResults(ctx, br, tr.Status.TaskRunResults, taskRunRequest) @@ -77,8 +83,11 @@ var _ = Describe("TaskRun results to BuildRun", func() { tr.Status.TaskRunResults = append(tr.Status.TaskRunResults, pipelinev1beta1.TaskRunResult{ - Name: "shp-source-default-image-digest", - Value: bundleImageDigest, + Name: "shp-source-default-image-digest", + Value: pipelinev1beta1.ArrayOrString{ + Type: pipelinev1beta1.ParamTypeString, + StringVal: bundleImageDigest, + }, }) resources.UpdateBuildRunUsingTaskResults(ctx, br, tr.Status.TaskRunResults, taskRunRequest) @@ -92,12 +101,18 @@ var _ = Describe("TaskRun results to BuildRun", func() { tr.Status.TaskRunResults = append(tr.Status.TaskRunResults, pipelinev1beta1.TaskRunResult{ - Name: "shp-image-digest", - Value: imageDigest, + Name: "shp-image-digest", + Value: pipelinev1beta1.ArrayOrString{ + Type: pipelinev1beta1.ParamTypeString, + StringVal: imageDigest, + }, }, pipelinev1beta1.TaskRunResult{ - Name: "shp-image-size", - Value: "230", + Name: "shp-image-size", + Value: pipelinev1beta1.ArrayOrString{ + Type: pipelinev1beta1.ParamTypeString, + StringVal: "230", + }, }) resources.UpdateBuildRunUsingTaskResults(ctx, br, tr.Status.TaskRunResults, taskRunRequest) @@ -113,20 +128,32 @@ var _ = Describe("TaskRun results to BuildRun", func() { tr.Status.TaskRunResults = append(tr.Status.TaskRunResults, pipelinev1beta1.TaskRunResult{ - Name: "shp-source-default-commit-sha", - Value: commitSha, + Name: "shp-source-default-commit-sha", + Value: pipelinev1beta1.ArrayOrString{ + Type: pipelinev1beta1.ParamTypeString, 
+ StringVal: commitSha, + }, }, pipelinev1beta1.TaskRunResult{ - Name: "shp-source-default-commit-author", - Value: "foo bar", + Name: "shp-source-default-commit-author", + Value: pipelinev1beta1.ArrayOrString{ + Type: pipelinev1beta1.ParamTypeString, + StringVal: "foo bar", + }, }, pipelinev1beta1.TaskRunResult{ - Name: "shp-image-digest", - Value: imageDigest, + Name: "shp-image-digest", + Value: pipelinev1beta1.ArrayOrString{ + Type: pipelinev1beta1.ParamTypeString, + StringVal: imageDigest, + }, }, pipelinev1beta1.TaskRunResult{ - Name: "shp-image-size", - Value: "230", + Name: "shp-image-size", + Value: pipelinev1beta1.ArrayOrString{ + Type: pipelinev1beta1.ParamTypeString, + StringVal: "230", + }, }) resources.UpdateBuildRunUsingTaskResults(ctx, br, tr.Status.TaskRunResults, taskRunRequest) diff --git a/pkg/reconciler/buildrun/resources/sources/bundle.go b/pkg/reconciler/buildrun/resources/sources/bundle.go index aeb1464ec6..5903573bff 100644 --- a/pkg/reconciler/buildrun/resources/sources/bundle.go +++ b/pkg/reconciler/buildrun/resources/sources/bundle.go @@ -30,13 +30,11 @@ func AppendBundleStep( }) // initialize the step from the template - bundleStep := pipeline.Step{ - Container: *cfg.BundleContainerTemplate.DeepCopy(), - } + bundleStep := *cfg.BundleContainerTemplate.DeepCopy() // add the build-specific details - bundleStep.Container.Name = fmt.Sprintf("source-%s", name) - bundleStep.Container.Args = []string{ + bundleStep.Name = fmt.Sprintf("source-%s", name) + bundleStep.Args = []string{ "--image", source.BundleContainer.Image, "--target", fmt.Sprintf("$(params.%s-%s)", prefixParamsResultsVolumes, paramSourceRoot), "--result-file-image-digest", fmt.Sprintf("$(results.%s-source-%s-image-digest.path)", prefixParamsResultsVolumes, name), @@ -56,14 +54,14 @@ func AppendBundleStep( }) // append the argument - bundleStep.Container.Args = append(bundleStep.Container.Args, + bundleStep.Args = append(bundleStep.Args, "--secret-path", secretMountPath, ) } // 
add prune flag in when prune after pull is configured if source.BundleContainer.Prune != nil && *source.BundleContainer.Prune == build.PruneAfterPull { - bundleStep.Container.Args = append(bundleStep.Container.Args, "--prune") + bundleStep.Args = append(bundleStep.Args, "--prune") } taskSpec.Steps = append(taskSpec.Steps, bundleStep) diff --git a/pkg/reconciler/buildrun/resources/sources/git.go b/pkg/reconciler/buildrun/resources/sources/git.go index 5f058c8528..7b19a96d1f 100644 --- a/pkg/reconciler/buildrun/resources/sources/git.go +++ b/pkg/reconciler/buildrun/resources/sources/git.go @@ -40,13 +40,11 @@ func AppendGitStep( }) // initialize the step from the template - gitStep := tektonv1beta1.Step{ - Container: *cfg.GitContainerTemplate.DeepCopy(), - } + gitStep := *cfg.GitContainerTemplate.DeepCopy() // add the build-specific details - gitStep.Container.Name = fmt.Sprintf("source-%s", name) - gitStep.Container.Args = []string{ + gitStep.Name = fmt.Sprintf("source-%s", name) + gitStep.Args = []string{ "--url", *source.URL, "--target", @@ -66,8 +64,8 @@ func AppendGitStep( // Check if a revision is defined if source.Revision != nil { // append the argument - gitStep.Container.Args = append( - gitStep.Container.Args, + gitStep.Args = append( + gitStep.Args, "--revision", *source.Revision, ) @@ -75,7 +73,7 @@ func AppendGitStep( // If configure, use Git URL rewrite flag if cfg.GitRewriteRule { - gitStep.Container.Args = append(gitStep.Container.Args, "--git-url-rewrite") + gitStep.Args = append(gitStep.Args, "--git-url-rewrite") } if source.Credentials != nil { @@ -92,8 +90,8 @@ func AppendGitStep( }) // append the argument - gitStep.Container.Args = append( - gitStep.Container.Args, + gitStep.Args = append( + gitStep.Args, "--secret-path", secretMountPath, ) diff --git a/pkg/reconciler/buildrun/resources/sources/http.go b/pkg/reconciler/buildrun/resources/sources/http.go index f5e8bf9772..301f97996f 100644 --- a/pkg/reconciler/buildrun/resources/sources/http.go 
+++ b/pkg/reconciler/buildrun/resources/sources/http.go @@ -10,7 +10,6 @@ import ( buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" "github.com/shipwright-io/build/pkg/config" tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - corev1 "k8s.io/api/core/v1" ) // RemoteArtifactsContainerName name for the container dealing with remote artifacts download. @@ -25,22 +24,20 @@ func AppendHTTPStep( // HTTP is done currently all in a single step, see if there is already one httpStep := findExistingHTTPSourcesStep(taskSpec) if httpStep != nil { - httpStep.Container.Args[3] = fmt.Sprintf("%s ; wget %q", httpStep.Container.Args[3], source.URL) + httpStep.Args[3] = fmt.Sprintf("%s ; wget %q", httpStep.Args[3], source.URL) } else { httpStep := tektonv1beta1.Step{ - Container: corev1.Container{ - Name: RemoteArtifactsContainerName, - Image: cfg.RemoteArtifactsContainerImage, - WorkingDir: fmt.Sprintf("$(params.%s-%s)", prefixParamsResultsVolumes, paramSourceRoot), - Command: []string{ - "/bin/sh", - }, - Args: []string{ - "-e", - "-x", - "-c", - fmt.Sprintf("wget %q", source.URL), - }, + Name: RemoteArtifactsContainerName, + Image: cfg.RemoteArtifactsContainerImage, + WorkingDir: fmt.Sprintf("$(params.%s-%s)", prefixParamsResultsVolumes, paramSourceRoot), + Command: []string{ + "/bin/sh", + }, + Args: []string{ + "-e", + "-x", + "-c", + fmt.Sprintf("wget %q", source.URL), }, } diff --git a/pkg/reconciler/buildrun/resources/sources/http_test.go b/pkg/reconciler/buildrun/resources/sources/http_test.go index 9dc2b4bd3f..2cf1bc3fe6 100644 --- a/pkg/reconciler/buildrun/resources/sources/http_test.go +++ b/pkg/reconciler/buildrun/resources/sources/http_test.go @@ -13,14 +13,13 @@ import ( "github.com/shipwright-io/build/pkg/reconciler/buildrun/resources/sources" tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - corev1 "k8s.io/api/core/v1" ) var _ = Describe("HTTP", func() { cfg := config.NewDefaultConfig() - 
Context("when a TaskSpec does not contain an step", func() { + Context("when a TaskSpec does not contain an step", func() { var taskSpec *tektonv1beta1.TaskSpec BeforeEach(func() { @@ -41,26 +40,24 @@ var _ = Describe("HTTP", func() { }) }) - Context("when a TaskSpec already contains the http step", func() { + Context("when a TaskSpec already contains the http step", func() { var taskSpec *tektonv1beta1.TaskSpec BeforeEach(func() { taskSpec = &tektonv1beta1.TaskSpec{ Steps: []tektonv1beta1.Step{ { - Container: corev1.Container{ - Name: sources.RemoteArtifactsContainerName, - Image: cfg.RemoteArtifactsContainerImage, - WorkingDir: "$(params.shp-source-root)", - Command: []string{ - "/bin/sh", - }, - Args: []string{ - "-e", - "-x", - "-c", - "wget \"https://tekton.dev/images/tekton-horizontal-color.png\"", - }, + Name: sources.RemoteArtifactsContainerName, + Image: cfg.RemoteArtifactsContainerImage, + WorkingDir: "$(params.shp-source-root)", + Command: []string{ + "/bin/sh", + }, + Args: []string{ + "-e", + "-x", + "-c", + "wget \"https://tekton.dev/images/tekton-horizontal-color.png\"", }, }, }, @@ -81,16 +78,14 @@ var _ = Describe("HTTP", func() { }) }) - Context("when a TaskSpec already another source step step", func() { + Context("when a TaskSpec already another source step step", func() { var taskSpec *tektonv1beta1.TaskSpec BeforeEach(func() { taskSpec = &tektonv1beta1.TaskSpec{ Steps: []tektonv1beta1.Step{ { - Container: corev1.Container{ - Name: "source-something", - }, + Name: "source-something", }, }, } diff --git a/pkg/reconciler/buildrun/resources/sources/local_copy.go b/pkg/reconciler/buildrun/resources/sources/local_copy.go index e2a38e11e0..0013dc325d 100644 --- a/pkg/reconciler/buildrun/resources/sources/local_copy.go +++ b/pkg/reconciler/buildrun/resources/sources/local_copy.go @@ -18,7 +18,7 @@ const WaiterContainerName = "source-local" // AppendLocalCopyStep defines and append a new task based on the waiter container template, passed // by the 
configuration instance. func AppendLocalCopyStep(cfg *config.Config, taskSpec *tektonv1beta1.TaskSpec, timeout *metav1.Duration) { - step := tektonv1beta1.Step{Container: *cfg.WaiterContainerTemplate.DeepCopy()} + step := *cfg.WaiterContainerTemplate.DeepCopy() // the data upload mechanism targets a specific POD, and in this POD it aims for a specific // container name, and having a static name, makes this process straight forward. step.Name = WaiterContainerName diff --git a/pkg/reconciler/buildrun/resources/sources/utils.go b/pkg/reconciler/buildrun/resources/sources/utils.go index c504c0689d..72d837fe6f 100644 --- a/pkg/reconciler/buildrun/resources/sources/utils.go +++ b/pkg/reconciler/buildrun/resources/sources/utils.go @@ -72,7 +72,7 @@ func SanitizeVolumeNameForSecretName(secretName string) string { func findResultValue(results []tektonv1beta1.TaskRunResult, name string) string { for _, result := range results { if result.Name == name { - return result.Value + return result.Value.StringVal } } diff --git a/pkg/reconciler/buildrun/resources/taskrun.go b/pkg/reconciler/buildrun/resources/taskrun.go index 082be737f7..97b00a8f8a 100644 --- a/pkg/reconciler/buildrun/resources/taskrun.go +++ b/pkg/reconciler/buildrun/resources/taskrun.go @@ -199,18 +199,16 @@ func GenerateTaskSpec( } step := v1beta1.Step{ - Container: corev1.Container{ - Image: taskImage, - ImagePullPolicy: containerValue.ImagePullPolicy, - Name: containerValue.Name, - VolumeMounts: containerValue.VolumeMounts, - Command: taskCommand, - Args: taskArgs, - SecurityContext: containerValue.SecurityContext, - WorkingDir: containerValue.WorkingDir, - Resources: containerValue.Resources, - Env: stepEnv, - }, + Image: taskImage, + ImagePullPolicy: containerValue.ImagePullPolicy, + Name: containerValue.Name, + VolumeMounts: containerValue.VolumeMounts, + Command: taskCommand, + Args: taskArgs, + SecurityContext: containerValue.SecurityContext, + WorkingDir: containerValue.WorkingDir, + Resources: 
containerValue.Resources, + Env: stepEnv, } generatedTaskSpec.Steps = append(generatedTaskSpec.Steps, step) diff --git a/pkg/reconciler/buildrun/resources/taskrun_test.go b/pkg/reconciler/buildrun/resources/taskrun_test.go index 9a13604d9a..1db71209bf 100644 --- a/pkg/reconciler/buildrun/resources/taskrun_test.go +++ b/pkg/reconciler/buildrun/resources/taskrun_test.go @@ -110,27 +110,27 @@ var _ = Describe("GenerateTaskrun", func() { }) It("should ensure IMAGE is replaced by builder image when needed.", func() { - Expect(got.Steps[1].Container.Image).To(Equal("quay.io/containers/buildah:v1.20.1")) + Expect(got.Steps[1].Image).To(Equal("quay.io/containers/buildah:v1.20.1")) }) It("should ensure ImagePullPolicy can be set by the build strategy author.", func() { - Expect(got.Steps[1].Container.ImagePullPolicy).To(Equal(corev1.PullPolicy("Always"))) + Expect(got.Steps[1].ImagePullPolicy).To(Equal(corev1.PullPolicy("Always"))) }) It("should ensure command replacements happen when needed", func() { - Expect(got.Steps[1].Container.Command[0]).To(Equal("/usr/bin/buildah")) + Expect(got.Steps[1].Command[0]).To(Equal("/usr/bin/buildah")) }) It("should ensure resource replacements happen for the first step", func() { - Expect(got.Steps[1].Container.Resources).To(Equal(ctl.LoadCustomResources("500m", "1Gi"))) + Expect(got.Steps[1].Resources).To(Equal(ctl.LoadCustomResources("500m", "1Gi"))) }) It("should ensure resource replacements happen for the second step", func() { - Expect(got.Steps[2].Container.Resources).To(Equal(ctl.LoadCustomResources("100m", "65Mi"))) + Expect(got.Steps[2].Resources).To(Equal(ctl.LoadCustomResources("100m", "65Mi"))) }) It("should ensure arg replacements happen when needed", func() { - Expect(got.Steps[1].Container.Args).To(Equal(expectedCommandOrArg)) + Expect(got.Steps[1].Args).To(Equal(expectedCommandOrArg)) }) It("should ensure top level volumes are populated", func() { diff --git a/test/e2e/validators_test.go b/test/e2e/validators_test.go 
index 1c3f90a168..f46e8a4238 100644 --- a/test/e2e/validators_test.go +++ b/test/e2e/validators_test.go @@ -167,15 +167,15 @@ func validateBuildRunResultsFromGitSource(testBuildRun *buildv1alpha1.BuildRun) for _, result := range tr.Status.TaskRunResults { switch result.Name { case "shp-source-default-commit-sha": - Expect(result.Value).To(Equal(testBuildRun.Status.Sources[0].Git.CommitSha)) + Expect(result.Value.StringVal).To(Equal(testBuildRun.Status.Sources[0].Git.CommitSha)) case "shp-source-default-commit-author": - Expect(result.Value).To(Equal(testBuildRun.Status.Sources[0].Git.CommitAuthor)) + Expect(result.Value.StringVal).To(Equal(testBuildRun.Status.Sources[0].Git.CommitAuthor)) case "shp-source-default-branch-name": - Expect(result.Value).To(Equal(testBuildRun.Status.Sources[0].Git.BranchName)) + Expect(result.Value.StringVal).To(Equal(testBuildRun.Status.Sources[0].Git.BranchName)) case "shp-image-digest": - Expect(result.Value).To(Equal(testBuildRun.Status.Output.Digest)) + Expect(result.Value.StringVal).To(Equal(testBuildRun.Status.Output.Digest)) case "shp-image-size": - size, err := strconv.ParseInt(result.Value, 10, 64) + size, err := strconv.ParseInt(result.Value.StringVal, 10, 64) Expect(err).To(BeNil()) Expect(size).To(Equal(testBuildRun.Status.Output.Size)) } @@ -197,11 +197,11 @@ func validateBuildRunResultsFromBundleSource(testBuildRun *buildv1alpha1.BuildRu for _, result := range tr.Status.TaskRunResults { switch result.Name { case "shp-source-default-image-digest": - Expect(result.Value).To(Equal(testBuildRun.Status.Sources[0].Bundle.Digest)) + Expect(result.Value.StringVal).To(Equal(testBuildRun.Status.Sources[0].Bundle.Digest)) case "shp-image-digest": - Expect(result.Value).To(Equal(testBuildRun.Status.Output.Digest)) + Expect(result.Value.StringVal).To(Equal(testBuildRun.Status.Output.Digest)) case "shp-image-size": - size, err := strconv.ParseInt(result.Value, 10, 64) + size, err := strconv.ParseInt(result.Value.StringVal, 10, 64) 
Expect(err).To(BeNil()) Expect(size).To(Equal(testBuildRun.Status.Output.Size)) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/contexts.go similarity index 53% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_defaults.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/config/contexts.go index dcf7601d88..8d0b4bb10e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/contexts.go @@ -14,24 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package config import ( "context" - - "knative.dev/pkg/apis" ) -var _ apis.Defaultable = (*Condition)(nil) +// isSubstituted is used for associating the parameter substitution inside the context.Context. +type isSubstituted struct{} -// SetDefaults sets the Condition's Spec's default values. -func (c *Condition) SetDefaults(ctx context.Context) { - c.Spec.SetDefaults(ctx) +// WithinSubstituted is used to note that it is calling within +// the context of a substitute variable operation. +func WithinSubstituted(ctx context.Context) context.Context { + return context.WithValue(ctx, isSubstituted{}, true) } -// SetDefaults sets defaults for all params on the ConditionSpec. -func (cs *ConditionSpec) SetDefaults(ctx context.Context) { - for i := range cs.Params { - cs.Params[i].SetDefaults(ctx) - } +// IsSubstituted indicates that the variables have been substituted. 
+func IsSubstituted(ctx context.Context) bool { + return ctx.Value(isSubstituted{}) != nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go index 400879e155..aa7656a1fb 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go @@ -39,26 +39,30 @@ const ( DefaultManagedByLabelValue = "tekton-pipelines" // DefaultCloudEventSinkValue is the default value for cloud event sinks. DefaultCloudEventSinkValue = "" - - defaultTimeoutMinutesKey = "default-timeout-minutes" - defaultServiceAccountKey = "default-service-account" - defaultManagedByLabelValueKey = "default-managed-by-label-value" - defaultPodTemplateKey = "default-pod-template" - defaultAAPodTemplateKey = "default-affinity-assistant-pod-template" - defaultCloudEventsSinkKey = "default-cloud-events-sink" - defaultTaskRunWorkspaceBinding = "default-task-run-workspace-binding" + // DefaultMaxMatrixCombinationsCount is used when no max matrix combinations count is specified. 
+ DefaultMaxMatrixCombinationsCount = 256 + + defaultTimeoutMinutesKey = "default-timeout-minutes" + defaultServiceAccountKey = "default-service-account" + defaultManagedByLabelValueKey = "default-managed-by-label-value" + defaultPodTemplateKey = "default-pod-template" + defaultAAPodTemplateKey = "default-affinity-assistant-pod-template" + defaultCloudEventsSinkKey = "default-cloud-events-sink" + defaultTaskRunWorkspaceBinding = "default-task-run-workspace-binding" + defaultMaxMatrixCombinationsCountKey = "default-max-matrix-combinations-count" ) // Defaults holds the default configurations // +k8s:deepcopy-gen=true type Defaults struct { - DefaultTimeoutMinutes int - DefaultServiceAccount string - DefaultManagedByLabelValue string - DefaultPodTemplate *pod.Template - DefaultAAPodTemplate *pod.AffinityAssistantTemplate - DefaultCloudEventsSink string - DefaultTaskRunWorkspaceBinding string + DefaultTimeoutMinutes int + DefaultServiceAccount string + DefaultManagedByLabelValue string + DefaultPodTemplate *pod.Template + DefaultAAPodTemplate *pod.AffinityAssistantTemplate + DefaultCloudEventsSink string + DefaultTaskRunWorkspaceBinding string + DefaultMaxMatrixCombinationsCount int } // GetDefaultsConfigName returns the name of the configmap containing all @@ -86,16 +90,18 @@ func (cfg *Defaults) Equals(other *Defaults) bool { other.DefaultPodTemplate.Equals(cfg.DefaultPodTemplate) && other.DefaultAAPodTemplate.Equals(cfg.DefaultAAPodTemplate) && other.DefaultCloudEventsSink == cfg.DefaultCloudEventsSink && - other.DefaultTaskRunWorkspaceBinding == cfg.DefaultTaskRunWorkspaceBinding + other.DefaultTaskRunWorkspaceBinding == cfg.DefaultTaskRunWorkspaceBinding && + other.DefaultMaxMatrixCombinationsCount == cfg.DefaultMaxMatrixCombinationsCount } // NewDefaultsFromMap returns a Config given a map corresponding to a ConfigMap func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) { tc := Defaults{ - DefaultTimeoutMinutes: DefaultTimeoutMinutes, - 
DefaultServiceAccount: DefaultServiceAccountValue, - DefaultManagedByLabelValue: DefaultManagedByLabelValue, - DefaultCloudEventsSink: DefaultCloudEventSinkValue, + DefaultTimeoutMinutes: DefaultTimeoutMinutes, + DefaultServiceAccount: DefaultServiceAccountValue, + DefaultManagedByLabelValue: DefaultManagedByLabelValue, + DefaultCloudEventsSink: DefaultCloudEventSinkValue, + DefaultMaxMatrixCombinationsCount: DefaultMaxMatrixCombinationsCount, } if defaultTimeoutMin, ok := cfgMap[defaultTimeoutMinutesKey]; ok { @@ -137,6 +143,15 @@ func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) { if bindingYAML, ok := cfgMap[defaultTaskRunWorkspaceBinding]; ok { tc.DefaultTaskRunWorkspaceBinding = bindingYAML } + + if defaultMaxMatrixCombinationsCount, ok := cfgMap[defaultMaxMatrixCombinationsCountKey]; ok { + matrixCombinationsCount, err := strconv.ParseInt(defaultMaxMatrixCombinationsCount, 10, 0) + if err != nil { + return nil, fmt.Errorf("failed parsing tracing config %q", defaultMaxMatrixCombinationsCountKey) + } + tc.DefaultMaxMatrixCombinationsCount = int(matrixCombinationsCount) + } + return &tc, nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go index 17ef479ae6..555b28e5f3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go @@ -17,6 +17,7 @@ limitations under the License. package config import ( + "context" "fmt" "os" "strconv" @@ -45,6 +46,8 @@ const ( DefaultDisableCredsInit = false // DefaultRunningInEnvWithInjectedSidecars is the default value for "running-in-environment-with-injected-sidecars". DefaultRunningInEnvWithInjectedSidecars = true + // DefaultAwaitSidecarReadiness is the default value for "await-sidecar-readiness". 
+ DefaultAwaitSidecarReadiness = true // DefaultRequireGitSSHSecretKnownHosts is the default value for "require-git-ssh-secret-known-hosts". DefaultRequireGitSSHSecretKnownHosts = false // DefaultEnableTektonOciBundles is the default value for "enable-tekton-oci-bundles". @@ -61,6 +64,7 @@ const ( disableAffinityAssistantKey = "disable-affinity-assistant" disableCredsInitKey = "disable-creds-init" runningInEnvWithInjectedSidecarsKey = "running-in-environment-with-injected-sidecars" + awaitSidecarReadinessKey = "await-sidecar-readiness" requireGitSSHSecretKnownHostsKey = "require-git-ssh-secret-known-hosts" // nolint: gosec enableTektonOCIBundles = "enable-tekton-oci-bundles" enableCustomTasks = "enable-custom-tasks" @@ -81,6 +85,7 @@ type FeatureFlags struct { ScopeWhenExpressionsToTask bool EnableAPIFields string SendCloudEventsForRuns bool + AwaitSidecarReadiness bool EmbeddedStatus string } @@ -118,6 +123,9 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if err := setFeature(runningInEnvWithInjectedSidecarsKey, DefaultRunningInEnvWithInjectedSidecars, &tc.RunningInEnvWithInjectedSidecars); err != nil { return nil, err } + if err := setFeature(awaitSidecarReadinessKey, DefaultAwaitSidecarReadiness, &tc.AwaitSidecarReadiness); err != nil { + return nil, err + } if err := setFeature(requireGitSSHSecretKnownHostsKey, DefaultRequireGitSSHSecretKnownHosts, &tc.RequireGitSSHSecretKnownHosts); err != nil { return nil, err } @@ -187,3 +195,17 @@ func setEmbeddedStatus(cfgMap map[string]string, defaultValue string, feature *s func NewFeatureFlagsFromConfigMap(config *corev1.ConfigMap) (*FeatureFlags, error) { return NewFeatureFlagsFromMap(config.Data) } + +// EnableAlphaAPIFields enables alpha feature in an existing context (for use in testing) +func EnableAlphaAPIFields(ctx context.Context) context.Context { + featureFlags, _ := NewFeatureFlagsFromMap(map[string]string{ + "enable-api-fields": "alpha", + }) + cfg := &Config{ + Defaults: 
&Defaults{ + DefaultTimeoutMinutes: 60, + }, + FeatureFlags: featureFlags, + } + return ToContext(ctx, cfg) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go index 97ee57fe8c..a95b23e72a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go @@ -40,12 +40,6 @@ const ( // PipelineTaskLabelKey is used as the label identifier for a PipelineTask PipelineTaskLabelKey = GroupName + "/pipelineTask" - // ConditionCheckKey is used as the label identifier for a ConditionCheck - ConditionCheckKey = GroupName + "/conditionCheck" - - // ConditionNameKey is used as the label identifier for a Condition - ConditionNameKey = GroupName + "/conditionName" - // RunKey is used as the label identifier for a Run RunKey = GroupName + "/run" @@ -91,9 +85,4 @@ var ( Group: GroupName, Resource: "pipelineresources", } - // ConditionResource represents a Tekton Condition - ConditionResource = schema.GroupResource{ - Group: GroupName, - Resource: "conditions", - } ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/container_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/container_types.go new file mode 100644 index 0000000000..f6e75960ff --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/container_types.go @@ -0,0 +1,541 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Step runs a subcomponent of a Task +type Step struct { + + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + // +listType=atomic + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=atomic + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. 
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=atomic + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + // +listType=atomic + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Script is the contents of an executable file to execute. + // + // If Script is not empty, the Step cannot have an Command and the Args will be passed to the Script. + // +optional + Script string `json:"script,omitempty"` + + // Timeout is the time after which the step times out. 
Defaults to never. + // Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha" + // for this field to be supported. + // + // Workspaces is a list of workspaces from the Task that this Step wants + // exclusive access to. Adding a workspace to this list means that any + // other Step or Sidecar that does not also request this Workspace will + // not have access to it. + // +optional + // +listType=atomic + Workspaces []WorkspaceUsage `json:"workspaces,omitempty"` + + // OnError defines the exiting behavior of a container on error + // can be set to [ continue | stopAndFail ] + // stopAndFail indicates exit the taskRun if the container exits with non-zero exit code + // continue indicates continue executing the rest of the steps irrespective of the container exit code + OnError string `json:"onError,omitempty"` + // Stores configuration for the stdout stream of the step. + // +optional + StdoutConfig *StepOutputConfig `json:"stdoutConfig,omitempty"` + // Stores configuration for the stderr stream of the step. + // +optional + StderrConfig *StepOutputConfig `json:"stderrConfig,omitempty"` +} + +// StepOutputConfig stores configuration for a step output stream. +type StepOutputConfig struct { + // Path to duplicate stdout stream to on container's local filesystem. 
+ // +optional + Path string `json:"path,omitempty"` +} + +// ToK8sContainer converts the Step to a Kubernetes Container struct +func (s *Step) ToK8sContainer() *corev1.Container { + return &corev1.Container{ + Name: s.Name, + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + } +} + +// SetContainerFields sets the fields of the Step to the values of the corresponding fields in the Container +func (s *Step) SetContainerFields(c corev1.Container) { + s.Name = c.Name + s.Image = c.Image + s.Command = c.Command + s.Args = c.Args + s.WorkingDir = c.WorkingDir + s.EnvFrom = c.EnvFrom + s.Env = c.Env + s.Resources = c.Resources + s.VolumeMounts = c.VolumeMounts + s.VolumeDevices = c.VolumeDevices + s.ImagePullPolicy = c.ImagePullPolicy + s.SecurityContext = c.SecurityContext +} + +// StepTemplate is a template for a Step +type StepTemplate struct { + + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. 
+ // +optional + // +listType=atomic + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=atomic + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=atomic + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + // +listType=atomic + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
+ // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` +} + +// SetContainerFields sets the fields of the Step to the values of the corresponding fields in the Container +func (s *StepTemplate) SetContainerFields(c corev1.Container) { + s.Image = c.Image + s.Command = c.Command + s.Args = c.Args + s.WorkingDir = c.WorkingDir + s.EnvFrom = c.EnvFrom + s.Env = c.Env + s.Resources = c.Resources + s.VolumeMounts = c.VolumeMounts + s.VolumeDevices = c.VolumeDevices + s.ImagePullPolicy = c.ImagePullPolicy + s.SecurityContext = c.SecurityContext +} + +// ToK8sContainer converts the StepTemplate to a Kubernetes Container struct +func (s *StepTemplate) ToK8sContainer() *corev1.Container { + return &corev1.Container{ + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + } +} + +// Sidecar has nearly the same data structure as Step but does not have the ability to timeout. +type Sidecar struct { + + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. 
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of ports to expose from the container. 
Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + Ports []corev1.ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + // +listType=atomic + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=atomic + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. 
+ // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=atomic + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + // +listType=atomic + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + StartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. 
+ // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. 
+ // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` + + // Script is the contents of an executable file to execute. + // + // If Script is not empty, the Step cannot have an Command or Args. + // +optional + Script string `json:"script,omitempty"` + + // This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha" + // for this field to be supported. + // + // Workspaces is a list of workspaces from the Task that this Sidecar wants + // exclusive access to. Adding a workspace to this list means that any + // other Step or Sidecar that does not also request this Workspace will + // not have access to it. + // +optional + // +listType=atomic + Workspaces []WorkspaceUsage `json:"workspaces,omitempty"` +} + +// ToK8sContainer converts the Sidecar to a Kubernetes Container struct +func (s *Sidecar) ToK8sContainer() *corev1.Container { + return &corev1.Container{ + Name: s.Name, + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + Ports: s.Ports, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + LivenessProbe: s.LivenessProbe, + ReadinessProbe: s.ReadinessProbe, + StartupProbe: s.StartupProbe, + Lifecycle: s.Lifecycle, + TerminationMessagePath: s.TerminationMessagePath, + TerminationMessagePolicy: s.TerminationMessagePolicy, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + Stdin: s.Stdin, + StdinOnce: s.StdinOnce, + TTY: s.TTY, + } +} + +// SetContainerFields sets the fields of the Sidecar to the values of the corresponding fields in the Container +func (s *Sidecar) SetContainerFields(c corev1.Container) { + s.Name = c.Name + s.Image = c.Image + s.Command = c.Command + s.Args = c.Args + s.WorkingDir = c.WorkingDir + s.Ports = c.Ports + s.EnvFrom = c.EnvFrom + s.Env = c.Env + s.Resources = c.Resources + s.VolumeMounts = c.VolumeMounts + s.VolumeDevices = c.VolumeDevices + 
s.LivenessProbe = c.LivenessProbe + s.ReadinessProbe = c.ReadinessProbe + s.StartupProbe = c.StartupProbe + s.Lifecycle = c.Lifecycle + s.TerminationMessagePath = c.TerminationMessagePath + s.TerminationMessagePolicy = c.TerminationMessagePolicy + s.ImagePullPolicy = c.ImagePullPolicy + s.SecurityContext = c.SecurityContext + s.Stdin = c.Stdin + s.StdinOnce = c.StdinOnce + s.TTY = c.TTY +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/doc.go new file mode 100644 index 0000000000..d279002e61 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1 contains API Schema definitions for the pipeline v1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/tektoncd/pipeline/pkg/apis/pipeline +// +k8s:defaulter-gen=TypeMeta +// +groupName=tekton.dev +package v1 diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/merge.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/merge.go new file mode 100644 index 0000000000..b7995ae90a --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/merge.go @@ -0,0 +1,116 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/strategicpatch" +) + +// mergeData is used to store the intermediate data needed to merge an object +// with a template. It's provided to avoid repeatedly re-serializing the template. +// +k8s:openapi-gen=false +type mergeData struct { + emptyJSON []byte + templateJSON []byte + patchSchema strategicpatch.PatchMetaFromStruct +} + +// MergeStepsWithStepTemplate takes a possibly nil container template and a +// list of steps, merging each of the steps with the container template, if +// it's not nil, and returning the resulting list. 
+func MergeStepsWithStepTemplate(template *StepTemplate, steps []Step) ([]Step, error) { + if template == nil { + return steps, nil + } + + md, err := getMergeData(template.ToK8sContainer(), &corev1.Container{}) + if err != nil { + return nil, err + } + + for i, s := range steps { + merged := corev1.Container{} + err := mergeObjWithTemplateBytes(md, s.ToK8sContainer(), &merged) + if err != nil { + return nil, err + } + + // If the container's args is nil, reset it to empty instead + if merged.Args == nil && s.Args != nil { + merged.Args = []string{} + } + + // Pass through original step Script, for later conversion. + newStep := Step{Script: s.Script, OnError: s.OnError, Timeout: s.Timeout, StdoutConfig: s.StdoutConfig, StderrConfig: s.StderrConfig} + newStep.SetContainerFields(merged) + steps[i] = newStep + } + return steps, nil +} + +// getMergeData serializes the template and empty object to get the intermediate results necessary for +// merging an object of the same type with this template. +// This function is provided to avoid repeatedly serializing an identical template. +func getMergeData(template, empty interface{}) (*mergeData, error) { + // We need JSON bytes to generate a patch to merge the object + // onto the template, so marshal the template. + templateJSON, err := json.Marshal(template) + if err != nil { + return nil, err + } + // We need to do a three-way merge to actually merge the template and + // object, so we need an empty object as the "original" + emptyJSON, err := json.Marshal(empty) + if err != nil { + return nil, err + } + // Get the patch meta, which is needed for generating and applying the merge patch. + patchSchema, err := strategicpatch.NewPatchMetaFromStruct(template) + if err != nil { + return nil, err + } + return &mergeData{templateJSON: templateJSON, emptyJSON: emptyJSON, patchSchema: patchSchema}, nil +} + +// mergeObjWithTemplateBytes merges obj with md's template JSON and updates out to reflect the merged result. 
+// out is a pointer to the zero value of obj's type. +// This function is provided to avoid repeatedly serializing an identical template. +func mergeObjWithTemplateBytes(md *mergeData, obj, out interface{}) error { + // Marshal the object to JSON + objAsJSON, err := json.Marshal(obj) + if err != nil { + return err + } + // Create a merge patch, with the empty JSON as the original, the object JSON as the modified, and the template + // JSON as the current - this lets us do a deep merge of the template and object, with awareness of + // the "patchMerge" tags. + patch, err := strategicpatch.CreateThreeWayMergePatch(md.emptyJSON, objAsJSON, md.templateJSON, md.patchSchema, true) + if err != nil { + return err + } + + // Actually apply the merge patch to the template JSON. + mergedAsJSON, err := strategicpatch.StrategicMergePatchUsingLookupPatchMeta(md.templateJSON, patch, md.patchSchema) + if err != nil { + return err + } + // Unmarshal the merged JSON to a pointer, and return it. + return json.Unmarshal(mergedAsJSON, out) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go new file mode 100644 index 0000000000..15ba0a4f8c --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go @@ -0,0 +1,1791 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by openapi-gen. DO NOT EDIT. + +// This file was autogenerated by openapi-gen. Do not edit it manually! + +package v1 + +import ( + common "k8s.io/kube-openapi/pkg/common" + spec "k8s.io/kube-openapi/pkg/validation/spec" +) + +func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { + return map[string]common.OpenAPIDefinition{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate": schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template": schema_pkg_apis_pipeline_pod_Template(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString": schema_pkg_apis_pipeline_v1_ArrayOrString(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param": schema_pkg_apis_pipeline_v1_Param(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec": schema_pkg_apis_pipeline_v1_ParamSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineWorkspaceDeclaration": schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec": schema_pkg_apis_pipeline_v1_PropertySpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverParam": schema_pkg_apis_pipeline_v1_ResolverParam(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverRef": schema_pkg_apis_pipeline_v1_ResolverRef(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar": schema_pkg_apis_pipeline_v1_Sidecar(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step": schema_pkg_apis_pipeline_v1_Step(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig": schema_pkg_apis_pipeline_v1_StepOutputConfig(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate": 
schema_pkg_apis_pipeline_v1_StepTemplate(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task": schema_pkg_apis_pipeline_v1_Task(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskList": schema_pkg_apis_pipeline_v1_TaskList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult": schema_pkg_apis_pipeline_v1_TaskResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult": schema_pkg_apis_pipeline_v1_TaskRunResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec": schema_pkg_apis_pipeline_v1_TaskSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding": schema_pkg_apis_pipeline_v1_WorkspaceBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration": schema_pkg_apis_pipeline_v1_WorkspaceDeclaration(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspacePipelineTaskBinding": schema_pkg_apis_pipeline_v1_WorkspacePipelineTaskBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage": schema_pkg_apis_pipeline_v1_WorkspaceUsage(ref), + } +} + +func schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AffinityAssistantTemplate holds pod specific configuration and is a subset of the generic pod Template", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "tolerations": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's tolerations.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Toleration"), + }, + }, + }, + }, + }, + "imagePullSecrets": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.Toleration"}, + } +} + +func schema_pkg_apis_pipeline_pod_Template(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Template holds pod specific configuration", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "tolerations": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's tolerations.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Toleration"), + }, + }, + }, + }, + }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's scheduling constraints", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), + }, + }, + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of volumes that can be mounted by containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Volume"), + }, + }, + }, + }, + }, + "runtimeClassName": { + SchemaProps: spec.SchemaProps{ + Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", + Type: []string{"string"}, + Format: "", + }, + }, + "automountServiceAccountToken": { + SchemaProps: spec.SchemaProps{ + Description: "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.", + Type: []string{"string"}, + Format: "", + }, + }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, + "enableServiceLinks": { + SchemaProps: spec.SchemaProps{ + Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. 
Optional: Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Type: []string{"string"}, + Format: "", + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "SchedulerName specifies the scheduler to be used to dispatch the Pod", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullSecrets": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + }, + }, + "hostAliases": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. 
This is only valid for non-hostNetwork pods.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.HostAlias"), + }, + }, + }, + }, + }, + "hostNetwork": { + SchemaProps: spec.SchemaProps{ + Description: "HostNetwork specifies whether the pod may use the node network namespace", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + } +} + +func schema_pkg_apis_pipeline_v1_ArrayOrString(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArrayOrString is a type that can hold a single string or string array. Used in JSON unmarshalling so that a single JSON field can accept either an individual string or an array of strings. 
consideration the object case after the community reaches an agreement on it.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "stringVal": { + SchemaProps: spec.SchemaProps{ + Description: "Represents the stored type of ArrayOrString.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "arrayVal": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "objectVal": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"type", "stringVal", "arrayVal", "objectVal"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_Param(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Param declares an ArrayOrString to use for the parameter called name.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"), + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"}, + } +} + +func 
schema_pkg_apis_pipeline_v1_ParamSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ParamSpec defines arbitrary parameters needed beyond typed inputs (such as resources). Parameter values are provided by users as inputs on a TaskRun or PipelineRun.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name declares the name by which a parameter is referenced.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the user-specified type of the parameter. The possible types are currently \"string\", \"array\" and \"object\", and \"string\" is the default.", + Type: []string{"string"}, + Format: "", + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is a user-facing description of the parameter that may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, + "properties": { + SchemaProps: spec.SchemaProps{ + Description: "Properties is the JSON Schema properties to support key-value pairs parameter.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"), + }, + }, + }, + }, + }, + "default": { + SchemaProps: spec.SchemaProps{ + Description: "Default is the value a parameter takes if no input value is supplied. 
If default is set, a Task may be executed without a supplied value for the parameter.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"}, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of a workspace to be provided by a PipelineRun.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is a human readable string describing how the workspace will be used in the Pipeline. It can be useful to include a bit of detail about which tasks are intended to have access to the data on the workspace.", + Type: []string{"string"}, + Format: "", + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Optional marks a Workspace as not being required in PipelineRuns. 
By default this field is false and so declared workspaces are required.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_PropertySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PropertySpec defines the struct for object keys", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_ResolverParam(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolverParam is a single parameter passed to a resolver.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the parameter that will be passed to the resolver.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the string value of the parameter that will be passed to the resolver.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_ResolverRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolverRef can be used to refer to a Pipeline or Task in a remote location like a git repo. 
This feature is in alpha and these fields are only available when the alpha feature gate is enabled.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "resolver": { + SchemaProps: spec.SchemaProps{ + Description: "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".", + Type: []string{"string"}, + Format: "", + }, + }, + "resource": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Resource contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverParam"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverParam"}, + } +} + +func schema_pkg_apis_pipeline_v1_Sidecar(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Sidecar has nearly the same data structure as Step but does not have the ability to timeout.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "ports": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "containerPort", + "protocol", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ContainerPort"), + }, + }, + }, + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. 
The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "livenessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "readinessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "startupProbe": { + SchemaProps: spec.SchemaProps{ + Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. 
This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "lifecycle": { + SchemaProps: spec.SchemaProps{ + Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + Ref: ref("k8s.io/api/core/v1.Lifecycle"), + }, + }, + "terminationMessagePath": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "terminationMessagePolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "stdin": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stdinOnce": { + SchemaProps: spec.SchemaProps{ + Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tty": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. 
Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.", + Type: []string{"string"}, + Format: "", + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Sidecar wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage"), + }, + }, + }, + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_pkg_apis_pipeline_v1_Step(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Step runs a subcomponent of a Task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the container specified as a DNS_LABEL. 
Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.", + Type: []string{"string"}, + Format: "", + }, + }, + "timeout": { + SchemaProps: spec.SchemaProps{ + Description: "Timeout is the time after which the step times out. Defaults to never. 
Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Step wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage"), + }, + }, + }, + }, + }, + "onError": { + SchemaProps: spec.SchemaProps{ + Description: "OnError defines the exiting behavior of a container on error can be set to [ continue | stopAndFail ] stopAndFail indicates exit the taskRun if the container exits with non-zero exit code continue indicates continue executing the rest of the steps irrespective of the container exit code", + Type: []string{"string"}, + Format: "", + }, + }, + "stdoutConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Stores configuration for the stdout stream of the step.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig"), + }, + }, + "stderrConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Stores configuration for the stderr stream of the step.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig", 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + +func schema_pkg_apis_pipeline_v1_StepOutputConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StepOutputConfig stores configuration for a step output stream.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path to duplicate stdout stream to on container's local filesystem.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_StepTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StepTemplate is a template for a Step", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. 
Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_pkg_apis_pipeline_v1_Task(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec holds the desired state of the Task from the client", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskList contains a list of Task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskResult used to describe the results of a task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name the given name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the user-specified type of the result. 
The possible type is currently \"string\" and will support \"array\" in following work.", + Type: []string{"string"}, + Format: "", + }, + }, + "properties": { + SchemaProps: spec.SchemaProps{ + Description: "Properties is the JSON Schema properties to support key-value pairs results.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"), + }, + }, + }, + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is a human-readable description of the result", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskRunResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskRunResult used to describe the results of a task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name the given name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the user-specified type of the result. 
The possible type is currently \"string\" and will support \"array\" in following work.", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value the given value of the result", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"), + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskSpec defines the desired state of Task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "params": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Params is a list of input parameters required to run the task. 
Params must be supplied as inputs in TaskRuns unless they declare a default value.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec"), + }, + }, + }, + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is a user-facing description of the task that may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, + "steps": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step"), + }, + }, + }, + }, + }, + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Volumes is a collection of volumes that are available to mount into the steps of the build.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Volume"), + }, + }, + }, + }, + }, + "stepTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate"), + }, + }, + "sidecars": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": 
"atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar"), + }, + }, + }, + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Workspaces are the volumes that this Task requires.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration"), + }, + }, + }, + }, + }, + "results": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Results are values that this Task can output", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration", "k8s.io/api/core/v1.Volume"}, + } +} + +func schema_pkg_apis_pipeline_v1_WorkspaceBinding(ref common.ReferenceCallback) 
common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspaceBinding maps a Task's declared workspace to a Volume.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the workspace populated by the volume.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "SubPath is optionally a directory on the volume which should be used for this binding (i.e. the volume will be mounted at this sub directory).", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeClaimTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeClaimTemplate is a template for a claim that will be created in the same namespace. The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun.", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used.", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "EmptyDir represents a temporary directory that shares a Task's lifetime. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir Either this OR PersistentVolumeClaim can be used.", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigMap represents a configMap that should populate this workspace.", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "Secret represents a secret that should populate this workspace.", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource"}, + } +} + +func schema_pkg_apis_pipeline_v1_WorkspaceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspaceDeclaration is a declaration of a volume that a Task requires.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name by which you can bind the volume at runtime.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is an optional human readable description of this volume.", + Type: []string{"string"}, + Format: "", + }, + }, + "mountPath": { + SchemaProps: spec.SchemaProps{ + Description: "MountPath overrides the directory that the volume will be made available at.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly dictates whether a mounted volume is writable. 
By default this field is false and so mounted volumes are writable.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Optional marks a Workspace as not being required in TaskRuns. By default this field is false and so declared workspaces are required.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_WorkspacePipelineTaskBinding(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be mapped to a task's declared workspace.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the workspace as declared by the task", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "workspace": { + SchemaProps: spec.SchemaProps{ + Description: "Workspace is the name of the workspace declared by the pipeline", + Type: []string{"string"}, + Format: "", + }, + }, + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "SubPath is optionally a directory on the volume which should be used for this binding (i.e. 
the volume will be mounted at this sub directory).", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_WorkspaceUsage(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access to a Workspace defined in a Task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the workspace this Step or Sidecar wants access to.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "mountPath": { + SchemaProps: spec.SchemaProps{ + Description: "MountPath is the path that the workspace should be mounted to inside the Step or Sidecar, overriding any MountPath specified in the Task's WorkspaceDeclaration.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "mountPath"}, + }, + }, + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go new file mode 100644 index 0000000000..c3fc297628 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go @@ -0,0 +1,290 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "encoding/json" + "fmt" + "regexp" + "strings" + + resource "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" + "github.com/tektoncd/pipeline/pkg/substitution" +) + +// exactVariableSubstitutionFormat matches strings that only contain a single reference to result or param variables, but nothing else +// i.e. `$(result.resultname)` is a match, but `foo $(result.resultname)` is not. +const exactVariableSubstitutionFormat = `^\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)$` + +var exactVariableSubstitutionRegex = regexp.MustCompile(exactVariableSubstitutionFormat) + +// ParamsPrefix is the prefix used in $(...) expressions referring to parameters +const ParamsPrefix = "params" + +// ParamSpec defines arbitrary parameters needed beyond typed inputs (such as +// resources). Parameter values are provided by users as inputs on a TaskRun +// or PipelineRun. +type ParamSpec struct { + // Name declares the name by which a parameter is referenced. + Name string `json:"name"` + // Type is the user-specified type of the parameter. The possible types + // are currently "string", "array" and "object", and "string" is the default. + // +optional + Type ParamType `json:"type,omitempty"` + // Description is a user-facing description of the parameter that may be + // used to populate a UI. + // +optional + Description string `json:"description,omitempty"` + // Properties is the JSON Schema properties to support key-value pairs parameter. + // +optional + Properties map[string]PropertySpec `json:"properties,omitempty"` + // Default is the value a parameter takes if no input value is supplied. If + // default is set, a Task may be executed without a supplied value for the + // parameter. 
+ // +optional + Default *ArrayOrString `json:"default,omitempty"` +} + +// PropertySpec defines the struct for object keys +type PropertySpec struct { + Type ParamType `json:"type,omitempty"` +} + +// SetDefaults set the default type +func (pp *ParamSpec) SetDefaults(context.Context) { + if pp == nil { + return + } + + // Propagate inferred type to the parent ParamSpec's type, and default type to the PropertySpec's type + // The sequence to look at is type in ParamSpec -> properties -> type in default -> array/string/object value in default + // If neither `properties` or `default` section is provided, ParamTypeString will be the default type. + switch { + case pp.Type != "": + // If param type is provided by the author, do nothing but just set default type for PropertySpec in case `properties` section is provided. + pp.setDefaultsForProperties() + case pp.Properties != nil: + pp.Type = ParamTypeObject + // Also set default type for PropertySpec + pp.setDefaultsForProperties() + case pp.Default == nil: + // ParamTypeString is the default value (when no type can be inferred from the default value) + pp.Type = ParamTypeString + case pp.Default.Type != "": + pp.Type = pp.Default.Type + case pp.Default.ArrayVal != nil: + pp.Type = ParamTypeArray + case pp.Default.ObjectVal != nil: + pp.Type = ParamTypeObject + default: + pp.Type = ParamTypeString + } +} + +// setDefaultsForProperties sets default type for PropertySpec (string) if it's not specified +func (pp *ParamSpec) setDefaultsForProperties() { + for key, propertySpec := range pp.Properties { + if propertySpec.Type == "" { + pp.Properties[key] = PropertySpec{Type: ParamTypeString} + } + } +} + +// ResourceParam declares a string value to use for the parameter called Name, and is used in +// the specific context of PipelineResources. +type ResourceParam = resource.ResourceParam + +// Param declares an ArrayOrString to use for the parameter called name. 
+type Param struct { + Name string `json:"name"` + Value ArrayOrString `json:"value"` +} + +// ParamType indicates the type of an input parameter; +// Used to distinguish between a single string and an array of strings. +type ParamType string + +// Valid ParamTypes: +const ( + ParamTypeString ParamType = "string" + ParamTypeArray ParamType = "array" + ParamTypeObject ParamType = "object" +) + +// AllParamTypes can be used for ParamType validation. +var AllParamTypes = []ParamType{ParamTypeString, ParamTypeArray, ParamTypeObject} + +// ArrayOrString is modeled after IntOrString in kubernetes/apimachinery: + +// ArrayOrString is a type that can hold a single string or string array. +// Used in JSON unmarshalling so that a single JSON field can accept +// either an individual string or an array of strings. +// TODO (@chuangw6): This struct will be renamed or be embedded in a new struct to take into +// consideration the object case after the community reaches an agreement on it. +type ArrayOrString struct { + Type ParamType `json:"type"` // Represents the stored type of ArrayOrString. + StringVal string `json:"stringVal"` + // +listType=atomic + ArrayVal []string `json:"arrayVal"` + ObjectVal map[string]string `json:"objectVal"` +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (arrayOrString *ArrayOrString) UnmarshalJSON(value []byte) error { + // ArrayOrString is used for Results Value as well, the results can be any kind of + // data so we need to check if it is empty. + if len(value) == 0 { + arrayOrString.Type = ParamTypeString + return nil + } + if value[0] == '[' { + // We're trying to Unmarshal to []string, but for cases like []int or other types + // of nested array which we don't support yet, we should continue and Unmarshal + // it to String. If the Type being set doesn't match what it actually should be, + // it will be captured by validation in reconciler. 
+ // if failed to unmarshal to array, we will convert the value to string and marshal it to string + var a []string + if err := json.Unmarshal(value, &a); err == nil { + arrayOrString.Type = ParamTypeArray + arrayOrString.ArrayVal = a + return nil + } + } + if value[0] == '{' { + // if failed to unmarshal to map, we will convert the value to string and marshal it to string + var m map[string]string + if err := json.Unmarshal(value, &m); err == nil { + arrayOrString.Type = ParamTypeObject + arrayOrString.ObjectVal = m + return nil + } + } + + // By default we unmarshal to string + arrayOrString.Type = ParamTypeString + if err := json.Unmarshal(value, &arrayOrString.StringVal); err == nil { + return nil + } + arrayOrString.StringVal = string(value) + + return nil +} + +// MarshalJSON implements the json.Marshaller interface. +func (arrayOrString ArrayOrString) MarshalJSON() ([]byte, error) { + switch arrayOrString.Type { + case ParamTypeString: + return json.Marshal(arrayOrString.StringVal) + case ParamTypeArray: + return json.Marshal(arrayOrString.ArrayVal) + case ParamTypeObject: + return json.Marshal(arrayOrString.ObjectVal) + default: + return []byte{}, fmt.Errorf("impossible ArrayOrString.Type: %q", arrayOrString.Type) + } +} + +// ApplyReplacements applyes replacements for ArrayOrString type +func (arrayOrString *ArrayOrString) ApplyReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) { + switch arrayOrString.Type { + case ParamTypeArray: + var newArrayVal []string + for _, v := range arrayOrString.ArrayVal { + newArrayVal = append(newArrayVal, substitution.ApplyArrayReplacements(v, stringReplacements, arrayReplacements)...) 
+ } + arrayOrString.ArrayVal = newArrayVal + case ParamTypeObject: + newObjectVal := map[string]string{} + for k, v := range arrayOrString.ObjectVal { + newObjectVal[k] = substitution.ApplyReplacements(v, stringReplacements) + } + arrayOrString.ObjectVal = newObjectVal + default: + arrayOrString.applyOrCorrect(stringReplacements, arrayReplacements, objectReplacements) + } +} + +// applyOrCorrect deals with string param whose value can be string literal or a reference to a string/array/object param/result. +// If the value of arrayOrString is a reference to array or object, the type will be corrected from string to array/object. +func (arrayOrString *ArrayOrString) applyOrCorrect(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) { + stringVal := arrayOrString.StringVal + + // if the stringVal is a string literal or a string that mixed with var references + // just do the normal string replacement + if !exactVariableSubstitutionRegex.MatchString(stringVal) { + arrayOrString.StringVal = substitution.ApplyReplacements(arrayOrString.StringVal, stringReplacements) + return + } + + // trim the head "$(" and the tail ")" or "[*])" + // i.e. 
get "params.name" from "$(params.name)" or "$(params.name[*])" + trimedStringVal := StripStarVarSubExpression(stringVal) + + // if the stringVal is a reference to a string param + if _, ok := stringReplacements[trimedStringVal]; ok { + arrayOrString.StringVal = substitution.ApplyReplacements(arrayOrString.StringVal, stringReplacements) + } + + // if the stringVal is a reference to an array param, we need to change the type other than apply replacement + if _, ok := arrayReplacements[trimedStringVal]; ok { + arrayOrString.StringVal = "" + arrayOrString.ArrayVal = substitution.ApplyArrayReplacements(stringVal, stringReplacements, arrayReplacements) + arrayOrString.Type = ParamTypeArray + } + + // if the stringVal is a reference an object param, we need to change the type other than apply replacement + if _, ok := objectReplacements[trimedStringVal]; ok { + arrayOrString.StringVal = "" + arrayOrString.ObjectVal = objectReplacements[trimedStringVal] + arrayOrString.Type = ParamTypeObject + } +} + +// StripStarVarSubExpression strips "$(target[*])"" to get "target" +func StripStarVarSubExpression(s string) string { + return strings.TrimSuffix(strings.TrimSuffix(strings.TrimPrefix(s, "$("), ")"), "[*]") +} + +// NewArrayOrString creates an ArrayOrString of type ParamTypeString or ParamTypeArray, based on +// how many inputs are given (>1 input will create an array, not string). 
+func NewArrayOrString(value string, values ...string) *ArrayOrString { + if len(values) > 0 { + return &ArrayOrString{ + Type: ParamTypeArray, + ArrayVal: append([]string{value}, values...), + } + } + return &ArrayOrString{ + Type: ParamTypeString, + StringVal: value, + } +} + +// NewObject creates an ArrayOrString of type ParamTypeObject using the provided key-value pairs +func NewObject(pairs map[string]string) *ArrayOrString { + return &ArrayOrString{ + Type: ParamTypeObject, + ObjectVal: pairs, + } +} + +// ArrayReference returns the name of the parameter from array parameter reference +// returns arrayParam from $(params.arrayParam[*]) +func ArrayReference(a string) string { + return strings.TrimSuffix(strings.TrimPrefix(a, "$("+ParamsPrefix+"."), "[*])") +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/register.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/register.go new file mode 100644 index 0000000000..0c8cf3337d --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: pipeline.GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + // AddToScheme adds Build types to the scheme. + AddToScheme = schemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Task{}, + &TaskList{}, + ) // TODO(#4983): v1 types go here + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go new file mode 100644 index 0000000000..bb547b2a0f --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go @@ -0,0 +1,48 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// ResolverName is the name of a resolver from which a resource can be +// requested. +type ResolverName string + +// ResolverRef can be used to refer to a Pipeline or Task in a remote +// location like a git repo. This feature is in alpha and these fields +// are only available when the alpha feature gate is enabled. +type ResolverRef struct { + // Resolver is the name of the resolver that should perform + // resolution of the referenced Tekton resource, such as "git". + // +optional + Resolver ResolverName `json:"resolver,omitempty"` + // Resource contains the parameters used to identify the + // referenced Tekton resource. Example entries might include + // "repo" or "path" but the set of params ultimately depends on + // the chosen resolver. + // +optional + // +listType=atomic + Resource []ResolverParam `json:"resource,omitempty"` +} + +// ResolverParam is a single parameter passed to a resolver. +type ResolverParam struct { + // Name is the name of the parameter that will be passed to the + // resolver. + Name string `json:"name"` + // Value is the string value of the parameter that will be + // passed to the resolver. 
+ Value string `json:"value"` +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_interface.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_defaults.go similarity index 64% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_interface.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_defaults.go index 70e14bb774..9a5020ba12 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_defaults.go @@ -1,12 +1,9 @@ /* -Copyright 2019 The Tekton Authors - +Copyright 2022 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,13 +11,14 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import "context" -// PipelineObject is implemented by Pipeline and ClusterPipeline -type PipelineObject interface { - PipelineMetadata() metav1.ObjectMeta - PipelineSpec() PipelineSpec - Copy() PipelineObject +// SetDefaults set the default type for TaskResult +func (tr *TaskResult) SetDefaults(context.Context) { + if tr != nil && tr.Type == "" { + // ResultsTypeString is the default value + tr.Type = ResultsTypeString + } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_types.go new file mode 100644 index 0000000000..daf9abf260 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_types.go @@ -0,0 +1,73 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "strings" + +// TaskResult used to describe the results of a task +type TaskResult struct { + // Name the given name + Name string `json:"name"` + + // Type is the user-specified type of the result. The possible type + // is currently "string" and will support "array" in following work. + // +optional + Type ResultsType `json:"type,omitempty"` + + // Properties is the JSON Schema properties to support key-value pairs results. 
+ // +optional + Properties map[string]PropertySpec `json:"properties,omitempty"` + + // Description is a human-readable description of the result + // +optional + Description string `json:"description,omitempty"` +} + +// TaskRunResult used to describe the results of a task +type TaskRunResult struct { + // Name the given name + Name string `json:"name"` + + // Type is the user-specified type of the result. The possible type + // is currently "string" and will support "array" in following work. + // +optional + Type ResultsType `json:"type,omitempty"` + + // Value the given value of the result + Value ArrayOrString `json:"value"` +} + +// ResultsType indicates the type of a result; +// Used to distinguish between a single string and an array of strings. +// Note that there is ResultType used to find out whether a +// PipelineResourceResult is from a task result or not, which is different from +// this ResultsType. +// TODO(#4723): add "array" and "object" support +// TODO(#4723): align ResultsType and ParamType in ArrayOrString +type ResultsType string + +// Valid ResultsType: +const ( + ResultsTypeString ResultsType = "string" + ResultsTypeArray ResultsType = "array" + ResultsTypeObject ResultsType = "object" +) + +// AllResultsTypes can be used for ResultsTypes validation. +var AllResultsTypes = []ResultsType{ResultsTypeString, ResultsTypeArray, ResultsTypeObject} + +// ResultsArrayReference returns the reference of the result. e.g. 
results.resultname from $(results.resultname[*]) +func ResultsArrayReference(a string) string { + return strings.TrimSuffix(strings.TrimSuffix(strings.TrimPrefix(a, "$("), ")"), "[*]") +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go new file mode 100644 index 0000000000..de17c84718 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go @@ -0,0 +1,53 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + "regexp" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/version" + "knative.dev/pkg/apis" +) + +// ResultNameFormat Constant used to define the the regex Result.Name should follow +const ResultNameFormat = `^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$` + +var resultNameFormatRegex = regexp.MustCompile(ResultNameFormat) + +// Validate implements apis.Validatable +func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) { + if !resultNameFormatRegex.MatchString(tr.Name) { + return apis.ErrInvalidKeyName(tr.Name, "name", fmt.Sprintf("Name must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my-name', or 'my_name', regex used for validation is '%s')", ResultNameFormat)) + } + // Array and Object are alpha features + if tr.Type == ResultsTypeArray || tr.Type == ResultsTypeObject { + return errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.AlphaAPIFields)) + } + + // Resources created before the result.Type field was introduced may not have Type set + // and should be considered valid + if tr.Type == "" { + return nil + } + + // By default the result type is string + if tr.Type != ResultsTypeString { + return apis.ErrInvalidValue(tr.Type, "type", "type must be string") + } + + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json new file mode 100644 index 0000000000..87ba8b238f --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json @@ -0,0 +1,956 @@ +{ + "swagger": "2.0", + "info": { + "description": "Tekton Pipeline", + "title": "Tekton", + "version": "v0.17.2" + }, + "paths": {}, + "definitions": { + "pod.AffinityAssistantTemplate": { + "description": "AffinityAssistantTemplate holds pod specific configuration and is a subset of the generic pod Template", + "type": "object", + "properties": { + "imagePullSecrets": { + "description": "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.LocalObjectReference" + }, + "x-kubernetes-list-type": "atomic" + }, + "nodeSelector": { + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node.
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Toleration" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "pod.Template": { + "description": "Template holds pod specific configuration", + "type": "object", + "properties": { + "affinity": { + "description": "If specified, the pod's scheduling constraints", + "$ref": "#/definitions/v1.Affinity" + }, + "automountServiceAccountToken": { + "description": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.", + "type": "boolean" + }, + "dnsConfig": { + "description": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + "$ref": "#/definitions/v1.PodDNSConfig" + }, + "dnsPolicy": { + "description": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.", + "type": "string" + }, + "enableServiceLinks": { + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + "type": "boolean" + }, + "hostAliases": { + "description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. 
This is only valid for non-hostNetwork pods.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.HostAlias" + }, + "x-kubernetes-list-type": "atomic" + }, + "hostNetwork": { + "description": "HostNetwork specifies whether the pod may use the node network namespace", + "type": "boolean" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.LocalObjectReference" + }, + "x-kubernetes-list-type": "atomic" + }, + "nodeSelector": { + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, + "priorityClassName": { + "description": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + "type": "string" + }, + "runtimeClassName": { + "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", + "type": "string" + }, + "schedulerName": { + "description": "SchedulerName specifies the scheduler to be used to dispatch the Pod", + "type": "string" + }, + "securityContext": { + "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + "$ref": "#/definitions/v1.PodSecurityContext" + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Toleration" + }, + "x-kubernetes-list-type": "atomic" + }, + "volumes": { + "description": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Volume" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + } + } + }, + "v1.ArrayOrString": { + "description": "ArrayOrString is a type that can hold a single string or string array. Used in JSON unmarshalling so that a single JSON field can accept either an individual string or an array of strings. 
consideration the object case after the community reaches an agreement on it.", + "type": "object", + "required": [ + "type", + "stringVal", + "arrayVal", + "objectVal" + ], + "properties": { + "arrayVal": { + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "objectVal": { + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, + "stringVal": { + "description": "Represents the stored type of ArrayOrString.", + "type": "string", + "default": "" + }, + "type": { + "type": "string", + "default": "" + } + } + }, + "v1.Param": { + "description": "Param declares an ArrayOrString to use for the parameter called name.", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string", + "default": "" + }, + "value": { + "default": {}, + "$ref": "#/definitions/v1.ArrayOrString" + } + } + }, + "v1.ParamSpec": { + "description": "ParamSpec defines arbitrary parameters needed beyond typed inputs (such as resources). Parameter values are provided by users as inputs on a TaskRun or PipelineRun.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "default": { + "description": "Default is the value a parameter takes if no input value is supplied. 
If default is set, a Task may be executed without a supplied value for the parameter.", + "$ref": "#/definitions/v1.ArrayOrString" + }, + "description": { + "description": "Description is a user-facing description of the parameter that may be used to populate a UI.", + "type": "string" + }, + "name": { + "description": "Name declares the name by which a parameter is referenced.", + "type": "string", + "default": "" + }, + "properties": { + "description": "Properties is the JSON Schema properties to support key-value pairs parameter.", + "type": "object", + "additionalProperties": { + "default": {}, + "$ref": "#/definitions/v1.PropertySpec" + } + }, + "type": { + "description": "Type is the user-specified type of the parameter. The possible types are currently \"string\", \"array\" and \"object\", and \"string\" is the default.", + "type": "string" + } + } + }, + "v1.PipelineWorkspaceDeclaration": { + "description": "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", + "type": "object", + "required": [ + "name" + ], + "properties": { + "description": { + "description": "Description is a human readable string describing how the workspace will be used in the Pipeline. It can be useful to include a bit of detail about which tasks are intended to have access to the data on the workspace.", + "type": "string" + }, + "name": { + "description": "Name is the name of a workspace to be provided by a PipelineRun.", + "type": "string", + "default": "" + }, + "optional": { + "description": "Optional marks a Workspace as not being required in PipelineRuns. 
By default this field is false and so declared workspaces are required.", + "type": "boolean" + } + } + }, + "v1.PropertySpec": { + "description": "PropertySpec defines the struct for object keys", + "type": "object", + "properties": { + "type": { + "type": "string" + } + } + }, + "v1.ResolverParam": { + "description": "ResolverParam is a single parameter passed to a resolver.", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "description": "Name is the name of the parameter that will be passed to the resolver.", + "type": "string", + "default": "" + }, + "value": { + "description": "Value is the string value of the parameter that will be passed to the resolver.", + "type": "string", + "default": "" + } + } + }, + "v1.ResolverRef": { + "description": "ResolverRef can be used to refer to a Pipeline or Task in a remote location like a git repo. This feature is in alpha and these fields are only available when the alpha feature gate is enabled.", + "type": "object", + "properties": { + "resolver": { + "description": "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".", + "type": "string" + }, + "resource": { + "description": "Resource contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.ResolverParam" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.Sidecar": { + "description": "Sidecar has nearly the same data structure as Step but does not have the ability to timeout.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "args": { + "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvVar" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. 
When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvFromSource" + }, + "x-kubernetes-list-type": "atomic" + }, + "image": { + "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "lifecycle": { + "description": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + "$ref": "#/definitions/v1.Lifecycle" + }, + "livenessProbe": { + "description": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/v1.Probe" + }, + "name": { + "description": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + "type": "string", + "default": "" + }, + "ports": { + "description": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. 
Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.ContainerPort" + }, + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge" + }, + "readinessProbe": { + "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/v1.Probe" + }, + "resources": { + "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "default": {}, + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "script": { + "description": "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.", + "type": "string" + }, + "securityContext": { + "description": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "$ref": "#/definitions/v1.SecurityContext" + }, + "startupProbe": { + "description": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/v1.Probe" + }, + "stdin": { + "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "type": "boolean" + }, + "stdinOnce": { + "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "type": "boolean" + }, + "terminationMessagePath": { + "description": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "type": "string" + }, + "terminationMessagePolicy": { + "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. 
FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "type": "string" + }, + "tty": { + "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + "type": "boolean" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeDevice" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeMount" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + }, + "workspaces": { + "description": "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Sidecar wants exclusive access to. 
Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WorkspaceUsage" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.Step": { + "description": "Step runs a subcomponent of a Task", + "type": "object", + "required": [ + "name" + ], + "properties": { + "args": { + "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvVar" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvFromSource" + }, + "x-kubernetes-list-type": "atomic" + }, + "image": { + "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "name": { + "description": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). 
Cannot be updated.", + "type": "string", + "default": "" + }, + "onError": { + "description": "OnError defines the exiting behavior of a container on error can be set to [ continue | stopAndFail ] stopAndFail indicates exit the taskRun if the container exits with non-zero exit code continue indicates continue executing the rest of the steps irrespective of the container exit code", + "type": "string" + }, + "resources": { + "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "default": {}, + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "script": { + "description": "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.", + "type": "string" + }, + "securityContext": { + "description": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "$ref": "#/definitions/v1.SecurityContext" + }, + "stderrConfig": { + "description": "Stores configuration for the stderr stream of the step.", + "$ref": "#/definitions/v1.StepOutputConfig" + }, + "stdoutConfig": { + "description": "Stores configuration for the stdout stream of the step.", + "$ref": "#/definitions/v1.StepOutputConfig" + }, + "timeout": { + "description": "Timeout is the time after which the step times out. Defaults to never. 
Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "$ref": "#/definitions/v1.Duration" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeDevice" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeMount" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + }, + "workspaces": { + "description": "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Step wants exclusive access to. 
Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WorkspaceUsage" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.StepOutputConfig": { + "description": "StepOutputConfig stores configuration for a step output stream.", + "type": "object", + "properties": { + "path": { + "description": "Path to duplicate stdout stream to on container's local filesystem.", + "type": "string" + } + } + }, + "v1.StepTemplate": { + "description": "StepTemplate is a template for a Step", + "type": "object", + "properties": { + "args": { + "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvVar" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvFromSource" + }, + "x-kubernetes-list-type": "atomic" + }, + "image": { + "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "resources": { + "description": "Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "default": {}, + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "securityContext": { + "description": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "$ref": "#/definitions/v1.SecurityContext" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeDevice" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeMount" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + } + } + }, + "v1.Task": { + "description": "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1.ObjectMeta" + }, + "spec": { + "description": "Spec holds the desired state of the Task from the client", + "default": {}, + "$ref": "#/definitions/v1.TaskSpec" + } + } + }, + "v1.TaskList": { + "description": "TaskList contains a list of Task", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Task" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1.ListMeta" + } + } + }, + "v1.TaskResult": { + "description": "TaskResult used to describe the results of a task", + "type": "object", + "required": [ + "name" + ], + "properties": { + "description": { + "description": "Description is a human-readable description of the result", + "type": "string" + }, + "name": { + "description": "Name the given name", + "type": "string", + "default": "" + }, + "properties": { + "description": "Properties is the JSON Schema properties to support key-value pairs results.", + "type": "object", + "additionalProperties": { + "default": {}, + "$ref": "#/definitions/v1.PropertySpec" + } + }, + "type": { + "description": "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.", + "type": "string" + } + } + }, + "v1.TaskRunResult": { + "description": "TaskRunResult used to describe the results of a task", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "description": "Name the given name", + "type": "string", + "default": "" + }, + "type": { + "description": "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.", + "type": "string" + }, + "value": { + "description": "Value the given value of the result", + "default": {}, + "$ref": "#/definitions/v1.ArrayOrString" + } + } + }, + "v1.TaskSpec": { + "description": "TaskSpec defines the desired state of Task.", + "type": "object", + "properties": { + "description": { + "description": "Description is a user-facing description of the task that may be used to populate a UI.", + "type": "string" + }, + "params": { + "description": "Params is a list of input parameters required to run the task. 
Params must be supplied as inputs in TaskRuns unless they declare a default value.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.ParamSpec" + }, + "x-kubernetes-list-type": "atomic" + }, + "results": { + "description": "Results are values that this Task can output", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.TaskResult" + }, + "x-kubernetes-list-type": "atomic" + }, + "sidecars": { + "description": "Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Sidecar" + }, + "x-kubernetes-list-type": "atomic" + }, + "stepTemplate": { + "description": "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.", + "$ref": "#/definitions/v1.StepTemplate" + }, + "steps": { + "description": "Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Step" + }, + "x-kubernetes-list-type": "atomic" + }, + "volumes": { + "description": "Volumes is a collection of volumes that are available to mount into the steps of the build.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Volume" + }, + "x-kubernetes-list-type": "atomic" + }, + "workspaces": { + "description": "Workspaces are the volumes that this Task requires.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WorkspaceDeclaration" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.WorkspaceBinding": { + "description": "WorkspaceBinding maps a Task's declared workspace to a Volume.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "configMap": { + "description": "ConfigMap represents a configMap that should populate this 
workspace.", + "$ref": "#/definitions/v1.ConfigMapVolumeSource" + }, + "emptyDir": { + "description": "EmptyDir represents a temporary directory that shares a Task's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir Either this OR PersistentVolumeClaim can be used.", + "$ref": "#/definitions/v1.EmptyDirVolumeSource" + }, + "name": { + "description": "Name is the name of the workspace populated by the volume.", + "type": "string", + "default": "" + }, + "persistentVolumeClaim": { + "description": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used.", + "$ref": "#/definitions/v1.PersistentVolumeClaimVolumeSource" + }, + "secret": { + "description": "Secret represents a secret that should populate this workspace.", + "$ref": "#/definitions/v1.SecretVolumeSource" + }, + "subPath": { + "description": "SubPath is optionally a directory on the volume which should be used for this binding (i.e. the volume will be mounted at this sub directory).", + "type": "string" + }, + "volumeClaimTemplate": { + "description": "VolumeClaimTemplate is a template for a claim that will be created in the same namespace. 
The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun.", + "$ref": "#/definitions/v1.PersistentVolumeClaim" + } + } + }, + "v1.WorkspaceDeclaration": { + "description": "WorkspaceDeclaration is a declaration of a volume that a Task requires.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "description": { + "description": "Description is an optional human readable description of this volume.", + "type": "string" + }, + "mountPath": { + "description": "MountPath overrides the directory that the volume will be made available at.", + "type": "string" + }, + "name": { + "description": "Name is the name by which you can bind the volume at runtime.", + "type": "string", + "default": "" + }, + "optional": { + "description": "Optional marks a Workspace as not being required in TaskRuns. By default this field is false and so declared workspaces are required.", + "type": "boolean" + }, + "readOnly": { + "description": "ReadOnly dictates whether a mounted volume is writable. By default this field is false and so mounted volumes are writable.", + "type": "boolean" + } + } + }, + "v1.WorkspacePipelineTaskBinding": { + "description": "WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be mapped to a task's declared workspace.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "Name is the name of the workspace as declared by the task", + "type": "string", + "default": "" + }, + "subPath": { + "description": "SubPath is optionally a directory on the volume which should be used for this binding (i.e. 
the volume will be mounted at this sub directory).", + "type": "string" + }, + "workspace": { + "description": "Workspace is the name of the workspace declared by the pipeline", + "type": "string" + } + } + }, + "v1.WorkspaceUsage": { + "description": "WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access to a Workspace defined in a Task.", + "type": "object", + "required": [ + "name", + "mountPath" + ], + "properties": { + "mountPath": { + "description": "MountPath is the path that the workspace should be mounted to inside the Step or Sidecar, overriding any MountPath specified in the Task's WorkspaceDeclaration.", + "type": "string", + "default": "" + }, + "name": { + "description": "Name is the name of the workspace this Step or Sidecar wants access to.", + "type": "string", + "default": "" + } + } + } + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_defaults.go similarity index 76% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_defaults.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_defaults.go index d61bc7b7cf..77a38425f2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2022 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1 import ( "context" @@ -34,14 +34,7 @@ func (ts *TaskSpec) SetDefaults(ctx context.Context) { for i := range ts.Params { ts.Params[i].SetDefaults(ctx) } - if ts.Inputs != nil { - ts.Inputs.SetDefaults(ctx) - } -} - -// SetDefaults implements apis.Defaultable -func (inputs *Inputs) SetDefaults(ctx context.Context) { - for i := range inputs.Params { - inputs.Params[i].SetDefaults(ctx) + for i := range ts.Results { + ts.Results[i].SetDefaults(ctx) } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go new file mode 100644 index 0000000000..4283e8119c --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go @@ -0,0 +1,105 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/kmeta" +) + +// +genclient +// +genclient:noStatus +// +genreconciler:krshapedlogic=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Task represents a collection of sequential steps that are run as part of a +// Pipeline using a set of inputs and producing a set of outputs. 
Tasks execute +// when TaskRuns are created that provide the input parameters and resources and +// output resources the Task requires. +// +// +k8s:openapi-gen=true +type Task struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata"` + + // Spec holds the desired state of the Task from the client + // +optional + Spec TaskSpec `json:"spec"` +} + +var _ kmeta.OwnerRefable = (*Task)(nil) + +// GetGroupVersionKind implements kmeta.OwnerRefable. +func (*Task) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind(pipeline.TaskControllerName) +} + +// TaskSpec defines the desired state of Task. +type TaskSpec struct { + + // Params is a list of input parameters required to run the task. Params + // must be supplied as inputs in TaskRuns unless they declare a default + // value. + // +optional + // +listType=atomic + Params []ParamSpec `json:"params,omitempty"` + + // Description is a user-facing description of the task that may be + // used to populate a UI. + // +optional + Description string `json:"description,omitempty"` + + // Steps are the steps of the build; each step is run sequentially with the + // source mounted into /workspace. + // +listType=atomic + Steps []Step `json:"steps,omitempty"` + + // Volumes is a collection of volumes that are available to mount into the + // steps of the build. + // +listType=atomic + Volumes []corev1.Volume `json:"volumes,omitempty"` + + // StepTemplate can be used as the basis for all step containers within the + // Task, so that the steps inherit settings on the base container. + StepTemplate *StepTemplate `json:"stepTemplate,omitempty"` + + // Sidecars are run alongside the Task's step containers. They begin before + // the steps start and end after the steps complete. + // +listType=atomic + Sidecars []Sidecar `json:"sidecars,omitempty"` + + // Workspaces are the volumes that this Task requires. 
+ // +listType=atomic + Workspaces []WorkspaceDeclaration `json:"workspaces,omitempty"` + + // Results are values that this Task can output + // +listType=atomic + Results []TaskResult `json:"results,omitempty"` +} + +// TaskList contains a list of Task +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type TaskList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []Task `json:"items"` +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go new file mode 100644 index 0000000000..10d4470047 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go @@ -0,0 +1,600 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/validate" + "github.com/tektoncd/pipeline/pkg/apis/version" + "github.com/tektoncd/pipeline/pkg/list" + "github.com/tektoncd/pipeline/pkg/substitution" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "knative.dev/pkg/apis" +) + +const ( + // stringAndArrayVariableNameFormat is the regex to validate if string/array variable name format follows the following rules. + // - Must only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.) + // - Must begin with a letter or an underscore (_) + stringAndArrayVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9.-]*$" + + // objectVariableNameFormat is the regext used to validate object name and key names format + // The difference with the array or string name format is that object variable names shouldn't contain dots. + objectVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9-]*$" +) + +var _ apis.Validatable = (*Task)(nil) +var stringAndArrayVariableNameFormatRegex = regexp.MustCompile(stringAndArrayVariableNameFormat) +var objectVariableNameFormatRegex = regexp.MustCompile(objectVariableNameFormat) + +// Validate implements apis.Validatable +func (t *Task) Validate(ctx context.Context) *apis.FieldError { + errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") + if apis.IsInDelete(ctx) { + return nil + } + return errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) +} + +// Validate implements apis.Validatable +func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { + if len(ts.Steps) == 0 { + errs = errs.Also(apis.ErrMissingField("steps")) + } + + if config.IsSubstituted(ctx) { + // Validate the task's workspaces only. 
+ errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) + errs = errs.Also(validateWorkspaceUsages(ctx, ts)) + + return errs + } + + errs = errs.Also(ValidateVolumes(ts.Volumes).ViaField("volumes")) + errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) + errs = errs.Also(validateWorkspaceUsages(ctx, ts)) + mergedSteps, err := MergeStepsWithStepTemplate(ts.StepTemplate, ts.Steps) + if err != nil { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("error merging step template and steps: %s", err), + Paths: []string{"stepTemplate"}, + Details: err.Error(), + }) + } + + errs = errs.Also(validateSteps(ctx, mergedSteps).ViaField("steps")) + errs = errs.Also(ValidateParameterTypes(ctx, ts.Params).ViaField("params")) + errs = errs.Also(ValidateParameterVariables(ctx, ts.Steps, ts.Params)) + errs = errs.Also(validateTaskContextVariables(ctx, ts.Steps)) + errs = errs.Also(validateResults(ctx, ts.Results).ViaField("results")) + return errs +} + +func validateResults(ctx context.Context, results []TaskResult) (errs *apis.FieldError) { + for index, result := range results { + errs = errs.Also(result.Validate(ctx).ViaIndex(index)) + } + return errs +} + +// a mount path which conflicts with any other declared workspaces, with the explicitly +// declared volume mounts, or with the stepTemplate. The names must also be unique. 
+func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *StepTemplate) (errs *apis.FieldError) { + mountPaths := sets.NewString() + for _, step := range steps { + for _, vm := range step.VolumeMounts { + mountPaths.Insert(filepath.Clean(vm.MountPath)) + } + } + if stepTemplate != nil { + for _, vm := range stepTemplate.VolumeMounts { + mountPaths.Insert(filepath.Clean(vm.MountPath)) + } + } + + wsNames := sets.NewString() + for idx, w := range workspaces { + // Workspace names must be unique + if wsNames.Has(w.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace name %q must be unique", w.Name), "name").ViaIndex(idx)) + } else { + wsNames.Insert(w.Name) + } + // Workspaces must not try to use mount paths that are already used + mountPath := filepath.Clean(w.GetMountPath()) + if _, ok := mountPaths[mountPath]; ok { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace mount path %q must be unique", mountPath), "mountpath").ViaIndex(idx)) + } + mountPaths[mountPath] = struct{}{} + } + return errs +} + +// validateWorkspaceUsages checks that all WorkspaceUsage objects in Steps +// refer to workspaces that are defined in the Task. +// +// This is an alpha feature and will fail validation if it's used by a step +// or sidecar when the enable-api-fields feature gate is anything but "alpha". 
+func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.FieldError) { + workspaces := ts.Workspaces + steps := ts.Steps + sidecars := ts.Sidecars + + wsNames := sets.NewString() + for _, w := range workspaces { + wsNames.Insert(w.Name) + } + + for stepIdx, step := range steps { + if len(step.Workspaces) != 0 { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.AlphaAPIFields).ViaIndex(stepIdx).ViaField("steps")) + } + for workspaceIdx, w := range step.Workspaces { + if !wsNames.Has(w.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("undefined workspace %q", w.Name), "name").ViaIndex(workspaceIdx).ViaField("workspaces").ViaIndex(stepIdx).ViaField("steps")) + } + } + } + + for sidecarIdx, sidecar := range sidecars { + if len(sidecar.Workspaces) != 0 { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.AlphaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) + } + for workspaceIdx, w := range sidecar.Workspaces { + if !wsNames.Has(w.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("undefined workspace %q", w.Name), "name").ViaIndex(workspaceIdx).ViaField("workspaces").ViaIndex(sidecarIdx).ViaField("sidecars")) + } + } + } + + return errs +} + +// ValidateVolumes validates a slice of volumes to make sure there are no dupilcate names +func ValidateVolumes(volumes []corev1.Volume) (errs *apis.FieldError) { + // Task must not have duplicate volume names. + vols := sets.NewString() + for idx, v := range volumes { + if vols.Has(v.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("multiple volumes with same name %q", v.Name), "name").ViaIndex(idx)) + } else { + vols.Insert(v.Name) + } + } + return errs +} + +func validateSteps(ctx context.Context, steps []Step) (errs *apis.FieldError) { + // Task must not have duplicate step names. 
+ names := sets.NewString() + for idx, s := range steps { + errs = errs.Also(validateStep(ctx, s, names).ViaIndex(idx)) + } + return errs +} + +func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.FieldError) { + if s.Image == "" { + errs = errs.Also(apis.ErrMissingField("Image")) + } + + if s.Script != "" { + if len(s.Command) > 0 { + errs = errs.Also(&apis.FieldError{ + Message: "script cannot be used with command", + Paths: []string{"script"}, + }) + } + } + + if s.Name != "" { + if names.Has(s.Name) { + errs = errs.Also(apis.ErrInvalidValue(s.Name, "name")) + } + if e := validation.IsDNS1123Label(s.Name); len(e) > 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("invalid value %q", s.Name), + Paths: []string{"name"}, + Details: "Task step name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + }) + } + names.Insert(s.Name) + } + + if s.Timeout != nil { + if s.Timeout.Duration < time.Duration(0) { + return apis.ErrInvalidValue(s.Timeout.Duration, "negative timeout") + } + } + + for j, vm := range s.VolumeMounts { + if strings.HasPrefix(vm.MountPath, "/tekton/") && + !strings.HasPrefix(vm.MountPath, "/tekton/home") { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("volumeMount cannot be mounted under /tekton/ (volumeMount %q mounted at %q)", vm.Name, vm.MountPath), "mountPath").ViaFieldIndex("volumeMounts", j)) + } + if strings.HasPrefix(vm.Name, "tekton-internal-") { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf(`volumeMount name %q cannot start with "tekton-internal-"`, vm.Name), "name").ViaFieldIndex("volumeMounts", j)) + } + } + + if s.OnError != "" { + if s.OnError != "continue" && s.OnError != "stopAndFail" { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("invalid value: %v", s.OnError), + Paths: []string{"onError"}, + Details: "Task step onError must be either continue or stopAndFail", + }) + } + } + + if s.Script != "" { + 
cleaned := strings.TrimSpace(s.Script) + if strings.HasPrefix(cleaned, "#!win") { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "windows script support", config.AlphaAPIFields).ViaField("script")) + } + } + // StdoutConfig is an alpha feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha". + if s.StdoutConfig != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step stdout stream support", config.AlphaAPIFields).ViaField("stdoutconfig")) + } + // StderrConfig is an alpha feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha". + if s.StderrConfig != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step stderr stream support", config.AlphaAPIFields).ViaField("stderrconfig")) + } + return errs +} + +// ValidateParameterTypes validates all the types within a slice of ParamSpecs +func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) { + for _, p := range params { + if p.Type == ParamTypeObject { + // Object type parameter is an alpha feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha". + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) + } + errs = errs.Also(p.ValidateType()) + } + return errs +} + +// ValidateType checks that the type of a ParamSpec is allowed and its default value matches that type +func (p ParamSpec) ValidateType() *apis.FieldError { + // Ensure param has a valid type. + validType := false + for _, allowedType := range AllParamTypes { + if p.Type == allowedType { + validType = true + } + } + if !validType { + return apis.ErrInvalidValue(p.Type, fmt.Sprintf("%s.type", p.Name)) + } + + // If a default value is provided, ensure its type matches param's declared type. 
+ if (p.Default != nil) && (p.Default.Type != p.Type) { + return &apis.FieldError{ + Message: fmt.Sprintf( + "\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), + Paths: []string{ + fmt.Sprintf("%s.type", p.Name), + fmt.Sprintf("%s.default.type", p.Name), + }, + } + } + + // Check object type and its PropertySpec type + return p.ValidateObjectType() +} + +// ValidateObjectType checks that object type parameter does not miss the +// definition of `properties` section and the type of a PropertySpec is allowed. +// (Currently, only string is allowed) +func (p ParamSpec) ValidateObjectType() *apis.FieldError { + if p.Type == ParamTypeObject && p.Properties == nil { + return apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) + } + + invalidKeys := []string{} + for key, propertySpec := range p.Properties { + if propertySpec.Type != ParamTypeString { + invalidKeys = append(invalidKeys, key) + } + } + + if len(invalidKeys) != 0 { + return &apis.FieldError{ + Message: fmt.Sprintf("The value type specified for these keys %v is invalid", invalidKeys), + Paths: []string{fmt.Sprintf("%s.properties", p.Name)}, + } + } + + return nil +} + +// ValidateParameterVariables validates all variables within a slice of ParamSpecs against a slice of Steps +func ValidateParameterVariables(ctx context.Context, steps []Step, params []ParamSpec) *apis.FieldError { + allParameterNames := sets.NewString() + stringParameterNames := sets.NewString() + arrayParameterNames := sets.NewString() + objectParamSpecs := []ParamSpec{} + var errs *apis.FieldError + for _, p := range params { + // validate no duplicate names + if allParameterNames.Has(p.Name) { + errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) + } + allParameterNames.Insert(p.Name) + + switch p.Type { + case ParamTypeArray: + arrayParameterNames.Insert(p.Name) + case ParamTypeObject: + objectParamSpecs = append(objectParamSpecs, p) + default: + 
stringParameterNames.Insert(p.Name) + } + } + + errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParamSpecs)) + errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) + errs = errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) + errs = errs.Also(validateObjectDefault(objectParamSpecs)) + return errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) +} + +func validateTaskContextVariables(ctx context.Context, steps []Step) *apis.FieldError { + taskRunContextNames := sets.NewString().Insert( + "name", + "namespace", + "uid", + ) + taskContextNames := sets.NewString().Insert( + "name", + "retry-count", + ) + errs := validateVariables(ctx, steps, "context\\.taskRun", taskRunContextNames) + return errs.Also(validateVariables(ctx, steps, "context\\.task", taskContextNames)) +} + +// validateObjectUsage validates the usage of individual attributes of an object param and the usage of the entire object +func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) (errs *apis.FieldError) { + objectParameterNames := sets.NewString() + for _, p := range params { + // collect all names of object type params + objectParameterNames.Insert(p.Name) + + // collect all keys for this object param + objectKeys := sets.NewString() + for key := range p.Properties { + objectKeys.Insert(key) + } + + // check if the object's key names are referenced correctly i.e. param.objectParam.key1 + errs = errs.Also(validateVariables(ctx, steps, fmt.Sprintf("params\\.%s", p.Name), objectKeys)) + } + + return errs.Also(validateObjectUsageAsWhole(steps, "params", objectParameterNames)) +} + +// validateObjectDefault validates the keys of all the object params within a +// slice of ParamSpecs are provided in default iff the default section is provided. 
+func validateObjectDefault(objectParams []ParamSpec) (errs *apis.FieldError) { + for _, p := range objectParams { + errs = errs.Also(ValidateObjectKeys(p.Properties, p.Default).ViaField(p.Name)) + } + return errs +} + +// ValidateObjectKeys validates if object keys defined in properties are all provided in its value provider iff the provider is not nil. +func ValidateObjectKeys(properties map[string]PropertySpec, propertiesProvider *ArrayOrString) (errs *apis.FieldError) { + if propertiesProvider == nil || propertiesProvider.ObjectVal == nil { + return nil + } + + neededKeys := []string{} + providedKeys := []string{} + + // collect all needed keys + for key := range properties { + neededKeys = append(neededKeys, key) + } + + // collect all provided keys + for key := range propertiesProvider.ObjectVal { + providedKeys = append(providedKeys, key) + } + + missings := list.DiffLeft(neededKeys, providedKeys) + if len(missings) != 0 { + return &apis.FieldError{ + Message: fmt.Sprintf("Required key(s) %s are missing in the value provider.", missings), + Paths: []string{"properties", "default"}, + } + } + + return nil +} + +// validateObjectUsageAsWhole makes sure the object params are not used as whole when providing values for strings +// i.e. 
param.objectParam, param.objectParam[*] +func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + for idx, step := range steps { + errs = errs.Also(validateStepObjectUsageAsWhole(step, prefix, vars)).ViaFieldIndex("steps", idx) + } + return errs +} + +func validateStepObjectUsageAsWhole(step Step, prefix string, vars sets.String) *apis.FieldError { + errs := validateTaskNoObjectReferenced(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(validateTaskNoObjectReferenced(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(validateTaskNoObjectReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(validateTaskNoObjectReferenced(step.Script, prefix, vars).ViaField("script")) + for i, cmd := range step.Command { + errs = errs.Also(validateTaskNoObjectReferenced(cmd, prefix, vars).ViaFieldIndex("command", i)) + } + for i, arg := range step.Args { + errs = errs.Also(validateTaskNoObjectReferenced(arg, prefix, vars).ViaFieldIndex("args", i)) + + } + for _, env := range step.Env { + errs = errs.Also(validateTaskNoObjectReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + } + for i, v := range step.VolumeMounts { + errs = errs.Also(validateTaskNoObjectReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoObjectReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoObjectReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + } + return errs +} + +func validateArrayUsage(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + for idx, step := range steps { + errs = errs.Also(validateStepArrayUsage(step, prefix, vars)).ViaFieldIndex("steps", idx) + } + return errs +} + +func validateStepArrayUsage(step Step, prefix string, vars sets.String) *apis.FieldError { + errs := 
validateTaskNoArrayReferenced(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(validateTaskNoArrayReferenced(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(validateTaskNoArrayReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(validateTaskNoArrayReferenced(step.Script, prefix, vars).ViaField("script")) + for i, cmd := range step.Command { + errs = errs.Also(validateTaskArraysIsolated(cmd, prefix, vars).ViaFieldIndex("command", i)) + } + for i, arg := range step.Args { + errs = errs.Also(validateTaskArraysIsolated(arg, prefix, vars).ViaFieldIndex("args", i)) + + } + for _, env := range step.Env { + errs = errs.Also(validateTaskNoArrayReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + } + for i, v := range step.VolumeMounts { + errs = errs.Also(validateTaskNoArrayReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoArrayReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoArrayReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + } + return errs +} + +func validateVariables(ctx context.Context, steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + // We've checked param name format. 
Now, we want to check if param names are referenced correctly in each step + for idx, step := range steps { + errs = errs.Also(validateStepVariables(ctx, step, prefix, vars).ViaFieldIndex("steps", idx)) + } + return errs +} + +// validateNameFormat validates that the name format of all param types follows the rules +func validateNameFormat(stringAndArrayParams sets.String, objectParams []ParamSpec) (errs *apis.FieldError) { + // checking string or array name format + // ---- + invalidStringAndArrayNames := []string{} + // Converting to sorted list here rather than just looping map keys + // because we want the order of items in vars to be deterministic for purpose of unit testing + for _, name := range stringAndArrayParams.List() { + if !stringAndArrayVariableNameFormatRegex.MatchString(name) { + invalidStringAndArrayNames = append(invalidStringAndArrayNames, name) + } + } + + if len(invalidStringAndArrayNames) != 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("The format of following array and string variable names is invalid: %s", invalidStringAndArrayNames), + Paths: []string{"params"}, + Details: "String/Array Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.)\nMust begin with a letter or an underscore (_)", + }) + } + + // checking object name and key name format + // ----- + invalidObjectNames := map[string][]string{} + for _, obj := range objectParams { + // check object param name + if !objectVariableNameFormatRegex.MatchString(obj.Name) { + invalidObjectNames[obj.Name] = []string{} + } + + // check key names + for k := range obj.Properties { + if !objectVariableNameFormatRegex.MatchString(k) { + invalidObjectNames[obj.Name] = append(invalidObjectNames[obj.Name], k) + } + } + } + + if len(invalidObjectNames) != 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("Object param name and key name format is invalid: %s", invalidObjectNames), + Paths: []string{"params"}, + Details: "Object 
Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_) \nMust begin with a letter or an underscore (_)", + }) + } + + return errs +} + +func validateStepVariables(ctx context.Context, step Step, prefix string, vars sets.String) *apis.FieldError { + errs := validateTaskVariable(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(validateTaskVariable(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(validateTaskVariable(step.WorkingDir, prefix, vars).ViaField("workingDir")) + if !(config.FromContextOrDefaults(ctx).FeatureFlags.EnableAPIFields == "alpha" && prefix == "params") { + errs = errs.Also(validateTaskVariable(step.Script, prefix, vars).ViaField("script")) + } + for i, cmd := range step.Command { + errs = errs.Also(validateTaskVariable(cmd, prefix, vars).ViaFieldIndex("command", i)) + } + for i, arg := range step.Args { + errs = errs.Also(validateTaskVariable(arg, prefix, vars).ViaFieldIndex("args", i)) + } + for _, env := range step.Env { + errs = errs.Also(validateTaskVariable(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + } + for i, v := range step.VolumeMounts { + errs = errs.Also(validateTaskVariable(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskVariable(v.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskVariable(v.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i)) + } + return errs +} + +func validateTaskVariable(value, prefix string, vars sets.String) *apis.FieldError { + return substitution.ValidateVariableP(value, prefix, vars) +} + +func validateTaskNoObjectReferenced(value, prefix string, objectNames sets.String) *apis.FieldError { + return substitution.ValidateEntireVariableProhibitedP(value, prefix, objectNames) +} + +func validateTaskNoArrayReferenced(value, prefix string, arrayNames sets.String) *apis.FieldError { + return 
substitution.ValidateVariableProhibitedP(value, prefix, arrayNames) +} + +func validateTaskArraysIsolated(value, prefix string, arrayNames sets.String) *apis.FieldError { + return substitution.ValidateVariableIsolatedP(value, prefix, arrayNames) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go new file mode 100644 index 0000000000..da89660624 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go @@ -0,0 +1,124 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "path/filepath" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + corev1 "k8s.io/api/core/v1" +) + +// WorkspaceDeclaration is a declaration of a volume that a Task requires. +type WorkspaceDeclaration struct { + // Name is the name by which you can bind the volume at runtime. + Name string `json:"name"` + // Description is an optional human readable description of this volume. + // +optional + Description string `json:"description,omitempty"` + // MountPath overrides the directory that the volume will be made available at. + // +optional + MountPath string `json:"mountPath,omitempty"` + // ReadOnly dictates whether a mounted volume is writable. By default this + // field is false and so mounted volumes are writable. 
+ ReadOnly bool `json:"readOnly,omitempty"` + // Optional marks a Workspace as not being required in TaskRuns. By default + // this field is false and so declared workspaces are required. + Optional bool `json:"optional,omitempty"` +} + +// GetMountPath returns the mountPath for w which is the MountPath if provided or the +// default if not. +func (w *WorkspaceDeclaration) GetMountPath() string { + if w.MountPath != "" { + return w.MountPath + } + return filepath.Join(pipeline.WorkspaceDir, w.Name) +} + +// WorkspaceBinding maps a Task's declared workspace to a Volume. +type WorkspaceBinding struct { + // Name is the name of the workspace populated by the volume. + Name string `json:"name"` + // SubPath is optionally a directory on the volume which should be used + // for this binding (i.e. the volume will be mounted at this sub directory). + // +optional + SubPath string `json:"subPath,omitempty"` + // VolumeClaimTemplate is a template for a claim that will be created in the same namespace. + // The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun. + // +optional + VolumeClaimTemplate *corev1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` + // PersistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used. + // +optional + PersistentVolumeClaim *corev1.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` + // EmptyDir represents a temporary directory that shares a Task's lifetime. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // Either this OR PersistentVolumeClaim can be used. + // +optional + EmptyDir *corev1.EmptyDirVolumeSource `json:"emptyDir,omitempty"` + // ConfigMap represents a configMap that should populate this workspace. 
+ // +optional + ConfigMap *corev1.ConfigMapVolumeSource `json:"configMap,omitempty"` + // Secret represents a secret that should populate this workspace. + // +optional + Secret *corev1.SecretVolumeSource `json:"secret,omitempty"` +} + +// WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun +// is expected to populate with a workspace binding. +// Deprecated: use PipelineWorkspaceDeclaration type instead +type WorkspacePipelineDeclaration = PipelineWorkspaceDeclaration + +// PipelineWorkspaceDeclaration creates a named slot in a Pipeline that a PipelineRun +// is expected to populate with a workspace binding. +type PipelineWorkspaceDeclaration struct { + // Name is the name of a workspace to be provided by a PipelineRun. + Name string `json:"name"` + // Description is a human readable string describing how the workspace will be + // used in the Pipeline. It can be useful to include a bit of detail about which + // tasks are intended to have access to the data on the workspace. + // +optional + Description string `json:"description,omitempty"` + // Optional marks a Workspace as not being required in PipelineRuns. By default + // this field is false and so declared workspaces are required. + Optional bool `json:"optional,omitempty"` +} + +// WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be +// mapped to a task's declared workspace. +type WorkspacePipelineTaskBinding struct { + // Name is the name of the workspace as declared by the task + Name string `json:"name"` + // Workspace is the name of the workspace declared by the pipeline + // +optional + Workspace string `json:"workspace,omitempty"` + // SubPath is optionally a directory on the volume which should be used + // for this binding (i.e. the volume will be mounted at this sub directory). 
+ // +optional + SubPath string `json:"subPath,omitempty"` +} + +// WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access +// to a Workspace defined in a Task. +type WorkspaceUsage struct { + // Name is the name of the workspace this Step or Sidecar wants access to. + Name string `json:"name"` + // MountPath is the path that the workspace should be mounted to inside the Step or Sidecar, + // overriding any MountPath specified in the Task's WorkspaceDeclaration. + MountPath string `json:"mountPath"` +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go new file mode 100644 index 0000000000..be852bb46d --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go @@ -0,0 +1,92 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" +) + +// allVolumeSourceFields is a list of all the volume source field paths that a +// WorkspaceBinding may include. +var allVolumeSourceFields = []string{ + "persistentvolumeclaim", + "volumeclaimtemplate", + "emptydir", + "configmap", + "secret", +} + +// Validate looks at the Volume provided in wb and makes sure that it is valid. 
+// This means that only one VolumeSource can be specified, and also that the +// supported VolumeSource is itself valid. +func (b *WorkspaceBinding) Validate(context.Context) *apis.FieldError { + if equality.Semantic.DeepEqual(b, &WorkspaceBinding{}) || b == nil { + return apis.ErrMissingField(apis.CurrentField) + } + + numSources := b.numSources() + + if numSources > 1 { + return apis.ErrMultipleOneOf(allVolumeSourceFields...) + } + + if numSources == 0 { + return apis.ErrMissingOneOf(allVolumeSourceFields...) + } + + // For a PersistentVolumeClaim to work, you must at least provide the name of the PVC to use. + if b.PersistentVolumeClaim != nil && b.PersistentVolumeClaim.ClaimName == "" { + return apis.ErrMissingField("persistentvolumeclaim.claimname") + } + + // For a ConfigMap to work, you must provide the name of the ConfigMap to use. + if b.ConfigMap != nil && b.ConfigMap.LocalObjectReference.Name == "" { + return apis.ErrMissingField("configmap.name") + } + + // For a Secret to work, you must provide the name of the Secret to use. + if b.Secret != nil && b.Secret.SecretName == "" { + return apis.ErrMissingField("secret.secretName") + } + + return nil +} + +// numSources returns the total number of volume sources that this WorkspaceBinding +// has been configured with. 
+func (b *WorkspaceBinding) numSources() int { + n := 0 + if b.VolumeClaimTemplate != nil { + n++ + } + if b.PersistentVolumeClaim != nil { + n++ + } + if b.EmptyDir != nil { + n++ + } + if b.ConfigMap != nil { + n++ + } + if b.Secret != nil { + n++ + } + return n +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f618a68748 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go @@ -0,0 +1,660 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ArrayOrString) DeepCopyInto(out *ArrayOrString) { + *out = *in + if in.ArrayVal != nil { + in, out := &in.ArrayVal, &out.ArrayVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ObjectVal != nil { + in, out := &in.ObjectVal, &out.ObjectVal + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArrayOrString. +func (in *ArrayOrString) DeepCopy() *ArrayOrString { + if in == nil { + return nil + } + out := new(ArrayOrString) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Param) DeepCopyInto(out *Param) { + *out = *in + in.Value.DeepCopyInto(&out.Value) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Param. +func (in *Param) DeepCopy() *Param { + if in == nil { + return nil + } + out := new(Param) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParamSpec) DeepCopyInto(out *ParamSpec) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]PropertySpec, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(ArrayOrString) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamSpec. +func (in *ParamSpec) DeepCopy() *ParamSpec { + if in == nil { + return nil + } + out := new(ParamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipelineWorkspaceDeclaration) DeepCopyInto(out *PipelineWorkspaceDeclaration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineWorkspaceDeclaration. +func (in *PipelineWorkspaceDeclaration) DeepCopy() *PipelineWorkspaceDeclaration { + if in == nil { + return nil + } + out := new(PipelineWorkspaceDeclaration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropertySpec) DeepCopyInto(out *PropertySpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertySpec. +func (in *PropertySpec) DeepCopy() *PropertySpec { + if in == nil { + return nil + } + out := new(PropertySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverParam) DeepCopyInto(out *ResolverParam) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverParam. +func (in *ResolverParam) DeepCopy() *ResolverParam { + if in == nil { + return nil + } + out := new(ResolverParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverRef) DeepCopyInto(out *ResolverRef) { + *out = *in + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = make([]ResolverParam, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverRef. 
+func (in *ResolverRef) DeepCopy() *ResolverRef { + if in == nil { + return nil + } + out := new(ResolverRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sidecar) DeepCopyInto(out *Sidecar) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]corev1.ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]corev1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]corev1.VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(corev1.Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(corev1.Probe) + (*in).DeepCopyInto(*out) + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(corev1.Probe) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(corev1.Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { 
+ in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]WorkspaceUsage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sidecar. +func (in *Sidecar) DeepCopy() *Sidecar { + if in == nil { + return nil + } + out := new(Sidecar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Step) DeepCopyInto(out *Step) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]corev1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]corev1.VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = 
make([]WorkspaceUsage, len(*in)) + copy(*out, *in) + } + if in.StdoutConfig != nil { + in, out := &in.StdoutConfig, &out.StdoutConfig + *out = new(StepOutputConfig) + **out = **in + } + if in.StderrConfig != nil { + in, out := &in.StderrConfig, &out.StderrConfig + *out = new(StepOutputConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step. +func (in *Step) DeepCopy() *Step { + if in == nil { + return nil + } + out := new(Step) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepOutputConfig) DeepCopyInto(out *StepOutputConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepOutputConfig. +func (in *StepOutputConfig) DeepCopy() *StepOutputConfig { + if in == nil { + return nil + } + out := new(StepOutputConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepTemplate) DeepCopyInto(out *StepTemplate) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]corev1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]corev1.VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.SecurityContext) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepTemplate. +func (in *StepTemplate) DeepCopy() *StepTemplate { + if in == nil { + return nil + } + out := new(StepTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Task) DeepCopyInto(out *Task) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Task. 
+func (in *Task) DeepCopy() *Task { + if in == nil { + return nil + } + out := new(Task) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Task) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskList) DeepCopyInto(out *TaskList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Task, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskList. +func (in *TaskList) DeepCopy() *TaskList { + if in == nil { + return nil + } + out := new(TaskList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TaskList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskResult) DeepCopyInto(out *TaskResult) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]PropertySpec, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskResult. +func (in *TaskResult) DeepCopy() *TaskResult { + if in == nil { + return nil + } + out := new(TaskResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *TaskRunResult) DeepCopyInto(out *TaskRunResult) { + *out = *in + in.Value.DeepCopyInto(&out.Value) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunResult. +func (in *TaskRunResult) DeepCopy() *TaskRunResult { + if in == nil { + return nil + } + out := new(TaskRunResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { + *out = *in + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]ParamSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]Step, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StepTemplate != nil { + in, out := &in.StepTemplate, &out.StepTemplate + *out = new(StepTemplate) + (*in).DeepCopyInto(*out) + } + if in.Sidecars != nil { + in, out := &in.Sidecars, &out.Sidecars + *out = make([]Sidecar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]WorkspaceDeclaration, len(*in)) + copy(*out, *in) + } + if in.Results != nil { + in, out := &in.Results, &out.Results + *out = make([]TaskResult, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskSpec. 
+func (in *TaskSpec) DeepCopy() *TaskSpec { + if in == nil { + return nil + } + out := new(TaskSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceBinding) DeepCopyInto(out *WorkspaceBinding) { + *out = *in + if in.VolumeClaimTemplate != nil { + in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate + *out = new(corev1.PersistentVolumeClaim) + (*in).DeepCopyInto(*out) + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(corev1.PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(corev1.EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(corev1.ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceBinding. +func (in *WorkspaceBinding) DeepCopy() *WorkspaceBinding { + if in == nil { + return nil + } + out := new(WorkspaceBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceDeclaration) DeepCopyInto(out *WorkspaceDeclaration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceDeclaration. +func (in *WorkspaceDeclaration) DeepCopy() *WorkspaceDeclaration { + if in == nil { + return nil + } + out := new(WorkspaceDeclaration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *WorkspacePipelineTaskBinding) DeepCopyInto(out *WorkspacePipelineTaskBinding) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspacePipelineTaskBinding. +func (in *WorkspacePipelineTaskBinding) DeepCopy() *WorkspacePipelineTaskBinding { + if in == nil { + return nil + } + out := new(WorkspacePipelineTaskBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceUsage) DeepCopyInto(out *WorkspaceUsage) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceUsage. +func (in *WorkspaceUsage) DeepCopy() *WorkspaceUsage { + if in == nil { + return nil + } + out := new(WorkspaceUsage) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_conversion.go deleted file mode 100644 index 976add0d50..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_conversion.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "knative.dev/pkg/apis" -) - -var _ apis.Convertible = (*ClusterTask)(nil) - -// ConvertTo implements api.Convertible -func (ct *ClusterTask) ConvertTo(ctx context.Context, obj apis.Convertible) error { - switch sink := obj.(type) { - case *v1beta1.ClusterTask: - sink.ObjectMeta = ct.ObjectMeta - return ct.Spec.ConvertTo(ctx, &sink.Spec) - default: - return fmt.Errorf("unknown version, got: %T", sink) - } -} - -// ConvertFrom implements api.Convertible -func (ct *ClusterTask) ConvertFrom(ctx context.Context, obj apis.Convertible) error { - switch source := obj.(type) { - case *v1beta1.ClusterTask: - ct.ObjectMeta = source.ObjectMeta - return ct.Spec.ConvertFrom(ctx, &source.Spec) - default: - return fmt.Errorf("unknown version, got: %T", ct) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_types.go deleted file mode 100644 index 1b2ebd2785..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_types.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:noStatus -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterTask is a Task with a cluster scope. ClusterTasks are used to -// represent Tasks that should be publicly addressable from any namespace in the -// cluster. -type ClusterTask struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec holds the desired state of the Task from the client - // +optional - Spec TaskSpec `json:"spec,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterTaskList contains a list of ClusterTask. -type ClusterTaskList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []ClusterTask `json:"items"` -} - -// TaskSpec returns the ClusterTask's Spec. -func (t *ClusterTask) TaskSpec() TaskSpec { - return t.Spec -} - -// TaskMetadata returns the ObjectMeta for the ClusterTask. -func (t *ClusterTask) TaskMetadata() metav1.ObjectMeta { - return t.ObjectMeta -} - -// Copy returns a DeepCopy of the ClusterTask. -func (t *ClusterTask) Copy() TaskObject { - return t.DeepCopy() -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_validation.go deleted file mode 100644 index 77adb5ec12..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_validation.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - - "github.com/tektoncd/pipeline/pkg/apis/validate" - "knative.dev/pkg/apis" -) - -var _ apis.Validatable = (*ClusterTask)(nil) - -// Validate performs validation of the metadata and spec of this ClusterTask. -func (t *ClusterTask) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(t.GetObjectMeta()); err != nil { - return err.ViaField("metadata") - } - if apis.IsInDelete(ctx) { - return nil - } - return t.Spec.Validate(ctx) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_types.go deleted file mode 100644 index ad4ab611d6..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_types.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2019-2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "knative.dev/pkg/apis" -) - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Condition declares a step that is used to gate the execution of a Task in a Pipeline. -// A condition execution (ConditionCheck) evaluates to either true or false -// +k8s:openapi-gen=true -type Condition struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata"` - - // Spec holds the desired state of the Condition from the client - // +optional - Spec ConditionSpec `json:"spec"` -} - -// ConditionCheckStatus defines the observed state of ConditionCheck -type ConditionCheckStatus = v1beta1.ConditionCheckStatus - -// ConditionCheckStatusFields holds the fields of ConfigurationCheck's status. -// This is defined separately and inlined so that other types can readily -// consume these fields via duck typing. -type ConditionCheckStatusFields = v1beta1.ConditionCheckStatusFields - -// ConditionSpec defines the desired state of the Condition -type ConditionSpec struct { - // Check declares container whose exit code determines where a condition is true or false - Check Step `json:"check,omitempty"` - - // Description is a user-facing description of the condition that may be - // used to populate a UI. - // +optional - Description string `json:"description,omitempty"` - - // Params is an optional set of parameters which must be supplied by the user when a Condition - // is evaluated - // +optional - Params []ParamSpec `json:"params,omitempty"` - - // Resources is a list of the ConditionResources required to run the condition. - // +optional - Resources []ResourceDeclaration `json:"resources,omitempty"` -} - -// ConditionCheck represents a single evaluation of a Condition step. 
-type ConditionCheck TaskRun - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConditionList contains a list of Conditions -type ConditionList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []Condition `json:"items"` -} - -// NewConditionCheck creates a new ConditionCheck from a given TaskRun. -func NewConditionCheck(tr *TaskRun) *ConditionCheck { - if tr == nil { - return nil - } - - cc := ConditionCheck(*tr) - return &cc -} - -// IsDone returns true if the ConditionCheck's status indicates that it is done. -func (cc *ConditionCheck) IsDone() bool { - return !cc.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() -} - -// IsSuccessful returns true if the ConditionCheck's status indicates that it is done. -func (cc *ConditionCheck) IsSuccessful() bool { - return cc.Status.GetCondition(apis.ConditionSucceeded).IsTrue() -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_validation.go deleted file mode 100644 index daa8b75fc8..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_validation.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/validate" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/util/validation" - "knative.dev/pkg/apis" -) - -var _ apis.Validatable = (*Condition)(nil) - -// Validate performs validation on the Condition's metadata and spec -func (c Condition) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(c.GetObjectMeta()); err != nil { - return err.ViaField("metadata") - } - if apis.IsInDelete(ctx) { - return nil - } - return c.Spec.Validate(ctx).ViaField("Spec") -} - -// Validate makes sure the ConditionSpec is actually configured and that its name is a valid DNS label, -// and finally validates its steps. -func (cs *ConditionSpec) Validate(ctx context.Context) *apis.FieldError { - if equality.Semantic.DeepEqual(cs, ConditionSpec{}) { - return apis.ErrMissingField(apis.CurrentField) - } - - // Validate condition check name - if errs := validation.IsDNS1123Label(cs.Check.Name); cs.Check.Name != "" && len(errs) > 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("invalid value %q", cs.Check.Name), - Paths: []string{"Check.name"}, - Details: "Condition check name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - } - } - - return validateSteps([]Step{cs.Check}).ViaField("Check") -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/container_replacements.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/container_replacements.go deleted file mode 100644 index 63606afc6a..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/container_replacements.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - Copyright 2019 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/substitution" - corev1 "k8s.io/api/core/v1" -) - -// ApplyContainerReplacements replaces ${...} expressions in the container's name, image, args, env, command, workingDir, -// and volumes. -func ApplyContainerReplacements(step *corev1.Container, stringReplacements map[string]string, arrayReplacements map[string][]string) { - step.Name = substitution.ApplyReplacements(step.Name, stringReplacements) - step.Image = substitution.ApplyReplacements(step.Image, stringReplacements) - - // Use ApplyArrayReplacements here, as additional args may be added via an array parameter. - var newArgs []string - for _, a := range step.Args { - newArgs = append(newArgs, substitution.ApplyArrayReplacements(a, stringReplacements, arrayReplacements)...) 
- } - step.Args = newArgs - - for ie, e := range step.Env { - step.Env[ie].Value = substitution.ApplyReplacements(e.Value, stringReplacements) - if step.Env[ie].ValueFrom != nil { - if e.ValueFrom.SecretKeyRef != nil { - step.Env[ie].ValueFrom.SecretKeyRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.ValueFrom.SecretKeyRef.LocalObjectReference.Name, stringReplacements) - step.Env[ie].ValueFrom.SecretKeyRef.Key = substitution.ApplyReplacements(e.ValueFrom.SecretKeyRef.Key, stringReplacements) - } - if e.ValueFrom.ConfigMapKeyRef != nil { - step.Env[ie].ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name, stringReplacements) - step.Env[ie].ValueFrom.ConfigMapKeyRef.Key = substitution.ApplyReplacements(e.ValueFrom.ConfigMapKeyRef.Key, stringReplacements) - } - } - } - - for ie, e := range step.EnvFrom { - step.EnvFrom[ie].Prefix = substitution.ApplyReplacements(e.Prefix, stringReplacements) - if e.ConfigMapRef != nil { - step.EnvFrom[ie].ConfigMapRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.ConfigMapRef.LocalObjectReference.Name, stringReplacements) - } - if e.SecretRef != nil { - step.EnvFrom[ie].SecretRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.SecretRef.LocalObjectReference.Name, stringReplacements) - } - } - step.WorkingDir = substitution.ApplyReplacements(step.WorkingDir, stringReplacements) - - // Use ApplyArrayReplacements here, as additional commands may be added via an array parameter. - var newCommand []string - for _, c := range step.Command { - newCommand = append(newCommand, substitution.ApplyArrayReplacements(c, stringReplacements, arrayReplacements)...) 
- } - step.Command = newCommand - - for iv, v := range step.VolumeMounts { - step.VolumeMounts[iv].Name = substitution.ApplyReplacements(v.Name, stringReplacements) - step.VolumeMounts[iv].MountPath = substitution.ApplyReplacements(v.MountPath, stringReplacements) - step.VolumeMounts[iv].SubPath = substitution.ApplyReplacements(v.SubPath, stringReplacements) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/conversion_error.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/conversion_error.go deleted file mode 100644 index 2ebdba1a1f..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/conversion_error.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2020 The Tekton Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "knative.dev/pkg/apis" -) - -const ( - // ConditionTypeConvertible is a Warning condition that is set on - // resources when they cannot be converted to warn of a forthcoming - // breakage. 
- ConditionTypeConvertible apis.ConditionType = v1beta1.ConditionTypeConvertible - // ConversionErrorFieldNotAvailableMsg Conversion Error message for a field not available in v1alpha1 - ConversionErrorFieldNotAvailableMsg = "the specified field/section is not available in v1alpha1" -) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/param_types.go deleted file mode 100644 index 69b21947c6..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/param_types.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" -) - -// ParamSpec defines arbitrary parameters needed beyond typed inputs (such as -// resources). Parameter values are provided by users as inputs on a TaskRun -// or PipelineRun. -type ParamSpec = v1beta1.ParamSpec - -// Param declares an ArrayOrString to use for the parameter called name. -type Param = v1beta1.Param - -// ParamType indicates the type of an input parameter; -// Used to distinguish between a single string and an array of strings. -type ParamType = v1beta1.ParamType - -// Valid ParamTypes: -const ( - ParamTypeString ParamType = v1beta1.ParamTypeString - ParamTypeArray ParamType = v1beta1.ParamTypeArray -) - -// AllParamTypes can be used for ParamType validation. 
-var AllParamTypes = v1beta1.AllParamTypes - -// ArrayOrString is modeled after IntOrString in kubernetes/apimachinery: - -// ArrayOrString is a type that can hold a single string or string array. -// Used in JSON unmarshalling so that a single JSON field can accept -// either an individual string or an array of strings. -type ArrayOrString = v1beta1.ArrayOrString diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_conversion.go deleted file mode 100644 index 06b5e60a99..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_conversion.go +++ /dev/null @@ -1,177 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "knative.dev/pkg/apis" -) - -const finallyAnnotationKey = "tekton.dev/v1beta1Finally" - -var _ apis.Convertible = (*Pipeline)(nil) - -// ConvertTo implements api.Convertible -func (p *Pipeline) ConvertTo(ctx context.Context, obj apis.Convertible) error { - switch sink := obj.(type) { - case *v1beta1.Pipeline: - sink.ObjectMeta = p.ObjectMeta - if err := p.Spec.ConvertTo(ctx, &sink.Spec); err != nil { - return err - } - if err := deserializeFinally(&sink.ObjectMeta, &sink.Spec); err != nil { - return err - } - if err := v1beta1.ValidatePipelineTasks(ctx, sink.Spec.Tasks, sink.Spec.Finally); err != nil { - return fmt.Errorf("error converting finally annotation into beta field: %w", err) - } - default: - return fmt.Errorf("unknown version, got: %T", sink) - } - return nil -} - -// ConvertTo implements api.Convertible -func (ps *PipelineSpec) ConvertTo(ctx context.Context, sink *v1beta1.PipelineSpec) error { - sink.Resources = ps.Resources - sink.Params = ps.Params - sink.Workspaces = ps.Workspaces - sink.Description = ps.Description - if len(ps.Tasks) > 0 { - sink.Tasks = make([]v1beta1.PipelineTask, len(ps.Tasks)) - for i := range ps.Tasks { - if err := ps.Tasks[i].ConvertTo(ctx, &sink.Tasks[i]); err != nil { - return err - } - } - } - sink.Finally = nil - return nil -} - -// ConvertTo implements api.Convertible -func (pt *PipelineTask) ConvertTo(ctx context.Context, sink *v1beta1.PipelineTask) error { - sink.Name = pt.Name - sink.TaskRef = pt.TaskRef - if pt.TaskSpec != nil { - sink.TaskSpec = &v1beta1.EmbeddedTask{TaskSpec: v1beta1.TaskSpec{}} - if err := pt.TaskSpec.ConvertTo(ctx, &sink.TaskSpec.TaskSpec); err != nil { - return err - } - } - sink.Conditions = pt.Conditions - sink.Retries = pt.Retries - sink.RunAfter = pt.RunAfter - sink.Resources = pt.Resources - sink.Params = 
pt.Params - sink.Workspaces = pt.Workspaces - sink.Timeout = pt.Timeout - return nil -} - -// ConvertFrom implements api.Convertible -func (p *Pipeline) ConvertFrom(ctx context.Context, obj apis.Convertible) error { - switch source := obj.(type) { - case *v1beta1.Pipeline: - p.ObjectMeta = source.ObjectMeta - if err := serializeFinally(&p.ObjectMeta, source.Spec.Finally); err != nil { - return err - } - return p.Spec.ConvertFrom(ctx, source.Spec) - default: - return fmt.Errorf("unknown version, got: %T", p) - } -} - -// ConvertFrom implements api.Convertible -func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source v1beta1.PipelineSpec) error { - ps.Resources = source.Resources - ps.Params = source.Params - ps.Workspaces = source.Workspaces - ps.Description = source.Description - if len(source.Tasks) > 0 { - ps.Tasks = make([]PipelineTask, len(source.Tasks)) - for i := range source.Tasks { - if err := ps.Tasks[i].ConvertFrom(ctx, source.Tasks[i]); err != nil { - return err - } - } - } - return nil -} - -// ConvertFrom implements api.Convertible -func (pt *PipelineTask) ConvertFrom(ctx context.Context, source v1beta1.PipelineTask) error { - pt.Name = source.Name - pt.TaskRef = source.TaskRef - if source.TaskSpec != nil { - pt.TaskSpec = &TaskSpec{} - if err := pt.TaskSpec.ConvertFrom(ctx, &source.TaskSpec.TaskSpec); err != nil { - return err - } - } - pt.Conditions = source.Conditions - pt.Retries = source.Retries - pt.RunAfter = source.RunAfter - pt.Resources = source.Resources - pt.Params = source.Params - pt.Workspaces = source.Workspaces - pt.Timeout = source.Timeout - return nil -} - -// serializeFinally serializes a list of Finally Tasks to the annotations -// of an object's metadata section. This can then be used to re-instantiate -// the Finally Tasks when converting back up to v1beta1 and beyond. 
-func serializeFinally(meta *metav1.ObjectMeta, finally []v1beta1.PipelineTask) error { - if len(finally) != 0 { - b, err := json.Marshal(finally) - if err != nil { - return err - } - if meta.Annotations == nil { - meta.Annotations = make(map[string]string) - } - meta.Annotations[finallyAnnotationKey] = string(b) - } - return nil -} - -// deserializeFinally populates a PipelineSpec's Finally list -// from an annotation found on resources that have been previously -// converted down from v1beta1 to v1alpha1. -func deserializeFinally(meta *metav1.ObjectMeta, spec *v1beta1.PipelineSpec) error { - if meta.Annotations != nil { - if str, ok := meta.Annotations[finallyAnnotationKey]; ok { - finally := []v1beta1.PipelineTask{} - if err := json.Unmarshal([]byte(str), &finally); err != nil { - return fmt.Errorf("error converting finally annotation into beta field: %w", err) - } - delete(meta.Annotations, finallyAnnotationKey) - if len(meta.Annotations) == 0 { - meta.Annotations = nil - } - spec.Finally = finally - } - } - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_defaults.go deleted file mode 100644 index 2bb226c034..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_defaults.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - - "knative.dev/pkg/apis" -) - -var _ apis.Defaultable = (*Pipeline)(nil) - -// SetDefaults sets default values on the Pipeline's Spec -func (p *Pipeline) SetDefaults(ctx context.Context) { - p.Spec.SetDefaults(ctx) -} - -// SetDefaults sets default values for the PipelineSpec's Params and Tasks -func (ps *PipelineSpec) SetDefaults(ctx context.Context) { - for _, pt := range ps.Tasks { - if pt.TaskRef != nil { - if pt.TaskRef.Kind == "" { - pt.TaskRef.Kind = NamespacedTaskKind - } - } - if pt.TaskSpec != nil { - pt.TaskSpec.SetDefaults(ctx) - } - } - for i := range ps.Params { - ps.Params[i].SetDefaults(ctx) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_resource_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_resource_types.go deleted file mode 100644 index d35e41bf1b..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_resource_types.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - resource "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" -) - -// PipelineResourceType represents the type of endpoint the pipelineResource is, so that the -// controller will know this pipelineResource should be fetched and optionally what -// additional metatdata should be provided for it. 
-type PipelineResourceType = resource.PipelineResourceType - -const ( - // PipelineResourceTypeGit indicates that this source is a Git repo. - PipelineResourceTypeGit PipelineResourceType = resource.PipelineResourceTypeGit - - // PipelineResourceTypeStorage indicates that this source is a storage blob resource. - PipelineResourceTypeStorage PipelineResourceType = resource.PipelineResourceTypeStorage - - // PipelineResourceTypeImage indicates that this source is a docker Image. - PipelineResourceTypeImage PipelineResourceType = resource.PipelineResourceTypeImage - - // PipelineResourceTypeCluster indicates that this source is a k8s cluster Image. - PipelineResourceTypeCluster PipelineResourceType = resource.PipelineResourceTypeCluster - - // PipelineResourceTypePullRequest indicates that this source is a SCM Pull Request. - PipelineResourceTypePullRequest PipelineResourceType = resource.PipelineResourceTypePullRequest - - // PipelineResourceTypeCloudEvent indicates that this source is a cloud event URI - PipelineResourceTypeCloudEvent PipelineResourceType = resource.PipelineResourceTypeCloudEvent -) - -// AllResourceTypes can be used for validation to check if a provided Resource type is one of the known types. -var AllResourceTypes = resource.AllResourceTypes - -// PipelineResource describes a resource that is an input to or output from a -// Task. -// -type PipelineResource = resource.PipelineResource - -// PipelineResourceSpec defines an individual resources used in the pipeline. -type PipelineResourceSpec = resource.PipelineResourceSpec - -// SecretParam indicates which secret can be used to populate a field of the resource -type SecretParam = resource.SecretParam - -// ResourceParam declares a string value to use for the parameter called Name, and is used in -// the specific context of PipelineResources. 
-type ResourceParam = resource.ResourceParam - -// ResourceDeclaration defines an input or output PipelineResource declared as a requirement -// by another type such as a Task or Condition. The Name field will be used to refer to these -// PipelineResources within the type's definition, and when provided as an Input, the Name will be the -// path to the volume mounted containing this PipelineResource as an input (e.g. -// an input Resource named `workspace` will be mounted at `/workspace`). -type ResourceDeclaration = resource.ResourceDeclaration - -// PipelineResourceList contains a list of PipelineResources -type PipelineResourceList = resource.PipelineResourceList diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_types.go deleted file mode 100644 index e02a19d862..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_types.go +++ /dev/null @@ -1,259 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// PipelineSpec defines the desired state of Pipeline. -type PipelineSpec struct { - // Description is a user-facing description of the pipeline that may be - // used to populate a UI. 
- // +optional - Description string `json:"description,omitempty"` - // Resources declares the names and types of the resources given to the - // Pipeline's tasks as inputs and outputs. - Resources []PipelineDeclaredResource `json:"resources,omitempty"` - // Tasks declares the graph of Tasks that execute when this Pipeline is run. - Tasks []PipelineTask `json:"tasks,omitempty"` - // Params declares a list of input parameters that must be supplied when - // this Pipeline is run. - Params []ParamSpec `json:"params,omitempty"` - // Workspaces declares a set of named workspaces that are expected to be - // provided by a PipelineRun. - // +optional - Workspaces []PipelineWorkspaceDeclaration `json:"workspaces,omitempty"` - // Results are values that this pipeline can output once run - // +optional - Results []PipelineResult `json:"results,omitempty"` -} - -// PipelineResult used to describe the results of a pipeline -type PipelineResult = v1beta1.PipelineResult - -// Check that Pipeline may be validated and defaulted. - -// TaskKind defines the type of Task used by the pipeline. -type TaskKind = v1beta1.TaskKind - -const ( - // NamespacedTaskKind indicates that the task type has a namepace scope. - NamespacedTaskKind TaskKind = v1beta1.NamespacedTaskKind - // ClusterTaskKind indicates that task type has a cluster scope. - ClusterTaskKind TaskKind = v1beta1.ClusterTaskKind -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient:noStatus - -// Pipeline describes a list of Tasks to execute. It expresses how outputs -// of tasks feed into inputs of subsequent tasks. -// +k8s:openapi-gen=true -type Pipeline struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec holds the desired state of the Pipeline from the client - // +optional - Spec PipelineSpec `json:"spec"` - - // Status is deprecated. 
- // It usually is used to communicate the observed state of the Pipeline from - // the controller, but was unused as there is no controller for Pipeline. - // +optional - Status *PipelineStatus `json:"status,omitempty"` -} - -// PipelineStatus does not contain anything because Pipelines on their own -// do not have a status, they just hold data which is later used by a -// PipelineRun. -// Deprecated -type PipelineStatus struct { -} - -// PipelineMetadata returns the Pipeline's ObjectMeta, implementing PipelineObject. -func (p *Pipeline) PipelineMetadata() metav1.ObjectMeta { - return p.ObjectMeta -} - -// PipelineSpec returns the Pipeline's Spec, implementing PipelineObject. -func (p *Pipeline) PipelineSpec() PipelineSpec { - return p.Spec -} - -// Copy returns a deep copy of the Pipeline, implementing PipelineObject. -func (p *Pipeline) Copy() PipelineObject { - return p.DeepCopy() -} - -// PipelineTask defines a task in a Pipeline, passing inputs from both -// Params and from the output of previous tasks. -type PipelineTask struct { - // Name is the name of this task within the context of a Pipeline. Name is - // used as a coordinate with the `from` and `runAfter` fields to establish - // the execution order of tasks relative to one another. - Name string `json:"name,omitempty"` - - // TaskRef is a reference to a task definition. - // +optional - TaskRef *TaskRef `json:"taskRef,omitempty"` - - // TaskSpec is specification of a task - // +optional - TaskSpec *TaskSpec `json:"taskSpec,omitempty"` - - // Conditions is a list of conditions that need to be true for the task to run - // +optional - Conditions []PipelineTaskCondition `json:"conditions,omitempty"` - - // Retries represents how many times this task should be retried in case of task failure: ConditionSucceeded set to False - // +optional - Retries int `json:"retries,omitempty"` - - // RunAfter is the list of PipelineTask names that should be executed before - // this Task executes. 
(Used to force a specific ordering in graph execution.) - // +optional - RunAfter []string `json:"runAfter,omitempty"` - - // Resources declares the resources given to this task as inputs and - // outputs. - // +optional - Resources *PipelineTaskResources `json:"resources,omitempty"` - // Parameters declares parameters passed to this task. - // +optional - Params []Param `json:"params,omitempty"` - - // Workspaces maps workspaces from the pipeline spec to the workspaces - // declared in the Task. - // +optional - Workspaces []WorkspacePipelineTaskBinding `json:"workspaces,omitempty"` - - // Time after which the TaskRun times out. Defaults to 1 hour. - // Specified TaskRun timeout should be less than 24h. - // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration - // +optional - Timeout *metav1.Duration `json:"timeout,omitempty"` -} - -// HashKey is used as the key for this PipelineTask in the DAG -func (pt PipelineTask) HashKey() string { - return pt.Name -} - -// Deps returns all other PipelineTask dependencies of this PipelineTask, based on resource usage or ordering -func (pt PipelineTask) Deps() []string { - deps := []string{} - deps = append(deps, pt.RunAfter...) - if pt.Resources != nil { - for _, rd := range pt.Resources.Inputs { - deps = append(deps, rd.From...) - } - } - // Add any dependents from conditional resources. - for _, cond := range pt.Conditions { - for _, rd := range cond.Resources { - deps = append(deps, rd.From...) 
- } - for _, param := range cond.Params { - expressions, ok := v1beta1.GetVarSubstitutionExpressionsForParam(param) - if ok { - resultRefs := v1beta1.NewResultRefs(expressions) - for _, resultRef := range resultRefs { - deps = append(deps, resultRef.PipelineTask) - } - } - } - } - // Add any dependents from task results - for _, param := range pt.Params { - expressions, ok := v1beta1.GetVarSubstitutionExpressionsForParam(param) - if ok { - resultRefs := v1beta1.NewResultRefs(expressions) - for _, resultRef := range resultRefs { - deps = append(deps, resultRef.PipelineTask) - } - } - } - - return deps -} - -// PipelineTaskList is a list of PipelineTasks -type PipelineTaskList []PipelineTask - -// Items returns a slice of all tasks in the PipelineTaskList, converted to dag.Tasks -func (l PipelineTaskList) Items() []dag.Task { - tasks := []dag.Task{} - for _, t := range l { - tasks = append(tasks, dag.Task(t)) - } - return tasks -} - -// Deps returns a map with key as name of a pipelineTask and value as a list of its dependencies -func (l PipelineTaskList) Deps() map[string][]string { - deps := map[string][]string{} - for _, pt := range l { - deps[pt.HashKey()] = pt.Deps() - } - return deps -} - -// PipelineTaskParam is used to provide arbitrary string parameters to a Task. -type PipelineTaskParam = v1beta1.PipelineTaskParam - -// PipelineTaskCondition allows a PipelineTask to declare a Condition to be evaluated before -// the Task is run. -type PipelineTaskCondition = v1beta1.PipelineTaskCondition - -// PipelineDeclaredResource is used by a Pipeline to declare the types of the -// PipelineResources that it will required to run and names which can be used to -// refer to these PipelineResources in PipelineTaskResourceBindings. -type PipelineDeclaredResource = v1beta1.PipelineDeclaredResource - -// PipelineTaskResources allows a Pipeline to declare how its DeclaredPipelineResources -// should be provided to a Task as its inputs and outputs. 
-type PipelineTaskResources = v1beta1.PipelineTaskResources - -// PipelineTaskInputResource maps the name of a declared PipelineResource input -// dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources -// that should be used. This input may come from a previous task. -type PipelineTaskInputResource = v1beta1.PipelineTaskInputResource - -// PipelineTaskOutputResource maps the name of a declared PipelineResource output -// dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources -// that should be used. -type PipelineTaskOutputResource = v1beta1.PipelineTaskOutputResource - -// TaskRef can be used to refer to a specific instance of a task. -// Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64 -type TaskRef = v1beta1.TaskRef - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PipelineList contains a list of Pipeline -type PipelineList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []Pipeline `json:"items"` -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go deleted file mode 100644 index 7492a4d0c2..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go +++ /dev/null @@ -1,335 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "fmt" - "strings" - - "github.com/tektoncd/pipeline/pkg/apis/validate" - "github.com/tektoncd/pipeline/pkg/list" - "github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag" - "github.com/tektoncd/pipeline/pkg/substitution" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "knative.dev/pkg/apis" -) - -var _ apis.Validatable = (*Pipeline)(nil) - -// Validate checks that the Pipeline structure is valid but does not validate -// that any references resources exist, that is done at run time. 
-func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(p.GetObjectMeta()); err != nil { - return err.ViaField("metadata") - } - if apis.IsInDelete(ctx) { - return nil - } - return p.Spec.Validate(ctx) -} - -func validateDeclaredResources(ps *PipelineSpec) error { - encountered := sets.NewString() - for _, r := range ps.Resources { - if encountered.Has(r.Name) { - return fmt.Errorf("resource with name %q appears more than once", r.Name) - } - encountered.Insert(r.Name) - } - required := []string{} - for _, t := range ps.Tasks { - if t.Resources != nil { - for _, input := range t.Resources.Inputs { - required = append(required, input.Resource) - } - for _, output := range t.Resources.Outputs { - required = append(required, output.Resource) - } - } - - for _, condition := range t.Conditions { - for _, cr := range condition.Resources { - required = append(required, cr.Resource) - } - } - } - - provided := make([]string, 0, len(ps.Resources)) - for _, resource := range ps.Resources { - provided = append(provided, resource.Name) - } - missing := list.DiffLeft(required, provided) - if len(missing) > 0 { - return fmt.Errorf("pipeline declared resources didn't match usage in Tasks: Didn't provide required values: %s", missing) - } - - return nil -} - -func isOutput(outputs []PipelineTaskOutputResource, resource string) bool { - for _, output := range outputs { - if output.Resource == resource { - return true - } - } - return false -} - -// validateFrom ensures that the `from` values make sense: that they rely on values from Tasks -// that ran previously, and that the PipelineResource is actually an output of the Task it should come from. 
-func validateFrom(tasks []PipelineTask) *apis.FieldError { - taskOutputs := map[string][]PipelineTaskOutputResource{} - for _, task := range tasks { - var to []PipelineTaskOutputResource - if task.Resources != nil { - to = make([]PipelineTaskOutputResource, len(task.Resources.Outputs)) - copy(to, task.Resources.Outputs) - } - taskOutputs[task.Name] = to - } - for _, t := range tasks { - inputResources := []PipelineTaskInputResource{} - if t.Resources != nil { - inputResources = append(inputResources, t.Resources.Inputs...) - } - - for _, c := range t.Conditions { - inputResources = append(inputResources, c.Resources...) - } - - for _, rd := range inputResources { - for _, pt := range rd.From { - outputs, found := taskOutputs[pt] - if !found { - return apis.ErrInvalidValue(fmt.Sprintf("expected resource %s to be from task %s, but task %s doesn't exist", rd.Resource, pt, pt), - "spec.tasks.resources.inputs.from") - } - if !isOutput(outputs, rd.Resource) { - return apis.ErrInvalidValue(fmt.Sprintf("the resource %s from %s must be an output but is an input", rd.Resource, pt), - "spec.tasks.resources.inputs.from") - } - } - } - } - return nil -} - -// validateGraph ensures the Pipeline's dependency Graph (DAG) make sense: that there is no dependency -// cycle or that they rely on values from Tasks that ran previously, and that the PipelineResource -// is actually an output of the Task it should come from. -func validateGraph(tasks []PipelineTask) error { - if _, err := dag.Build(PipelineTaskList(tasks), PipelineTaskList(tasks).Deps()); err != nil { - return err - } - return nil -} - -// Validate checks that taskNames in the Pipeline are valid and that the graph -// of Tasks expressed in the Pipeline makes sense. 
-func (ps *PipelineSpec) Validate(ctx context.Context) *apis.FieldError { - if equality.Semantic.DeepEqual(ps, &PipelineSpec{}) { - return apis.ErrGeneric("expected at least one, got none", "spec.description", "spec.params", "spec.resources", "spec.tasks", "spec.workspaces") - } - - // PipelineTask must have a valid unique label and at least one of taskRef or taskSpec should be specified - if err := validatePipelineTasks(ctx, ps.Tasks); err != nil { - return err - } - - // All declared resources should be used, and the Pipeline shouldn't try to use any resources - // that aren't declared - if err := validateDeclaredResources(ps); err != nil { - return apis.ErrInvalidValue(err.Error(), "spec.resources") - } - - // The from values should make sense - if err := validateFrom(ps.Tasks); err != nil { - return err - } - - // Validate the pipeline task graph - if err := validateGraph(ps.Tasks); err != nil { - return apis.ErrInvalidValue(err.Error(), "spec.tasks") - } - - // The parameter variables should be valid - if err := validatePipelineParameterVariables(ps.Tasks, ps.Params); err != nil { - return err - } - - // Validate the pipeline's workspaces. - return validatePipelineWorkspaces(ps.Workspaces, ps.Tasks) -} - -func validatePipelineTasks(ctx context.Context, tasks []PipelineTask) *apis.FieldError { - // Names cannot be duplicated - taskNames := sets.NewString() - var err *apis.FieldError - for i, t := range tasks { - if err = validatePipelineTaskName(ctx, "spec.tasks", i, t, taskNames); err != nil { - return err - } - } - return nil -} - -func validatePipelineTaskName(ctx context.Context, prefix string, i int, t PipelineTask, taskNames sets.String) *apis.FieldError { - if errs := validation.IsDNS1123Label(t.Name); len(errs) > 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("invalid value %q", t.Name), - Paths: []string{fmt.Sprintf(prefix+"[%d].name", i)}, - Details: "Pipeline Task name must be a valid DNS Label." 
+ - "For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - } - } - // can't have both taskRef and taskSpec at the same time - if (t.TaskRef != nil && t.TaskRef.Name != "") && t.TaskSpec != nil { - return apis.ErrMultipleOneOf(fmt.Sprintf(prefix+"[%d].taskRef", i), fmt.Sprintf(prefix+"[%d].taskSpec", i)) - } - // Check that one of TaskRef and TaskSpec is present - if (t.TaskRef == nil || (t.TaskRef != nil && t.TaskRef.Name == "")) && t.TaskSpec == nil { - return apis.ErrMissingOneOf(fmt.Sprintf(prefix+"[%d].taskRef", i), fmt.Sprintf(prefix+"[%d].taskSpec", i)) - } - // Validate TaskSpec if it's present - if t.TaskSpec != nil { - if err := t.TaskSpec.Validate(ctx); err != nil { - return err - } - } - if t.TaskRef != nil && t.TaskRef.Name != "" { - // Task names are appended to the container name, which must exist and - // must be a valid k8s name - if errSlice := validation.IsQualifiedName(t.Name); len(errSlice) != 0 { - return apis.ErrInvalidValue(strings.Join(errSlice, ","), fmt.Sprintf(prefix+"[%d].name", i)) - } - // TaskRef name must be a valid k8s name - if errSlice := validation.IsQualifiedName(t.TaskRef.Name); len(errSlice) != 0 { - return apis.ErrInvalidValue(strings.Join(errSlice, ","), fmt.Sprintf(prefix+"[%d].taskRef.name", i)) - } - if _, ok := taskNames[t.Name]; ok { - return apis.ErrMultipleOneOf(fmt.Sprintf(prefix+"[%d].name", i)) - } - taskNames[t.Name] = struct{}{} - } - return nil -} - -func validatePipelineWorkspaces(wss []PipelineWorkspaceDeclaration, pts []PipelineTask) *apis.FieldError { - // Workspace names must be non-empty and unique. 
- wsTable := sets.NewString() - for i, ws := range wss { - if ws.Name == "" { - return apis.ErrInvalidValue(fmt.Sprintf("workspace %d has empty name", i), "spec.workspaces") - } - if wsTable.Has(ws.Name) { - return apis.ErrInvalidValue(fmt.Sprintf("workspace with name %q appears more than once", ws.Name), "spec.workspaces") - } - wsTable.Insert(ws.Name) - } - - // Any workspaces used in PipelineTasks should have their name declared in the Pipeline's - // Workspaces list. - for ptIdx, pt := range pts { - for wsIdx, ws := range pt.Workspaces { - if _, ok := wsTable[ws.Workspace]; !ok { - return apis.ErrInvalidValue( - fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Workspace), - fmt.Sprintf("spec.tasks[%d].workspaces[%d]", ptIdx, wsIdx), - ) - } - } - } - return nil -} - -func validatePipelineParameterVariables(tasks []PipelineTask, params []ParamSpec) *apis.FieldError { - parameterNames := sets.NewString() - arrayParameterNames := sets.NewString() - - for _, p := range params { - // Verify that p is a valid type. - validType := false - for _, allowedType := range AllParamTypes { - if p.Type == allowedType { - validType = true - } - } - if !validType { - return apis.ErrInvalidValue(string(p.Type), fmt.Sprintf("spec.params.%s.type", p.Name)) - } - - // If a default value is provided, ensure its type matches param's declared type. - if (p.Default != nil) && (p.Default.Type != p.Type) { - return &apis.FieldError{ - Message: fmt.Sprintf( - "\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), - Paths: []string{ - fmt.Sprintf("spec.params.%s.type", p.Name), - fmt.Sprintf("spec.params.%s.default.type", p.Name), - }, - } - } - - // Add parameter name to parameterNames, and to arrayParameterNames if type is array. 
- parameterNames.Insert(p.Name) - if p.Type == ParamTypeArray { - arrayParameterNames.Insert(p.Name) - } - } - - return validatePipelineVariables(tasks, "params", parameterNames, arrayParameterNames) -} - -func validatePipelineVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String) *apis.FieldError { - for _, task := range tasks { - for _, param := range task.Params { - if param.Value.Type == ParamTypeString { - if err := validatePipelineVariable(fmt.Sprintf("param[%s]", param.Name), param.Value.StringVal, prefix, paramNames); err != nil { - return err - } - if err := validatePipelineNoArrayReferenced(fmt.Sprintf("param[%s]", param.Name), param.Value.StringVal, prefix, arrayParamNames); err != nil { - return err - } - } else { - for _, arrayElement := range param.Value.ArrayVal { - if err := validatePipelineVariable(fmt.Sprintf("param[%s]", param.Name), arrayElement, prefix, paramNames); err != nil { - return err - } - if err := validatePipelineArraysIsolated(fmt.Sprintf("param[%s]", param.Name), arrayElement, prefix, arrayParamNames); err != nil { - return err - } - } - } - } - } - return nil -} - -func validatePipelineVariable(name, value, prefix string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariable(name, value, prefix, "task parameter", "pipelinespec.params", vars) -} - -func validatePipelineNoArrayReferenced(name, value, prefix string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariableProhibited(name, value, prefix, "task parameter", "pipelinespec.params", vars) -} - -func validatePipelineArraysIsolated(name, value, prefix string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariableIsolated(name, value, prefix, "task parameter", "pipelinespec.params", vars) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_conversion.go 
b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_conversion.go deleted file mode 100644 index 995f444e2c..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_conversion.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "knative.dev/pkg/apis" -) - -var _ apis.Convertible = (*PipelineRun)(nil) - -// ConvertTo implements api.Convertible -func (pr *PipelineRun) ConvertTo(ctx context.Context, obj apis.Convertible) error { - switch sink := obj.(type) { - case *v1beta1.PipelineRun: - sink.ObjectMeta = pr.ObjectMeta - if err := pr.Spec.ConvertTo(ctx, &sink.Spec); err != nil { - return err - } - sink.Status = pr.Status - - spec := &v1beta1.PipelineSpec{} - if err := deserializeFinally(&sink.ObjectMeta, spec); err != nil { - return err - } - if len(spec.Finally) > 0 { - if sink.Spec.PipelineSpec == nil { - sink.Spec.PipelineSpec = spec - } else { - sink.Spec.PipelineSpec.Finally = spec.Finally - } - } - return nil - default: - return fmt.Errorf("unknown version, got: %T", sink) - } -} - -// ConvertTo implements api.Convertible -func (prs *PipelineRunSpec) ConvertTo(ctx context.Context, sink *v1beta1.PipelineRunSpec) error { - sink.PipelineRef = prs.PipelineRef - if prs.PipelineSpec != nil { - sink.PipelineSpec = &v1beta1.PipelineSpec{} - if err 
:= prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec); err != nil { - return err - } - } - sink.Resources = prs.Resources - sink.Params = prs.Params - sink.ServiceAccountName = prs.ServiceAccountName - sink.ServiceAccountNames = prs.ServiceAccountNames - sink.Status = prs.Status - sink.Timeout = prs.Timeout - sink.PodTemplate = prs.PodTemplate - sink.Workspaces = prs.Workspaces - return nil -} - -// ConvertFrom implements api.Convertible -func (pr *PipelineRun) ConvertFrom(ctx context.Context, obj apis.Convertible) error { - switch source := obj.(type) { - case *v1beta1.PipelineRun: - pr.ObjectMeta = source.ObjectMeta - if err := pr.Spec.ConvertFrom(ctx, &source.Spec); err != nil { - return err - } - pr.Status = source.Status - - ps := source.Spec.PipelineSpec - if ps != nil && ps.Finally != nil { - if err := serializeFinally(&pr.ObjectMeta, ps.Finally); err != nil { - return err - } - } - return nil - default: - return fmt.Errorf("unknown version, got: %T", pr) - } -} - -// ConvertFrom implements api.Convertible -func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1beta1.PipelineRunSpec) error { - prs.PipelineRef = source.PipelineRef - if source.PipelineSpec != nil { - prs.PipelineSpec = &PipelineSpec{} - if err := prs.PipelineSpec.ConvertFrom(ctx, *source.PipelineSpec); err != nil { - return err - } - } - prs.Resources = source.Resources - prs.Params = source.Params - prs.ServiceAccountName = source.ServiceAccountName - prs.ServiceAccountNames = source.ServiceAccountNames - prs.Status = source.Status - prs.Timeout = source.Timeout - prs.PodTemplate = source.PodTemplate - prs.Workspaces = source.Workspaces - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults.go deleted file mode 100644 index ce1f35ad08..0000000000 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "time" - - "github.com/tektoncd/pipeline/pkg/apis/config" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "knative.dev/pkg/apis" -) - -var _ apis.Defaultable = (*PipelineRun)(nil) - -// SetDefaults implements apis.Defaultable -func (pr *PipelineRun) SetDefaults(ctx context.Context) { - pr.Spec.SetDefaults(ctx) -} - -// SetDefaults implements apis.Defaultable -func (prs *PipelineRunSpec) SetDefaults(ctx context.Context) { - cfg := config.FromContextOrDefaults(ctx) - if prs.Timeout == nil { - prs.Timeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute} - } - - defaultSA := cfg.Defaults.DefaultServiceAccount - if prs.ServiceAccountName == "" && defaultSA != "" { - prs.ServiceAccountName = defaultSA - } - - defaultPodTemplate := cfg.Defaults.DefaultPodTemplate - if prs.PodTemplate == nil { - prs.PodTemplate = defaultPodTemplate - } - - if prs.PipelineSpec != nil { - prs.PipelineSpec.SetDefaults(ctx) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go deleted file mode 100644 index 2d67bdd76a..0000000000 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go +++ /dev/null @@ -1,223 +0,0 @@ -/* -Copyright 2019-2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/clock" - "knative.dev/pkg/apis" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PipelineRun represents a single execution of a Pipeline. PipelineRuns are how -// the graph of Tasks declared in a Pipeline are executed; they specify inputs -// to Pipelines such as parameter values and capture operational aspects of the -// Tasks execution such as service account and tolerations. Creating a -// PipelineRun creates TaskRuns for Tasks in the referenced Pipeline. 
-// -// +k8s:openapi-gen=true -type PipelineRun struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +optional - Spec PipelineRunSpec `json:"spec,omitempty"` - // +optional - Status PipelineRunStatus `json:"status,omitempty"` -} - -// GetName returns the PipelineRun's name -func (pr *PipelineRun) GetName() string { - return pr.ObjectMeta.GetName() -} - -// PipelineRunSpec defines the desired state of PipelineRun -type PipelineRunSpec struct { - // +optional - PipelineRef *PipelineRef `json:"pipelineRef,omitempty"` - // +optional - PipelineSpec *PipelineSpec `json:"pipelineSpec,omitempty"` - // Resources is a list of bindings specifying which actual instances of - // PipelineResources to use for the resources the Pipeline has declared - // it needs. - Resources []PipelineResourceBinding `json:"resources,omitempty"` - // Params is a list of parameter names and values. - Params []Param `json:"params,omitempty"` - // +optional - ServiceAccountName string `json:"serviceAccountName,omitempty"` - // +optional - ServiceAccountNames []PipelineRunSpecServiceAccountName `json:"serviceAccountNames,omitempty"` - // Used for cancelling a pipelinerun (and maybe more later on) - // +optional - Status PipelineRunSpecStatus `json:"status,omitempty"` - // Time after which the Pipeline times out. Defaults to never. - // Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration - // +optional - Timeout *metav1.Duration `json:"timeout,omitempty"` - // PodTemplate holds pod specific configuration - PodTemplate *PodTemplate `json:"podTemplate,omitempty"` - // Workspaces holds a set of workspace bindings that must match names - // with those declared in the pipeline. 
- // +optional - Workspaces []WorkspaceBinding `json:"workspaces,omitempty"` - // TaskRunSpecs holds a set of task specific specs - // +optional - TaskRunSpecs []PipelineTaskRunSpec `json:"taskRunSpecs,omitempty"` -} - -// PipelineRunSpecStatus defines the pipelinerun spec status the user can provide -type PipelineRunSpecStatus = v1beta1.PipelineRunSpecStatus - -const ( - // PipelineRunSpecStatusCancelled indicates that the user wants to cancel the task, - // if not already cancelled or terminated - PipelineRunSpecStatusCancelled = v1beta1.PipelineRunSpecStatusCancelledDeprecated -) - -// PipelineResourceRef can be used to refer to a specific instance of a Resource -type PipelineResourceRef = v1beta1.PipelineResourceRef - -// PipelineRef can be used to refer to a specific instance of a Pipeline. -// Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64 -type PipelineRef = v1beta1.PipelineRef - -// PipelineRunStatus defines the observed state of PipelineRun -type PipelineRunStatus = v1beta1.PipelineRunStatus - -// PipelineRunStatusFields holds the fields of PipelineRunStatus' status. -// This is defined separately and inlined so that other types can readily -// consume these fields via duck typing. 
-type PipelineRunStatusFields = v1beta1.PipelineRunStatusFields - -// PipelineRunTaskRunStatus contains the name of the PipelineTask for this TaskRun and the TaskRun's Status -type PipelineRunTaskRunStatus = v1beta1.PipelineRunTaskRunStatus - -// PipelineRunSpecServiceAccountName can be used to configure specific -// ServiceAccountName for a concrete Task -type PipelineRunSpecServiceAccountName = v1beta1.PipelineRunSpecServiceAccountName - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PipelineRunList contains a list of PipelineRun -type PipelineRunList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []PipelineRun `json:"items,omitempty"` -} - -// PipelineTaskRun reports the results of running a step in the Task. Each -// task has the potential to succeed or fail (based on the exit code) -// and produces logs. -type PipelineTaskRun = v1beta1.PipelineTaskRun - -// GetGroupVersionKind implements kmeta.OwnerRefable. -func (*PipelineRun) GetGroupVersionKind() schema.GroupVersionKind { - return SchemeGroupVersion.WithKind(pipeline.PipelineRunControllerName) -} - -// IsDone returns true if the PipelineRun's status indicates that it is done. 
-func (pr *PipelineRun) IsDone() bool { - return !pr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() -} - -// HasStarted function check whether pipelinerun has valid start time set in its status -func (pr *PipelineRun) HasStarted() bool { - return pr.Status.StartTime != nil && !pr.Status.StartTime.IsZero() -} - -// IsCancelled returns true if the PipelineRun's spec status is set to Cancelled state -func (pr *PipelineRun) IsCancelled() bool { - return pr.Spec.Status == PipelineRunSpecStatusCancelled -} - -// GetRunKey return the pipelinerun key for timeout handler map -func (pr *PipelineRun) GetRunKey() string { - // The address of the pointer is a threadsafe unique identifier for the pipelinerun - return fmt.Sprintf("%s/%p", pipeline.PipelineRunControllerName, pr) -} - -// IsTimedOut returns true if a pipelinerun has exceeded its spec.Timeout based on its status.Timeout -func (pr *PipelineRun) IsTimedOut(c clock.PassiveClock) bool { - pipelineTimeout := pr.Spec.Timeout - startTime := pr.Status.StartTime - - if !startTime.IsZero() && pipelineTimeout != nil { - timeout := pipelineTimeout.Duration - if timeout == config.NoTimeoutDuration { - return false - } - runtime := c.Since(startTime.Time) - if runtime > timeout { - return true - } - } - return false -} - -// GetServiceAccountName returns the service account name for a given -// PipelineTask if configured, otherwise it returns the PipelineRun's serviceAccountName. 
-func (pr *PipelineRun) GetServiceAccountName(pipelineTaskName string) string { - serviceAccountName := pr.Spec.ServiceAccountName - for _, sa := range pr.Spec.ServiceAccountNames { - if sa.TaskName == pipelineTaskName { - serviceAccountName = sa.ServiceAccountName - } - } - return serviceAccountName -} - -// HasVolumeClaimTemplate returns true if PipelineRun contains volumeClaimTemplates that is -// used for creating PersistentVolumeClaims with an OwnerReference for each run -func (pr *PipelineRun) HasVolumeClaimTemplate() bool { - for _, ws := range pr.Spec.Workspaces { - if ws.VolumeClaimTemplate != nil { - return true - } - } - return false -} - -// PipelineTaskRunSpec holds task specific specs -type PipelineTaskRunSpec struct { - PipelineTaskName string `json:"pipelineTaskName,omitempty"` - TaskServiceAccountName string `json:"taskServiceAccountName,omitempty"` - TaskPodTemplate *PodTemplate `json:"taskPodTemplate,omitempty"` -} - -// GetTaskRunSpecs returns the task specific spec for a given -// PipelineTask if configured, otherwise it returns the PipelineRun's default. 
-func (pr *PipelineRun) GetTaskRunSpecs(pipelineTaskName string) (string, *PodTemplate) { - serviceAccountName := pr.GetServiceAccountName(pipelineTaskName) - taskPodTemplate := pr.Spec.PodTemplate - for _, task := range pr.Spec.TaskRunSpecs { - if task.PipelineTaskName == pipelineTaskName { - taskPodTemplate = task.TaskPodTemplate - serviceAccountName = task.TaskServiceAccountName - } - } - return serviceAccountName, taskPodTemplate -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go deleted file mode 100644 index f6396cb433..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/validate" - "k8s.io/apimachinery/pkg/api/equality" - "knative.dev/pkg/apis" -) - -var _ apis.Validatable = (*PipelineRun)(nil) - -// Validate pipelinerun -func (pr *PipelineRun) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(pr.GetObjectMeta()).ViaField("metadata"); err != nil { - return err - } - if apis.IsInDelete(ctx) { - return nil - } - return pr.Spec.Validate(ctx) -} - -// Validate pipelinerun spec -func (ps *PipelineRunSpec) Validate(ctx context.Context) *apis.FieldError { - if equality.Semantic.DeepEqual(ps, &PipelineRunSpec{}) { - return apis.ErrMissingField("spec") - } - - // can't have both pipelineRef and pipelineSpec at the same time - if (ps.PipelineRef != nil && ps.PipelineRef.Name != "") && ps.PipelineSpec != nil { - return apis.ErrDisallowedFields("spec.pipelineref", "spec.pipelinespec") - } - - // Check that one of PipelineRef and PipelineSpec is present - if (ps.PipelineRef == nil || (ps.PipelineRef != nil && ps.PipelineRef.Name == "")) && ps.PipelineSpec == nil { - return apis.ErrMissingField("spec.pipelineref.name", "spec.pipelinespec") - } - - // Validate PipelineSpec if it's present - if ps.PipelineSpec != nil { - if err := ps.PipelineSpec.Validate(ctx); err != nil { - return err - } - } - - if ps.Timeout != nil { - // timeout should be a valid duration of at least 0. 
- if ps.Timeout.Duration < 0 { - return apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ps.Timeout.Duration.String()), "spec.timeout") - } - } - - if ps.Workspaces != nil { - wsNames := make(map[string]int) - for idx, ws := range ps.Workspaces { - if prevIdx, alreadyExists := wsNames[ws.Name]; alreadyExists { - return &apis.FieldError{ - Message: fmt.Sprintf("workspace %q provided by pipelinerun more than once, at index %d and %d", ws.Name, prevIdx, idx), - Paths: []string{"spec.workspaces"}, - } - } - wsNames[ws.Name] = idx - } - } - - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go deleted file mode 100644 index 156c66b2d5..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go +++ /dev/null @@ -1,8 +0,0 @@ -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" -) - -// PodTemplate holds pod specific configuration -type PodTemplate = pod.Template diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go index 78b59bfdce..42b5e4b18e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go @@ -46,20 +46,6 @@ var ( // Adds the list of known types to Scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &Task{}, - &TaskList{}, - &Condition{}, - &ConditionList{}, - &ClusterTask{}, - &ClusterTaskList{}, - &TaskRun{}, - &TaskRunList{}, - &Pipeline{}, - &PipelineList{}, - &PipelineRun{}, - &PipelineRunList{}, - &PipelineResource{}, - &PipelineResourceList{}, &Run{}, &RunList{}, ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_paths.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_paths.go deleted file mode 100644 index 6aa94913b0..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_paths.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - Copyright 2019 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package v1alpha1 - -import "path/filepath" - -// InputResourcePath returns the path where the given input resource -// will get mounted in a Pod -func InputResourcePath(r ResourceDeclaration) string { - return path("/workspace", r) -} - -// OutputResourcePath returns the path to the output resource in a Pod -func OutputResourcePath(r ResourceDeclaration) string { - return path("/workspace/output", r) -} - -func path(root string, r ResourceDeclaration) string { - if r.TargetPath != "" { - if filepath.IsAbs(r.TargetPath) { - return r.TargetPath - } - return filepath.Join("/workspace", r.TargetPath) - } - return filepath.Join(root, r.Name) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_types.go deleted file mode 100644 index d987494e17..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_types.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "fmt" - - "github.com/google/go-cmp/cmp" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" -) - -// PipelineResourceInterface interface to be implemented by different PipelineResource types -type PipelineResourceInterface interface { - // GetName returns the name of this PipelineResource instance. 
- GetName() string - // GetType returns the type of this PipelineResource (often a super type, e.g. in the case of storage). - GetType() PipelineResourceType - // Replacements returns all the attributes that this PipelineResource has that - // can be used for variable replacement. - Replacements() map[string]string - // GetOutputTaskModifier returns the TaskModifier instance that should be used on a Task - // in order to add this kind of resource when it is being used as an output. - GetOutputTaskModifier(ts *TaskSpec, path string) (TaskModifier, error) - // GetInputTaskModifier returns the TaskModifier instance that should be used on a Task - // in order to add this kind of resource when it is being used as an input. - GetInputTaskModifier(ts *TaskSpec, path string) (TaskModifier, error) -} - -// TaskModifier is an interface to be implemented by different PipelineResources -type TaskModifier = v1beta1.TaskModifier - -// InternalTaskModifier implements TaskModifier for resources that are built-in to Tekton Pipelines. -type InternalTaskModifier = v1beta1.InternalTaskModifier - -func checkStepNotAlreadyAdded(s Step, steps []Step) error { - for _, step := range steps { - if s.Name == step.Name { - return fmt.Errorf("Step %s cannot be added again", step.Name) - } - } - return nil -} - -// ApplyTaskModifier applies a modifier to the task by appending and prepending steps and volumes. -// If steps with the same name exist in ts an error will be returned. If identical Volumes have -// been added, they will not be added again. If Volumes with the same name but different contents -// have been added, an error will be returned. -// FIXME(vdemeester) de-duplicate this -func ApplyTaskModifier(ts *TaskSpec, tm TaskModifier) error { - steps := tm.GetStepsToPrepend() - for _, step := range steps { - if err := checkStepNotAlreadyAdded(step, ts.Steps); err != nil { - return err - } - } - ts.Steps = append(steps, ts.Steps...) 
- - steps = tm.GetStepsToAppend() - for _, step := range steps { - if err := checkStepNotAlreadyAdded(step, ts.Steps); err != nil { - return err - } - } - ts.Steps = append(ts.Steps, steps...) - - volumes := tm.GetVolumes() - for _, volume := range volumes { - var alreadyAdded bool - for _, v := range ts.Volumes { - if volume.Name == v.Name { - // If a Volume with the same name but different contents has already been added, we can't add both - if d := cmp.Diff(volume, v); d != "" { - return fmt.Errorf("tried to add volume %s already added but with different contents", volume.Name) - } - // If an identical Volume has already been added, don't add it again - alreadyAdded = true - } - } - if !alreadyAdded { - ts.Volumes = append(ts.Volumes, volume) - } - } - - return nil -} - -// PipelineResourceBinding connects a reference to an instance of a PipelineResource -// with a PipelineResource dependency that the Pipeline has declared -type PipelineResourceBinding = v1beta1.PipelineResourceBinding - -// PipelineResourceResult used to export the image name and digest as json -type PipelineResourceResult = v1beta1.PipelineResourceResult - -// ResultType used to find out whether a PipelineResourceResult is from a task result or not -type ResultType = v1beta1.ResultType diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go index 74e3e0b09d..7889cfa8b8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go @@ -47,7 +47,7 @@ type EmbeddedRunSpec struct { // RunSpec defines the desired state of Run type RunSpec struct { // +optional - Ref *TaskRef `json:"ref,omitempty"` + Ref *v1beta1.TaskRef `json:"ref,omitempty"` // Spec is a specification of a custom task // +optional @@ -69,7 +69,7 @@ type RunSpec struct { // PodTemplate holds pod specific 
configuration // +optional - PodTemplate *PodTemplate `json:"podTemplate,omitempty"` + PodTemplate *v1beta1.PodTemplate `json:"podTemplate,omitempty"` // Time after which the custom-task times out. // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration @@ -201,7 +201,7 @@ func (r *Run) HasStarted() bool { // IsSuccessful returns true if the Run's status indicates that it is done. func (r *Run) IsSuccessful() bool { - return r.Status.GetCondition(apis.ConditionSucceeded).IsTrue() + return r != nil && r.Status.GetCondition(apis.ConditionSucceeded).IsTrue() } // GetRunKey return the run's key for timeout handler map diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_validation.go index 483c918de6..59bcb43c9a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_validation.go @@ -19,6 +19,7 @@ package v1alpha1 import ( "context" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/apis/validate" "k8s.io/apimachinery/pkg/api/equality" "knative.dev/pkg/apis" @@ -66,9 +67,9 @@ func (rs *RunSpec) Validate(ctx context.Context) *apis.FieldError { return apis.ErrMissingField("spec.spec.kind") } } - if err := validateParameters("spec.params", rs.Params); err != nil { + if err := v1beta1.ValidateParameters(ctx, rs.Params).ViaField("spec.params"); err != nil { return err } - return validateWorkspaceBindings(ctx, rs.Workspaces) + return v1beta1.ValidateWorkspaceBindings(ctx, rs.Workspaces).ViaField("spec.workspaces") } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/step_replacements.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/step_replacements.go deleted file mode 100644 index 5a484579ec..0000000000 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/step_replacements.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - Copyright 2019 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/substitution" -) - -// ApplyStepReplacements applies variable interpolation on a Step. -func ApplyStepReplacements(step *Step, stringReplacements map[string]string, arrayReplacements map[string][]string) { - step.Script = substitution.ApplyReplacements(step.Script, stringReplacements) - ApplyContainerReplacements(&step.Container, stringReplacements, arrayReplacements) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/storage_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/storage_resource.go deleted file mode 100644 index 6f636bfb71..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/storage_resource.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2019-2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - resource "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" -) - -// PipelineResourceTypeGCS is the subtype for the GCSResources, which is backed by a GCS blob/directory. -const PipelineResourceTypeGCS PipelineResourceType = resource.PipelineResourceTypeGCS diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_conversion.go deleted file mode 100644 index 027d1493a5..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_conversion.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "knative.dev/pkg/apis" -) - -var _ apis.Convertible = (*Task)(nil) - -// ConvertTo implements api.Convertible -func (t *Task) ConvertTo(ctx context.Context, obj apis.Convertible) error { - switch sink := obj.(type) { - case *v1beta1.Task: - sink.ObjectMeta = t.ObjectMeta - return t.Spec.ConvertTo(ctx, &sink.Spec) - default: - return fmt.Errorf("unknown version, got: %T", sink) - } -} - -// ConvertTo implements api.Convertible -func (ts *TaskSpec) ConvertTo(ctx context.Context, sink *v1beta1.TaskSpec) error { - sink.Steps = ts.Steps - sink.Volumes = ts.Volumes - sink.StepTemplate = ts.StepTemplate - sink.Sidecars = ts.Sidecars - sink.Workspaces = ts.Workspaces - sink.Results = ts.Results - sink.Resources = ts.Resources - sink.Params = ts.Params - sink.Description = ts.Description - if ts.Inputs != nil { - if len(ts.Inputs.Params) > 0 && len(ts.Params) > 0 { - // This shouldn't happen as it shouldn't pass validation - return apis.ErrMultipleOneOf("inputs.params", "params") - } - if len(ts.Inputs.Params) > 0 { - sink.Params = make([]v1beta1.ParamSpec, len(ts.Inputs.Params)) - for i, param := range ts.Inputs.Params { - sink.Params[i] = *param.DeepCopy() - } - } - if len(ts.Inputs.Resources) > 0 { - if sink.Resources == nil { - sink.Resources = &v1beta1.TaskResources{} - } - if len(ts.Inputs.Resources) > 0 && ts.Resources != nil && len(ts.Resources.Inputs) > 0 { - // This shouldn't happen as it shouldn't pass validation but just in case - return apis.ErrMultipleOneOf("inputs.resources", "resources.inputs") - } - sink.Resources.Inputs = make([]v1beta1.TaskResource, len(ts.Inputs.Resources)) - for i, resource := range ts.Inputs.Resources { - sink.Resources.Inputs[i] = v1beta1.TaskResource{ResourceDeclaration: v1beta1.ResourceDeclaration{ - Name: resource.Name, - Type: resource.Type, - Description: resource.Description, - TargetPath: resource.TargetPath, - 
Optional: resource.Optional, - }} - } - } - } - if ts.Outputs != nil && len(ts.Outputs.Resources) > 0 { - if sink.Resources == nil { - sink.Resources = &v1beta1.TaskResources{} - } - if len(ts.Outputs.Resources) > 0 && ts.Resources != nil && len(ts.Resources.Outputs) > 0 { - // This shouldn't happen as it shouldn't pass validation but just in case - return apis.ErrMultipleOneOf("outputs.resources", "resources.outputs") - } - sink.Resources.Outputs = make([]v1beta1.TaskResource, len(ts.Outputs.Resources)) - for i, resource := range ts.Outputs.Resources { - sink.Resources.Outputs[i] = v1beta1.TaskResource{ResourceDeclaration: v1beta1.ResourceDeclaration{ - Name: resource.Name, - Type: resource.Type, - Description: resource.Description, - TargetPath: resource.TargetPath, - Optional: resource.Optional, - }} - } - } - return nil -} - -// ConvertFrom implements api.Convertible -func (t *Task) ConvertFrom(ctx context.Context, obj apis.Convertible) error { - switch source := obj.(type) { - case *v1beta1.Task: - t.ObjectMeta = source.ObjectMeta - return t.Spec.ConvertFrom(ctx, &source.Spec) - default: - return fmt.Errorf("unknown version, got: %T", t) - } -} - -// ConvertFrom implements api.Convertible -func (ts *TaskSpec) ConvertFrom(ctx context.Context, source *v1beta1.TaskSpec) error { - ts.Steps = source.Steps - ts.Volumes = source.Volumes - ts.StepTemplate = source.StepTemplate - ts.Sidecars = source.Sidecars - ts.Workspaces = source.Workspaces - ts.Results = source.Results - ts.Params = source.Params - ts.Resources = source.Resources - ts.Description = source.Description - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_types.go deleted file mode 100644 index 1121e93c78..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_types.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed 
under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" -) - -const ( - // TaskRunResultType default task run result value - TaskRunResultType ResultType = v1beta1.TaskRunResultType - // PipelineResourceResultType default pipeline result value - PipelineResourceResultType ResultType = v1beta1.PipelineResourceResultType - // UnknownResultType default unknown result type value - UnknownResultType ResultType = v1beta1.UnknownResultType -) - -// TaskSpec returns the task's spec -func (t *Task) TaskSpec() TaskSpec { - return t.Spec -} - -// TaskMetadata returns the task's ObjectMeta -func (t *Task) TaskMetadata() metav1.ObjectMeta { - return t.ObjectMeta -} - -// Copy returns a deep copy of the task -func (t *Task) Copy() TaskObject { - return t.DeepCopy() -} - -// TaskSpec defines the desired state of Task. -type TaskSpec struct { - v1beta1.TaskSpec `json:",inline"` - - // Inputs is an optional set of parameters and resources which must be - // supplied by the user when a Task is executed by a TaskRun. - // +optional - Inputs *Inputs `json:"inputs,omitempty"` - // Outputs is an optional set of resources and results produced when this - // Task is run. 
- // +optional - Outputs *Outputs `json:"outputs,omitempty"` -} - -// TaskResult used to describe the results of a task -type TaskResult = v1beta1.TaskResult - -// Step embeds the Container type, which allows it to include fields not -// provided by Container. -type Step = v1beta1.Step - -// Sidecar has nearly the same data structure as Step, consisting of a Container and an optional Script, but does not have the ability to timeout. -type Sidecar = v1beta1.Sidecar - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Task represents a collection of sequential steps that are run as part of a -// Pipeline using a set of inputs and producing a set of outputs. Tasks execute -// when TaskRuns are created that provide the input parameters and resources and -// output resources the Task requires. -// -// +k8s:openapi-gen=true -type Task struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata"` - - // Spec holds the desired state of the Task from the client - // +optional - Spec TaskSpec `json:"spec"` -} - -// Inputs are the requirements that a task needs to run a Build. -type Inputs struct { - // Resources is a list of the input resources required to run the task. - // Resources are represented in TaskRuns as bindings to instances of - // PipelineResources. - // +optional - Resources []TaskResource `json:"resources,omitempty"` - // Params is a list of input parameters required to run the task. Params - // must be supplied as inputs in TaskRuns unless they declare a default - // value. - // +optional - Params []ParamSpec `json:"params,omitempty"` -} - -// TaskResource defines an input or output Resource declared as a requirement -// by a Task. The Name field will be used to refer to these Resources within -// the Task definition, and when provided as an Input, the Name will be the -// path to the volume mounted containing this Resource as an input (e.g. 
-// an input Resource named `workspace` will be mounted at `/workspace`). -type TaskResource = v1beta1.TaskResource - -// Outputs allow a task to declare what data the Build/Task will be producing, -// i.e. results such as logs and artifacts such as images. -type Outputs struct { - // +optional - Results []TestResult `json:"results,omitempty"` - // +optional - Resources []TaskResource `json:"resources,omitempty"` -} - -// TestResult allows a task to specify the location where test logs -// can be found and what format they will be in. -type TestResult struct { - // Name declares the name by which a result is referenced in the Task's - // definition. Results may be referenced by name in the definition of a - // Task's steps. - Name string `json:"name"` - // TODO: maybe this is an enum with types like "go test", "junit", etc. - Format string `json:"format"` - Path string `json:"path"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TaskList contains a list of Task -type TaskList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []Task `json:"items"` -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_validation.go deleted file mode 100644 index 94a13a6918..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_validation.go +++ /dev/null @@ -1,437 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "github.com/tektoncd/pipeline/pkg/apis/validate" - "github.com/tektoncd/pipeline/pkg/substitution" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "knative.dev/pkg/apis" -) - -var _ apis.Validatable = (*Task)(nil) - -// Validate implements apis.Validatable -func (t *Task) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(t.GetObjectMeta()); err != nil { - return err.ViaField("metadata") - } - if apis.IsInDelete(ctx) { - return nil - } - return t.Spec.Validate(ctx) -} - -// Validate implements apis.Validatable -func (ts *TaskSpec) Validate(ctx context.Context) *apis.FieldError { - - if len(ts.Steps) == 0 { - return apis.ErrMissingField("steps") - } - if err := ValidateVolumes(ts.Volumes).ViaField("volumes"); err != nil { - return err - } - if err := validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate); err != nil { - return err - } - mergedSteps, err := v1beta1.MergeStepsWithStepTemplate(ts.StepTemplate, ts.Steps) - if err != nil { - return &apis.FieldError{ - Message: fmt.Sprintf("error merging step template and steps: %s", err), - Paths: []string{"stepTemplate"}, - } - } - - if err := validateSteps(mergedSteps).ViaField("steps"); err != nil { - return err - } - - if ts.Inputs != nil { - if len(ts.Inputs.Params) > 0 && len(ts.Params) > 0 { - return apis.ErrMultipleOneOf("inputs.params", "params") - } - if ts.Resources != nil && len(ts.Resources.Inputs) > 0 && len(ts.Inputs.Resources) > 0 { - return apis.ErrMultipleOneOf("inputs.resources", "resources.inputs") - } - } - if ts.Outputs != nil { - if ts.Resources != nil && len(ts.Resources.Outputs) > 0 && len(ts.Outputs.Resources) > 0 { - return 
apis.ErrMultipleOneOf("outputs.resources", "resources.outputs") - } - } - - // Validate Resources declaration - if err := ts.Resources.Validate(ctx); err != nil { - return err - } - // Validate that the parameters type are correct - if err := v1beta1.ValidateParameterTypes(ts.Params); err != nil { - return err - } - - // A task doesn't have to have inputs or outputs, but if it does they must be valid. - // A task can't duplicate input or output names. - // Deprecated - if ts.Inputs != nil { - for _, resource := range ts.Inputs.Resources { - if err := validateResourceType(resource, fmt.Sprintf("taskspec.Inputs.Resources.%s.Type", resource.Name)); err != nil { - return err - } - } - if err := checkForDuplicates(ts.Inputs.Resources, "taskspec.Inputs.Resources.Name"); err != nil { - return err - } - if err := validateInputParameterTypes(ts.Inputs); err != nil { - return err - } - } - // Deprecated - if ts.Outputs != nil { - for _, resource := range ts.Outputs.Resources { - if err := validateResourceType(resource, fmt.Sprintf("taskspec.Outputs.Resources.%s.Type", resource.Name)); err != nil { - return err - } - } - if err := checkForDuplicates(ts.Outputs.Resources, "taskspec.Outputs.Resources.Name"); err != nil { - return err - } - } - - // Validate task step names - for _, step := range ts.Steps { - if errs := validation.IsDNS1123Label(step.Name); step.Name != "" && len(errs) > 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("invalid value %q", step.Name), - Paths: []string{"taskspec.steps.name"}, - Details: "Task step name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - } - } - } - - if err := v1beta1.ValidateParameterVariables(ts.Steps, ts.Params); err != nil { - return err - } - // Deprecated - if err := validateInputParameterVariables(ts.Steps, ts.Inputs, ts.Params); err != nil { - return err - } - - if err := v1beta1.ValidateResourcesVariables(ts.Steps, ts.Resources); err != 
nil { - return err - } - // Deprecated - return validateResourceVariables(ts.Steps, ts.Inputs, ts.Outputs, ts.Resources) -} - -// validateDeclaredWorkspaces will make sure that the declared workspaces do not try to use -// a mount path which conflicts with any other declared workspaces, with the explicitly -// declared volume mounts, or with the stepTemplate. The names must also be unique. -func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *corev1.Container) *apis.FieldError { - mountPaths := sets.NewString() - for _, step := range steps { - for _, vm := range step.VolumeMounts { - mountPaths.Insert(filepath.Clean(vm.MountPath)) - } - } - if stepTemplate != nil { - for _, vm := range stepTemplate.VolumeMounts { - mountPaths.Insert(filepath.Clean(vm.MountPath)) - } - } - - wsNames := sets.NewString() - for _, w := range workspaces { - // Workspace names must be unique - if wsNames.Has(w.Name) { - return &apis.FieldError{ - Message: fmt.Sprintf("workspace name %q must be unique", w.Name), - Paths: []string{"workspaces.name"}, - } - } - wsNames.Insert(w.Name) - // Workspaces must not try to use mount paths that are already used - mountPath := filepath.Clean(w.GetMountPath()) - if mountPaths.Has(mountPath) { - return &apis.FieldError{ - Message: fmt.Sprintf("workspace mount path %q must be unique", mountPath), - Paths: []string{"workspaces.mountpath"}, - } - } - mountPaths.Insert(mountPath) - } - return nil -} - -// ValidateVolumes validates a slice of volumes to make sure there are no duplicate names -func ValidateVolumes(volumes []corev1.Volume) *apis.FieldError { - // Task must not have duplicate volume names. 
- vols := sets.NewString() - for _, v := range volumes { - if vols.Has(v.Name) { - return &apis.FieldError{ - Message: fmt.Sprintf("multiple volumes with same name %q", v.Name), - Paths: []string{"name"}, - } - } - vols.Insert(v.Name) - } - return nil -} - -func validateSteps(steps []Step) *apis.FieldError { - // Task must not have duplicate step names. - names := sets.NewString() - for idx, s := range steps { - if s.Image == "" { - return apis.ErrMissingField("Image") - } - - if s.Script != "" { - if len(s.Command) > 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("step %d script cannot be used with command", idx), - Paths: []string{"script"}, - } - } - } - - if s.Name != "" { - if names.Has(s.Name) { - return apis.ErrInvalidValue(s.Name, "name") - } - names.Insert(s.Name) - } - - for _, vm := range s.VolumeMounts { - if strings.HasPrefix(vm.MountPath, "/tekton/") && - !strings.HasPrefix(vm.MountPath, "/tekton/home") { - return &apis.FieldError{ - Message: fmt.Sprintf("step %d volumeMount cannot be mounted under /tekton/ (volumeMount %q mounted at %q)", idx, vm.Name, vm.MountPath), - Paths: []string{"volumeMounts.mountPath"}, - } - } - if strings.HasPrefix(vm.Name, "tekton-internal-") { - return &apis.FieldError{ - Message: fmt.Sprintf(`step %d volumeMount name %q cannot start with "tekton-internal-"`, idx, vm.Name), - Paths: []string{"volumeMounts.name"}, - } - } - } - } - return nil -} - -func validateInputParameterTypes(inputs *Inputs) *apis.FieldError { - for _, p := range inputs.Params { - // Ensure param has a valid type. - validType := false - for _, allowedType := range AllParamTypes { - if p.Type == allowedType { - validType = true - } - } - if !validType { - return apis.ErrInvalidValue(p.Type, fmt.Sprintf("taskspec.inputs.params.%s.type", p.Name)) - } - - // If a default value is provided, ensure its type matches param's declared type. 
- if (p.Default != nil) && (p.Default.Type != p.Type) { - return &apis.FieldError{ - Message: fmt.Sprintf( - "\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), - Paths: []string{ - fmt.Sprintf("taskspec.inputs.params.%s.type", p.Name), - fmt.Sprintf("taskspec.inputs.params.%s.default.type", p.Name), - }, - } - } - } - return nil -} - -func validateInputParameterVariables(steps []Step, inputs *Inputs, params []v1beta1.ParamSpec) *apis.FieldError { - parameterNames := sets.NewString() - arrayParameterNames := sets.NewString() - - for _, p := range params { - parameterNames.Insert(p.Name) - if p.Type == ParamTypeArray { - arrayParameterNames.Insert(p.Name) - } - } - // Deprecated - if inputs != nil { - for _, p := range inputs.Params { - parameterNames.Insert(p.Name) - if p.Type == ParamTypeArray { - arrayParameterNames.Insert(p.Name) - } - } - } - - if err := validateVariables(steps, "params", parameterNames); err != nil { - return err - } - return validateArrayUsage(steps, "params", arrayParameterNames) -} - -func validateResourceVariables(steps []Step, inputs *Inputs, outputs *Outputs, resources *v1beta1.TaskResources) *apis.FieldError { - resourceNames := sets.NewString() - if resources != nil { - for _, r := range resources.Inputs { - resourceNames.Insert(r.Name) - } - for _, r := range resources.Outputs { - resourceNames.Insert(r.Name) - } - } - // Deprecated - if inputs != nil { - for _, r := range inputs.Resources { - resourceNames.Insert(r.Name) - } - } - // Deprecated - if outputs != nil { - for _, r := range outputs.Resources { - resourceNames.Insert(r.Name) - } - } - return validateVariables(steps, "resources", resourceNames) -} - -func validateArrayUsage(steps []Step, prefix string, vars sets.String) *apis.FieldError { - for _, step := range steps { - if err := validateTaskNoArrayReferenced("name", step.Name, prefix, vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced("image", step.Image, prefix, 
vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced("workingDir", step.WorkingDir, prefix, vars); err != nil { - return err - } - for i, cmd := range step.Command { - if err := validateTaskArraysIsolated(fmt.Sprintf("command[%d]", i), cmd, prefix, vars); err != nil { - return err - } - } - for i, arg := range step.Args { - if err := validateTaskArraysIsolated(fmt.Sprintf("arg[%d]", i), arg, prefix, vars); err != nil { - return err - } - } - for _, env := range step.Env { - if err := validateTaskNoArrayReferenced(fmt.Sprintf("env[%s]", env.Name), env.Value, prefix, vars); err != nil { - return err - } - } - for i, v := range step.VolumeMounts { - if err := validateTaskNoArrayReferenced(fmt.Sprintf("volumeMount[%d].Name", i), v.Name, prefix, vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced(fmt.Sprintf("volumeMount[%d].MountPath", i), v.MountPath, prefix, vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced(fmt.Sprintf("volumeMount[%d].SubPath", i), v.SubPath, prefix, vars); err != nil { - return err - } - } - } - return nil -} - -func validateVariables(steps []Step, prefix string, vars sets.String) *apis.FieldError { - for _, step := range steps { - if err := validateTaskVariable("name", step.Name, prefix, vars); err != nil { - return err - } - if err := validateTaskVariable("image", step.Image, prefix, vars); err != nil { - return err - } - if err := validateTaskVariable("workingDir", step.WorkingDir, prefix, vars); err != nil { - return err - } - for i, cmd := range step.Command { - if err := validateTaskVariable(fmt.Sprintf("command[%d]", i), cmd, prefix, vars); err != nil { - return err - } - } - for i, arg := range step.Args { - if err := validateTaskVariable(fmt.Sprintf("arg[%d]", i), arg, prefix, vars); err != nil { - return err - } - } - for _, env := range step.Env { - if err := validateTaskVariable(fmt.Sprintf("env[%s]", env.Name), env.Value, prefix, vars); err != nil { - 
return err - } - } - for i, v := range step.VolumeMounts { - if err := validateTaskVariable(fmt.Sprintf("volumeMount[%d].Name", i), v.Name, prefix, vars); err != nil { - return err - } - if err := validateTaskVariable(fmt.Sprintf("volumeMount[%d].MountPath", i), v.MountPath, prefix, vars); err != nil { - return err - } - if err := validateTaskVariable(fmt.Sprintf("volumeMount[%d].SubPath", i), v.SubPath, prefix, vars); err != nil { - return err - } - } - } - return nil -} - -func validateTaskVariable(name, value, prefix string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariable(name, value, "(?:inputs|outputs)."+prefix, "step", "taskspec.steps", vars) -} - -func validateTaskNoArrayReferenced(name, value, prefix string, arrayNames sets.String) *apis.FieldError { - return substitution.ValidateVariableProhibited(name, value, "(?:inputs|outputs)."+prefix, "step", "taskspec.steps", arrayNames) -} - -func validateTaskArraysIsolated(name, value, prefix string, arrayNames sets.String) *apis.FieldError { - return substitution.ValidateVariableIsolated(name, value, "(?:inputs|outputs)."+prefix, "step", "taskspec.steps", arrayNames) -} - -func checkForDuplicates(resources []TaskResource, path string) *apis.FieldError { - encountered := sets.NewString() - for _, r := range resources { - if encountered.Has(strings.ToLower(r.Name)) { - return apis.ErrMultipleOneOf(path) - } - encountered.Insert(strings.ToLower(r.Name)) - } - return nil -} - -func validateResourceType(r TaskResource, path string) *apis.FieldError { - for _, allowed := range AllResourceTypes { - if r.Type == allowed { - return nil - } - } - return apis.ErrInvalidValue(r.Type, path) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_conversion.go deleted file mode 100644 index e41b49ae1e..0000000000 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_conversion.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "knative.dev/pkg/apis" -) - -var _ apis.Convertible = (*TaskRun)(nil) - -// ConvertTo implements api.Convertible -func (tr *TaskRun) ConvertTo(ctx context.Context, obj apis.Convertible) error { - switch sink := obj.(type) { - case *v1beta1.TaskRun: - sink.ObjectMeta = tr.ObjectMeta - if err := tr.Spec.ConvertTo(ctx, &sink.Spec); err != nil { - return err - } - sink.Status = tr.Status - return nil - default: - return fmt.Errorf("unknown version, got: %T", sink) - } -} - -// ConvertTo implements api.Convertible -func (trs *TaskRunSpec) ConvertTo(ctx context.Context, sink *v1beta1.TaskRunSpec) error { - sink.ServiceAccountName = trs.ServiceAccountName - sink.TaskRef = trs.TaskRef - if trs.TaskSpec != nil { - sink.TaskSpec = &v1beta1.TaskSpec{} - if err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec); err != nil { - return err - } - } - sink.Status = trs.Status - sink.Timeout = trs.Timeout - sink.PodTemplate = trs.PodTemplate - sink.Workspaces = trs.Workspaces - sink.Params = trs.Params - sink.Resources = trs.Resources - // Deprecated fields - if trs.Inputs != nil { - if len(trs.Inputs.Params) > 0 && len(trs.Params) > 0 { - // This shouldn't happen as it shouldn't pass 
validation - return apis.ErrMultipleOneOf("inputs.params", "params") - } - if len(trs.Inputs.Params) > 0 { - sink.Params = make([]v1beta1.Param, len(trs.Inputs.Params)) - for i, param := range trs.Inputs.Params { - sink.Params[i] = *param.DeepCopy() - } - } - if len(trs.Inputs.Resources) > 0 { - if sink.Resources == nil { - sink.Resources = &v1beta1.TaskRunResources{} - } - if len(trs.Inputs.Resources) > 0 && trs.Resources != nil && len(trs.Resources.Inputs) > 0 { - // This shouldn't happen as it shouldn't pass validation but just in case - return apis.ErrMultipleOneOf("inputs.resources", "resources.inputs") - } - sink.Resources.Inputs = make([]v1beta1.TaskResourceBinding, len(trs.Inputs.Resources)) - for i, resource := range trs.Inputs.Resources { - sink.Resources.Inputs[i] = v1beta1.TaskResourceBinding{ - PipelineResourceBinding: v1beta1.PipelineResourceBinding{ - Name: resource.Name, - ResourceRef: resource.ResourceRef, - ResourceSpec: resource.ResourceSpec, - }, - Paths: resource.Paths, - } - } - } - } - - if trs.Outputs != nil && len(trs.Outputs.Resources) > 0 { - if sink.Resources == nil { - sink.Resources = &v1beta1.TaskRunResources{} - } - if len(trs.Outputs.Resources) > 0 && trs.Resources != nil && len(trs.Resources.Outputs) > 0 { - // This shouldn't happen as it shouldn't pass validation but just in case - return apis.ErrMultipleOneOf("outputs.resources", "resources.outputs") - } - sink.Resources.Outputs = make([]v1beta1.TaskResourceBinding, len(trs.Outputs.Resources)) - for i, resource := range trs.Outputs.Resources { - sink.Resources.Outputs[i] = v1beta1.TaskResourceBinding{ - PipelineResourceBinding: v1beta1.PipelineResourceBinding{ - Name: resource.Name, - ResourceRef: resource.ResourceRef, - ResourceSpec: resource.ResourceSpec, - }, - Paths: resource.Paths, - } - } - } - return nil -} - -// ConvertFrom implements api.Convertible -func (tr *TaskRun) ConvertFrom(ctx context.Context, obj apis.Convertible) error { - switch source := obj.(type) { - case 
*v1beta1.TaskRun: - tr.ObjectMeta = source.ObjectMeta - if err := tr.Spec.ConvertFrom(ctx, &source.Spec); err != nil { - return err - } - tr.Status = source.Status - return nil - default: - return fmt.Errorf("unknown version, got: %T", tr) - } -} - -// ConvertFrom implements api.Convertible -func (trs *TaskRunSpec) ConvertFrom(ctx context.Context, source *v1beta1.TaskRunSpec) error { - trs.ServiceAccountName = source.ServiceAccountName - trs.TaskRef = source.TaskRef - if source.TaskSpec != nil { - trs.TaskSpec = &TaskSpec{} - if err := trs.TaskSpec.ConvertFrom(ctx, source.TaskSpec); err != nil { - return err - } - } - trs.Status = source.Status - trs.Timeout = source.Timeout - trs.PodTemplate = source.PodTemplate - trs.Workspaces = source.Workspaces - trs.Params = source.Params - trs.Resources = source.Resources - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_defaults.go deleted file mode 100644 index 871703ca84..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_defaults.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "time" - - "github.com/tektoncd/pipeline/pkg/apis/config" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "knative.dev/pkg/apis" -) - -var _ apis.Defaultable = (*TaskRun)(nil) - -// ManagedByLabelKey is the label key used to mark what is managing this resource -const ManagedByLabelKey = "app.kubernetes.io/managed-by" - -// SetDefaults implements apis.Defaultable -func (tr *TaskRun) SetDefaults(ctx context.Context) { - ctx = apis.WithinParent(ctx, tr.ObjectMeta) - tr.Spec.SetDefaults(apis.WithinSpec(ctx)) - - // If the TaskRun doesn't have a managed-by label, apply the default - // specified in the config. - cfg := config.FromContextOrDefaults(ctx) - if tr.ObjectMeta.Labels == nil { - tr.ObjectMeta.Labels = map[string]string{} - } - if _, found := tr.ObjectMeta.Labels[ManagedByLabelKey]; !found { - tr.ObjectMeta.Labels[ManagedByLabelKey] = cfg.Defaults.DefaultManagedByLabelValue - } -} - -// SetDefaults implements apis.Defaultable -func (trs *TaskRunSpec) SetDefaults(ctx context.Context) { - cfg := config.FromContextOrDefaults(ctx) - if trs.TaskRef != nil && trs.TaskRef.Kind == "" { - trs.TaskRef.Kind = NamespacedTaskKind - } - - if trs.Timeout == nil { - trs.Timeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute} - } - - defaultSA := cfg.Defaults.DefaultServiceAccount - if trs.ServiceAccountName == "" && defaultSA != "" { - trs.ServiceAccountName = defaultSA - } - - defaultPodTemplate := cfg.Defaults.DefaultPodTemplate - if trs.PodTemplate == nil { - trs.PodTemplate = defaultPodTemplate - } - - // If this taskrun has an embedded task, apply the usual task defaults - if trs.TaskSpec != nil { - trs.TaskSpec.SetDefaults(ctx) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go deleted file mode 100644 index 6f6af8cb47..0000000000 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go +++ /dev/null @@ -1,266 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "fmt" - "time" - - apisconfig "github.com/tektoncd/pipeline/pkg/apis/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/clock" - "knative.dev/pkg/apis" -) - -// TaskRunSpec defines the desired state of TaskRun -type TaskRunSpec struct { - // +optional - ServiceAccountName string `json:"serviceAccountName"` - // no more than one of the TaskRef and TaskSpec may be specified. - // +optional - TaskRef *TaskRef `json:"taskRef,omitempty"` - // +optional - TaskSpec *TaskSpec `json:"taskSpec,omitempty"` - // Used for cancelling a taskrun (and maybe more later on) - // +optional - Status TaskRunSpecStatus `json:"status,omitempty"` - // Time after which the build times out. Defaults to 10 minutes. - // Specified build timeout should be less than 24h. 
- // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration - // +optional - Timeout *metav1.Duration `json:"timeout,omitempty"` - // PodTemplate holds pod specific configuration - // +optional - PodTemplate *PodTemplate `json:"podTemplate,omitempty"` - // Workspaces is a list of WorkspaceBindings from volumes to workspaces. - // +optional - Workspaces []WorkspaceBinding `json:"workspaces,omitempty"` - // From v1beta1 - // +optional - Params []Param `json:"params,omitempty"` - // +optional - Resources *v1beta1.TaskRunResources `json:"resources,omitempty"` - // Deprecated - // +optional - Inputs *TaskRunInputs `json:"inputs,omitempty"` - // +optional - Outputs *TaskRunOutputs `json:"outputs,omitempty"` -} - -// TaskRunSpecStatus defines the taskrun spec status the user can provide -type TaskRunSpecStatus = v1beta1.TaskRunSpecStatus - -const ( - // TaskRunSpecStatusCancelled indicates that the user wants to cancel the task, - // if not already cancelled or terminated - TaskRunSpecStatusCancelled = v1beta1.TaskRunSpecStatusCancelled - - // TaskRunReasonCancelled indicates that the TaskRun has been cancelled - // because it was requested so by the user - TaskRunReasonCancelled = v1beta1.TaskRunSpecStatusCancelled -) - -// TaskRunInputs holds the input values that this task was invoked with. -type TaskRunInputs struct { - // +optional - Resources []TaskResourceBinding `json:"resources,omitempty"` - // +optional - Params []Param `json:"params,omitempty"` -} - -// TaskResourceBinding points to the PipelineResource that -// will be used for the Task input or output called Name. -type TaskResourceBinding = v1beta1.TaskResourceBinding - -// TaskRunOutputs holds the output values that this task was invoked with. 
-type TaskRunOutputs struct { - // +optional - Resources []TaskResourceBinding `json:"resources,omitempty"` -} - -// TaskRunStatus defines the observed state of TaskRun -type TaskRunStatus = v1beta1.TaskRunStatus - -// TaskRunStatusFields holds the fields of TaskRun's status. This is defined -// separately and inlined so that other types can readily consume these fields -// via duck typing. -type TaskRunStatusFields = v1beta1.TaskRunStatusFields - -// TaskRunResult used to describe the results of a task -type TaskRunResult = v1beta1.TaskRunResult - -// StepState reports the results of running a step in the Task. -type StepState = v1beta1.StepState - -// SidecarState reports the results of sidecar in the Task. -type SidecarState = v1beta1.SidecarState - -// CloudEventDelivery is the target of a cloud event along with the state of -// delivery. -type CloudEventDelivery = v1beta1.CloudEventDelivery - -// CloudEventCondition is a string that represents the condition of the event. -type CloudEventCondition = v1beta1.CloudEventCondition - -const ( - // CloudEventConditionUnknown means that the condition for the event to be - // triggered was not met yet, or we don't know the state yet. - CloudEventConditionUnknown CloudEventCondition = v1beta1.CloudEventConditionUnknown - // CloudEventConditionSent means that the event was sent successfully - CloudEventConditionSent CloudEventCondition = v1beta1.CloudEventConditionSent - // CloudEventConditionFailed means that there was one or more attempts to - // send the event, and none was successful so far. - CloudEventConditionFailed CloudEventCondition = v1beta1.CloudEventConditionFailed -) - -// CloudEventDeliveryState reports the state of a cloud event to be sent. -type CloudEventDeliveryState = v1beta1.CloudEventDeliveryState - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TaskRun represents a single execution of a Task. 
TaskRuns are how the steps -// specified in a Task are executed; they specify the parameters and resources -// used to run the steps in a Task. -// -// +k8s:openapi-gen=true -type TaskRun struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +optional - Spec TaskRunSpec `json:"spec,omitempty"` - // +optional - Status TaskRunStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TaskRunList contains a list of TaskRun -type TaskRunList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []TaskRun `json:"items"` -} - -// GetGroupVersionKind implements kmeta.OwnerRefable. -func (*TaskRun) GetGroupVersionKind() schema.GroupVersionKind { - return SchemeGroupVersion.WithKind(pipeline.TaskRunControllerName) -} - -// GetPipelineRunPVCName for taskrun gets pipelinerun -func (tr *TaskRun) GetPipelineRunPVCName() string { - if tr == nil { - return "" - } - for _, ref := range tr.GetOwnerReferences() { - if ref.Kind == pipeline.PipelineRunControllerName { - return fmt.Sprintf("%s-pvc", ref.Name) - } - } - return "" -} - -// HasPipelineRunOwnerReference returns true of TaskRun has -// owner reference of type PipelineRun -func (tr *TaskRun) HasPipelineRunOwnerReference() bool { - for _, ref := range tr.GetOwnerReferences() { - if ref.Kind == pipeline.PipelineRunControllerName { - return true - } - } - return false -} - -// IsDone returns true if the TaskRun's status indicates that it is done. -func (tr *TaskRun) IsDone() bool { - return !tr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() -} - -// HasStarted function check whether taskrun has valid start time set in its status -func (tr *TaskRun) HasStarted() bool { - return tr.Status.StartTime != nil && !tr.Status.StartTime.IsZero() -} - -// IsSuccessful returns true if the TaskRun's status indicates that it is done. 
-func (tr *TaskRun) IsSuccessful() bool { - return tr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() -} - -// IsCancelled returns true if the TaskRun's spec status is set to Cancelled state -func (tr *TaskRun) IsCancelled() bool { - return tr.Spec.Status == TaskRunSpecStatusCancelled -} - -// HasTimedOut returns true if the TaskRun runtime is beyond the allowed timeout -func (tr *TaskRun) HasTimedOut(c clock.PassiveClock) bool { - if tr.Status.StartTime.IsZero() { - return false - } - timeout := tr.GetTimeout() - // If timeout is set to 0 or defaulted to 0, there is no timeout. - if timeout == apisconfig.NoTimeoutDuration { - return false - } - runtime := c.Since(tr.Status.StartTime.Time) - return runtime > timeout -} - -// GetTimeout returns the timeout for the TaskRun, or the default if not specified -func (tr *TaskRun) GetTimeout() time.Duration { - // Use the platform default is no timeout is set - if tr.Spec.Timeout == nil { - return apisconfig.DefaultTimeoutMinutes * time.Minute - } - return tr.Spec.Timeout.Duration -} - -// GetRunKey return the taskrun key for timeout handler map -func (tr *TaskRun) GetRunKey() string { - // The address of the pointer is a threadsafe unique identifier for the taskrun - return fmt.Sprintf("%s/%p", "TaskRun", tr) -} - -// IsPartOfPipeline return true if TaskRun is a part of a Pipeline. 
-// It also return the name of Pipeline and PipelineRun -func (tr *TaskRun) IsPartOfPipeline() (bool, string, string) { - if tr == nil || len(tr.Labels) == 0 { - return false, "", "" - } - - if pl, ok := tr.Labels[pipeline.PipelineLabelKey]; ok { - return true, pl, tr.Labels[pipeline.PipelineRunLabelKey] - } - - return false, "", "" -} - -// HasVolumeClaimTemplate returns true if TaskRun contains volumeClaimTemplates that is -// used for creating PersistentVolumeClaims with an OwnerReference for each run -func (tr *TaskRun) HasVolumeClaimTemplate() bool { - for _, ws := range tr.Spec.Workspaces { - if ws.VolumeClaimTemplate != nil { - return true - } - } - return false -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go deleted file mode 100644 index a898c21d6d..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "fmt" - "strings" - - "github.com/tektoncd/pipeline/pkg/apis/validate" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/util/sets" - "knative.dev/pkg/apis" -) - -var _ apis.Validatable = (*TaskRun)(nil) - -// Validate taskrun -func (tr *TaskRun) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(tr.GetObjectMeta()).ViaField("metadata"); err != nil { - return err - } - if apis.IsInDelete(ctx) { - return nil - } - return tr.Spec.Validate(ctx) -} - -// Validate taskrun spec -func (ts *TaskRunSpec) Validate(ctx context.Context) *apis.FieldError { - if equality.Semantic.DeepEqual(ts, &TaskRunSpec{}) { - return apis.ErrMissingField("spec") - } - - // can't have both taskRef and taskSpec at the same time - if (ts.TaskRef != nil && ts.TaskRef.Name != "") && ts.TaskSpec != nil { - return apis.ErrDisallowedFields("spec.taskref", "spec.taskspec") - } - - // Check that one of TaskRef and TaskSpec is present - if (ts.TaskRef == nil || (ts.TaskRef != nil && ts.TaskRef.Name == "")) && ts.TaskSpec == nil { - return apis.ErrMissingField("spec.taskref.name", "spec.taskspec") - } - - // Validate TaskSpec if it's present - if ts.TaskSpec != nil { - if err := ts.TaskSpec.Validate(ctx); err != nil { - return err - } - } - - // Deprecated - // check for input resources - if ts.Inputs != nil { - if err := ts.Inputs.Validate(ctx, "spec.Inputs"); err != nil { - return err - } - } - - // Deprecated - // check for output resources - if ts.Outputs != nil { - if err := ts.Outputs.Validate(ctx, "spec.Outputs"); err != nil { - return err - } - } - - // Validate Resources - if err := ts.Resources.Validate(ctx); err != nil { - return err - } - - if err := validateWorkspaceBindings(ctx, ts.Workspaces); err != nil { - return err - } - if err := validateParameters("spec.inputs.params", ts.Params); err != nil { - return err - } - - if ts.Timeout != nil { - // timeout should be a valid duration of at 
least 0. - if ts.Timeout.Duration < 0 { - return apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ts.Timeout.Duration.String()), "spec.timeout") - } - } - - return nil -} - -// Validate implements apis.Validatable -func (i TaskRunInputs) Validate(ctx context.Context, path string) *apis.FieldError { - if err := validatePipelineResources(ctx, i.Resources, fmt.Sprintf("%s.Resources.Name", path)); err != nil { - return err - } - return validateParameters("spec.inputs.params", i.Params) -} - -// Validate implements apis.Validatable -func (o TaskRunOutputs) Validate(ctx context.Context, path string) *apis.FieldError { - return validatePipelineResources(ctx, o.Resources, fmt.Sprintf("%s.Resources.Name", path)) -} - -// validateWorkspaceBindings makes sure the volumes provided for the Task's declared workspaces make sense. -func validateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) *apis.FieldError { - seen := sets.NewString() - for _, w := range wb { - if seen.Has(w.Name) { - return apis.ErrMultipleOneOf("spec.workspaces.name") - } - seen.Insert(w.Name) - - if err := w.Validate(ctx).ViaField("workspace"); err != nil { - return err - } - } - - return nil -} - -// validatePipelineResources validates that -// 1. resource is not declared more than once -// 2. if both resource reference and resource spec is defined at the same time -// 3. at least resource ref or resource spec is defined -func validatePipelineResources(ctx context.Context, resources []TaskResourceBinding, path string) *apis.FieldError { - encountered := sets.NewString() - for _, r := range resources { - // We should provide only one binding for each resource required by the Task. 
- name := strings.ToLower(r.Name) - if encountered.Has(strings.ToLower(name)) { - return apis.ErrMultipleOneOf(path) - } - encountered.Insert(name) - // Check that both resource ref and resource Spec are not present - if r.ResourceRef != nil && r.ResourceSpec != nil { - return apis.ErrDisallowedFields(fmt.Sprintf("%s.ResourceRef", path), fmt.Sprintf("%s.ResourceSpec", path)) - } - // Check that one of resource ref and resource Spec is present - if (r.ResourceRef == nil || r.ResourceRef.Name == "") && r.ResourceSpec == nil { - return apis.ErrMissingField(fmt.Sprintf("%s.ResourceRef", path), fmt.Sprintf("%s.ResourceSpec", path)) - } - if r.ResourceSpec != nil && r.ResourceSpec.Validate(ctx) != nil { - return r.ResourceSpec.Validate(ctx) - } - } - - return nil -} - -// TODO(jasonhall): Share this with v1beta1/taskrun_validation.go -func validateParameters(path string, params []Param) *apis.FieldError { - // Template must not duplicate parameter names. - seen := sets.NewString() - for _, p := range params { - if seen.Has(strings.ToLower(p.Name)) { - return apis.ErrMultipleOneOf(path) - } - seen.Insert(p.Name) - } - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_types.go deleted file mode 100644 index 4dfd2c5a67..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_types.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" -) - -// WorkspaceDeclaration is a declaration of a volume that a Task requires. -type WorkspaceDeclaration = v1beta1.WorkspaceDeclaration - -// WorkspaceBinding maps a Task's declared workspace to a Volume. -type WorkspaceBinding = v1beta1.WorkspaceBinding - -// PipelineWorkspaceDeclaration creates a named slot in a Pipeline that a PipelineRun -// is expected to populate with a workspace binding. -type PipelineWorkspaceDeclaration = v1beta1.PipelineWorkspaceDeclaration - -// WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be -// mapped to a task's declared workspace. -type WorkspacePipelineTaskBinding = v1beta1.WorkspacePipelineTaskBinding diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go index f537fa4b5b..47123e63dd 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go @@ -24,608 +24,25 @@ package v1alpha1 import ( pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterTask) DeepCopyInto(out *ClusterTask) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTask. -func (in *ClusterTask) DeepCopy() *ClusterTask { - if in == nil { - return nil - } - out := new(ClusterTask) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterTask) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterTaskList) DeepCopyInto(out *ClusterTaskList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterTask, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTaskList. -func (in *ClusterTaskList) DeepCopy() *ClusterTaskList { - if in == nil { - return nil - } - out := new(ClusterTaskList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterTaskList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Condition) DeepCopyInto(out *Condition) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. -func (in *Condition) DeepCopy() *Condition { - if in == nil { - return nil - } - out := new(Condition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Condition) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConditionCheck) DeepCopyInto(out *ConditionCheck) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionCheck. -func (in *ConditionCheck) DeepCopy() *ConditionCheck { - if in == nil { - return nil - } - out := new(ConditionCheck) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConditionList) DeepCopyInto(out *ConditionList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionList. 
-func (in *ConditionList) DeepCopy() *ConditionList { - if in == nil { - return nil - } - out := new(ConditionList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConditionList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConditionSpec) DeepCopyInto(out *ConditionSpec) { - *out = *in - in.Check.DeepCopyInto(&out.Check) - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.ParamSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]resourcev1alpha1.ResourceDeclaration, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSpec. -func (in *ConditionSpec) DeepCopy() *ConditionSpec { - if in == nil { - return nil - } - out := new(ConditionSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EmbeddedRunSpec) DeepCopyInto(out *EmbeddedRunSpec) { - *out = *in - out.TypeMeta = in.TypeMeta - in.Metadata.DeepCopyInto(&out.Metadata) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedRunSpec. -func (in *EmbeddedRunSpec) DeepCopy() *EmbeddedRunSpec { - if in == nil { - return nil - } - out := new(EmbeddedRunSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Inputs) DeepCopyInto(out *Inputs) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]v1beta1.TaskResource, len(*in)) - copy(*out, *in) - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.ParamSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Inputs. -func (in *Inputs) DeepCopy() *Inputs { - if in == nil { - return nil - } - out := new(Inputs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Outputs) DeepCopyInto(out *Outputs) { - *out = *in - if in.Results != nil { - in, out := &in.Results, &out.Results - *out = make([]TestResult, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]v1beta1.TaskResource, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Outputs. -func (in *Outputs) DeepCopy() *Outputs { - if in == nil { - return nil - } - out := new(Outputs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Pipeline) DeepCopyInto(out *Pipeline) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(PipelineStatus) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline. 
-func (in *Pipeline) DeepCopy() *Pipeline { - if in == nil { - return nil - } - out := new(Pipeline) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Pipeline) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineList) DeepCopyInto(out *PipelineList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pipeline, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineList. -func (in *PipelineList) DeepCopy() *PipelineList { - if in == nil { - return nil - } - out := new(PipelineList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PipelineList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineRun) DeepCopyInto(out *PipelineRun) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRun. 
-func (in *PipelineRun) DeepCopy() *PipelineRun { - if in == nil { - return nil - } - out := new(PipelineRun) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PipelineRun) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineRunList) DeepCopyInto(out *PipelineRunList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PipelineRun, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunList. -func (in *PipelineRunList) DeepCopy() *PipelineRunList { - if in == nil { - return nil - } - out := new(PipelineRunList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PipelineRunList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { - *out = *in - if in.PipelineRef != nil { - in, out := &in.PipelineRef, &out.PipelineRef - *out = new(v1beta1.PipelineRef) - (*in).DeepCopyInto(*out) - } - if in.PipelineSpec != nil { - in, out := &in.PipelineSpec, &out.PipelineSpec - *out = new(PipelineSpec) - (*in).DeepCopyInto(*out) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]v1beta1.PipelineResourceBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.Param, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ServiceAccountNames != nil { - in, out := &in.ServiceAccountNames, &out.ServiceAccountNames - *out = make([]v1beta1.PipelineRunSpecServiceAccountName, len(*in)) - copy(*out, *in) - } - if in.Timeout != nil { - in, out := &in.Timeout, &out.Timeout - *out = new(v1.Duration) - **out = **in - } - if in.PodTemplate != nil { - in, out := &in.PodTemplate, &out.PodTemplate - *out = new(pod.Template) - (*in).DeepCopyInto(*out) - } - if in.Workspaces != nil { - in, out := &in.Workspaces, &out.Workspaces - *out = make([]v1beta1.WorkspaceBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.TaskRunSpecs != nil { - in, out := &in.TaskRunSpecs, &out.TaskRunSpecs - *out = make([]PipelineTaskRunSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunSpec. -func (in *PipelineRunSpec) DeepCopy() *PipelineRunSpec { - if in == nil { - return nil - } - out := new(PipelineRunSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]v1beta1.PipelineDeclaredResource, len(*in)) - copy(*out, *in) - } - if in.Tasks != nil { - in, out := &in.Tasks, &out.Tasks - *out = make([]PipelineTask, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.ParamSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Workspaces != nil { - in, out := &in.Workspaces, &out.Workspaces - *out = make([]v1beta1.PipelineWorkspaceDeclaration, len(*in)) - copy(*out, *in) - } - if in.Results != nil { - in, out := &in.Results, &out.Results - *out = make([]v1beta1.PipelineResult, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec. -func (in *PipelineSpec) DeepCopy() *PipelineSpec { - if in == nil { - return nil - } - out := new(PipelineSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineStatus) DeepCopyInto(out *PipelineStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStatus. -func (in *PipelineStatus) DeepCopy() *PipelineStatus { - if in == nil { - return nil - } - out := new(PipelineStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PipelineTask) DeepCopyInto(out *PipelineTask) { - *out = *in - if in.TaskRef != nil { - in, out := &in.TaskRef, &out.TaskRef - *out = new(v1beta1.TaskRef) - (*in).DeepCopyInto(*out) - } - if in.TaskSpec != nil { - in, out := &in.TaskSpec, &out.TaskSpec - *out = new(TaskSpec) - (*in).DeepCopyInto(*out) - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1beta1.PipelineTaskCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RunAfter != nil { - in, out := &in.RunAfter, &out.RunAfter - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(v1beta1.PipelineTaskResources) - (*in).DeepCopyInto(*out) - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.Param, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Workspaces != nil { - in, out := &in.Workspaces, &out.Workspaces - *out = make([]v1beta1.WorkspacePipelineTaskBinding, len(*in)) - copy(*out, *in) - } - if in.Timeout != nil { - in, out := &in.Timeout, &out.Timeout - *out = new(v1.Duration) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTask. -func (in *PipelineTask) DeepCopy() *PipelineTask { - if in == nil { - return nil - } - out := new(PipelineTask) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in PipelineTaskList) DeepCopyInto(out *PipelineTaskList) { - { - in := &in - *out = make(PipelineTaskList, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskList. 
-func (in PipelineTaskList) DeepCopy() PipelineTaskList { - if in == nil { - return nil - } - out := new(PipelineTaskList) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineTaskRunSpec) DeepCopyInto(out *PipelineTaskRunSpec) { +func (in *EmbeddedRunSpec) DeepCopyInto(out *EmbeddedRunSpec) { *out = *in - if in.TaskPodTemplate != nil { - in, out := &in.TaskPodTemplate, &out.TaskPodTemplate - *out = new(pod.Template) - (*in).DeepCopyInto(*out) - } + out.TypeMeta = in.TypeMeta + in.Metadata.DeepCopyInto(&out.Metadata) + in.Spec.DeepCopyInto(&out.Spec) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskRunSpec. -func (in *PipelineTaskRunSpec) DeepCopy() *PipelineTaskRunSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedRunSpec. +func (in *EmbeddedRunSpec) DeepCopy() *EmbeddedRunSpec { if in == nil { return nil } - out := new(PipelineTaskRunSpec) + out := new(EmbeddedRunSpec) in.DeepCopyInto(out) return out } @@ -740,285 +157,3 @@ func (in *RunSpec) DeepCopy() *RunSpec { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Task) DeepCopyInto(out *Task) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Task. -func (in *Task) DeepCopy() *Task { - if in == nil { - return nil - } - out := new(Task) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *Task) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TaskList) DeepCopyInto(out *TaskList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Task, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskList. -func (in *TaskList) DeepCopy() *TaskList { - if in == nil { - return nil - } - out := new(TaskList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TaskList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TaskRun) DeepCopyInto(out *TaskRun) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRun. -func (in *TaskRun) DeepCopy() *TaskRun { - if in == nil { - return nil - } - out := new(TaskRun) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TaskRun) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TaskRunInputs) DeepCopyInto(out *TaskRunInputs) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]v1beta1.TaskResourceBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.Param, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunInputs. -func (in *TaskRunInputs) DeepCopy() *TaskRunInputs { - if in == nil { - return nil - } - out := new(TaskRunInputs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TaskRunList) DeepCopyInto(out *TaskRunList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]TaskRun, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunList. -func (in *TaskRunList) DeepCopy() *TaskRunList { - if in == nil { - return nil - } - out := new(TaskRunList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TaskRunList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TaskRunOutputs) DeepCopyInto(out *TaskRunOutputs) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]v1beta1.TaskResourceBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunOutputs. -func (in *TaskRunOutputs) DeepCopy() *TaskRunOutputs { - if in == nil { - return nil - } - out := new(TaskRunOutputs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { - *out = *in - if in.TaskRef != nil { - in, out := &in.TaskRef, &out.TaskRef - *out = new(v1beta1.TaskRef) - (*in).DeepCopyInto(*out) - } - if in.TaskSpec != nil { - in, out := &in.TaskSpec, &out.TaskSpec - *out = new(TaskSpec) - (*in).DeepCopyInto(*out) - } - if in.Timeout != nil { - in, out := &in.Timeout, &out.Timeout - *out = new(v1.Duration) - **out = **in - } - if in.PodTemplate != nil { - in, out := &in.PodTemplate, &out.PodTemplate - *out = new(pod.Template) - (*in).DeepCopyInto(*out) - } - if in.Workspaces != nil { - in, out := &in.Workspaces, &out.Workspaces - *out = make([]v1beta1.WorkspaceBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.Param, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(v1beta1.TaskRunResources) - (*in).DeepCopyInto(*out) - } - if in.Inputs != nil { - in, out := &in.Inputs, &out.Inputs - *out = new(TaskRunInputs) - (*in).DeepCopyInto(*out) - } - if in.Outputs != nil { - in, out := &in.Outputs, &out.Outputs - *out = new(TaskRunOutputs) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new TaskRunSpec. -func (in *TaskRunSpec) DeepCopy() *TaskRunSpec { - if in == nil { - return nil - } - out := new(TaskRunSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { - *out = *in - in.TaskSpec.DeepCopyInto(&out.TaskSpec) - if in.Inputs != nil { - in, out := &in.Inputs, &out.Inputs - *out = new(Inputs) - (*in).DeepCopyInto(*out) - } - if in.Outputs != nil { - in, out := &in.Outputs, &out.Outputs - *out = new(Outputs) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskSpec. -func (in *TaskSpec) DeepCopy() *TaskSpec { - if in == nil { - return nil - } - out := new(TaskSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TestResult) DeepCopyInto(out *TestResult) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestResult. -func (in *TestResult) DeepCopy() *TestResult { - if in == nil { - return nil - } - out := new(TestResult) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/condition_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/condition_types.go deleted file mode 100644 index 6139a33122..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/condition_types.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "knative.dev/pkg/apis" - duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" -) - -// ConditionCheck represents a single evaluation of a Condition step. -type ConditionCheck TaskRun - -// NewConditionCheck creates a new ConditionCheck from a given TaskRun -func NewConditionCheck(tr *TaskRun) *ConditionCheck { - if tr == nil { - return nil - } - - cc := ConditionCheck(*tr) - return &cc -} - -// IsDone returns true if the ConditionCheck's status indicates that it is done. -func (cc *ConditionCheck) IsDone() bool { - return !cc.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() -} - -// IsSuccessful returns true if the ConditionCheck's status indicates that it is done. -func (cc *ConditionCheck) IsSuccessful() bool { - return cc.Status.GetCondition(apis.ConditionSucceeded).IsTrue() -} - -// ConditionCheckStatus defines the observed state of ConditionCheck -type ConditionCheckStatus struct { - duckv1beta1.Status `json:",inline"` - - // ConditionCheckStatusFields inlines the status fields. - ConditionCheckStatusFields `json:",inline"` -} - -// ConditionCheckStatusFields holds the fields of ConfigurationCheck's status. -// This is defined separately and inlined so that other types can readily -// consume these fields via duck typing. -type ConditionCheckStatusFields struct { - // PodName is the name of the pod responsible for executing this condition check. - PodName string `json:"podName"` - - // StartTime is the time the check is actually started. 
- // +optional - StartTime *metav1.Time `json:"startTime,omitempty"` - - // CompletionTime is the time the check pod completed. - // +optional - CompletionTime *metav1.Time `json:"completionTime,omitempty"` - - // Check describes the state of the check container. - // +optional - Check corev1.ContainerState `json:"check,omitempty"` -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_replacements.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_replacements.go deleted file mode 100644 index 30af4e11e8..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_replacements.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - Copyright 2019 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v1beta1 - -import ( - "github.com/tektoncd/pipeline/pkg/substitution" - corev1 "k8s.io/api/core/v1" -) - -// applyContainerReplacements applies variable interpolation on a Container (subset of a Step). 
-func applyContainerReplacements(step *corev1.Container, stringReplacements map[string]string, arrayReplacements map[string][]string) { - step.Name = substitution.ApplyReplacements(step.Name, stringReplacements) - step.Image = substitution.ApplyReplacements(step.Image, stringReplacements) - step.ImagePullPolicy = corev1.PullPolicy(substitution.ApplyReplacements(string(step.ImagePullPolicy), stringReplacements)) - - // Use ApplyArrayReplacements here, as additional args may be added via an array parameter. - var newArgs []string - for _, a := range step.Args { - newArgs = append(newArgs, substitution.ApplyArrayReplacements(a, stringReplacements, arrayReplacements)...) - } - step.Args = newArgs - - for ie, e := range step.Env { - step.Env[ie].Value = substitution.ApplyReplacements(e.Value, stringReplacements) - if step.Env[ie].ValueFrom != nil { - if e.ValueFrom.SecretKeyRef != nil { - step.Env[ie].ValueFrom.SecretKeyRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.ValueFrom.SecretKeyRef.LocalObjectReference.Name, stringReplacements) - step.Env[ie].ValueFrom.SecretKeyRef.Key = substitution.ApplyReplacements(e.ValueFrom.SecretKeyRef.Key, stringReplacements) - } - if e.ValueFrom.ConfigMapKeyRef != nil { - step.Env[ie].ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name, stringReplacements) - step.Env[ie].ValueFrom.ConfigMapKeyRef.Key = substitution.ApplyReplacements(e.ValueFrom.ConfigMapKeyRef.Key, stringReplacements) - } - } - } - - for ie, e := range step.EnvFrom { - step.EnvFrom[ie].Prefix = substitution.ApplyReplacements(e.Prefix, stringReplacements) - if e.ConfigMapRef != nil { - step.EnvFrom[ie].ConfigMapRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.ConfigMapRef.LocalObjectReference.Name, stringReplacements) - } - if e.SecretRef != nil { - step.EnvFrom[ie].SecretRef.LocalObjectReference.Name = 
substitution.ApplyReplacements(e.SecretRef.LocalObjectReference.Name, stringReplacements) - } - } - step.WorkingDir = substitution.ApplyReplacements(step.WorkingDir, stringReplacements) - - // Use ApplyArrayReplacements here, as additional commands may be added via an array parameter. - var newCommand []string - for _, c := range step.Command { - newCommand = append(newCommand, substitution.ApplyArrayReplacements(c, stringReplacements, arrayReplacements)...) - } - step.Command = newCommand - - for iv, v := range step.VolumeMounts { - step.VolumeMounts[iv].Name = substitution.ApplyReplacements(v.Name, stringReplacements) - step.VolumeMounts[iv].MountPath = substitution.ApplyReplacements(v.MountPath, stringReplacements) - step.VolumeMounts[iv].SubPath = substitution.ApplyReplacements(v.SubPath, stringReplacements) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_types.go new file mode 100644 index 0000000000..c2ffbe37bd --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_types.go @@ -0,0 +1,752 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Step runs a subcomponent of a Task +type Step struct { + + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. 
+ // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // Deprecated. This field will be removed in a future release. + // List of ports to expose from the container. 
Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + DeprecatedPorts []corev1.ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + // +listType=atomic + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=atomic + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. 
+ // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=atomic + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + // +listType=atomic + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Deprecated. This field will be removed in a future release. + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + DeprecatedLivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Deprecated. This field will be removed in a future release. + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + DeprecatedReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // Deprecated. This field will be removed in a future release. + // DeprecatedStartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + DeprecatedStartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Deprecated. This field will be removed in a future release. + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + DeprecatedLifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Deprecated. This field will be removed in a future release. + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + DeprecatedTerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Deprecated. This field will be removed in a future release. + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + DeprecatedTerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. 
+ // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Deprecated. This field will be removed in a future release. + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + DeprecatedStdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Deprecated. This field will be removed in a future release. + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. 
+ // Default is false + // +optional + DeprecatedStdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Deprecated. This field will be removed in a future release. + // Whether this container should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + DeprecatedTTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` + + // Script is the contents of an executable file to execute. + // + // If Script is not empty, the Step cannot have an Command and the Args will be passed to the Script. + // +optional + Script string `json:"script,omitempty"` + + // Timeout is the time after which the step times out. Defaults to never. + // Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha" + // for this field to be supported. + // + // Workspaces is a list of workspaces from the Task that this Step wants + // exclusive access to. Adding a workspace to this list means that any + // other Step or Sidecar that does not also request this Workspace will + // not have access to it. + // +optional + // +listType=atomic + Workspaces []WorkspaceUsage `json:"workspaces,omitempty"` + + // OnError defines the exiting behavior of a container on error + // can be set to [ continue | stopAndFail ] + // stopAndFail indicates exit the taskRun if the container exits with non-zero exit code + // continue indicates continue executing the rest of the steps irrespective of the container exit code + OnError string `json:"onError,omitempty"` + + // Stores configuration for the stdout stream of the step. + // +optional + StdoutConfig *StepOutputConfig `json:"stdoutConfig,omitempty"` + // Stores configuration for the stderr stream of the step. 
+ // +optional + StderrConfig *StepOutputConfig `json:"stderrConfig,omitempty"` +} + +// StepOutputConfig stores configuration for a step output stream. +type StepOutputConfig struct { + // Path to duplicate stdout stream to on container's local filesystem. + // +optional + Path string `json:"path,omitempty"` +} + +// ToK8sContainer converts the Step to a Kubernetes Container struct +func (s *Step) ToK8sContainer() *corev1.Container { + return &corev1.Container{ + Name: s.Name, + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + Ports: s.DeprecatedPorts, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + LivenessProbe: s.DeprecatedLivenessProbe, + ReadinessProbe: s.DeprecatedReadinessProbe, + StartupProbe: s.DeprecatedStartupProbe, + Lifecycle: s.DeprecatedLifecycle, + TerminationMessagePath: s.DeprecatedTerminationMessagePath, + TerminationMessagePolicy: s.DeprecatedTerminationMessagePolicy, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + Stdin: s.DeprecatedStdin, + StdinOnce: s.DeprecatedStdinOnce, + TTY: s.DeprecatedTTY, + } +} + +// SetContainerFields sets the fields of the Step to the values of the corresponding fields in the Container +func (s *Step) SetContainerFields(c corev1.Container) { + s.Name = c.Name + s.Image = c.Image + s.Command = c.Command + s.Args = c.Args + s.WorkingDir = c.WorkingDir + s.DeprecatedPorts = c.Ports + s.EnvFrom = c.EnvFrom + s.Env = c.Env + s.Resources = c.Resources + s.VolumeMounts = c.VolumeMounts + s.VolumeDevices = c.VolumeDevices + s.DeprecatedLivenessProbe = c.LivenessProbe + s.DeprecatedReadinessProbe = c.ReadinessProbe + s.DeprecatedStartupProbe = c.StartupProbe + s.DeprecatedLifecycle = c.Lifecycle + s.DeprecatedTerminationMessagePath = c.TerminationMessagePath + s.DeprecatedTerminationMessagePolicy = c.TerminationMessagePolicy + s.ImagePullPolicy = c.ImagePullPolicy + 
s.SecurityContext = c.SecurityContext + s.DeprecatedStdin = c.Stdin + s.DeprecatedStdinOnce = c.StdinOnce + s.DeprecatedTTY = c.TTY +} + +// StepTemplate is a template for a Step +type StepTemplate struct { + + // Deprecated. This field will be removed in a future release. + // DeprecatedName of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + DeprecatedName string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // Deprecated. This field will be removed in a future release. + // List of ports to expose from the container. Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + DeprecatedPorts []corev1.ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. 
+ // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + // +listType=atomic + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=atomic + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=atomic + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + // +listType=atomic + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Deprecated. This field will be removed in a future release. + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + DeprecatedLivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Deprecated. This field will be removed in a future release. 
+ // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + DeprecatedReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // Deprecated. This field will be removed in a future release. + // DeprecatedStartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + DeprecatedStartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Deprecated. This field will be removed in a future release. + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + DeprecatedLifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Deprecated. This field will be removed in a future release. + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. 
+ // +optional + DeprecatedTerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Deprecated. This field will be removed in a future release. + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + DeprecatedTerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Deprecated. This field will be removed in a future release. 
+ // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + DeprecatedStdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Deprecated. This field will be removed in a future release. + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + DeprecatedStdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Deprecated. This field will be removed in a future release. + // Whether this container should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. + // Default is false. 
+ // +optional + DeprecatedTTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` +} + +// SetContainerFields sets the fields of the Step to the values of the corresponding fields in the Container +func (s *StepTemplate) SetContainerFields(c corev1.Container) { + s.DeprecatedName = c.Name + s.Image = c.Image + s.Command = c.Command + s.Args = c.Args + s.WorkingDir = c.WorkingDir + s.DeprecatedPorts = c.Ports + s.EnvFrom = c.EnvFrom + s.Env = c.Env + s.Resources = c.Resources + s.VolumeMounts = c.VolumeMounts + s.VolumeDevices = c.VolumeDevices + s.DeprecatedLivenessProbe = c.LivenessProbe + s.DeprecatedReadinessProbe = c.ReadinessProbe + s.DeprecatedStartupProbe = c.StartupProbe + s.DeprecatedLifecycle = c.Lifecycle + s.DeprecatedTerminationMessagePath = c.TerminationMessagePath + s.DeprecatedTerminationMessagePolicy = c.TerminationMessagePolicy + s.ImagePullPolicy = c.ImagePullPolicy + s.SecurityContext = c.SecurityContext + s.DeprecatedStdin = c.Stdin + s.DeprecatedStdinOnce = c.StdinOnce + s.DeprecatedTTY = c.TTY +} + +// ToK8sContainer converts the StepTemplate to a Kubernetes Container struct +func (s *StepTemplate) ToK8sContainer() *corev1.Container { + return &corev1.Container{ + Name: s.DeprecatedName, + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + Ports: s.DeprecatedPorts, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + LivenessProbe: s.DeprecatedLivenessProbe, + ReadinessProbe: s.DeprecatedReadinessProbe, + StartupProbe: s.DeprecatedStartupProbe, + Lifecycle: s.DeprecatedLifecycle, + TerminationMessagePath: s.DeprecatedTerminationMessagePath, + TerminationMessagePolicy: s.DeprecatedTerminationMessagePolicy, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + Stdin: s.DeprecatedStdin, + StdinOnce: s.DeprecatedStdinOnce, + TTY: s.DeprecatedTTY, + } +} + +// Sidecar has nearly the same data structure as 
Step but does not have the ability to timeout. +type Sidecar struct { + + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. 
Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of ports to expose from the container. Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + Ports []corev1.ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + // +listType=atomic + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. 
+ // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=atomic + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=atomic + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + // +listType=atomic + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // StartupProbe indicates that the Pod has successfully initialized. 
+ // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + StartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. 
+ // +optional + TerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. 
If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` + + // Script is the contents of an executable file to execute. + // + // If Script is not empty, the Step cannot have an Command or Args. + // +optional + Script string `json:"script,omitempty"` + + // This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha" + // for this field to be supported. + // + // Workspaces is a list of workspaces from the Task that this Sidecar wants + // exclusive access to. Adding a workspace to this list means that any + // other Step or Sidecar that does not also request this Workspace will + // not have access to it. 
+ // +optional + // +listType=atomic + Workspaces []WorkspaceUsage `json:"workspaces,omitempty"` +} + +// ToK8sContainer converts the Sidecar to a Kubernetes Container struct +func (s *Sidecar) ToK8sContainer() *corev1.Container { + return &corev1.Container{ + Name: s.Name, + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + Ports: s.Ports, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + LivenessProbe: s.LivenessProbe, + ReadinessProbe: s.ReadinessProbe, + StartupProbe: s.StartupProbe, + Lifecycle: s.Lifecycle, + TerminationMessagePath: s.TerminationMessagePath, + TerminationMessagePolicy: s.TerminationMessagePolicy, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + Stdin: s.Stdin, + StdinOnce: s.StdinOnce, + TTY: s.TTY, + } +} + +// SetContainerFields sets the fields of the Sidecar to the values of the corresponding fields in the Container +func (s *Sidecar) SetContainerFields(c corev1.Container) { + s.Name = c.Name + s.Image = c.Image + s.Command = c.Command + s.Args = c.Args + s.WorkingDir = c.WorkingDir + s.Ports = c.Ports + s.EnvFrom = c.EnvFrom + s.Env = c.Env + s.Resources = c.Resources + s.VolumeMounts = c.VolumeMounts + s.VolumeDevices = c.VolumeDevices + s.LivenessProbe = c.LivenessProbe + s.ReadinessProbe = c.ReadinessProbe + s.StartupProbe = c.StartupProbe + s.Lifecycle = c.Lifecycle + s.TerminationMessagePath = c.TerminationMessagePath + s.TerminationMessagePolicy = c.TerminationMessagePolicy + s.ImagePullPolicy = c.ImagePullPolicy + s.SecurityContext = c.SecurityContext + s.Stdin = c.Stdin + s.StdinOnce = c.StdinOnce + s.TTY = c.TTY +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/merge.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/merge.go index f200c99e3e..335a43f777 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/merge.go +++ 
b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/merge.go @@ -19,6 +19,7 @@ package v1beta1 import ( "encoding/json" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/strategicpatch" ) @@ -35,19 +36,19 @@ type mergeData struct { // MergeStepsWithStepTemplate takes a possibly nil container template and a // list of steps, merging each of the steps with the container template, if // it's not nil, and returning the resulting list. -func MergeStepsWithStepTemplate(template *v1.Container, steps []Step) ([]Step, error) { +func MergeStepsWithStepTemplate(template *StepTemplate, steps []Step) ([]Step, error) { if template == nil { return steps, nil } - md, err := getMergeData(template, &v1.Container{}) + md, err := getMergeData(template.ToK8sContainer(), &corev1.Container{}) if err != nil { return nil, err } for i, s := range steps { - merged := v1.Container{} - err := mergeObjWithTemplateBytes(md, &s.Container, &merged) + merged := corev1.Container{} + err := mergeObjWithTemplateBytes(md, s.ToK8sContainer(), &merged) if err != nil { return nil, err } @@ -58,7 +59,9 @@ func MergeStepsWithStepTemplate(template *v1.Container, steps []Step) ([]Step, e } // Pass through original step Script, for later conversion. 
- steps[i] = Step{Container: merged, Script: s.Script, OnError: s.OnError, Timeout: s.Timeout} + newStep := Step{Script: s.Script, OnError: s.OnError, Timeout: s.Timeout, StdoutConfig: s.StdoutConfig, StderrConfig: s.StderrConfig} + newStep.SetContainerFields(merged) + steps[i] = newStep } return steps, nil } @@ -81,7 +84,7 @@ func MergeStepsWithOverrides(steps []Step, overrides []TaskRunStepOverride) ([]S if err != nil { return nil, err } - steps[i].Container.Resources = merged + steps[i].Resources = merged } return steps, nil } @@ -107,7 +110,7 @@ func MergeSidecarsWithOverrides(sidecars []Sidecar, overrides []TaskRunSidecarOv if err != nil { return nil, err } - sidecars[i].Container.Resources = merged + sidecars[i].Resources = merged } return sidecars, nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go index 3f55b20243..64b1040f24 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go @@ -30,92 +30,88 @@ import ( func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate": schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template": schema_pkg_apis_pipeline_pod_Template(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString": schema_pkg_apis_pipeline_v1beta1_ArrayOrString(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference": schema_pkg_apis_pipeline_v1beta1_ChildStatusReference(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery": schema_pkg_apis_pipeline_v1beta1_CloudEventDelivery(ref), - 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDeliveryState": schema_pkg_apis_pipeline_v1beta1_CloudEventDeliveryState(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ClusterTask": schema_pkg_apis_pipeline_v1beta1_ClusterTask(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ClusterTaskList": schema_pkg_apis_pipeline_v1beta1_ClusterTaskList(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConditionCheck": schema_pkg_apis_pipeline_v1beta1_ConditionCheck(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConditionCheckStatus": schema_pkg_apis_pipeline_v1beta1_ConditionCheckStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConditionCheckStatusFields": schema_pkg_apis_pipeline_v1beta1_ConditionCheckStatusFields(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask": schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.InternalTaskModifier": schema_pkg_apis_pipeline_v1beta1_InternalTaskModifier(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param": schema_pkg_apis_pipeline_v1beta1_Param(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec": schema_pkg_apis_pipeline_v1beta1_ParamSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Pipeline": schema_pkg_apis_pipeline_v1beta1_Pipeline(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineDeclaredResource": schema_pkg_apis_pipeline_v1beta1_PipelineDeclaredResource(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineList": schema_pkg_apis_pipeline_v1beta1_PipelineList(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRef": schema_pkg_apis_pipeline_v1beta1_PipelineRef(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceBinding": schema_pkg_apis_pipeline_v1beta1_PipelineResourceBinding(ref), - 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceRef": schema_pkg_apis_pipeline_v1beta1_PipelineResourceRef(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceResult": schema_pkg_apis_pipeline_v1beta1_PipelineResourceResult(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResult": schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRun": schema_pkg_apis_pipeline_v1beta1_PipelineRun(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunChildConditionCheckStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunChildConditionCheckStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunConditionCheckStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunConditionCheckStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunList": schema_pkg_apis_pipeline_v1beta1_PipelineRunList(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult": schema_pkg_apis_pipeline_v1beta1_PipelineRunResult(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunRunStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunSpec": schema_pkg_apis_pipeline_v1beta1_PipelineRunSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunSpecServiceAccountName": schema_pkg_apis_pipeline_v1beta1_PipelineRunSpecServiceAccountName(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunStatusFields": schema_pkg_apis_pipeline_v1beta1_PipelineRunStatusFields(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus": 
schema_pkg_apis_pipeline_v1beta1_PipelineRunTaskRunStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec": schema_pkg_apis_pipeline_v1beta1_PipelineSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTask": schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskCondition": schema_pkg_apis_pipeline_v1beta1_PipelineTaskCondition(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskInputResource": schema_pkg_apis_pipeline_v1beta1_PipelineTaskInputResource(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata": schema_pkg_apis_pipeline_v1beta1_PipelineTaskMetadata(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskOutputResource": schema_pkg_apis_pipeline_v1beta1_PipelineTaskOutputResource(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskParam": schema_pkg_apis_pipeline_v1beta1_PipelineTaskParam(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskResources": schema_pkg_apis_pipeline_v1beta1_PipelineTaskResources(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRun": schema_pkg_apis_pipeline_v1beta1_PipelineTaskRun(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRunSpec": schema_pkg_apis_pipeline_v1beta1_PipelineTaskRunSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineWorkspaceDeclaration": schema_pkg_apis_pipeline_v1beta1_PipelineWorkspaceDeclaration(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResolverParam": schema_pkg_apis_pipeline_v1beta1_ResolverParam(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResolverRef": schema_pkg_apis_pipeline_v1beta1_ResolverRef(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResultRef": schema_pkg_apis_pipeline_v1beta1_ResultRef(ref), - 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar": schema_pkg_apis_pipeline_v1beta1_Sidecar(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState": schema_pkg_apis_pipeline_v1beta1_SidecarState(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask": schema_pkg_apis_pipeline_v1beta1_SkippedTask(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step": schema_pkg_apis_pipeline_v1beta1_Step(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState": schema_pkg_apis_pipeline_v1beta1_StepState(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Task": schema_pkg_apis_pipeline_v1beta1_Task(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskList": schema_pkg_apis_pipeline_v1beta1_TaskList(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef": schema_pkg_apis_pipeline_v1beta1_TaskRef(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResource": schema_pkg_apis_pipeline_v1beta1_TaskResource(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResourceBinding": schema_pkg_apis_pipeline_v1beta1_TaskResourceBinding(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources": schema_pkg_apis_pipeline_v1beta1_TaskResources(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult": schema_pkg_apis_pipeline_v1beta1_TaskResult(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRun": schema_pkg_apis_pipeline_v1beta1_TaskRun(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunDebug": schema_pkg_apis_pipeline_v1beta1_TaskRunDebug(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunInputs": schema_pkg_apis_pipeline_v1beta1_TaskRunInputs(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunList": schema_pkg_apis_pipeline_v1beta1_TaskRunList(ref), - 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunOutputs": schema_pkg_apis_pipeline_v1beta1_TaskRunOutputs(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources": schema_pkg_apis_pipeline_v1beta1_TaskRunResources(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult": schema_pkg_apis_pipeline_v1beta1_TaskRunResult(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride": schema_pkg_apis_pipeline_v1beta1_TaskRunSidecarOverride(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSpec": schema_pkg_apis_pipeline_v1beta1_TaskRunSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus": schema_pkg_apis_pipeline_v1beta1_TaskRunStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatusFields": schema_pkg_apis_pipeline_v1beta1_TaskRunStatusFields(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride": schema_pkg_apis_pipeline_v1beta1_TaskRunStepOverride(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec": schema_pkg_apis_pipeline_v1beta1_TaskSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TimeoutFields": schema_pkg_apis_pipeline_v1beta1_TimeoutFields(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression": schema_pkg_apis_pipeline_v1beta1_WhenExpression(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding": schema_pkg_apis_pipeline_v1beta1_WorkspaceBinding(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration": schema_pkg_apis_pipeline_v1beta1_WorkspaceDeclaration(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspacePipelineTaskBinding": schema_pkg_apis_pipeline_v1beta1_WorkspacePipelineTaskBinding(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage": schema_pkg_apis_pipeline_v1beta1_WorkspaceUsage(ref), - 
"github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResource": schema_pkg_apis_resource_v1alpha1_PipelineResource(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceList": schema_pkg_apis_resource_v1alpha1_PipelineResourceList(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceSpec": schema_pkg_apis_resource_v1alpha1_PipelineResourceSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceStatus": schema_pkg_apis_resource_v1alpha1_PipelineResourceStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.ResourceDeclaration": schema_pkg_apis_resource_v1alpha1_ResourceDeclaration(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.ResourceParam": schema_pkg_apis_resource_v1alpha1_ResourceParam(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.SecretParam": schema_pkg_apis_resource_v1alpha1_SecretParam(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate": schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template": schema_pkg_apis_pipeline_pod_Template(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString": schema_pkg_apis_pipeline_v1beta1_ArrayOrString(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference": schema_pkg_apis_pipeline_v1beta1_ChildStatusReference(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery": schema_pkg_apis_pipeline_v1beta1_CloudEventDelivery(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDeliveryState": schema_pkg_apis_pipeline_v1beta1_CloudEventDeliveryState(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ClusterTask": schema_pkg_apis_pipeline_v1beta1_ClusterTask(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ClusterTaskList": 
schema_pkg_apis_pipeline_v1beta1_ClusterTaskList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask": schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.InternalTaskModifier": schema_pkg_apis_pipeline_v1beta1_InternalTaskModifier(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param": schema_pkg_apis_pipeline_v1beta1_Param(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec": schema_pkg_apis_pipeline_v1beta1_ParamSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Pipeline": schema_pkg_apis_pipeline_v1beta1_Pipeline(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineDeclaredResource": schema_pkg_apis_pipeline_v1beta1_PipelineDeclaredResource(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineList": schema_pkg_apis_pipeline_v1beta1_PipelineList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRef": schema_pkg_apis_pipeline_v1beta1_PipelineRef(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceBinding": schema_pkg_apis_pipeline_v1beta1_PipelineResourceBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceRef": schema_pkg_apis_pipeline_v1beta1_PipelineResourceRef(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceResult": schema_pkg_apis_pipeline_v1beta1_PipelineResourceResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResult": schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRun": schema_pkg_apis_pipeline_v1beta1_PipelineRun(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunList": schema_pkg_apis_pipeline_v1beta1_PipelineRunList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult": 
schema_pkg_apis_pipeline_v1beta1_PipelineRunResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunRunStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunSpec": schema_pkg_apis_pipeline_v1beta1_PipelineRunSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunStatusFields": schema_pkg_apis_pipeline_v1beta1_PipelineRunStatusFields(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunTaskRunStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec": schema_pkg_apis_pipeline_v1beta1_PipelineSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTask": schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskInputResource": schema_pkg_apis_pipeline_v1beta1_PipelineTaskInputResource(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata": schema_pkg_apis_pipeline_v1beta1_PipelineTaskMetadata(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskOutputResource": schema_pkg_apis_pipeline_v1beta1_PipelineTaskOutputResource(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskParam": schema_pkg_apis_pipeline_v1beta1_PipelineTaskParam(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskResources": schema_pkg_apis_pipeline_v1beta1_PipelineTaskResources(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRun": schema_pkg_apis_pipeline_v1beta1_PipelineTaskRun(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRunSpec": 
schema_pkg_apis_pipeline_v1beta1_PipelineTaskRunSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineWorkspaceDeclaration": schema_pkg_apis_pipeline_v1beta1_PipelineWorkspaceDeclaration(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec": schema_pkg_apis_pipeline_v1beta1_PropertySpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResolverParam": schema_pkg_apis_pipeline_v1beta1_ResolverParam(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResolverRef": schema_pkg_apis_pipeline_v1beta1_ResolverRef(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResultRef": schema_pkg_apis_pipeline_v1beta1_ResultRef(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar": schema_pkg_apis_pipeline_v1beta1_Sidecar(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState": schema_pkg_apis_pipeline_v1beta1_SidecarState(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask": schema_pkg_apis_pipeline_v1beta1_SkippedTask(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step": schema_pkg_apis_pipeline_v1beta1_Step(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig": schema_pkg_apis_pipeline_v1beta1_StepOutputConfig(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState": schema_pkg_apis_pipeline_v1beta1_StepState(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate": schema_pkg_apis_pipeline_v1beta1_StepTemplate(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Task": schema_pkg_apis_pipeline_v1beta1_Task(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskList": schema_pkg_apis_pipeline_v1beta1_TaskList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef": schema_pkg_apis_pipeline_v1beta1_TaskRef(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResource": 
schema_pkg_apis_pipeline_v1beta1_TaskResource(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResourceBinding": schema_pkg_apis_pipeline_v1beta1_TaskResourceBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources": schema_pkg_apis_pipeline_v1beta1_TaskResources(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult": schema_pkg_apis_pipeline_v1beta1_TaskResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRun": schema_pkg_apis_pipeline_v1beta1_TaskRun(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunDebug": schema_pkg_apis_pipeline_v1beta1_TaskRunDebug(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunInputs": schema_pkg_apis_pipeline_v1beta1_TaskRunInputs(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunList": schema_pkg_apis_pipeline_v1beta1_TaskRunList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunOutputs": schema_pkg_apis_pipeline_v1beta1_TaskRunOutputs(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources": schema_pkg_apis_pipeline_v1beta1_TaskRunResources(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult": schema_pkg_apis_pipeline_v1beta1_TaskRunResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride": schema_pkg_apis_pipeline_v1beta1_TaskRunSidecarOverride(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSpec": schema_pkg_apis_pipeline_v1beta1_TaskRunSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus": schema_pkg_apis_pipeline_v1beta1_TaskRunStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatusFields": schema_pkg_apis_pipeline_v1beta1_TaskRunStatusFields(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride": schema_pkg_apis_pipeline_v1beta1_TaskRunStepOverride(ref), + 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec": schema_pkg_apis_pipeline_v1beta1_TaskSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TimeoutFields": schema_pkg_apis_pipeline_v1beta1_TimeoutFields(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression": schema_pkg_apis_pipeline_v1beta1_WhenExpression(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding": schema_pkg_apis_pipeline_v1beta1_WorkspaceBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration": schema_pkg_apis_pipeline_v1beta1_WorkspaceDeclaration(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspacePipelineTaskBinding": schema_pkg_apis_pipeline_v1beta1_WorkspacePipelineTaskBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage": schema_pkg_apis_pipeline_v1beta1_WorkspaceUsage(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResource": schema_pkg_apis_resource_v1alpha1_PipelineResource(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceList": schema_pkg_apis_resource_v1alpha1_PipelineResourceList(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceSpec": schema_pkg_apis_resource_v1alpha1_PipelineResourceSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceStatus": schema_pkg_apis_resource_v1alpha1_PipelineResourceStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.ResourceDeclaration": schema_pkg_apis_resource_v1alpha1_ResourceDeclaration(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.ResourceParam": schema_pkg_apis_resource_v1alpha1_ResourceParam(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.SecretParam": schema_pkg_apis_resource_v1alpha1_SecretParam(ref), } } @@ -368,7 +364,7 @@ func schema_pkg_apis_pipeline_v1beta1_ArrayOrString(ref common.ReferenceCallback 
return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ArrayOrString is a type that can hold a single string or string array. Used in JSON unmarshalling so that a single JSON field can accept either an individual string or an array of strings.", + Description: "ArrayOrString is a type that can hold a single string or string array. Used in JSON unmarshalling so that a single JSON field can accept either an individual string or an array of strings. consideration the object case after the community reaches an agreement on it.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "type": { @@ -405,8 +401,23 @@ func schema_pkg_apis_pipeline_v1beta1_ArrayOrString(ref common.ReferenceCallback }, }, }, + "objectVal": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, - Required: []string{"type", "stringVal", "arrayVal"}, + Required: []string{"type", "stringVal", "arrayVal", "objectVal"}, }, }, } @@ -445,24 +456,6 @@ func schema_pkg_apis_pipeline_v1beta1_ChildStatusReference(ref common.ReferenceC Format: "", }, }, - "conditionChecks": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "ConditionChecks is the the list of condition checks, including their names and statuses, for the PipelineTask. 
Deprecated: This field will be removed when conditions are removed.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunChildConditionCheckStatus"), - }, - }, - }, - }, - }, "whenExpressions": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ @@ -486,7 +479,7 @@ func schema_pkg_apis_pipeline_v1beta1_ChildStatusReference(ref common.ReferenceC }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunChildConditionCheckStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"}, } } @@ -654,182 +647,6 @@ func schema_pkg_apis_pipeline_v1beta1_ClusterTaskList(ref common.ReferenceCallba } } -func schema_pkg_apis_pipeline_v1beta1_ConditionCheck(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ConditionCheck represents a single evaluation of a Condition step.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_pipeline_v1beta1_ConditionCheckStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ConditionCheckStatus defines the observed state of ConditionCheck", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "observedGeneration": { - SchemaProps: spec.SchemaProps{ - Description: "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "conditions": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Conditions the latest available observations of a resource's current state.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: 
ref("knative.dev/pkg/apis.Condition"), - }, - }, - }, - }, - }, - "annotations": { - SchemaProps: spec.SchemaProps{ - Description: "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "podName": { - SchemaProps: spec.SchemaProps{ - Description: "PodName is the name of the pod responsible for executing this condition check.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "startTime": { - SchemaProps: spec.SchemaProps{ - Description: "StartTime is the time the check is actually started.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "completionTime": { - SchemaProps: spec.SchemaProps{ - Description: "CompletionTime is the time the check pod completed.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "check": { - SchemaProps: spec.SchemaProps{ - Description: "Check describes the state of the check container.", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ContainerState"), - }, - }, - }, - Required: []string{"podName"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerState", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"}, - } -} - -func schema_pkg_apis_pipeline_v1beta1_ConditionCheckStatusFields(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ConditionCheckStatusFields holds the fields of ConfigurationCheck's status. 
This is defined separately and inlined so that other types can readily consume these fields via duck typing.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "podName": { - SchemaProps: spec.SchemaProps{ - Description: "PodName is the name of the pod responsible for executing this condition check.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "startTime": { - SchemaProps: spec.SchemaProps{ - Description: "StartTime is the time the check is actually started.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "completionTime": { - SchemaProps: spec.SchemaProps{ - Description: "CompletionTime is the time the check pod completed.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "check": { - SchemaProps: spec.SchemaProps{ - Description: "Check describes the state of the check container.", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ContainerState"), - }, - }, - }, - Required: []string{"podName"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerState", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - func schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -935,7 +752,7 @@ func schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref common.ReferenceCallback) "stepTemplate": { SchemaProps: spec.SchemaProps{ Description: "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.", - Ref: ref("k8s.io/api/core/v1.Container"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate"), }, }, "sidecars": { @@ -999,7 +816,7 @@ func schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref common.ReferenceCallback) }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec", 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -1119,7 +936,7 @@ func schema_pkg_apis_pipeline_v1beta1_ParamSpec(ref common.ReferenceCallback) co }, "type": { SchemaProps: spec.SchemaProps{ - Description: "Type is the user-specified type of the parameter. The possible types are currently \"string\" and \"array\", and \"string\" is the default.", + Description: "Type is the user-specified type of the parameter. 
The possible types are currently \"string\", \"array\" and \"object\", and \"string\" is the default.", Type: []string{"string"}, Format: "", }, @@ -1131,6 +948,21 @@ func schema_pkg_apis_pipeline_v1beta1_ParamSpec(ref common.ReferenceCallback) co Format: "", }, }, + "properties": { + SchemaProps: spec.SchemaProps{ + Description: "Properties is the JSON Schema properties to support key-value pairs parameter.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec"), + }, + }, + }, + }, + }, "default": { SchemaProps: spec.SchemaProps{ Description: "Default is the value a parameter takes if no input value is supplied. If default is set, a Task may be executed without a supplied value for the parameter.", @@ -1142,7 +974,7 @@ func schema_pkg_apis_pipeline_v1beta1_ParamSpec(ref common.ReferenceCallback) co }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec"}, } } @@ -1278,7 +1110,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRef(ref common.ReferenceCallback) return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineRef can be used to refer to a specific instance of a Pipeline. 
Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64", + Description: "PipelineRef can be used to refer to a specific instance of a Pipeline.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -1396,12 +1228,6 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineResourceResult(ref common.Referenc Format: "", }, }, - "resourceRef": { - SchemaProps: spec.SchemaProps{ - Description: "The field ResourceRef should be deprecated and removed in the next API version. See https://github.com/tektoncd/pipeline/issues/2694 for more information.", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceRef"), - }, - }, "type": { SchemaProps: spec.SchemaProps{ Type: []string{"integer"}, @@ -1412,8 +1238,6 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineResourceResult(ref common.Referenc Required: []string{"key", "value"}, }, }, - Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceRef"}, } } @@ -1432,6 +1256,13 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref common.ReferenceCallbac Format: "", }, }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the user-specified type of the result. The possible types are 'string', 'array', and 'object', with 'string' as the default. 
'array' and 'object' types are alpha features.", + Type: []string{"string"}, + Format: "", + }, + }, "description": { SchemaProps: spec.SchemaProps{ Description: "Description is a human-readable description of the result", @@ -1443,15 +1274,16 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref common.ReferenceCallbac "value": { SchemaProps: spec.SchemaProps{ Description: "Value the expression used to retrieve the value", - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString"), }, }, }, Required: []string{"name", "value"}, }, }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString"}, } } @@ -1502,143 +1334,82 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRun(ref common.ReferenceCallback) } } -func schema_pkg_apis_pipeline_v1beta1_PipelineRunChildConditionCheckStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1beta1_PipelineRunList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineRunChildConditionCheckStatus is used to record the status of condition checks within StatusChildReferences.", + Description: "PipelineRunList contains a list of PipelineRun", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "conditionName": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "ConditionName is the name of the Condition", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", Type: []string{"string"}, Format: "", }, }, - "status": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "Status is the ConditionCheckStatus for the corresponding ConditionCheck", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConditionCheckStatus"), + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", }, }, - "conditionCheckName": { + "metadata": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRun"), + }, + }, + }, }, }, }, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConditionCheckStatus"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRun", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, } } -func schema_pkg_apis_pipeline_v1beta1_PipelineRunConditionCheckStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1beta1_PipelineRunResult(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineRunConditionCheckStatus returns the condition check status", + Description: "PipelineRunResult used to describe the results of a 
pipeline", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "conditionName": { + "name": { SchemaProps: spec.SchemaProps{ - Description: "ConditionName is the name of the Condition", + Description: "Name is the result's name as declared by the Pipeline", + Default: "", Type: []string{"string"}, Format: "", }, }, - "status": { + "value": { SchemaProps: spec.SchemaProps{ - Description: "Status is the ConditionCheckStatus for the corresponding ConditionCheck", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConditionCheckStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConditionCheckStatus"}, - } -} - -func schema_pkg_apis_pipeline_v1beta1_PipelineRunList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PipelineRunList contains a list of PipelineRun", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRun"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRun", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_pipeline_v1beta1_PipelineRunResult(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PipelineRunResult used to describe the results of a pipeline", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the result's name as declared by the Pipeline", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is the result returned from the execution of this PipelineRun", - Default: "", - Type: []string{"string"}, - Format: "", + Description: "Value is the result returned from the execution of this PipelineRun", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString"), }, }, }, Required: []string{"name", "value"}, }, }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString"}, } } @@ -1750,25 +1521,6 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunSpec(ref common.ReferenceCallba Format: "", }, 
}, - "serviceAccountNames": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Deprecated: use taskRunSpecs.ServiceAccountName instead", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunSpecServiceAccountName"), - }, - }, - }, - }, - }, "status": { SchemaProps: spec.SchemaProps{ Description: "Used for cancelling a pipelinerun (and maybe more later on)", @@ -1778,13 +1530,13 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunSpec(ref common.ReferenceCallba }, "timeouts": { SchemaProps: spec.SchemaProps{ - Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nTime after which the Pipeline times out. Currently three keys are accepted in the map pipeline, tasks and finally with Timeouts.pipeline >= Timeouts.tasks + Timeouts.finally", + Description: "Time after which the Pipeline times out. Currently three keys are accepted in the map pipeline, tasks and finally with Timeouts.pipeline >= Timeouts.tasks + Timeouts.finally", Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TimeoutFields"), }, }, "timeout": { SchemaProps: spec.SchemaProps{ - Description: "Time after which the Pipeline times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Description: "Timeout Deprecated: use pipelineRunSpec.Timeouts.Pipeline instead Time after which the Pipeline times out. Defaults to never. 
Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -1836,32 +1588,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunSpec(ref common.ReferenceCallba }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceBinding", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunSpecServiceAccountName", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TimeoutFields", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, - } -} - -func schema_pkg_apis_pipeline_v1beta1_PipelineRunSpecServiceAccountName(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PipelineRunSpecServiceAccountName can be used to configure specific ServiceAccountName for a concrete Task", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "taskName": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceBinding", 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TimeoutFields", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, } } @@ -2164,20 +1891,6 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunTaskRunStatus(ref common.Refere Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus"), }, }, - "conditionChecks": { - SchemaProps: spec.SchemaProps{ - Description: "ConditionChecks maps the name of a condition check to its Status", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunConditionCheckStatus"), - }, - }, - }, - }, - }, "whenExpressions": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ @@ -2201,7 +1914,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunTaskRunStatus(ref common.Refere }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunConditionCheckStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"}, } } @@ -2367,25 +2080,6 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref common.ReferenceCallback) Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask"), }, }, - "conditions": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Conditions is a list of conditions that need to be 
true for the task to run Conditions are deprecated, use WhenExpressions instead", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskCondition"), - }, - }, - }, - }, - }, "when": { SchemaProps: spec.SchemaProps{ Description: "WhenExpressions is a list of when expressions that need to be true for the task to run", @@ -2500,69 +2194,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref common.ReferenceCallback) }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskCondition", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspacePipelineTaskBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, - } -} - -func schema_pkg_apis_pipeline_v1beta1_PipelineTaskCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PipelineTaskCondition allows a PipelineTask to declare a Condition to be evaluated before the Task is run.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "conditionRef": { - SchemaProps: spec.SchemaProps{ - Description: "ConditionRef is the name of the Condition to use for the conditionCheck", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "params": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - 
Description: "Params declare parameters passed to this Condition", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"), - }, - }, - }, - }, - }, - "resources": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Resources declare the resources provided to this Condition as input", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskInputResource"), - }, - }, - }, - }, - }, - }, - Required: []string{"conditionRef"}, - }, - }, - Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskInputResource"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspacePipelineTaskBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, } } @@ -2849,11 +2481,22 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTaskRunSpec(ref common.ReferenceCa }, }, }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata"), + }, + }, + "computeResources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute resources to use for this TaskRun", + Ref: 
ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride", "k8s.io/api/core/v1.ResourceRequirements"}, } } @@ -2893,6 +2536,25 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineWorkspaceDeclaration(ref common.Re } } +func schema_pkg_apis_pipeline_v1beta1_PropertySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PropertySpec defines the struct for object keys", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_pipeline_v1beta1_ResolverParam(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2985,8 +2647,22 @@ func schema_pkg_apis_pipeline_v1beta1_ResultRef(ref common.ReferenceCallback) co Format: "", }, }, + "resultsIndex": { + SchemaProps: spec.SchemaProps{ + Default: 0, + Type: []string{"integer"}, + Format: "int32", + }, + }, + "property": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, }, - Required: []string{"pipelineTask", "result"}, + Required: []string{"pipelineTask", "result", "resultsIndex", "property"}, }, }, } @@ -2996,7 +2672,7 @@ func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) comm return 
common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Sidecar has nearly the same data structure as Step, consisting of a Container and an optional Script, but does not have the ability to timeout.", + Description: "Sidecar has nearly the same data structure as Step but does not have the ability to timeout.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -3015,6 +2691,11 @@ func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) comm }, }, "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", Type: []string{"array"}, @@ -3030,6 +2711,11 @@ func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) comm }, }, "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", Type: []string{"array"}, @@ -3077,6 +2763,11 @@ func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) comm }, }, "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
Cannot be updated.", Type: []string{"array"}, @@ -3093,6 +2784,7 @@ func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) comm "env": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge", }, @@ -3120,6 +2812,7 @@ func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) comm "volumeMounts": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", "x-kubernetes-patch-merge-key": "mountPath", "x-kubernetes-patch-strategy": "merge", }, @@ -3140,6 +2833,7 @@ func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) comm "volumeDevices": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", "x-kubernetes-patch-merge-key": "devicePath", "x-kubernetes-patch-strategy": "merge", }, @@ -3190,7 +2884,399 @@ func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) comm }, "terminationMessagePolicy": { SchemaProps: spec.SchemaProps{ - Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. 
The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "stdin": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stdinOnce": { + SchemaProps: spec.SchemaProps{ + Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tty": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.", + Type: []string{"string"}, + Format: "", + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Sidecar wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage"), + }, + }, + }, + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_pkg_apis_pipeline_v1beta1_SidecarState(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: 
spec.SchemaProps{ + Description: "SidecarState reports the results of running a sidecar in a Task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "waiting": { + SchemaProps: spec.SchemaProps{ + Description: "Details about a waiting container", + Ref: ref("k8s.io/api/core/v1.ContainerStateWaiting"), + }, + }, + "running": { + SchemaProps: spec.SchemaProps{ + Description: "Details about a running container", + Ref: ref("k8s.io/api/core/v1.ContainerStateRunning"), + }, + }, + "terminated": { + SchemaProps: spec.SchemaProps{ + Description: "Details about a terminated container", + Ref: ref("k8s.io/api/core/v1.ContainerStateTerminated"), + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "container": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "imageID": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"}, + } +} + +func schema_pkg_apis_pipeline_v1beta1_SkippedTask(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SkippedTask is used to describe the Tasks that were skipped due to their When Expressions evaluating to False. 
This is a struct because we are looking into including more details about the When Expressions that caused this Task to be skipped.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the Pipeline Task name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "Reason is the cause of the PipelineTask being skipped.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "whenExpressions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"), + }, + }, + }, + }, + }, + }, + Required: []string{"name", "reason"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"}, + } +} + +func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Step runs a subcomponent of a Task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "ports": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "containerPort", + "protocol", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Deprecated. This field will be removed in a future release. List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ContainerPort"), + }, + }, + }, + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "livenessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Deprecated. This field will be removed in a future release. Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "readinessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Deprecated. This field will be removed in a future release. Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "startupProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Deprecated. This field will be removed in a future release. DeprecatedStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "lifecycle": { + SchemaProps: spec.SchemaProps{ + Description: "Deprecated. This field will be removed in a future release. Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + Ref: ref("k8s.io/api/core/v1.Lifecycle"), + }, + }, + "terminationMessagePath": { + SchemaProps: spec.SchemaProps{ + Description: "Deprecated. This field will be removed in a future release. Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "terminationMessagePolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Deprecated. This field will be removed in a future release. Indicate how the termination message should be populated. 
File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", Type: []string{"string"}, Format: "", }, @@ -3210,32 +3296,38 @@ func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) comm }, "stdin": { SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + Description: "Deprecated. This field will be removed in a future release. Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", Type: []string{"boolean"}, Format: "", }, }, "stdinOnce": { SchemaProps: spec.SchemaProps{ - Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + Description: "Deprecated. This field will be removed in a future release. Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", Type: []string{"boolean"}, Format: "", }, }, "tty": { SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + Description: "Deprecated. This field will be removed in a future release. Whether this container should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. Default is false.", Type: []string{"boolean"}, Format: "", }, }, "script": { SchemaProps: spec.SchemaProps{ - Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.", + Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.", Type: []string{"string"}, Format: "", }, }, + "timeout": { + SchemaProps: spec.SchemaProps{ + Description: "Timeout is the time after which the step times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, "workspaces": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ @@ -3243,7 +3335,7 @@ func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) comm }, }, SchemaProps: spec.SchemaProps{ - Description: "This is an alpha field. 
You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Sidecar wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Step wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -3255,20 +3347,59 @@ func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) comm }, }, }, + "onError": { + SchemaProps: spec.SchemaProps{ + Description: "OnError defines the exiting behavior of a container on error can be set to [ continue | stopAndFail ] stopAndFail indicates exit the taskRun if the container exits with non-zero exit code continue indicates continue executing the rest of the steps irrespective of the container exit code", + Type: []string{"string"}, + Format: "", + }, + }, + "stdoutConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Stores configuration for the stdout stream of the step.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig"), + }, + }, + "stderrConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Stores configuration for the stderr stream of the step.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig"), + }, + }, }, Required: []string{"name"}, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", 
"k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, } } -func schema_pkg_apis_pipeline_v1beta1_SidecarState(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1beta1_StepOutputConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "SidecarState reports the results of running a sidecar in a Task.", + Description: "StepOutputConfig stores configuration for a step output stream.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path to duplicate stdout stream to on container's local filesystem.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1beta1_StepState(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StepState reports the results of running a step in a Task.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "waiting": { @@ -3315,59 +3446,16 @@ func schema_pkg_apis_pipeline_v1beta1_SidecarState(ref common.ReferenceCallback) } } -func schema_pkg_apis_pipeline_v1beta1_SkippedTask(ref 
common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SkippedTask is used to describe the Tasks that were skipped due to their When Expressions evaluating to False. This is a struct because we are looking into including more details about the When Expressions that caused this Task to be skipped.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the Pipeline Task name", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "whenExpressions": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"), - }, - }, - }, - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"}, - } -} - -func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1beta1_StepTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Step embeds the Container type, which allows it to include fields not provided by Container.", + Description: "StepTemplate is a template for a Step", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { SchemaProps: spec.SchemaProps{ - Description: "Name of the container specified as a DNS_LABEL. 
Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + Description: "Deprecated. This field will be removed in a future release. DeprecatedName of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", Default: "", Type: []string{"string"}, Format: "", @@ -3381,6 +3469,11 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. }, }, "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", Type: []string{"array"}, @@ -3396,6 +3489,11 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. }, }, "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", Type: []string{"array"}, @@ -3430,7 +3528,7 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. }, }, SchemaProps: spec.SchemaProps{ - Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + Description: "Deprecated. This field will be removed in a future release. List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -3443,6 +3541,11 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. }, }, "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. 
When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", Type: []string{"array"}, @@ -3459,6 +3562,7 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. "env": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge", }, @@ -3486,6 +3590,7 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. "volumeMounts": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", "x-kubernetes-patch-merge-key": "mountPath", "x-kubernetes-patch-strategy": "merge", }, @@ -3506,6 +3611,7 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. "volumeDevices": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", "x-kubernetes-patch-merge-key": "devicePath", "x-kubernetes-patch-strategy": "merge", }, @@ -3525,38 +3631,38 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. }, "livenessProbe": { SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Description: "Deprecated. This field will be removed in a future release. Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", Ref: ref("k8s.io/api/core/v1.Probe"), }, }, "readinessProbe": { SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container service readiness. 
Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Description: "Deprecated. This field will be removed in a future release. Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", Ref: ref("k8s.io/api/core/v1.Probe"), }, }, "startupProbe": { SchemaProps: spec.SchemaProps{ - Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Description: "Deprecated. This field will be removed in a future release. DeprecatedStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", Ref: ref("k8s.io/api/core/v1.Probe"), }, }, "lifecycle": { SchemaProps: spec.SchemaProps{ - Description: "Actions that the management system should take in response to container lifecycle events. 
Cannot be updated.", + Description: "Deprecated. This field will be removed in a future release. Actions that the management system should take in response to container lifecycle events. Cannot be updated.", Ref: ref("k8s.io/api/core/v1.Lifecycle"), }, }, "terminationMessagePath": { SchemaProps: spec.SchemaProps{ - Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + Description: "Deprecated. This field will be removed in a future release. Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", Type: []string{"string"}, Format: "", }, }, "terminationMessagePolicy": { SchemaProps: spec.SchemaProps{ - Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + Description: "Deprecated. This field will be removed in a future release. Indicate how the termination message should be populated. 
File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", Type: []string{"string"}, Format: "", }, @@ -3576,121 +3682,31 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. }, "stdin": { SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + Description: "Deprecated. This field will be removed in a future release. Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", Type: []string{"boolean"}, Format: "", }, }, "stdinOnce": { SchemaProps: spec.SchemaProps{ - Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + Description: "Deprecated. This field will be removed in a future release. Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", Type: []string{"boolean"}, Format: "", }, }, "tty": { SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + Description: "Deprecated. This field will be removed in a future release. Whether this container should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. Default is false.", Type: []string{"boolean"}, Format: "", }, }, - "script": { - SchemaProps: spec.SchemaProps{ - Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.", - Type: []string{"string"}, - Format: "", - }, - }, - "timeout": { - SchemaProps: spec.SchemaProps{ - Description: "Timeout is the time after which the step times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - "workspaces": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Step wants exclusive access to. 
Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage"), - }, - }, - }, - }, - }, - "onError": { - SchemaProps: spec.SchemaProps{ - Description: "OnError defines the exiting behavior of a container on error can be set to [ continue | stopAndFail ] stopAndFail indicates exit the taskRun if the container exits with non-zero exit code continue indicates continue executing the rest of the steps irrespective of the container exit code", - Type: []string{"string"}, - Format: "", - }, - }, }, Required: []string{"name"}, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, - } -} - -func schema_pkg_apis_pipeline_v1beta1_StepState(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "StepState reports the results of running a step in a Task.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "waiting": { - SchemaProps: spec.SchemaProps{ - Description: "Details about a waiting container", - Ref: ref("k8s.io/api/core/v1.ContainerStateWaiting"), - }, - }, - "running": { - SchemaProps: spec.SchemaProps{ - Description: "Details about a running container", - Ref: ref("k8s.io/api/core/v1.ContainerStateRunning"), - }, - }, - 
"terminated": { - SchemaProps: spec.SchemaProps{ - Description: "Details about a terminated container", - Ref: ref("k8s.io/api/core/v1.ContainerStateTerminated"), - }, - }, - "name": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "imageID": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"}, + "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, } } @@ -3789,7 +3805,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRef(ref common.ReferenceCallback) comm return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskRef can be used to refer to a specific instance of a task. Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64", + Description: "TaskRef can be used to refer to a specific instance of a task.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -3999,10 +4015,31 @@ func schema_pkg_apis_pipeline_v1beta1_TaskResult(ref common.ReferenceCallback) c Format: "", }, }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the user-specified type of the result. 
The possible type is currently \"string\" and will support \"array\" in following work.", + Type: []string{"string"}, + Format: "", + }, + }, + "properties": { + SchemaProps: spec.SchemaProps{ + Description: "Properties is the JSON Schema properties to support key-value pairs results.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec"), + }, + }, + }, + }, + }, "description": { SchemaProps: spec.SchemaProps{ Description: "Description is a human-readable description of the result", - Default: "", Type: []string{"string"}, Format: "", }, @@ -4011,6 +4048,8 @@ func schema_pkg_apis_pipeline_v1beta1_TaskResult(ref common.ReferenceCallback) c Required: []string{"name"}, }, }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec"}, } } @@ -4294,18 +4333,26 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunResult(ref common.ReferenceCallback Format: "", }, }, - "value": { + "type": { SchemaProps: spec.SchemaProps{ - Description: "Value the given value of the result", - Default: "", + Description: "Type is the user-specified type of the result. 
The possible type is currently \"string\" and will support \"array\" in following work.", Type: []string{"string"}, Format: "", }, }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value the given value of the result", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString"), + }, + }, }, Required: []string{"name", "value"}, }, }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString"}, } } @@ -4469,11 +4516,17 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunSpec(ref common.ReferenceCallback) }, }, }, + "computeResources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute resources to use for this TaskRun", + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunDebug", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunDebug", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride", 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, } } @@ -4943,7 +4996,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskSpec(ref common.ReferenceCallback) com "stepTemplate": { SchemaProps: spec.SchemaProps{ Description: "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.", - Ref: ref("k8s.io/api/core/v1.Container"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate"), }, }, "sidecars": { @@ -5007,7 +5060,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskSpec(ref common.ReferenceCallback) com }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.Volume"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration", "k8s.io/api/core/v1.Volume"}, } } @@ -5146,12 +5199,24 @@ func schema_pkg_apis_pipeline_v1beta1_WorkspaceBinding(ref 
common.ReferenceCallb Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), }, }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "Projected represents a projected volume that should populate this workspace.", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, }, Required: []string{"name"}, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource"}, + "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource"}, } } @@ -5223,7 +5288,6 @@ func schema_pkg_apis_pipeline_v1beta1_WorkspacePipelineTaskBinding(ref common.Re "workspace": { SchemaProps: spec.SchemaProps{ Description: "Workspace is the name of the workspace declared by the pipeline", - Default: "", Type: []string{"string"}, Format: "", }, @@ -5236,7 +5300,7 @@ func schema_pkg_apis_pipeline_v1beta1_WorkspacePipelineTaskBinding(ref common.Re }, }, }, - Required: []string{"name", "workspace"}, + Required: []string{"name"}, }, }, } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_context.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_context.go deleted file mode 100644 index a2d3882f31..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_context.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2021 The Tekton Authors -// 
-// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1beta1 - -import ( - "context" - "fmt" -) - -// paramCtxKey is the unique type for referencing param information from -// a context.Context. See [context.Context.Value](https://pkg.go.dev/context#Context) -// for more details. -// -// +k8s:openapi-gen=false -type paramCtxKeyType struct{} - -var ( - paramCtxKey = paramCtxKeyType{} -) - -// paramCtxVal is the data type stored in the param context. -// This maps param names -> ParamSpec. -type paramCtxVal map[string]ParamSpec - -// addContextParams adds the given Params to the param context. This only -// preserves the fields included in ParamSpec - Name and Type. -func addContextParams(ctx context.Context, in []Param) context.Context { - if in == nil { - return ctx - } - - out := paramCtxVal{} - // Copy map to ensure that contexts are unique. - v := ctx.Value(paramCtxKey) - if v != nil { - for n, cps := range v.(paramCtxVal) { - out[n] = cps - } - } - for _, p := range in { - // The user may have omitted type data. Fill this in in to normalize data. - if v := p.Value; v.Type == "" { - if len(v.ArrayVal) > 0 { - p.Value.Type = ParamTypeArray - } - if v.StringVal != "" { - p.Value.Type = ParamTypeString - } - } - out[p.Name] = ParamSpec{ - Name: p.Name, - Type: p.Value.Type, - } - } - return context.WithValue(ctx, paramCtxKey, out) -} - -// addContextParamSpec adds the given ParamSpecs to the param context. 
-func addContextParamSpec(ctx context.Context, in []ParamSpec) context.Context { - if in == nil { - return ctx - } - - out := paramCtxVal{} - // Copy map to ensure that contexts are unique. - v := ctx.Value(paramCtxKey) - if v != nil { - for n, ps := range v.(paramCtxVal) { - out[n] = ps - } - } - for _, p := range in { - cps := ParamSpec{ - Name: p.Name, - Type: p.Type, - Description: p.Description, - Default: p.Default, - } - out[p.Name] = cps - } - return context.WithValue(ctx, paramCtxKey, out) -} - -// getContextParams returns the current context parameters overlayed with a -// given set of params. Overrides should generally be the current layer you -// are trying to evaluate. Any context params not in the overrides will default -// to a generic pass-through param of the given type (i.e. $(params.name) or -// $(params.name[*])). -func getContextParams(ctx context.Context, overlays ...Param) []Param { - pv := paramCtxVal{} - v := ctx.Value(paramCtxKey) - if v == nil && len(overlays) == 0 { - return nil - } - if v != nil { - pv = v.(paramCtxVal) - } - out := make([]Param, 0, len(pv)) - - // Overlays take precedence over any context params. Keep track of - // these and automatically add them to the output. - overrideSet := make(map[string]Param, len(overlays)) - for _, p := range overlays { - overrideSet[p.Name] = p - out = append(out, p) - } - - // Include the rest of the context params. - for _, ps := range pv { - // Don't do anything for any overlay params - these are already - // included. - if _, ok := overrideSet[ps.Name]; ok { - continue - } - - // If there is no overlay, pass through the param to the next level. - // e.g. for strings $(params.name), for arrays $(params.name[*]). 
- p := Param{ - Name: ps.Name, - } - if ps.Type == ParamTypeString { - p.Value = ArrayOrString{ - Type: ParamTypeString, - StringVal: fmt.Sprintf("$(params.%s)", ps.Name), - } - } else { - p.Value = ArrayOrString{ - Type: ParamTypeArray, - ArrayVal: []string{fmt.Sprintf("$(params.%s[*])", ps.Name)}, - } - } - out = append(out, p) - } - - return out -} - -// getContextParamSpecs returns the current context ParamSpecs. -func getContextParamSpecs(ctx context.Context) []ParamSpec { - v := ctx.Value(paramCtxKey) - if v == nil { - return nil - } - - pv := v.(paramCtxVal) - out := make([]ParamSpec, 0, len(pv)) - for _, ps := range pv { - out = append(out, ParamSpec{ - Name: ps.Name, - Type: ps.Type, - Description: ps.Description, - Default: ps.Default, - }) - } - return out -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go index 4e863ffc05..23e71a52ef 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go @@ -38,13 +38,16 @@ type ParamSpec struct { // Name declares the name by which a parameter is referenced. Name string `json:"name"` // Type is the user-specified type of the parameter. The possible types - // are currently "string" and "array", and "string" is the default. + // are currently "string", "array" and "object", and "string" is the default. // +optional Type ParamType `json:"type,omitempty"` // Description is a user-facing description of the parameter that may be // used to populate a UI. // +optional Description string `json:"description,omitempty"` + // Properties is the JSON Schema properties to support key-value pairs parameter. + // +optional + Properties map[string]PropertySpec `json:"properties,omitempty"` // Default is the value a parameter takes if no input value is supplied. 
If // default is set, a Task may be executed without a supplied value for the // parameter. @@ -52,25 +55,47 @@ type ParamSpec struct { Default *ArrayOrString `json:"default,omitempty"` } +// PropertySpec defines the struct for object keys +type PropertySpec struct { + Type ParamType `json:"type,omitempty"` +} + // SetDefaults set the default type -func (pp *ParamSpec) SetDefaults(ctx context.Context) { - if pp != nil && pp.Type == "" { - if pp.Default != nil { - // propagate the parsed ArrayOrString's type to the parent ParamSpec's type - if pp.Default.Type != "" { - // propagate the default type if specified - pp.Type = pp.Default.Type - } else { - // determine the type based on the array or string values when default value is specified but not the type - if pp.Default.ArrayVal != nil { - pp.Type = ParamTypeArray - } else { - pp.Type = ParamTypeString - } - } - } else { - // ParamTypeString is the default value (when no type can be inferred from the default value) - pp.Type = ParamTypeString +func (pp *ParamSpec) SetDefaults(context.Context) { + if pp == nil { + return + } + + // Propagate inferred type to the parent ParamSpec's type, and default type to the PropertySpec's type + // The sequence to look at is type in ParamSpec -> properties -> type in default -> array/string/object value in default + // If neither `properties` or `default` section is provided, ParamTypeString will be the default type. + switch { + case pp.Type != "": + // If param type is provided by the author, do nothing but just set default type for PropertySpec in case `properties` section is provided. 
+ pp.setDefaultsForProperties() + case pp.Properties != nil: + pp.Type = ParamTypeObject + // Also set default type for PropertySpec + pp.setDefaultsForProperties() + case pp.Default == nil: + // ParamTypeString is the default value (when no type can be inferred from the default value) + pp.Type = ParamTypeString + case pp.Default.Type != "": + pp.Type = pp.Default.Type + case pp.Default.ArrayVal != nil: + pp.Type = ParamTypeArray + case pp.Default.ObjectVal != nil: + pp.Type = ParamTypeObject + default: + pp.Type = ParamTypeString + } +} + +// setDefaultsForProperties sets default type for PropertySpec (string) if it's not specified +func (pp *ParamSpec) setDefaultsForProperties() { + for key, propertySpec := range pp.Properties { + if propertySpec.Type == "" { + pp.Properties[key] = PropertySpec{Type: ParamTypeString} } } } @@ -93,31 +118,66 @@ type ParamType string const ( ParamTypeString ParamType = "string" ParamTypeArray ParamType = "array" + ParamTypeObject ParamType = "object" ) // AllParamTypes can be used for ParamType validation. -var AllParamTypes = []ParamType{ParamTypeString, ParamTypeArray} +var AllParamTypes = []ParamType{ParamTypeString, ParamTypeArray, ParamTypeObject} // ArrayOrString is modeled after IntOrString in kubernetes/apimachinery: // ArrayOrString is a type that can hold a single string or string array. // Used in JSON unmarshalling so that a single JSON field can accept // either an individual string or an array of strings. +// TODO (@chuangw6): This struct will be renamed or be embedded in a new struct to take into +// consideration the object case after the community reaches an agreement on it. type ArrayOrString struct { Type ParamType `json:"type"` // Represents the stored type of ArrayOrString. 
StringVal string `json:"stringVal"` // +listType=atomic - ArrayVal []string `json:"arrayVal"` + ArrayVal []string `json:"arrayVal"` + ObjectVal map[string]string `json:"objectVal"` } // UnmarshalJSON implements the json.Unmarshaller interface. func (arrayOrString *ArrayOrString) UnmarshalJSON(value []byte) error { - if value[0] == '"' { + // ArrayOrString is used for Results Value as well, the results can be any kind of + // data so we need to check if it is empty. + if len(value) == 0 { arrayOrString.Type = ParamTypeString - return json.Unmarshal(value, &arrayOrString.StringVal) + return nil + } + if value[0] == '[' { + // We're trying to Unmarshal to []string, but for cases like []int or other types + // of nested array which we don't support yet, we should continue and Unmarshal + // it to String. If the Type being set doesn't match what it actually should be, + // it will be captured by validation in reconciler. + // if failed to unmarshal to array, we will convert the value to string and marshal it to string + var a []string + if err := json.Unmarshal(value, &a); err == nil { + arrayOrString.Type = ParamTypeArray + arrayOrString.ArrayVal = a + return nil + } } - arrayOrString.Type = ParamTypeArray - return json.Unmarshal(value, &arrayOrString.ArrayVal) + if value[0] == '{' { + // if failed to unmarshal to map, we will convert the value to string and marshal it to string + var m map[string]string + if err := json.Unmarshal(value, &m); err == nil { + arrayOrString.Type = ParamTypeObject + arrayOrString.ObjectVal = m + return nil + } + } + + // By default we unmarshal to string + arrayOrString.Type = ParamTypeString + if err := json.Unmarshal(value, &arrayOrString.StringVal); err == nil { + return nil + } + arrayOrString.StringVal = string(value) + + return nil } // MarshalJSON implements the json.Marshaller interface. 
@@ -127,21 +187,66 @@ func (arrayOrString ArrayOrString) MarshalJSON() ([]byte, error) { return json.Marshal(arrayOrString.StringVal) case ParamTypeArray: return json.Marshal(arrayOrString.ArrayVal) + case ParamTypeObject: + return json.Marshal(arrayOrString.ObjectVal) default: return []byte{}, fmt.Errorf("impossible ArrayOrString.Type: %q", arrayOrString.Type) } } // ApplyReplacements applyes replacements for ArrayOrString type -func (arrayOrString *ArrayOrString) ApplyReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string) { - if arrayOrString.Type == ParamTypeString { - arrayOrString.StringVal = substitution.ApplyReplacements(arrayOrString.StringVal, stringReplacements) - } else { - var newArrayVal []string +func (arrayOrString *ArrayOrString) ApplyReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) { + switch arrayOrString.Type { + case ParamTypeArray: + newArrayVal := []string{} for _, v := range arrayOrString.ArrayVal { newArrayVal = append(newArrayVal, substitution.ApplyArrayReplacements(v, stringReplacements, arrayReplacements)...) } arrayOrString.ArrayVal = newArrayVal + case ParamTypeObject: + newObjectVal := map[string]string{} + for k, v := range arrayOrString.ObjectVal { + newObjectVal[k] = substitution.ApplyReplacements(v, stringReplacements) + } + arrayOrString.ObjectVal = newObjectVal + default: + arrayOrString.applyOrCorrect(stringReplacements, arrayReplacements, objectReplacements) + } +} + +// applyOrCorrect deals with string param whose value can be string literal or a reference to a string/array/object param/result. +// If the value of arrayOrString is a reference to array or object, the type will be corrected from string to array/object. 
+func (arrayOrString *ArrayOrString) applyOrCorrect(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) { + stringVal := arrayOrString.StringVal + + // if the stringVal is a string literal or a string that mixed with var references + // just do the normal string replacement + if !exactVariableSubstitutionRegex.MatchString(stringVal) { + arrayOrString.StringVal = substitution.ApplyReplacements(arrayOrString.StringVal, stringReplacements) + return + } + + // trim the head "$(" and the tail ")" or "[*])" + // i.e. get "params.name" from "$(params.name)" or "$(params.name[*])" + trimedStringVal := substitution.StripStarVarSubExpression(stringVal) + + // if the stringVal is a reference to a string param + if _, ok := stringReplacements[trimedStringVal]; ok { + arrayOrString.StringVal = substitution.ApplyReplacements(arrayOrString.StringVal, stringReplacements) + } + + // if the stringVal is a reference to an array param, we need to change the type other than apply replacement + if _, ok := arrayReplacements[trimedStringVal]; ok { + arrayOrString.StringVal = "" + arrayOrString.ArrayVal = substitution.ApplyArrayReplacements(stringVal, stringReplacements, arrayReplacements) + arrayOrString.Type = ParamTypeArray + } + + // if the stringVal is a reference an object param, we need to change the type other than apply replacement + if _, ok := objectReplacements[trimedStringVal]; ok { + arrayOrString.StringVal = "" + arrayOrString.ObjectVal = objectReplacements[trimedStringVal] + arrayOrString.Type = ParamTypeObject } } @@ -160,29 +265,46 @@ func NewArrayOrString(value string, values ...string) *ArrayOrString { } } +// NewObject creates an ArrayOrString of type ParamTypeObject using the provided key-value pairs +func NewObject(pairs map[string]string) *ArrayOrString { + return &ArrayOrString{ + Type: ParamTypeObject, + ObjectVal: pairs, + } +} + // ArrayReference returns the name of the parameter from 
array parameter reference // returns arrayParam from $(params.arrayParam[*]) func ArrayReference(a string) string { return strings.TrimSuffix(strings.TrimPrefix(a, "$("+ParamsPrefix+"."), "[*])") } -func validatePipelineParametersVariablesInTaskParameters(params []Param, prefix string, paramNames sets.String, arrayParamNames sets.String) (errs *apis.FieldError) { +// validatePipelineParametersVariablesInTaskParameters validates param value that +// may contain the reference(s) to other params to make sure those references are used appropriately. +func validatePipelineParametersVariablesInTaskParameters(params []Param, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for _, param := range params { - if param.Value.Type == ParamTypeString { - errs = errs.Also(validateStringVariable(param.Value.StringVal, prefix, paramNames, arrayParamNames).ViaFieldKey("params", param.Name)) - } else { + switch param.Value.Type { + case ParamTypeArray: for idx, arrayElement := range param.Value.ArrayVal { - errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames).ViaFieldIndex("value", idx).ViaFieldKey("params", param.Name)) + errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("value", idx).ViaFieldKey("params", param.Name)) + } + case ParamTypeObject: + for key, val := range param.Value.ObjectVal { + errs = errs.Also(validateStringVariable(val, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldKey("properties", key).ViaFieldKey("params", param.Name)) } + default: + errs = errs.Also(validateParamStringValue(param, prefix, paramNames, arrayParamNames, objectParamNameKeys)) } } return errs } -func validatePipelineParametersVariablesInMatrixParameters(matrix []Param, prefix string, paramNames sets.String, arrayParamNames sets.String) (errs *apis.FieldError) { +// 
validatePipelineParametersVariablesInMatrixParameters validates matrix param value +// that may contain the reference(s) to other params to make sure those references are used appropriately. +func validatePipelineParametersVariablesInMatrixParameters(matrix []Param, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for _, param := range matrix { for idx, arrayElement := range param.Value.ArrayVal { - errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames).ViaFieldIndex("value", idx).ViaFieldKey("matrix", param.Name)) + errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("value", idx).ViaFieldKey("matrix", param.Name)) } } return errs @@ -193,10 +315,6 @@ func validateParametersInTaskMatrix(matrix []Param) (errs *apis.FieldError) { if param.Value.Type != ParamTypeArray { errs = errs.Also(apis.ErrInvalidValue("parameters of type array only are allowed in matrix", "").ViaFieldKey("matrix", param.Name)) } - // results are not yet allowed in parameters in a matrix - dynamic fanning out will be supported in future milestone - if expressions, ok := GetVarSubstitutionExpressionsForParam(param); ok && LooksLikeContainsResultRefs(expressions) { - return errs.Also(apis.ErrInvalidValue("result references are not allowed in parameters in a matrix", "value").ViaFieldKey("matrix", param.Name)) - } } return errs } @@ -214,12 +332,42 @@ func validateParameterInOneOfMatrixOrParams(matrix []Param, params []Param) (err return errs } -func validateStringVariable(value, prefix string, stringVars sets.String, arrayVars sets.String) *apis.FieldError { +// validateParamStringValue validates the param value field of string type +// that may contain references to other isolated array/object params other than string param. 
+func validateParamStringValue(param Param, prefix string, paramNames sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { + stringValue := param.Value.StringVal + + // if the provided param value is an isolated reference to the whole array/object, we just check if the param name exists. + isIsolated, errs := substitution.ValidateWholeArrayOrObjectRefInStringVariable(param.Name, stringValue, prefix, paramNames) + if isIsolated { + return errs + } + + // if the provided param value is string literal and/or contains multiple variables + // valid example: "$(params.myString) and another $(params.myObject.key1)" + // invalid example: "$(params.myString) and another $(params.myObject[*])" + return validateStringVariable(stringValue, prefix, paramNames, arrayVars, objectParamNameKeys).ViaFieldKey("params", param.Name) +} + +// validateStringVariable validates the normal string fields that can only accept references to string param or individual keys of object param +func validateStringVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError { errs := substitution.ValidateVariableP(value, prefix, stringVars) + errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys)) return errs.Also(substitution.ValidateVariableProhibitedP(value, prefix, arrayVars)) } -func validateArrayVariable(value, prefix string, stringVars sets.String, arrayVars sets.String) *apis.FieldError { +func validateArrayVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError { errs := substitution.ValidateVariableP(value, prefix, stringVars) + errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys)) return errs.Also(substitution.ValidateVariableIsolatedP(value, prefix, arrayVars)) } + +func validateObjectVariable(value, prefix string, objectParamNameKeys 
map[string][]string) (errs *apis.FieldError) { + objectNames := sets.NewString() + for objectParamName, keys := range objectParamNameKeys { + objectNames.Insert(objectParamName) + errs = errs.Also(substitution.ValidateVariableP(value, fmt.Sprintf("%s\\.%s", prefix, objectParamName), sets.NewString(keys...))) + } + + return errs.Also(substitution.ValidateEntireVariableProhibitedP(value, prefix, objectNames)) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_defaults.go index af9cf387c9..a7463ae53e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_defaults.go @@ -58,32 +58,3 @@ func (ps *PipelineSpec) SetDefaults(ctx context.Context) { } } } - -// applyImplicitParams propagates implicit params from the parent context -// through the Pipeline and underlying specs. -func (ps *PipelineSpec) applyImplicitParams(ctx context.Context) { - ctx = addContextParamSpec(ctx, ps.Params) - ps.Params = getContextParamSpecs(ctx) - - for i, pt := range ps.Tasks { - ctx := ctx // Ensure local scoping per Task - - // Only propagate param context to the spec - ref params should - // still be explicitly set. - if pt.TaskSpec != nil { - ctx = addContextParams(ctx, pt.Params) - ps.Tasks[i].Params = getContextParams(ctx, pt.Params...) - pt.TaskSpec.applyImplicitParams(ctx) - } - } - - for i, ft := range ps.Finally { - ctx := ctx // Ensure local scoping per Task - - if ft.TaskSpec != nil { - ctx = addContextParams(ctx, ft.Params) - ps.Finally[i].Params = getContextParams(ctx, ft.Params...) 
- ft.TaskSpec.applyImplicitParams(ctx) - } - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go index 5e177b698f..ecacac87ae 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go @@ -24,6 +24,7 @@ import ( "github.com/google/go-containerregistry/pkg/name" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline" + "github.com/tektoncd/pipeline/pkg/apis/version" "github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -122,12 +123,17 @@ type PipelineResult struct { // Name the given name Name string `json:"name"` + // Type is the user-specified type of the result. + // The possible types are 'string', 'array', and 'object', with 'string' as the default. + // 'array' and 'object' types are alpha features. 
+ Type ResultsType `json:"type,omitempty"` + // Description is a human-readable description of the result // +optional Description string `json:"description"` // Value the expression used to retrieve the value - Value string `json:"value"` + Value ArrayOrString `json:"value"` } // PipelineTaskMetadata contains the labels or annotations for an EmbeddedTask @@ -172,12 +178,6 @@ type PipelineTask struct { // +optional TaskSpec *EmbeddedTask `json:"taskSpec,omitempty"` - // Conditions is a list of conditions that need to be true for the task to run - // Conditions are deprecated, use WhenExpressions instead - // +optional - // +listType=atomic - Conditions []PipelineTaskCondition `json:"conditions,omitempty"` - // WhenExpressions is a list of when expressions that need to be true for the task to run // +optional WhenExpressions WhenExpressions `json:"when,omitempty"` @@ -248,11 +248,6 @@ func (pt PipelineTask) validateCustomTask() (errs *apis.FieldError) { errs = errs.Also(apis.ErrInvalidValue("custom task spec must specify apiVersion", "taskSpec.apiVersion")) } - // Conditions are deprecated so the effort to support them with custom tasks is not justified. - // When expressions should be used instead. - if len(pt.Conditions) > 0 { - errs = errs.Also(apis.ErrInvalidValue("custom tasks do not support conditions - use when expressions instead", "conditions")) - } // TODO(#3133): Support these features if possible. 
if pt.Resources != nil { errs = errs.Also(apis.ErrInvalidValue("custom tasks do not support PipelineResources", "resources")) @@ -277,6 +272,7 @@ func (pt PipelineTask) validateBundle() (errs *apis.FieldError) { // validateTask validates a pipeline task or a final task for taskRef and taskSpec func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) { + cfg := config.FromContextOrDefaults(ctx) // Validate TaskSpec if it's present if pt.TaskSpec != nil { errs = errs.Also(pt.TaskSpec.Validate(ctx).ViaField("taskSpec")) @@ -287,21 +283,21 @@ func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) if errSlice := validation.IsQualifiedName(pt.TaskRef.Name); len(errSlice) != 0 { errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name")) } - } else { + } else if pt.TaskRef.Resolver == "" { errs = errs.Also(apis.ErrInvalidValue("taskRef must specify name", "taskRef.name")) } // fail if bundle is present when EnableTektonOCIBundles feature flag is off (as it won't be allowed nor used) - if pt.TaskRef.Bundle != "" { + if !cfg.FeatureFlags.EnableTektonOCIBundles && pt.TaskRef.Bundle != "" { errs = errs.Also(apis.ErrDisallowedFields("taskref.bundle")) } - // fail if resolver or resource are present regardless - // of enabled api fields because remote resolution is - // not implemented yet for PipelineTasks. - if pt.TaskRef.Resolver != "" { - errs = errs.Also(apis.ErrDisallowedFields("taskref.resolver")) - } - if len(pt.TaskRef.Resource) > 0 { - errs = errs.Also(apis.ErrDisallowedFields("taskref.resource")) + if cfg.FeatureFlags.EnableAPIFields != config.AlphaAPIFields { + // fail if resolver or resource are present when enable-api-fields is false. 
+ if pt.TaskRef.Resolver != "" { + errs = errs.Also(apis.ErrDisallowedFields("taskref.resolver")) + } + if len(pt.TaskRef.Resource) > 0 { + errs = errs.Also(apis.ErrDisallowedFields("taskref.resource")) + } } } return errs @@ -311,13 +307,58 @@ func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldErr if len(pt.Matrix) != 0 { // This is an alpha feature and will fail validation if it's used in a pipeline spec // when the enable-api-fields feature gate is anything but "alpha". - errs = errs.Also(ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) + // Matrix requires "embedded-status" feature gate to be set to "minimal", and will fail + // validation if it is anything but "minimal". + errs = errs.Also(ValidateEmbeddedStatus(ctx, "matrix", config.MinimalEmbeddedStatus)) + errs = errs.Also(pt.validateMatrixCombinationsCount(ctx)) } errs = errs.Also(validateParameterInOneOfMatrixOrParams(pt.Matrix, pt.Params)) errs = errs.Also(validateParametersInTaskMatrix(pt.Matrix)) return errs } +func (pt *PipelineTask) validateMatrixCombinationsCount(ctx context.Context) (errs *apis.FieldError) { + matrixCombinationsCount := pt.GetMatrixCombinationsCount() + maxMatrixCombinationsCount := config.FromContextOrDefaults(ctx).Defaults.DefaultMaxMatrixCombinationsCount + if matrixCombinationsCount > maxMatrixCombinationsCount { + errs = errs.Also(apis.ErrOutOfBoundsValue(matrixCombinationsCount, 0, maxMatrixCombinationsCount, "matrix")) + } + return errs +} + +func (pt PipelineTask) validateEmbeddedOrType() (errs *apis.FieldError) { + // Reject cases where APIVersion and/or Kind are specified alongside an embedded Task. + // We determine if this is an embedded Task by checking of TaskSpec.TaskSpec.Steps has items. 
+ if pt.TaskSpec != nil && len(pt.TaskSpec.TaskSpec.Steps) > 0 { + if pt.TaskSpec.APIVersion != "" { + errs = errs.Also(&apis.FieldError{ + Message: "taskSpec.apiVersion cannot be specified when using taskSpec.steps", + Paths: []string{"taskSpec.apiVersion"}, + }) + } + if pt.TaskSpec.Kind != "" { + errs = errs.Also(&apis.FieldError{ + Message: "taskSpec.kind cannot be specified when using taskSpec.steps", + Paths: []string{"taskSpec.kind"}, + }) + } + } + return +} + +// GetMatrixCombinationsCount returns the count of combinations of Parameters generated from the Matrix in PipelineTask. +func (pt *PipelineTask) GetMatrixCombinationsCount() int { + if len(pt.Matrix) == 0 { + return 0 + } + count := 1 + for _, param := range pt.Matrix { + count *= len(param.Value.ArrayVal) + } + return count +} + func (pt *PipelineTask) validateResultsFromMatrixedPipelineTasksNotConsumed(matrixedPipelineTasks sets.String) (errs *apis.FieldError) { for _, ref := range PipelineTaskResultRefs(pt) { if matrixedPipelineTasks.Has(ref.PipelineTask) { @@ -405,7 +446,14 @@ func validateExecutionStatusVariablesExpressions(expressions []string, ptNames s func (pt *PipelineTask) validateWorkspaces(workspaceNames sets.String) (errs *apis.FieldError) { for i, ws := range pt.Workspaces { - if !workspaceNames.Has(ws.Workspace) { + if ws.Workspace == "" { + if !workspaceNames.Has(ws.Name) { + errs = errs.Also(apis.ErrInvalidValue( + fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Name), + "", + ).ViaFieldIndex("workspaces", i)) + } + } else if !workspaceNames.Has(ws.Workspace) { errs = errs.Also(apis.ErrInvalidValue( fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Workspace), "", @@ -442,6 +490,9 @@ func (pt PipelineTask) ValidateName() *apis.FieldError { // calls the validation routine based on the type of the task func (pt PipelineTask) Validate(ctx context.Context) (errs 
*apis.FieldError) { errs = errs.Also(pt.validateRefOrSpec()) + + errs = errs.Also(pt.validateEmbeddedOrType()) + cfg := config.FromContextOrDefaults(ctx) // If EnableCustomTasks feature flag is on, validate custom task specifications // pipeline task having taskRef with APIVersion is classified as custom task @@ -485,13 +536,6 @@ func (pt PipelineTask) resourceDeps() []string { } } - // Add any dependents from conditional resources. - for _, cond := range pt.Conditions { - for _, rd := range cond.Resources { - resourceDeps = append(resourceDeps, rd.From...) - } - } - // Add any dependents from result references. for _, ref := range PipelineTaskResultRefs(&pt) { resourceDeps = append(resourceDeps, ref.PipelineTask) @@ -565,22 +609,6 @@ type PipelineTaskParam struct { Value string `json:"value"` } -// PipelineTaskCondition allows a PipelineTask to declare a Condition to be evaluated before -// the Task is run. -type PipelineTaskCondition struct { - // ConditionRef is the name of the Condition to use for the conditionCheck - ConditionRef string `json:"conditionRef"` - - // Params declare parameters passed to this Condition - // +optional - // +listType=atomic - Params []Param `json:"params,omitempty"` - - // Resources declare the resources provided to this Condition as input - // +listType=atomic - Resources []PipelineTaskInputResource `json:"resources,omitempty"` -} - // PipelineDeclaredResource is used by a Pipeline to declare the types of the // PipelineResources that it will required to run and names which can be used to // refer to these PipelineResources in PipelineTaskResourceBindings. 
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go index 774ce7a9be..38c7067e21 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go @@ -59,8 +59,8 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(validateGraph(ps.Tasks)) errs = errs.Also(validateParamResults(ps.Tasks)) // The parameter variables should be valid - errs = errs.Also(validatePipelineParameterVariables(ps.Tasks, ps.Params).ViaField("tasks")) - errs = errs.Also(validatePipelineParameterVariables(ps.Finally, ps.Params).ViaField("finally")) + errs = errs.Also(validatePipelineParameterVariables(ctx, ps.Tasks, ps.Params).ViaField("tasks")) + errs = errs.Also(validatePipelineParameterVariables(ctx, ps.Finally, ps.Params).ViaField("finally")) errs = errs.Also(validatePipelineContextVariables(ps.Tasks).ViaField("tasks")) errs = errs.Also(validatePipelineContextVariables(ps.Finally).ViaField("finally")) errs = errs.Also(validateExecutionStatusVariables(ps.Tasks, ps.Finally)) @@ -69,7 +69,7 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(validatePipelineWorkspacesUsage(ps.Workspaces, ps.Tasks).ViaField("tasks")) errs = errs.Also(validatePipelineWorkspacesUsage(ps.Workspaces, ps.Finally).ViaField("finally")) // Validate the pipeline's results - errs = errs.Also(validatePipelineResults(ps.Results)) + errs = errs.Also(validatePipelineResults(ps.Results, ps.Tasks)) errs = errs.Also(validateTasksAndFinallySection(ps)) errs = errs.Also(validateFinalTasks(ps.Tasks, ps.Finally)) errs = errs.Also(validateWhenExpressions(ps.Tasks, ps.Finally)) @@ -125,28 +125,15 @@ func validatePipelineWorkspacesUsage(wss []PipelineWorkspaceDeclaration, pts []P // 
validatePipelineParameterVariables validates parameters with those specified by each pipeline task, // (1) it validates the type of parameter is either string or array (2) parameter default value matches // with the type of that param (3) ensures that the referenced param variable is defined is part of the param declarations -func validatePipelineParameterVariables(tasks []PipelineTask, params []ParamSpec) (errs *apis.FieldError) { +func validatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params []ParamSpec) (errs *apis.FieldError) { parameterNames := sets.NewString() arrayParameterNames := sets.NewString() + objectParameterNameKeys := map[string][]string{} - for _, p := range params { - // Verify that p is a valid type. - validType := false - for _, allowedType := range AllParamTypes { - if p.Type == allowedType { - validType = true - } - } - if !validType { - errs = errs.Also(apis.ErrInvalidValue(string(p.Type), "type").ViaFieldKey("params", p.Name)) - } - - // If a default value is provided, ensure its type matches param's declared type. 
- if (p.Default != nil) && (p.Default.Type != p.Type) { - errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), - "type", "default.type").ViaFieldKey("params", p.Name)) - } + // validates all the types within a slice of ParamSpecs + errs = errs.Also(ValidateParameterTypes(ctx, params).ViaField("params")) + for _, p := range params { if parameterNames.Has(p.Name) { errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) } @@ -155,16 +142,21 @@ func validatePipelineParameterVariables(tasks []PipelineTask, params []ParamSpec if p.Type == ParamTypeArray { arrayParameterNames.Insert(p.Name) } - } - return errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames)) + if p.Type == ParamTypeObject { + for k := range p.Properties { + objectParameterNameKeys[p.Name] = append(objectParameterNameKeys[p.Name], k) + } + } + } + return errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames, objectParameterNameKeys)) } -func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String) (errs *apis.FieldError) { +func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for idx, task := range tasks { - errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames).ViaIndex(idx)) - errs = errs.Also(validatePipelineParametersVariablesInMatrixParameters(task.Matrix, prefix, paramNames, arrayParamNames).ViaIndex(idx)) - errs = errs.Also(task.WhenExpressions.validatePipelineParametersVariables(prefix, paramNames, arrayParamNames).ViaIndex(idx)) + errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, 
prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) + errs = errs.Also(validatePipelineParametersVariablesInMatrixParameters(task.Matrix, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) + errs = errs.Also(task.WhenExpressions.validatePipelineParametersVariables(prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) } return errs } @@ -263,24 +255,63 @@ func filter(arr []string, cond func(string) bool) []string { } // validatePipelineResults ensure that pipeline result variables are properly configured -func validatePipelineResults(results []PipelineResult) (errs *apis.FieldError) { +func validatePipelineResults(results []PipelineResult, tasks []PipelineTask) (errs *apis.FieldError) { + pipelineTaskNames := getPipelineTasksNames(tasks) for idx, result := range results { expressions, ok := GetVarSubstitutionExpressionsForPipelineResult(result) - if ok { - if LooksLikeContainsResultRefs(expressions) { - expressions = filter(expressions, looksLikeResultRef) - resultRefs := NewResultRefs(expressions) - if len(expressions) != len(resultRefs) { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("expected all of the expressions %v to be result expressions but only %v were", expressions, resultRefs), - "value").ViaFieldIndex("results", idx)) - } - } + if !ok { + errs = errs.Also(apis.ErrInvalidValue("expected pipeline results to be task result expressions but no expressions were found", + "value").ViaFieldIndex("results", idx)) + } + + if !LooksLikeContainsResultRefs(expressions) { + errs = errs.Also(apis.ErrInvalidValue("expected pipeline results to be task result expressions but an invalid expressions was found", + "value").ViaFieldIndex("results", idx)) + } + + expressions = filter(expressions, looksLikeResultRef) + resultRefs := NewResultRefs(expressions) + if len(expressions) != len(resultRefs) { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("expected all of the expressions %v to be result 
expressions but only %v were", expressions, resultRefs), + "value").ViaFieldIndex("results", idx)) + } + + if !taskContainsResult(result.Value.StringVal, pipelineTaskNames) { + errs = errs.Also(apis.ErrInvalidValue("referencing a nonexistent task", + "value").ViaFieldIndex("results", idx)) } } return errs } +// put task names in a set +func getPipelineTasksNames(pipelineTasks []PipelineTask) sets.String { + pipelineTaskNames := make(sets.String) + for _, pipelineTask := range pipelineTasks { + pipelineTaskNames.Insert(pipelineTask.Name) + } + + return pipelineTaskNames +} + +// taskContainsResult ensures the result value is referenced within the +// task names +func taskContainsResult(resultExpression string, pipelineTaskNames sets.String) bool { + // split incase of multiple resultExpressions in the same result.Value string + // i.e "$(task.) - $(task2.)" + split := strings.Split(resultExpression, "$") + for _, expression := range split { + if expression != "" { + pipelineTaskName, _, _, _, _ := parseExpression(stripVarSubExpression("$" + expression)) + if !pipelineTaskNames.Has(pipelineTaskName) { + return false + } + } + } + return true +} + func validateTasksAndFinallySection(ps *PipelineSpec) *apis.FieldError { if len(ps.Finally) != 0 && len(ps.Tasks) == 0 { return apis.ErrInvalidValue(fmt.Sprintf("spec.tasks is empty but spec.finally has %d tasks", len(ps.Finally)), "finally") @@ -293,9 +324,6 @@ func validateFinalTasks(tasks []PipelineTask, finalTasks []PipelineTask) (errs * if len(f.RunAfter) != 0 { errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("no runAfter allowed under spec.finally, final task %s has runAfter specified", f.Name), "").ViaFieldIndex("finally", idx)) } - if len(f.Conditions) != 0 { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("no conditions allowed under spec.finally, final task %s has conditions specified", f.Name), "").ViaFieldIndex("finally", idx)) - } } ts := PipelineTaskList(tasks).Names() @@ -360,7 +388,6 @@ func 
validateTasksInputFrom(tasks []PipelineTask) (errs *apis.FieldError) { func validateWhenExpressions(tasks []PipelineTask, finalTasks []PipelineTask) (errs *apis.FieldError) { for i, t := range tasks { - errs = errs.Also(validateOneOfWhenExpressionsOrConditions(t).ViaFieldIndex("tasks", i)) errs = errs.Also(t.WhenExpressions.validate().ViaFieldIndex("tasks", i)) } for i, t := range finalTasks { @@ -369,13 +396,6 @@ func validateWhenExpressions(tasks []PipelineTask, finalTasks []PipelineTask) (e return errs } -func validateOneOfWhenExpressionsOrConditions(t PipelineTask) *apis.FieldError { - if t.WhenExpressions != nil && t.Conditions != nil { - return apis.ErrMultipleOneOf("when", "conditions") - } - return nil -} - // validateDeclaredResources ensures that the specified resources have unique names and // validates that all the resources referenced by pipeline tasks are declared in the pipeline func validateDeclaredResources(resources []PipelineDeclaredResource, tasks []PipelineTask, finalTasks []PipelineTask) *apis.FieldError { @@ -397,11 +417,6 @@ func validateDeclaredResources(resources []PipelineDeclaredResource, tasks []Pip } } - for _, condition := range t.Conditions { - for _, cr := range condition.Resources { - required = append(required, cr.Resource) - } - } } for _, t := range finalTasks { if t.Resources != nil { @@ -452,10 +467,6 @@ func validateFrom(tasks []PipelineTask) (errs *apis.FieldError) { inputResources = append(inputResources, t.Resources.Inputs...) } - for _, c := range t.Conditions { - inputResources = append(inputResources, c.Resources...) 
- } - for j, rd := range inputResources { for _, pt := range rd.From { outputs, found := taskOutputs[pt] diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_types.go new file mode 100644 index 0000000000..17bd57428c --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_types.go @@ -0,0 +1,35 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// PipelineRef can be used to refer to a specific instance of a Pipeline. +type PipelineRef struct { + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name,omitempty"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // Bundle url reference to a Tekton Bundle. + // +optional + Bundle string `json:"bundle,omitempty"` + + // ResolverRef allows referencing a Pipeline in a remote location + // like a git repo. This field is only supported when the alpha + // feature gate is enabled. 
+ // +optional + ResolverRef `json:",omitempty"` +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go index 1a61b7b219..45300a5480 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go @@ -18,71 +18,58 @@ package v1beta1 import ( "context" + "fmt" "github.com/google/go-containerregistry/pkg/name" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/version" "knative.dev/pkg/apis" ) // Validate ensures that a supplied PipelineRef field is populated // correctly. No errors are returned for a nil PipelineRef. func (ref *PipelineRef) Validate(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) if ref == nil { return } - if cfg.FeatureFlags.EnableAPIFields == config.AlphaAPIFields { - errs = errs.Also(ref.validateAlphaRef(ctx)) - } else { - errs = errs.Also(ref.validateInTreeRef(ctx)) - } - return -} - -// validateInTreeRef returns errors if the given pipelineRef is not -// valid for Pipelines' built-in resolution machinery. 
-func (ref *PipelineRef) validateInTreeRef(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) - if ref.Resolver != "" { - errs = errs.Also(apis.ErrDisallowedFields("resolver")) - } - if ref.Resource != nil { - errs = errs.Also(apis.ErrDisallowedFields("resource")) - } - if ref.Name == "" { - errs = errs.Also(apis.ErrMissingField("name")) - } - if cfg.FeatureFlags.EnableTektonOCIBundles { - if ref.Bundle != "" && ref.Name == "" { - errs = errs.Also(apis.ErrMissingField("name")) - } - if ref.Bundle != "" { - if _, err := name.ParseReference(ref.Bundle); err != nil { - errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) - } - } - } else if ref.Bundle != "" { - errs = errs.Also(apis.ErrDisallowedFields("bundle")) - } - return -} -// validateAlphaRef ensures that the user has passed either a -// valid remote resource reference or a valid in-tree resource reference, -// but not both. -func (ref *PipelineRef) validateAlphaRef(ctx context.Context) (errs *apis.FieldError) { switch { - case ref.Resolver == "" && ref.Resource != nil: - errs = errs.Also(apis.ErrMissingField("resolver")) - case ref.Resolver == "": - errs = errs.Also(ref.validateInTreeRef(ctx)) - default: + case ref.Resolver != "": + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.AlphaAPIFields).ViaField("resolver")) if ref.Name != "" { errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) } if ref.Bundle != "" { errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resolver")) } + case ref.Resource != nil: + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resource", config.AlphaAPIFields).ViaField("resource")) + if ref.Name != "" { + errs = errs.Also(apis.ErrMultipleOneOf("name", "resource")) + } + if ref.Bundle != "" { + errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resource")) + } + if ref.Resolver == "" { + errs = errs.Also(apis.ErrMissingField("resolver")) + } + case ref.Name == "": + errs = 
errs.Also(apis.ErrMissingField("name")) + case ref.Bundle != "": + errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle")) + if _, err := name.ParseReference(ref.Bundle); err != nil { + errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) + } } return } + +func validateBundleFeatureFlag(ctx context.Context, featureName string, wantValue bool) *apis.FieldError { + flagValue := config.FromContextOrDefaults(ctx).FeatureFlags.EnableTektonOCIBundles + if flagValue != wantValue { + var errs *apis.FieldError + message := fmt.Sprintf(`%s requires "enable-tekton-oci-bundles" feature gate to be %t but it is %t`, featureName, wantValue, flagValue) + return errs.Also(apis.ErrGeneric(message)) + } + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go index 54157031e0..0ee194be1e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go @@ -53,8 +53,5 @@ func (prs *PipelineRunSpec) SetDefaults(ctx context.Context) { if prs.PipelineSpec != nil { prs.PipelineSpec.SetDefaults(ctx) - if config.FromContextOrDefaults(ctx).FeatureFlags.EnableAPIFields == "alpha" { - prs.PipelineSpec.applyImplicitParams(addContextParams(ctx, prs.Params)) - } } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go index d06a7f5700..93500365c9 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go @@ -20,6 +20,7 @@ import ( "context" "time" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" 
"github.com/tektoncd/pipeline/pkg/apis/config" @@ -83,7 +84,7 @@ func (pr *PipelineRun) HasStarted() bool { // IsCancelled returns true if the PipelineRun's spec status is set to Cancelled state func (pr *PipelineRun) IsCancelled() bool { - return pr.Spec.Status == PipelineRunSpecStatusCancelled || pr.Spec.Status == PipelineRunSpecStatusCancelledDeprecated + return pr.Spec.Status == PipelineRunSpecStatusCancelled } // IsGracefullyCancelled returns true if the PipelineRun's spec status is set to CancelledRunFinally state @@ -172,18 +173,6 @@ func (pr *PipelineRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bo return false } -// GetServiceAccountName returns the service account name for a given -// PipelineTask if configured, otherwise it returns the PipelineRun's serviceAccountName. -func (pr *PipelineRun) GetServiceAccountName(pipelineTaskName string) string { - serviceAccountName := pr.Spec.ServiceAccountName - for _, sa := range pr.Spec.ServiceAccountNames { - if sa.TaskName == pipelineTaskName { - serviceAccountName = sa.ServiceAccountName - } - } - return serviceAccountName -} - // HasVolumeClaimTemplate returns true if PipelineRun contains volumeClaimTemplates that is // used for creating PersistentVolumeClaims with an OwnerReference for each run func (pr *PipelineRun) HasVolumeClaimTemplate() bool { @@ -212,22 +201,17 @@ type PipelineRunSpec struct { // +optional ServiceAccountName string `json:"serviceAccountName,omitempty"` - // Deprecated: use taskRunSpecs.ServiceAccountName instead - // +optional - // +listType=atomic - ServiceAccountNames []PipelineRunSpecServiceAccountName `json:"serviceAccountNames,omitempty"` // Used for cancelling a pipelinerun (and maybe more later on) // +optional Status PipelineRunSpecStatus `json:"status,omitempty"` - // This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha" - // for this field to be supported. - // // Time after which the Pipeline times out. 
// Currently three keys are accepted in the map // pipeline, tasks and finally // with Timeouts.pipeline >= Timeouts.tasks + Timeouts.finally // +optional Timeouts *TimeoutFields `json:"timeouts,omitempty"` + + // Timeout Deprecated: use pipelineRunSpec.Timeouts.Pipeline instead // Time after which the Pipeline times out. Defaults to never. // Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional @@ -259,10 +243,6 @@ type TimeoutFields struct { type PipelineRunSpecStatus string const ( - // PipelineRunSpecStatusCancelledDeprecated Deprecated: indicates that the user wants to cancel the task, - // if not already cancelled or terminated (replaced by "Cancelled") - PipelineRunSpecStatusCancelledDeprecated = "PipelineRunCancelled" - // PipelineRunSpecStatusCancelled indicates that the user wants to cancel the task, // if not already cancelled or terminated PipelineRunSpecStatusCancelled = "Cancelled" @@ -281,25 +261,6 @@ const ( PipelineRunSpecStatusPending = "PipelineRunPending" ) -// PipelineRef can be used to refer to a specific instance of a Pipeline. -// Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64 -type PipelineRef struct { - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name string `json:"name,omitempty"` - // API version of the referent - // +optional - APIVersion string `json:"apiVersion,omitempty"` - // Bundle url reference to a Tekton Bundle. - // +optional - Bundle string `json:"bundle,omitempty"` - - // ResolverRef allows referencing a Pipeline in a remote location - // like a git repo. This field is only supported when the alpha - // feature gate is enabled. 
- // +optional - ResolverRef `json:",omitempty"` -} - // PipelineRunStatus defines the observed state of PipelineRun type PipelineRunStatus struct { duckv1beta1.Status `json:",inline"` @@ -411,32 +372,12 @@ type ChildStatusReference struct { // PipelineTaskName is the name of the PipelineTask this is referencing. PipelineTaskName string `json:"pipelineTaskName,omitempty"` - // ConditionChecks is the the list of condition checks, including their names and statuses, for the PipelineTask. - // Deprecated: This field will be removed when conditions are removed. - // +optional - // +listType=atomic - ConditionChecks []*PipelineRunChildConditionCheckStatus `json:"conditionChecks,omitempty"` // WhenExpressions is the list of checks guarding the execution of the PipelineTask // +optional // +listType=atomic WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"` } -// GetConditionChecks returns a map representation of this ChildStatusReference's ConditionChecks, in the same form -// as PipelineRunTaskRunStatus.ConditionChecks. -func (cr ChildStatusReference) GetConditionChecks() map[string]*PipelineRunConditionCheckStatus { - if len(cr.ConditionChecks) == 0 { - return nil - } - ccMap := make(map[string]*PipelineRunConditionCheckStatus) - - for _, cc := range cr.ConditionChecks { - ccMap[cc.ConditionCheckName] = &cc.PipelineRunConditionCheckStatus - } - - return ccMap -} - // PipelineRunStatusFields holds the fields of PipelineRunStatus' status. // This is defined separately and inlined so that other types can readily // consume these fields via duck typing. @@ -484,19 +425,41 @@ type PipelineRunStatusFields struct { type SkippedTask struct { // Name is the Pipeline Task name Name string `json:"name"` + // Reason is the cause of the PipelineTask being skipped. 
+ Reason SkippingReason `json:"reason"` // WhenExpressions is the list of checks guarding the execution of the PipelineTask // +optional // +listType=atomic WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"` } +// SkippingReason explains why a PipelineTask was skipped. +type SkippingReason string + +const ( + // WhenExpressionsSkip means the task was skipped due to at least one of its when expressions evaluating to false + WhenExpressionsSkip SkippingReason = "When Expressions evaluated to false" + // ParentTasksSkip means the task was skipped because its parent was skipped + ParentTasksSkip SkippingReason = "Parent Tasks were skipped" + // StoppingSkip means the task was skipped because the pipeline run is stopping + StoppingSkip SkippingReason = "PipelineRun was stopping" + // GracefullyCancelledSkip means the task was skipped because the pipeline run has been gracefully cancelled + GracefullyCancelledSkip SkippingReason = "PipelineRun was gracefully cancelled" + // GracefullyStoppedSkip means the task was skipped because the pipeline run has been gracefully stopped + GracefullyStoppedSkip SkippingReason = "PipelineRun was gracefully stopped" + // MissingResultsSkip means the task was skipped because it's missing necessary results + MissingResultsSkip SkippingReason = "Results were missing" + // None means the task was not skipped + None SkippingReason = "None" +) + // PipelineRunResult used to describe the results of a pipeline type PipelineRunResult struct { // Name is the result's name as declared by the Pipeline Name string `json:"name"` // Value is the result returned from the execution of this PipelineRun - Value string `json:"value"` + Value ArrayOrString `json:"value"` } // PipelineRunTaskRunStatus contains the name of the PipelineTask for this TaskRun and the TaskRun's Status @@ -506,9 +469,6 @@ type PipelineRunTaskRunStatus struct { // Status is the TaskRunStatus for the corresponding TaskRun // +optional Status *TaskRunStatus 
`json:"status,omitempty"` - // ConditionChecks maps the name of a condition check to its Status - // +optional - ConditionChecks map[string]*PipelineRunConditionCheckStatus `json:"conditionChecks,omitempty"` // WhenExpressions is the list of checks guarding the execution of the PipelineTask // +optional // +listType=atomic @@ -528,28 +488,6 @@ type PipelineRunRunStatus struct { WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"` } -// PipelineRunConditionCheckStatus returns the condition check status -type PipelineRunConditionCheckStatus struct { - // ConditionName is the name of the Condition - ConditionName string `json:"conditionName,omitempty"` - // Status is the ConditionCheckStatus for the corresponding ConditionCheck - // +optional - Status *ConditionCheckStatus `json:"status,omitempty"` -} - -// PipelineRunChildConditionCheckStatus is used to record the status of condition checks within StatusChildReferences. -type PipelineRunChildConditionCheckStatus struct { - PipelineRunConditionCheckStatus `json:",inline"` - ConditionCheckName string `json:"conditionCheckName,omitempty"` -} - -// PipelineRunSpecServiceAccountName can be used to configure specific -// ServiceAccountName for a concrete Task -type PipelineRunSpecServiceAccountName struct { - TaskName string `json:"taskName,omitempty"` - ServiceAccountName string `json:"serviceAccountName,omitempty"` -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PipelineRunList contains a list of PipelineRun @@ -577,6 +515,12 @@ type PipelineTaskRunSpec struct { StepOverrides []TaskRunStepOverride `json:"stepOverrides,omitempty"` // +listType=atomic SidecarOverrides []TaskRunSidecarOverride `json:"sidecarOverrides,omitempty"` + + // +optional + Metadata *PipelineTaskMetadata `json:"metadata,omitempty"` + + // Compute resources to use for this TaskRun + ComputeResources *corev1.ResourceRequirements `json:"computeResources,omitempty"` } // GetTaskRunSpec returns the task 
specific spec for a given @@ -584,7 +528,7 @@ type PipelineTaskRunSpec struct { func (pr *PipelineRun) GetTaskRunSpec(pipelineTaskName string) PipelineTaskRunSpec { s := PipelineTaskRunSpec{ PipelineTaskName: pipelineTaskName, - TaskServiceAccountName: pr.GetServiceAccountName(pipelineTaskName), + TaskServiceAccountName: pr.Spec.ServiceAccountName, TaskPodTemplate: pr.Spec.PodTemplate, } for _, task := range pr.Spec.TaskRunSpecs { @@ -597,6 +541,7 @@ func (pr *PipelineRun) GetTaskRunSpec(pipelineTaskName string) PipelineTaskRunSp } s.StepOverrides = task.StepOverrides s.SidecarOverrides = task.SidecarOverrides + s.Metadata = task.Metadata } } return s diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go index 0a9dc70285..334ffa1a3c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go @@ -22,8 +22,8 @@ import ( "time" "github.com/tektoncd/pipeline/pkg/apis/config" - apisconfig "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" + "github.com/tektoncd/pipeline/pkg/apis/version" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" ) @@ -72,16 +72,12 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) } } - // This is an alpha feature and will fail validation if it's used in a pipelinerun spec - // when the enable-api-fields feature gate is anything but "alpha". if ps.Timeouts != nil { if ps.Timeout != nil { // can't have both at the same time errs = errs.Also(apis.ErrDisallowedFields("timeout", "timeouts")) } - errs = errs.Also(ValidateEnabledAPIFields(ctx, "timeouts", config.AlphaAPIFields)) - // tasks timeout should be a valid duration of at least 0. 
errs = errs.Also(validateTimeoutDuration("tasks", ps.Timeouts.Tasks)) @@ -99,7 +95,7 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) } } - errs = errs.Also(validateSpecStatus(ctx, ps.Status)) + errs = errs.Also(validateSpecStatus(ps.Status)) if ps.Workspaces != nil { wsNames := make(map[string]int) @@ -119,12 +115,11 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) return errs } -func validateSpecStatus(ctx context.Context, status PipelineRunSpecStatus) *apis.FieldError { +func validateSpecStatus(status PipelineRunSpecStatus) *apis.FieldError { switch status { case "": return nil - case PipelineRunSpecStatusPending, - PipelineRunSpecStatusCancelledDeprecated: + case PipelineRunSpecStatusPending: return nil case PipelineRunSpecStatusCancelled, PipelineRunSpecStatusCancelledRunFinally, @@ -155,7 +150,7 @@ func (ps *PipelineRunSpec) validatePipelineTimeout(timeout time.Duration, errorM if ps.Timeouts.Tasks.Duration > timeout { tasksTimeoutErr = true } - if ps.Timeouts.Tasks.Duration == apisconfig.NoTimeoutDuration && timeout != apisconfig.NoTimeoutDuration { + if ps.Timeouts.Tasks.Duration == config.NoTimeoutDuration && timeout != config.NoTimeoutDuration { tasksTimeoutErr = true tasksTimeoutStr += " (no timeout)" } @@ -170,7 +165,7 @@ func (ps *PipelineRunSpec) validatePipelineTimeout(timeout time.Duration, errorM if ps.Timeouts.Finally.Duration > timeout { finallyTimeoutErr = true } - if ps.Timeouts.Finally.Duration == apisconfig.NoTimeoutDuration && timeout != apisconfig.NoTimeoutDuration { + if ps.Timeouts.Finally.Duration == config.NoTimeoutDuration && timeout != config.NoTimeoutDuration { finallyTimeoutErr = true finallyTimeoutStr += " (no timeout)" } @@ -189,21 +184,17 @@ func (ps *PipelineRunSpec) validatePipelineTimeout(timeout time.Duration, errorM } func validateTaskRunSpec(ctx context.Context, trs PipelineTaskRunSpec) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) 
- if cfg.FeatureFlags.EnableAPIFields == config.AlphaAPIFields { - if trs.StepOverrides != nil { - errs = errs.Also(validateStepOverrides(trs.StepOverrides).ViaField("stepOverrides")) - } - if trs.SidecarOverrides != nil { - errs = errs.Also(validateSidecarOverrides(trs.SidecarOverrides).ViaField("sidecarOverrides")) - } - } else { - if trs.StepOverrides != nil { - errs = errs.Also(apis.ErrDisallowedFields("stepOverrides")) - } - if trs.SidecarOverrides != nil { - errs = errs.Also(apis.ErrDisallowedFields("sidecarOverrides")) - } + if trs.StepOverrides != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "stepOverrides", config.AlphaAPIFields).ViaField("stepOverrides")) + errs = errs.Also(validateStepOverrides(trs.StepOverrides).ViaField("stepOverrides")) + } + if trs.SidecarOverrides != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.AlphaAPIFields).ViaField("sidecarOverrides")) + errs = errs.Also(validateSidecarOverrides(trs.SidecarOverrides).ViaField("sidecarOverrides")) + } + if trs.ComputeResources != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "computeResources", config.AlphaAPIFields).ViaField("computeResources")) + errs = errs.Also(validateTaskRunComputeResources(trs.ComputeResources, trs.StepOverrides)) } return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types.go index 5b07109e85..203a87c40a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types.go @@ -128,16 +128,15 @@ type PipelineResourceBinding struct { // PipelineResourceResult used to export the image name and digest as json type PipelineResourceResult struct { - Key string `json:"key"` - Value string `json:"value"` - ResourceName string `json:"resourceName,omitempty"` - // The 
field ResourceRef should be deprecated and removed in the next API version. - // See https://github.com/tektoncd/pipeline/issues/2694 for more information. - ResourceRef *PipelineResourceRef `json:"resourceRef,omitempty"` - ResultType ResultType `json:"type,omitempty"` + Key string `json:"key"` + Value string `json:"value"` + ResourceName string `json:"resourceName,omitempty"` + ResultType ResultType `json:"type,omitempty"` } // ResultType used to find out whether a PipelineResourceResult is from a task result or not +// Note that ResultsType is another type which is used to define the data type +// (e.g. string, array, etc) we used for Results type ResultType int // UnmarshalJSON unmarshals either an int or a string into a ResultType. String diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go index 2c453f136a..2898fd9dfc 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go @@ -26,7 +26,7 @@ import ( ) // Validate implements apis.Validatable -func (tr *TaskResources) Validate(ctx context.Context) (errs *apis.FieldError) { +func (tr *TaskResources) Validate(context.Context) (errs *apis.FieldError) { if tr != nil { errs = errs.Also(validateTaskResources(tr.Inputs).ViaField("inputs")) errs = errs.Also(validateTaskResources(tr.Outputs).ViaField("outputs")) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_defaults.go similarity index 64% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_defaults.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_defaults.go index fe18e3e697..68de44f6da 100644 
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_defaults.go @@ -1,12 +1,9 @@ /* -Copyright 2019 The Tekton Authors - +Copyright 2022 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,17 +11,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 - -import ( - "context" - - "knative.dev/pkg/apis" -) +package v1beta1 -var _ apis.Defaultable = (*ClusterTask)(nil) +import "context" -// SetDefaults sets the default values for the ClusterTask's Spec. -func (t *ClusterTask) SetDefaults(ctx context.Context) { - t.Spec.SetDefaults(ctx) +// SetDefaults set the default type for TaskResult +func (tr *TaskResult) SetDefaults(context.Context) { + if tr != nil && tr.Type == "" { + // ResultsTypeString is the default value + tr.Type = ResultsTypeString + } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_types.go new file mode 100644 index 0000000000..cbdc5404c3 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_types.go @@ -0,0 +1,73 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "strings" + +// TaskResult used to describe the results of a task +type TaskResult struct { + // Name the given name + Name string `json:"name"` + + // Type is the user-specified type of the result. The possible type + // is currently "string" and will support "array" in following work. + // +optional + Type ResultsType `json:"type,omitempty"` + + // Properties is the JSON Schema properties to support key-value pairs results. + // +optional + Properties map[string]PropertySpec `json:"properties,omitempty"` + + // Description is a human-readable description of the result + // +optional + Description string `json:"description,omitempty"` +} + +// TaskRunResult used to describe the results of a task +type TaskRunResult struct { + // Name the given name + Name string `json:"name"` + + // Type is the user-specified type of the result. The possible type + // is currently "string" and will support "array" in following work. + // +optional + Type ResultsType `json:"type,omitempty"` + + // Value the given value of the result + Value ArrayOrString `json:"value"` +} + +// ResultsType indicates the type of a result; +// Used to distinguish between a single string and an array of strings. +// Note that there is ResultType used to find out whether a +// PipelineResourceResult is from a task result or not, which is different from +// this ResultsType. 
+// TODO(#4723): add "array" and "object" support +// TODO(#4723): align ResultsType and ParamType in ArrayOrString +type ResultsType string + +// Valid ResultsType: +const ( + ResultsTypeString ResultsType = "string" + ResultsTypeArray ResultsType = "array" + ResultsTypeObject ResultsType = "object" +) + +// AllResultsTypes can be used for ResultsTypes validation. +var AllResultsTypes = []ResultsType{ResultsTypeString, ResultsTypeArray, ResultsTypeObject} + +// ResultsArrayReference returns the reference of the result. e.g. results.resultname from $(results.resultname[*]) +func ResultsArrayReference(a string) string { + return strings.TrimSuffix(strings.TrimSuffix(strings.TrimPrefix(a, "$("), ")"), "[*]") +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go new file mode 100644 index 0000000000..3d0dd4ee04 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go @@ -0,0 +1,47 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "fmt" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/version" + "knative.dev/pkg/apis" +) + +// Validate implements apis.Validatable +func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) { + if !resultNameFormatRegex.MatchString(tr.Name) { + return apis.ErrInvalidKeyName(tr.Name, "name", fmt.Sprintf("Name must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my-name', or 'my_name', regex used for validation is '%s')", ResultNameFormat)) + } + // Array and Object is alpha feature + if tr.Type == ResultsTypeArray || tr.Type == ResultsTypeObject { + return errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.AlphaAPIFields)) + } + + // Resources created before the result. Type was introduced may not have Type set + // and should be considered valid + if tr.Type == "" { + return nil + } + + // By default the result type is string + if tr.Type != ResultsTypeString { + return apis.ErrInvalidValue(tr.Type, "type", fmt.Sprintf("type must be string")) + } + + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go index d74e2bcd38..4eb8fa322a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go @@ -19,6 +19,7 @@ package v1beta1 import ( "fmt" "regexp" + "strconv" "strings" ) @@ -26,30 +27,47 @@ import ( type ResultRef struct { PipelineTask string `json:"pipelineTask"` Result string `json:"result"` + ResultsIndex int `json:"resultsIndex"` + Property string `json:"property"` } const ( resultExpressionFormat = "tasks..results." + // Result expressions of the form . will be treated as object results. 
+ // If a string result name contains a dot, brackets should be used to differentiate it from an object result. + // https://github.com/tektoncd/community/blob/main/teps/0075-object-param-and-result-types.md#collisions-with-builtin-variable-replacement + objectResultExpressionFormat = "tasks..results.." // ResultTaskPart Constant used to define the "tasks" part of a pipeline result reference ResultTaskPart = "tasks" // ResultResultPart Constant used to define the "results" part of a pipeline result reference ResultResultPart = "results" // TODO(#2462) use one regex across all substitutions - variableSubstitutionFormat = `\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*\)` + // variableSubstitutionFormat matches format like $result.resultname, $result.resultname[int] and $result.resultname[*] + variableSubstitutionFormat = `\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)` + // exactVariableSubstitutionFormat matches strings that only contain a single reference to result or param variables, but nothing else + // i.e. `$(result.resultname)` is a match, but `foo $(result.resultname)` is not. 
+ exactVariableSubstitutionFormat = `^\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)$` + // arrayIndexing will match all `[int]` and `[*]` for parseExpression + arrayIndexing = `\[([0-9])*\*?\]` // ResultNameFormat Constant used to define the the regex Result.Name should follow ResultNameFormat = `^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$` ) -var variableSubstitutionRegex = regexp.MustCompile(variableSubstitutionFormat) +// VariableSubstitutionRegex is a regex to find all result matching substitutions +var VariableSubstitutionRegex = regexp.MustCompile(variableSubstitutionFormat) +var exactVariableSubstitutionRegex = regexp.MustCompile(exactVariableSubstitutionFormat) var resultNameFormatRegex = regexp.MustCompile(ResultNameFormat) +// arrayIndexingRegex is used to match `[int]` and `[*]` +var arrayIndexingRegex = regexp.MustCompile(arrayIndexing) + // NewResultRefs extracts all ResultReferences from a param or a pipeline result. // If the ResultReference can be extracted, they are returned. Expressions which are not // results are ignored. func NewResultRefs(expressions []string) []*ResultRef { var resultRefs []*ResultRef for _, expression := range expressions { - pipelineTask, result, err := parseExpression(expression) + pipelineTask, result, index, property, err := parseExpression(expression) // If the expression isn't a result but is some other expression, // parseExpression will return an error, in which case we just skip that expression, // since although it's not a result ref, it might be some other kind of reference @@ -57,6 +75,8 @@ func NewResultRefs(expressions []string) []*ResultRef { resultRefs = append(resultRefs, &ResultRef{ PipelineTask: pipelineTask, Result: result, + ResultsIndex: index, + Property: property, }) } } @@ -94,6 +114,11 @@ func GetVarSubstitutionExpressionsForParam(param Param) ([]string, bool) { case ParamTypeString: // string type allExpressions = append(allExpressions, validateString(param.Value.StringVal)...) 
+ case ParamTypeObject: + // object type + for _, value := range param.Value.ObjectVal { + allExpressions = append(allExpressions, validateString(value)...) + } default: return nil, false } @@ -102,12 +127,18 @@ func GetVarSubstitutionExpressionsForParam(param Param) ([]string, bool) { // GetVarSubstitutionExpressionsForPipelineResult extracts all the value between "$(" and ")"" for a pipeline result func GetVarSubstitutionExpressionsForPipelineResult(result PipelineResult) ([]string, bool) { - allExpressions := validateString(result.Value) + allExpressions := validateString(result.Value.StringVal) + for _, v := range result.Value.ArrayVal { + allExpressions = append(allExpressions, validateString(v)...) + } + for _, v := range result.Value.ObjectVal { + allExpressions = append(allExpressions, validateString(v)...) + } return allExpressions, len(allExpressions) != 0 } func validateString(value string) []string { - expressions := variableSubstitutionRegex.FindAllString(value, -1) + expressions := VariableSubstitutionRegex.FindAllString(value, -1) if expressions == nil { return nil } @@ -122,26 +153,60 @@ func stripVarSubExpression(expression string) string { return strings.TrimSuffix(strings.TrimPrefix(expression, "$("), ")") } -func parseExpression(substitutionExpression string) (string, string, error) { +// parseExpression parses "task name", "result name", "array index" (iff it's an array result) and "object key name" (iff it's an object result) +// Valid Example 1: +// - Input: tasks.myTask.results.aStringResult +// - Output: "myTask", "aStringResult", -1, "", nil +// Valid Example 2: +// - Input: tasks.myTask.results.anObjectResult.key1 +// - Output: "myTask", "anObjectResult", 0, "key1", nil +// Valid Example 3: +// - Input: tasks.myTask.results.anArrayResult[1] +// - Output: "myTask", "anArrayResult", 1, "", nil +// Invalid Example 1: +// - Input: tasks.myTask.results.resultName.foo.bar +// - Output: "", "", 0, "", error +// TODO: may use regex for each type 
to handle possible reference formats +func parseExpression(substitutionExpression string) (string, string, int, string, error) { subExpressions := strings.Split(substitutionExpression, ".") - if len(subExpressions) != 4 || subExpressions[0] != ResultTaskPart || subExpressions[2] != ResultResultPart { - return "", "", fmt.Errorf("Must be of the form %q", resultExpressionFormat) + + // For string result: tasks..results. + // For array result: tasks..results.[index] + if len(subExpressions) == 4 && subExpressions[0] == ResultTaskPart && subExpressions[2] == ResultResultPart { + resultName, stringIdx := ParseResultName(subExpressions[3]) + if stringIdx != "" { + intIdx, _ := strconv.Atoi(stringIdx) + return subExpressions[1], resultName, intIdx, "", nil + } + return subExpressions[1], resultName, 0, "", nil } - return subExpressions[1], subExpressions[3], nil + + // For object type result: tasks..results.. + if len(subExpressions) == 5 && subExpressions[0] == ResultTaskPart && subExpressions[2] == ResultResultPart { + return subExpressions[1], subExpressions[3], 0, subExpressions[4], nil + } + + return "", "", 0, "", fmt.Errorf("Must be one of the form 1). %q; 2). %q", resultExpressionFormat, objectResultExpressionFormat) +} + +// ParseResultName parse the input string to extract resultName and result index. +// Array indexing: +// Input: anArrayResult[1] +// Output: anArrayResult, "1" +// Array star reference: +// Input: anArrayResult[*] +// Output: anArrayResult, "*" +func ParseResultName(resultName string) (string, string) { + stringIdx := strings.TrimSuffix(strings.TrimPrefix(arrayIndexingRegex.FindString(resultName), "["), "]") + resultName = arrayIndexingRegex.ReplaceAllString(resultName, "") + return resultName, stringIdx } // PipelineTaskResultRefs walks all the places a result reference can be used // in a PipelineTask and returns a list of any references that are found. 
func PipelineTaskResultRefs(pt *PipelineTask) []*ResultRef { refs := []*ResultRef{} - for _, condition := range pt.Conditions { - for _, p := range condition.Params { - expressions, _ := GetVarSubstitutionExpressionsForParam(p) - refs = append(refs, NewResultRefs(expressions)...) - } - } - - for _, p := range pt.Params { + for _, p := range append(pt.Params, pt.Matrix...) { expressions, _ := GetVarSubstitutionExpressionsForParam(p) refs = append(refs, NewResultRefs(expressions)...) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/sidecar_replacements.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/sidecar_replacements.go deleted file mode 100644 index bf41f46338..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/sidecar_replacements.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - Copyright 2020 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v1beta1 - -import ( - "github.com/tektoncd/pipeline/pkg/substitution" -) - -// ApplySidecarReplacements applies variable interpolation on a Sidecar. 
-func ApplySidecarReplacements(sidecar *Sidecar, stringReplacements map[string]string, arrayReplacements map[string][]string) { - sidecar.Script = substitution.ApplyReplacements(sidecar.Script, stringReplacements) - applyContainerReplacements(&sidecar.Container, stringReplacements, arrayReplacements) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/status_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/status_validation.go new file mode 100644 index 0000000000..860bd7f1f8 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/status_validation.go @@ -0,0 +1,36 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "knative.dev/pkg/apis" +) + +// ValidateEmbeddedStatus checks that the embedded-status feature gate is set to the wantEmbeddedStatus value and, +// if not, returns an error stating which feature is dependent on the status and what the current status actually is. 
+func ValidateEmbeddedStatus(ctx context.Context, featureName, wantEmbeddedStatus string) *apis.FieldError { + embeddedStatus := config.FromContextOrDefaults(ctx).FeatureFlags.EmbeddedStatus + if embeddedStatus != wantEmbeddedStatus { + message := fmt.Sprintf(`%s requires "embedded-status" feature gate to be %q but it is %q`, featureName, wantEmbeddedStatus, embeddedStatus) + return apis.ErrGeneric(message) + } + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/step_replacements.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/step_replacements.go deleted file mode 100644 index 66ef5e3a7f..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/step_replacements.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - Copyright 2019 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v1beta1 - -import ( - "github.com/tektoncd/pipeline/pkg/substitution" -) - -// ApplyStepReplacements applies variable interpolation on a Step. 
-func ApplyStepReplacements(step *Step, stringReplacements map[string]string, arrayReplacements map[string][]string) { - step.Script = substitution.ApplyReplacements(step.Script, stringReplacements) - applyContainerReplacements(&step.Container, stringReplacements, arrayReplacements) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json index 92af2a98c7..1bc9780c05 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json @@ -299,12 +299,13 @@ } }, "v1beta1.ArrayOrString": { - "description": "ArrayOrString is a type that can hold a single string or string array. Used in JSON unmarshalling so that a single JSON field can accept either an individual string or an array of strings.", + "description": "ArrayOrString is a type that can hold a single string or string array. Used in JSON unmarshalling so that a single JSON field can accept either an individual string or an array of strings. consideration the object case after the community reaches an agreement on it.", "type": "object", "required": [ "type", "stringVal", - "arrayVal" + "arrayVal", + "objectVal" ], "properties": { "arrayVal": { @@ -315,6 +316,13 @@ }, "x-kubernetes-list-type": "atomic" }, + "objectVal": { + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, "stringVal": { "description": "Represents the stored type of ArrayOrString.", "type": "string", @@ -333,14 +341,6 @@ "apiVersion": { "type": "string" }, - "conditionChecks": { - "description": "ConditionChecks is the the list of condition checks, including their names and statuses, for the PipelineTask. 
Deprecated: This field will be removed when conditions are removed.", - "type": "array", - "items": { - "$ref": "#/definitions/v1beta1.PipelineRunChildConditionCheckStatus" - }, - "x-kubernetes-list-type": "atomic" - }, "kind": { "type": "string" }, @@ -457,109 +457,6 @@ } } }, - "v1beta1.ConditionCheck": { - "description": "ConditionCheck represents a single evaluation of a Condition step.", - "type": "object", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "default": {}, - "$ref": "#/definitions/v1.ObjectMeta" - }, - "spec": { - "default": {}, - "$ref": "#/definitions/v1beta1.TaskRunSpec" - }, - "status": { - "default": {}, - "$ref": "#/definitions/v1beta1.TaskRunStatus" - } - } - }, - "v1beta1.ConditionCheckStatus": { - "description": "ConditionCheckStatus defines the observed state of ConditionCheck", - "type": "object", - "required": [ - "podName" - ], - "properties": { - "annotations": { - "description": "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. 
This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.", - "type": "object", - "additionalProperties": { - "type": "string", - "default": "" - } - }, - "check": { - "description": "Check describes the state of the check container.", - "default": {}, - "$ref": "#/definitions/v1.ContainerState" - }, - "completionTime": { - "description": "CompletionTime is the time the check pod completed.", - "$ref": "#/definitions/v1.Time" - }, - "conditions": { - "description": "Conditions the latest available observations of a resource's current state.", - "type": "array", - "items": { - "default": {}, - "$ref": "#/definitions/knative.Condition" - }, - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge" - }, - "observedGeneration": { - "description": "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.", - "type": "integer", - "format": "int64" - }, - "podName": { - "description": "PodName is the name of the pod responsible for executing this condition check.", - "type": "string", - "default": "" - }, - "startTime": { - "description": "StartTime is the time the check is actually started.", - "$ref": "#/definitions/v1.Time" - } - } - }, - "v1beta1.ConditionCheckStatusFields": { - "description": "ConditionCheckStatusFields holds the fields of ConfigurationCheck's status. 
This is defined separately and inlined so that other types can readily consume these fields via duck typing.", - "type": "object", - "required": [ - "podName" - ], - "properties": { - "check": { - "description": "Check describes the state of the check container.", - "default": {}, - "$ref": "#/definitions/v1.ContainerState" - }, - "completionTime": { - "description": "CompletionTime is the time the check pod completed.", - "$ref": "#/definitions/v1.Time" - }, - "podName": { - "description": "PodName is the name of the pod responsible for executing this condition check.", - "type": "string", - "default": "" - }, - "startTime": { - "description": "StartTime is the time the check is actually started.", - "$ref": "#/definitions/v1.Time" - } - } - }, "v1beta1.EmbeddedTask": { "description": "EmbeddedTask is used to define a Task inline within a Pipeline's PipelineTasks.", "type": "object", @@ -616,7 +513,7 @@ }, "stepTemplate": { "description": "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.", - "$ref": "#/definitions/v1.Container" + "$ref": "#/definitions/v1beta1.StepTemplate" }, "steps": { "description": "Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace.", @@ -720,8 +617,16 @@ "type": "string", "default": "" }, + "properties": { + "description": "Properties is the JSON Schema properties to support key-value pairs parameter.", + "type": "object", + "additionalProperties": { + "default": {}, + "$ref": "#/definitions/v1beta1.PropertySpec" + } + }, "type": { - "description": "Type is the user-specified type of the parameter. The possible types are currently \"string\" and \"array\", and \"string\" is the default.", + "description": "Type is the user-specified type of the parameter. 
The possible types are currently \"string\", \"array\" and \"object\", and \"string\" is the default.", "type": "string" } } @@ -802,7 +707,7 @@ } }, "v1beta1.PipelineRef": { - "description": "PipelineRef can be used to refer to a specific instance of a Pipeline. Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64", + "description": "PipelineRef can be used to refer to a specific instance of a Pipeline.", "type": "object", "properties": { "apiVersion": { @@ -866,10 +771,6 @@ "resourceName": { "type": "string" }, - "resourceRef": { - "description": "The field ResourceRef should be deprecated and removed in the next API version. See https://github.com/tektoncd/pipeline/issues/2694 for more information.", - "$ref": "#/definitions/v1beta1.PipelineResourceRef" - }, "type": { "type": "integer", "format": "int32" @@ -898,10 +799,14 @@ "type": "string", "default": "" }, + "type": { + "description": "Type is the user-specified type of the result. The possible types are 'string', 'array', and 'object', with 'string' as the default. 
'array' and 'object' types are alpha features.", + "type": "string" + }, "value": { "description": "Value the expression used to retrieve the value", - "type": "string", - "default": "" + "default": {}, + "$ref": "#/definitions/v1beta1.ArrayOrString" } } }, @@ -931,37 +836,6 @@ } } }, - "v1beta1.PipelineRunChildConditionCheckStatus": { - "description": "PipelineRunChildConditionCheckStatus is used to record the status of condition checks within StatusChildReferences.", - "type": "object", - "properties": { - "conditionCheckName": { - "type": "string" - }, - "conditionName": { - "description": "ConditionName is the name of the Condition", - "type": "string" - }, - "status": { - "description": "Status is the ConditionCheckStatus for the corresponding ConditionCheck", - "$ref": "#/definitions/v1beta1.ConditionCheckStatus" - } - } - }, - "v1beta1.PipelineRunConditionCheckStatus": { - "description": "PipelineRunConditionCheckStatus returns the condition check status", - "type": "object", - "properties": { - "conditionName": { - "description": "ConditionName is the name of the Condition", - "type": "string" - }, - "status": { - "description": "Status is the ConditionCheckStatus for the corresponding ConditionCheck", - "$ref": "#/definitions/v1beta1.ConditionCheckStatus" - } - } - }, "v1beta1.PipelineRunList": { "description": "PipelineRunList contains a list of PipelineRun", "type": "object", @@ -1002,8 +876,8 @@ }, "value": { "description": "Value is the result returned from the execution of this PipelineRun", - "type": "string", - "default": "" + "default": {}, + "$ref": "#/definitions/v1beta1.ArrayOrString" } } }, @@ -1065,15 +939,6 @@ "serviceAccountName": { "type": "string" }, - "serviceAccountNames": { - "description": "Deprecated: use taskRunSpecs.ServiceAccountName instead", - "type": "array", - "items": { - "default": {}, - "$ref": "#/definitions/v1beta1.PipelineRunSpecServiceAccountName" - }, - "x-kubernetes-list-type": "atomic" - }, "status": { "description": 
"Used for cancelling a pipelinerun (and maybe more later on)", "type": "string" @@ -1088,11 +953,11 @@ "x-kubernetes-list-type": "atomic" }, "timeout": { - "description": "Time after which the Pipeline times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "description": "Timeout Deprecated: use pipelineRunSpec.Timeouts.Pipeline instead Time after which the Pipeline times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", "$ref": "#/definitions/v1.Duration" }, "timeouts": { - "description": "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nTime after which the Pipeline times out. Currently three keys are accepted in the map pipeline, tasks and finally with Timeouts.pipeline \u003e= Timeouts.tasks + Timeouts.finally", + "description": "Time after which the Pipeline times out. 
Currently three keys are accepted in the map pipeline, tasks and finally with Timeouts.pipeline \u003e= Timeouts.tasks + Timeouts.finally", "$ref": "#/definitions/v1beta1.TimeoutFields" }, "workspaces": { @@ -1106,18 +971,6 @@ } } }, - "v1beta1.PipelineRunSpecServiceAccountName": { - "description": "PipelineRunSpecServiceAccountName can be used to configure specific ServiceAccountName for a concrete Task", - "type": "object", - "properties": { - "serviceAccountName": { - "type": "string" - }, - "taskName": { - "type": "string" - } - } - }, "v1beta1.PipelineRunStatus": { "description": "PipelineRunStatus defines the observed state of PipelineRun", "type": "object", @@ -1263,13 +1116,6 @@ "description": "PipelineRunTaskRunStatus contains the name of the PipelineTask for this TaskRun and the TaskRun's Status", "type": "object", "properties": { - "conditionChecks": { - "description": "ConditionChecks maps the name of a condition check to its Status", - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/v1beta1.PipelineRunConditionCheckStatus" - } - }, "pipelineTaskName": { "description": "PipelineTaskName is the name of the PipelineTask.", "type": "string" @@ -1357,15 +1203,6 @@ "description": "PipelineTask defines a task in a Pipeline, passing inputs from both Params and from the output of previous tasks.", "type": "object", "properties": { - "conditions": { - "description": "Conditions is a list of conditions that need to be true for the task to run Conditions are deprecated, use WhenExpressions instead", - "type": "array", - "items": { - "default": {}, - "$ref": "#/definitions/v1beta1.PipelineTaskCondition" - }, - "x-kubernetes-list-type": "atomic" - }, "matrix": { "description": "Matrix declares parameters used to fan out this task.", "type": "array", @@ -1437,38 +1274,6 @@ } } }, - "v1beta1.PipelineTaskCondition": { - "description": "PipelineTaskCondition allows a PipelineTask to declare a Condition to be evaluated before the Task is run.", - 
"type": "object", - "required": [ - "conditionRef" - ], - "properties": { - "conditionRef": { - "description": "ConditionRef is the name of the Condition to use for the conditionCheck", - "type": "string", - "default": "" - }, - "params": { - "description": "Params declare parameters passed to this Condition", - "type": "array", - "items": { - "default": {}, - "$ref": "#/definitions/v1beta1.Param" - }, - "x-kubernetes-list-type": "atomic" - }, - "resources": { - "description": "Resources declare the resources provided to this Condition as input", - "type": "array", - "items": { - "default": {}, - "$ref": "#/definitions/v1beta1.PipelineTaskInputResource" - }, - "x-kubernetes-list-type": "atomic" - } - } - }, "v1beta1.PipelineTaskInputResource": { "description": "PipelineTaskInputResource maps the name of a declared PipelineResource input dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources that should be used. This input may come from a previous task.", "type": "object", @@ -1593,6 +1398,13 @@ "description": "PipelineTaskRunSpec can be used to configure specific specs for a concrete Task", "type": "object", "properties": { + "computeResources": { + "description": "Compute resources to use for this TaskRun", + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "metadata": { + "$ref": "#/definitions/v1beta1.PipelineTaskMetadata" + }, "pipelineTaskName": { "type": "string" }, @@ -1642,6 +1454,15 @@ } } }, + "v1beta1.PropertySpec": { + "description": "PropertySpec defines the struct for object keys", + "type": "object", + "properties": { + "type": { + "type": "string" + } + } + }, "v1beta1.ResolverParam": { "description": "ResolverParam is a single parameter passed to a resolver.", "type": "object", @@ -1686,21 +1507,32 @@ "type": "object", "required": [ "pipelineTask", - "result" + "result", + "resultsIndex", + "property" ], "properties": { "pipelineTask": { "type": "string", "default": "" }, + "property": { + "type": "string", + 
"default": "" + }, "result": { "type": "string", "default": "" + }, + "resultsIndex": { + "type": "integer", + "format": "int32", + "default": 0 } } }, "v1beta1.Sidecar": { - "description": "Sidecar has nearly the same data structure as Step, consisting of a Container and an optional Script, but does not have the ability to timeout.", + "description": "Sidecar has nearly the same data structure as Step but does not have the ability to timeout.", "type": "object", "required": [ "name" @@ -1712,7 +1544,8 @@ "items": { "type": "string", "default": "" - } + }, + "x-kubernetes-list-type": "atomic" }, "command": { "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", @@ -1720,7 +1553,8 @@ "items": { "type": "string", "default": "" - } + }, + "x-kubernetes-list-type": "atomic" }, "env": { "description": "List of environment variables to set in the container. Cannot be updated.", @@ -1729,6 +1563,7 @@ "default": {}, "$ref": "#/definitions/v1.EnvVar" }, + "x-kubernetes-list-type": "atomic", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, @@ -1738,7 +1573,8 @@ "items": { "default": {}, "$ref": "#/definitions/v1.EnvFromSource" - } + }, + "x-kubernetes-list-type": "atomic" }, "image": { "description": "Docker image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", @@ -1824,6 +1660,7 @@ "default": {}, "$ref": "#/definitions/v1.VolumeDevice" }, + "x-kubernetes-list-type": "atomic", "x-kubernetes-patch-merge-key": "devicePath", "x-kubernetes-patch-strategy": "merge" }, @@ -1834,6 +1671,7 @@ "default": {}, "$ref": "#/definitions/v1.VolumeMount" }, + "x-kubernetes-list-type": "atomic", "x-kubernetes-patch-merge-key": "mountPath", "x-kubernetes-patch-strategy": "merge" }, @@ -1883,7 +1721,8 @@ "description": "SkippedTask is used to describe the Tasks that were skipped due to their When Expressions evaluating to False. This is a struct because we are looking into including more details about the When Expressions that caused this Task to be skipped.", "type": "object", "required": [ - "name" + "name", + "reason" ], "properties": { "name": { @@ -1891,6 +1730,11 @@ "type": "string", "default": "" }, + "reason": { + "description": "Reason is the cause of the PipelineTask being skipped.", + "type": "string", + "default": "" + }, "whenExpressions": { "description": "WhenExpressions is the list of checks guarding the execution of the PipelineTask", "type": "array", @@ -1903,7 +1747,7 @@ } }, "v1beta1.Step": { - "description": "Step embeds the Container type, which allows it to include fields not provided by Container.", + "description": "Step runs a subcomponent of a Task", "type": "object", "required": [ "name" @@ -1915,7 +1759,8 @@ "items": { "type": "string", "default": "" - } + }, + "x-kubernetes-list-type": "atomic" }, "command": { "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", @@ -1923,7 +1768,8 @@ "items": { "type": "string", "default": "" - } + }, + "x-kubernetes-list-type": "atomic" }, "env": { "description": "List of environment variables to set in the container. Cannot be updated.", @@ -1932,6 +1778,7 @@ "default": {}, "$ref": "#/definitions/v1.EnvVar" }, + "x-kubernetes-list-type": "atomic", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, @@ -1941,7 +1788,8 @@ "items": { "default": {}, "$ref": "#/definitions/v1.EnvFromSource" - } + }, + "x-kubernetes-list-type": "atomic" }, "image": { "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", @@ -1952,11 +1800,11 @@ "type": "string" }, "lifecycle": { - "description": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + "description": "Deprecated. This field will be removed in a future release. Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "$ref": "#/definitions/v1.Lifecycle" }, "livenessProbe": { - "description": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "description": "Deprecated. 
This field will be removed in a future release. Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "$ref": "#/definitions/v1.Probe" }, "name": { @@ -1969,7 +1817,7 @@ "type": "string" }, "ports": { - "description": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "description": "Deprecated. This field will be removed in a future release. List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", "type": "array", "items": { "default": {}, @@ -1984,7 +1832,7 @@ "x-kubernetes-patch-strategy": "merge" }, "readinessProbe": { - "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "description": "Deprecated. This field will be removed in a future release. Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "$ref": "#/definitions/v1.Probe" }, "resources": { @@ -2001,23 +1849,31 @@ "$ref": "#/definitions/v1.SecurityContext" }, "startupProbe": { - "description": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "description": "Deprecated. This field will be removed in a future release. DeprecatedStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "$ref": "#/definitions/v1.Probe" }, + "stderrConfig": { + "description": "Stores configuration for the stderr stream of the step.", + "$ref": "#/definitions/v1beta1.StepOutputConfig" + }, "stdin": { - "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "description": "Deprecated. This field will be removed in a future release. Whether this container should allocate a buffer for stdin in the container runtime. 
If this is not set, reads from stdin in the container will always result in EOF. Default is false.", "type": "boolean" }, "stdinOnce": { - "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "description": "Deprecated. This field will be removed in a future release. Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", "type": "boolean" }, + "stdoutConfig": { + "description": "Stores configuration for the stdout stream of the step.", + "$ref": "#/definitions/v1beta1.StepOutputConfig" + }, "terminationMessagePath": { - "description": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.", + "description": "Deprecated. This field will be removed in a future release. Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "type": "string" }, "terminationMessagePolicy": { - "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "description": "Deprecated. This field will be removed in a future release. Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", "type": "string" }, "timeout": { @@ -2025,7 +1881,7 @@ "$ref": "#/definitions/v1.Duration" }, "tty": { - "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + "description": "Deprecated. This field will be removed in a future release. Whether this container should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. 
Default is false.", "type": "boolean" }, "volumeDevices": { @@ -2035,6 +1891,7 @@ "default": {}, "$ref": "#/definitions/v1.VolumeDevice" }, + "x-kubernetes-list-type": "atomic", "x-kubernetes-patch-merge-key": "devicePath", "x-kubernetes-patch-strategy": "merge" }, @@ -2045,6 +1902,7 @@ "default": {}, "$ref": "#/definitions/v1.VolumeMount" }, + "x-kubernetes-list-type": "atomic", "x-kubernetes-patch-merge-key": "mountPath", "x-kubernetes-patch-strategy": "merge" }, @@ -2063,6 +1921,16 @@ } } }, + "v1beta1.StepOutputConfig": { + "description": "StepOutputConfig stores configuration for a step output stream.", + "type": "object", + "properties": { + "path": { + "description": "Path to duplicate stdout stream to on container's local filesystem.", + "type": "string" + } + } + }, "v1beta1.StepState": { "description": "StepState reports the results of running a step in a Task.", "type": "object", @@ -2090,6 +1958,152 @@ } } }, + "v1beta1.StepTemplate": { + "description": "StepTemplate is a template for a Step", + "type": "object", + "required": [ + "name" + ], + "properties": { + "args": { + "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. 
The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvVar" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvFromSource" + }, + "x-kubernetes-list-type": "atomic" + }, + "image": { + "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. 
One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "lifecycle": { + "description": "Deprecated. This field will be removed in a future release. Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + "$ref": "#/definitions/v1.Lifecycle" + }, + "livenessProbe": { + "description": "Deprecated. This field will be removed in a future release. Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/v1.Probe" + }, + "name": { + "description": "Deprecated. This field will be removed in a future release. DeprecatedName of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + "type": "string", + "default": "" + }, + "ports": { + "description": "Deprecated. This field will be removed in a future release. List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.ContainerPort" + }, + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge" + }, + "readinessProbe": { + "description": "Deprecated. This field will be removed in a future release. 
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/v1.Probe" + }, + "resources": { + "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "default": {}, + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "securityContext": { + "description": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "$ref": "#/definitions/v1.SecurityContext" + }, + "startupProbe": { + "description": "Deprecated. This field will be removed in a future release. DeprecatedStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/v1.Probe" + }, + "stdin": { + "description": "Deprecated. This field will be removed in a future release. Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "type": "boolean" + }, + "stdinOnce": { + "description": "Deprecated. This field will be removed in a future release. 
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "type": "boolean" + }, + "terminationMessagePath": { + "description": "Deprecated. This field will be removed in a future release. Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "type": "string" + }, + "terminationMessagePolicy": { + "description": "Deprecated. This field will be removed in a future release. Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "type": "string" + }, + "tty": { + "description": "Deprecated. This field will be removed in a future release. Whether this container should allocate a DeprecatedTTY for itself, also requires 'stdin' to be true. 
Default is false.", + "type": "boolean" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeDevice" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeMount" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + } + } + }, "v1beta1.Task": { "description": "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", "type": "object", @@ -2142,7 +2156,7 @@ } }, "v1beta1.TaskRef": { - "description": "TaskRef can be used to refer to a specific instance of a task. 
Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64", + "description": "TaskRef can be used to refer to a specific instance of a task.", "type": "object", "properties": { "apiVersion": { @@ -2255,13 +2269,24 @@ "properties": { "description": { "description": "Description is a human-readable description of the result", - "type": "string", - "default": "" + "type": "string" }, "name": { "description": "Name the given name", "type": "string", "default": "" + }, + "properties": { + "description": "Properties is the JSON Schema properties to support key-value pairs results.", + "type": "object", + "additionalProperties": { + "default": {}, + "$ref": "#/definitions/v1beta1.PropertySpec" + } + }, + "type": { + "description": "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.", + "type": "string" } } }, @@ -2406,10 +2431,14 @@ "type": "string", "default": "" }, + "type": { + "description": "Type is the user-specified type of the result. 
The possible type is currently \"string\" and will support \"array\" in following work.", + "type": "string" + }, "value": { "description": "Value the given value of the result", - "type": "string", - "default": "" + "default": {}, + "$ref": "#/definitions/v1beta1.ArrayOrString" } } }, @@ -2437,6 +2466,10 @@ "description": "TaskRunSpec defines the desired state of TaskRun", "type": "object", "properties": { + "computeResources": { + "description": "Compute resources to use for this TaskRun", + "$ref": "#/definitions/v1.ResourceRequirements" + }, "debug": { "$ref": "#/definitions/v1beta1.TaskRunDebug" }, @@ -2747,7 +2780,7 @@ }, "stepTemplate": { "description": "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.", - "$ref": "#/definitions/v1.Container" + "$ref": "#/definitions/v1beta1.StepTemplate" }, "steps": { "description": "Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace.", @@ -2837,6 +2870,10 @@ "description": "ConfigMap represents a configMap that should populate this workspace.", "$ref": "#/definitions/v1.ConfigMapVolumeSource" }, + "csi": { + "description": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.", + "$ref": "#/definitions/v1.CSIVolumeSource" + }, "emptyDir": { "description": "EmptyDir represents a temporary directory that shares a Task's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir Either this OR PersistentVolumeClaim can be used.", "$ref": "#/definitions/v1.EmptyDirVolumeSource" @@ -2850,6 +2887,10 @@ "description": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. 
Either this OR EmptyDir can be used.", "$ref": "#/definitions/v1.PersistentVolumeClaimVolumeSource" }, + "projected": { + "description": "Projected represents a projected volume that should populate this workspace.", + "$ref": "#/definitions/v1.ProjectedVolumeSource" + }, "secret": { "description": "Secret represents a secret that should populate this workspace.", "$ref": "#/definitions/v1.SecretVolumeSource" @@ -2898,8 +2939,7 @@ "description": "WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be mapped to a task's declared workspace.", "type": "object", "required": [ - "name", - "workspace" + "name" ], "properties": { "name": { @@ -2913,8 +2953,7 @@ }, "workspace": { "description": "Workspace is the name of the workspace declared by the pipeline", - "type": "string", - "default": "" + "type": "string" } } }, diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_defaults.go index 8703a41ad6..6a25d0a415 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_defaults.go @@ -34,11 +34,7 @@ func (ts *TaskSpec) SetDefaults(ctx context.Context) { for i := range ts.Params { ts.Params[i].SetDefaults(ctx) } -} - -// applyImplicitParams propagates implicit params from the parent context -// through the Task. 
-func (ts *TaskSpec) applyImplicitParams(ctx context.Context) { - ctx = addContextParamSpec(ctx, ts.Params) - ts.Params = getContextParamSpecs(ctx) + for i := range ts.Results { + ts.Results[i].SetDefaults(ctx) + } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go index f7067b3a7a..957b0aef69 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go @@ -110,7 +110,7 @@ type TaskSpec struct { // StepTemplate can be used as the basis for all step containers within the // Task, so that the steps inherit settings on the base container. - StepTemplate *corev1.Container `json:"stepTemplate,omitempty"` + StepTemplate *StepTemplate `json:"stepTemplate,omitempty"` // Sidecars are run alongside the Task's step containers. They begin before // the steps start and end after the steps complete. @@ -126,72 +126,6 @@ type TaskSpec struct { Results []TaskResult `json:"results,omitempty"` } -// TaskResult used to describe the results of a task -type TaskResult struct { - // Name the given name - Name string `json:"name"` - - // Description is a human-readable description of the result - // +optional - Description string `json:"description"` -} - -// Step embeds the Container type, which allows it to include fields not -// provided by Container. -type Step struct { - corev1.Container `json:",inline"` - - // Script is the contents of an executable file to execute. - // - // If Script is not empty, the Step cannot have an Command and the Args will be passed to the Script. - // +optional - Script string `json:"script,omitempty"` - - // Timeout is the time after which the step times out. Defaults to never. 
- // Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration - // +optional - Timeout *metav1.Duration `json:"timeout,omitempty"` - - // This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha" - // for this field to be supported. - // - // Workspaces is a list of workspaces from the Task that this Step wants - // exclusive access to. Adding a workspace to this list means that any - // other Step or Sidecar that does not also request this Workspace will - // not have access to it. - // +optional - // +listType=atomic - Workspaces []WorkspaceUsage `json:"workspaces,omitempty"` - - // OnError defines the exiting behavior of a container on error - // can be set to [ continue | stopAndFail ] - // stopAndFail indicates exit the taskRun if the container exits with non-zero exit code - // continue indicates continue executing the rest of the steps irrespective of the container exit code - OnError string `json:"onError,omitempty"` -} - -// Sidecar has nearly the same data structure as Step, consisting of a Container and an optional Script, but does not have the ability to timeout. -type Sidecar struct { - corev1.Container `json:",inline"` - - // Script is the contents of an executable file to execute. - // - // If Script is not empty, the Step cannot have an Command or Args. - // +optional - Script string `json:"script,omitempty"` - - // This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha" - // for this field to be supported. - // - // Workspaces is a list of workspaces from the Task that this Sidecar wants - // exclusive access to. Adding a workspace to this list means that any - // other Step or Sidecar that does not also request this Workspace will - // not have access to it. 
- // +optional - // +listType=atomic - Workspaces []WorkspaceUsage `json:"workspaces,omitempty"` -} - // TaskList contains a list of Task // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type TaskList struct { @@ -200,36 +134,3 @@ type TaskList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []Task `json:"items"` } - -// TaskRef can be used to refer to a specific instance of a task. -// Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64 -type TaskRef struct { - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name string `json:"name,omitempty"` - // TaskKind indicates the kind of the task, namespaced or cluster scoped. - Kind TaskKind `json:"kind,omitempty"` - // API version of the referent - // +optional - APIVersion string `json:"apiVersion,omitempty"` - // Bundle url reference to a Tekton Bundle. - // +optional - Bundle string `json:"bundle,omitempty"` - - // ResolverRef allows referencing a Task in a remote location - // like a git repo. This field is only supported when the alpha - // feature gate is enabled. - // +optional - ResolverRef `json:",omitempty"` -} - -// Check that Pipeline may be validated and defaulted. - -// TaskKind defines the type of Task used by the pipeline. -type TaskKind string - -const ( - // NamespacedTaskKind indicates that the task type has a namespaced scope. - NamespacedTaskKind TaskKind = "Task" - // ClusterTaskKind indicates that task type has a cluster scope. 
- ClusterTaskKind TaskKind = "ClusterTask" -) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go index e6baaed04e..4df3d4feaf 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go @@ -20,11 +20,14 @@ import ( "context" "fmt" "path/filepath" + "regexp" "strings" "time" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" + "github.com/tektoncd/pipeline/pkg/apis/version" + "github.com/tektoncd/pipeline/pkg/list" "github.com/tektoncd/pipeline/pkg/substitution" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -32,7 +35,20 @@ import ( "knative.dev/pkg/apis" ) +const ( + // stringAndArrayVariableNameFormat is the regex to validate if string/array variable name format follows the following rules. + // - Must only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.) + // - Must begin with a letter or an underscore (_) + stringAndArrayVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9.-]*$" + + // objectVariableNameFormat is the regext used to validate object name and key names format + // The difference with the array or string name format is that object variable names shouldn't contain dots. 
+ objectVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9-]*$" +) + var _ apis.Validatable = (*Task)(nil) +var stringAndArrayVariableNameFormatRegex = regexp.MustCompile(stringAndArrayVariableNameFormat) +var objectVariableNameFormatRegex = regexp.MustCompile(objectVariableNameFormat) // Validate implements apis.Validatable func (t *Task) Validate(ctx context.Context) *apis.FieldError { @@ -48,6 +64,15 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { if len(ts.Steps) == 0 { errs = errs.Also(apis.ErrMissingField("steps")) } + + if config.IsSubstituted(ctx) { + // Validate the task's workspaces only. + errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) + errs = errs.Also(validateWorkspaceUsages(ctx, ts)) + + return errs + } + errs = errs.Also(ValidateVolumes(ts.Volumes).ViaField("volumes")) errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) errs = errs.Also(validateWorkspaceUsages(ctx, ts)) @@ -62,10 +87,10 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(validateSteps(ctx, mergedSteps).ViaField("steps")) errs = errs.Also(ts.Resources.Validate(ctx).ViaField("resources")) - errs = errs.Also(ValidateParameterTypes(ts.Params).ViaField("params")) - errs = errs.Also(ValidateParameterVariables(ts.Steps, ts.Params)) - errs = errs.Also(ValidateResourcesVariables(ts.Steps, ts.Resources)) - errs = errs.Also(validateTaskContextVariables(ts.Steps)) + errs = errs.Also(ValidateParameterTypes(ctx, ts.Params).ViaField("params")) + errs = errs.Also(ValidateParameterVariables(ctx, ts.Steps, ts.Params)) + errs = errs.Also(ValidateResourcesVariables(ctx, ts.Steps, ts.Resources)) + errs = errs.Also(validateTaskContextVariables(ctx, ts.Steps)) errs = errs.Also(validateResults(ctx, ts.Results).ViaField("results")) return errs } @@ -77,17 +102,9 @@ func validateResults(ctx context.Context, results 
[]TaskResult) (errs *apis.Fiel return errs } -// Validate implements apis.Validatable -func (tr TaskResult) Validate(_ context.Context) *apis.FieldError { - if !resultNameFormatRegex.MatchString(tr.Name) { - return apis.ErrInvalidKeyName(tr.Name, "name", fmt.Sprintf("Name must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my-name', or 'my_name', regex used for validation is '%s')", ResultNameFormat)) - } - return nil -} - // a mount path which conflicts with any other declared workspaces, with the explicitly // declared volume mounts, or with the stepTemplate. The names must also be unique. -func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *corev1.Container) (errs *apis.FieldError) { +func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *StepTemplate) (errs *apis.FieldError) { mountPaths := sets.NewString() for _, step := range steps { for _, vm := range step.VolumeMounts { @@ -135,7 +152,7 @@ func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.Fiel for stepIdx, step := range steps { if len(step.Workspaces) != 0 { - errs = errs.Also(ValidateEnabledAPIFields(ctx, "step workspaces", config.AlphaAPIFields).ViaIndex(stepIdx).ViaField("steps")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.AlphaAPIFields).ViaIndex(stepIdx).ViaField("steps")) } for workspaceIdx, w := range step.Workspaces { if !wsNames.Has(w.Name) { @@ -146,7 +163,7 @@ func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.Fiel for sidecarIdx, sidecar := range sidecars { if len(sidecar.Workspaces) != 0 { - errs = errs.Also(ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.AlphaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", 
config.AlphaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) } for workspaceIdx, w := range sidecar.Workspaces { if !wsNames.Has(w.Name) { @@ -238,15 +255,31 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi if s.Script != "" { cleaned := strings.TrimSpace(s.Script) if strings.HasPrefix(cleaned, "#!win") { - errs = errs.Also(ValidateEnabledAPIFields(ctx, "windows script support", config.AlphaAPIFields).ViaField("script")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "windows script support", config.AlphaAPIFields).ViaField("script")) } } + + // StdoutConfig is an alpha feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha". + if s.StdoutConfig != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step stdout stream support", config.AlphaAPIFields).ViaField("stdoutconfig")) + } + // StderrConfig is an alpha feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha". + if s.StderrConfig != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step stderr stream support", config.AlphaAPIFields).ViaField("stderrconfig")) + } return errs } // ValidateParameterTypes validates all the types within a slice of ParamSpecs -func ValidateParameterTypes(params []ParamSpec) (errs *apis.FieldError) { +func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) { for _, p := range params { + if p.Type == ParamTypeObject { + // Object type parameter is an alpha feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha". 
+ errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) + } errs = errs.Also(p.ValidateType()) } return errs @@ -276,26 +309,68 @@ func (p ParamSpec) ValidateType() *apis.FieldError { }, } } + + // Check object type and its PropertySpec type + return p.ValidateObjectType() +} + +// ValidateObjectType checks that object type parameter does not miss the +// definition of `properties` section and the type of a PropertySpec is allowed. +// (Currently, only string is allowed) +func (p ParamSpec) ValidateObjectType() *apis.FieldError { + if p.Type == ParamTypeObject && p.Properties == nil { + return apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) + } + + invalidKeys := []string{} + for key, propertySpec := range p.Properties { + if propertySpec.Type != ParamTypeString { + invalidKeys = append(invalidKeys, key) + } + } + + if len(invalidKeys) != 0 { + return &apis.FieldError{ + Message: fmt.Sprintf("The value type specified for these keys %v is invalid", invalidKeys), + Paths: []string{fmt.Sprintf("%s.properties", p.Name)}, + } + } + return nil } // ValidateParameterVariables validates all variables within a slice of ParamSpecs against a slice of Steps -func ValidateParameterVariables(steps []Step, params []ParamSpec) *apis.FieldError { - parameterNames := sets.NewString() +func ValidateParameterVariables(ctx context.Context, steps []Step, params []ParamSpec) *apis.FieldError { + allParameterNames := sets.NewString() + stringParameterNames := sets.NewString() arrayParameterNames := sets.NewString() - + objectParamSpecs := []ParamSpec{} + var errs *apis.FieldError for _, p := range params { - parameterNames.Insert(p.Name) - if p.Type == ParamTypeArray { + // validate no duplicate names + if allParameterNames.Has(p.Name) { + errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) + } + allParameterNames.Insert(p.Name) + + switch p.Type { + case ParamTypeArray: 
arrayParameterNames.Insert(p.Name) + case ParamTypeObject: + objectParamSpecs = append(objectParamSpecs, p) + default: + stringParameterNames.Insert(p.Name) } } - errs := validateVariables(steps, "params", parameterNames) - return errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) + errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParamSpecs)) + errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) + errs = errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) + errs = errs.Also(validateObjectDefault(objectParamSpecs)) + return errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) } -func validateTaskContextVariables(steps []Step) *apis.FieldError { +func validateTaskContextVariables(ctx context.Context, steps []Step) *apis.FieldError { taskRunContextNames := sets.NewString().Insert( "name", "namespace", @@ -305,12 +380,12 @@ func validateTaskContextVariables(steps []Step) *apis.FieldError { "name", "retry-count", ) - errs := validateVariables(steps, "context\\.taskRun", taskRunContextNames) - return errs.Also(validateVariables(steps, "context\\.task", taskContextNames)) + errs := validateVariables(ctx, steps, "context\\.taskRun", taskRunContextNames) + return errs.Also(validateVariables(ctx, steps, "context\\.task", taskContextNames)) } // ValidateResourcesVariables validates all variables within a TaskResources against a slice of Steps -func ValidateResourcesVariables(steps []Step, resources *TaskResources) *apis.FieldError { +func ValidateResourcesVariables(ctx context.Context, steps []Step, resources *TaskResources) *apis.FieldError { if resources == nil { return nil } @@ -325,7 +400,98 @@ func ValidateResourcesVariables(steps []Step, resources *TaskResources) *apis.Fi resourceNames.Insert(r.Name) } } - return validateVariables(steps, "resources.(?:inputs|outputs)", resourceNames) + return validateVariables(ctx, steps, 
"resources.(?:inputs|outputs)", resourceNames) +} + +// validateObjectUsage validates the usage of individual attributes of an object param and the usage of the entire object +func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) (errs *apis.FieldError) { + objectParameterNames := sets.NewString() + for _, p := range params { + // collect all names of object type params + objectParameterNames.Insert(p.Name) + + // collect all keys for this object param + objectKeys := sets.NewString() + for key := range p.Properties { + objectKeys.Insert(key) + } + + // check if the object's key names are referenced correctly i.e. param.objectParam.key1 + errs = errs.Also(validateVariables(ctx, steps, fmt.Sprintf("params\\.%s", p.Name), objectKeys)) + } + + return errs.Also(validateObjectUsageAsWhole(steps, "params", objectParameterNames)) +} + +// validateObjectDefault validates the keys of all the object params within a +// slice of ParamSpecs are provided in default iff the default section is provided. +func validateObjectDefault(objectParams []ParamSpec) (errs *apis.FieldError) { + for _, p := range objectParams { + errs = errs.Also(ValidateObjectKeys(p.Properties, p.Default).ViaField(p.Name)) + } + return errs +} + +// ValidateObjectKeys validates if object keys defined in properties are all provided in its value provider iff the provider is not nil. 
+func ValidateObjectKeys(properties map[string]PropertySpec, propertiesProvider *ArrayOrString) (errs *apis.FieldError) { + if propertiesProvider == nil || propertiesProvider.ObjectVal == nil { + return nil + } + + neededKeys := []string{} + providedKeys := []string{} + + // collect all needed keys + for key := range properties { + neededKeys = append(neededKeys, key) + } + + // collect all provided keys + for key := range propertiesProvider.ObjectVal { + providedKeys = append(providedKeys, key) + } + + missings := list.DiffLeft(neededKeys, providedKeys) + if len(missings) != 0 { + return &apis.FieldError{ + Message: fmt.Sprintf("Required key(s) %s are missing in the value provider.", missings), + Paths: []string{fmt.Sprintf("properties"), fmt.Sprintf("default")}, + } + } + + return nil +} + +// validateObjectUsageAsWhole makes sure the object params are not used as whole when providing values for strings +// i.e. param.objectParam, param.objectParam[*] +func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + for idx, step := range steps { + errs = errs.Also(validateStepObjectUsageAsWhole(step, prefix, vars)).ViaFieldIndex("steps", idx) + } + return errs +} + +func validateStepObjectUsageAsWhole(step Step, prefix string, vars sets.String) *apis.FieldError { + errs := validateTaskNoObjectReferenced(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(validateTaskNoObjectReferenced(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(validateTaskNoObjectReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(validateTaskNoObjectReferenced(step.Script, prefix, vars).ViaField("script")) + for i, cmd := range step.Command { + errs = errs.Also(validateTaskNoObjectReferenced(cmd, prefix, vars).ViaFieldIndex("command", i)) + } + for i, arg := range step.Args { + errs = errs.Also(validateTaskNoObjectReferenced(arg, prefix, vars).ViaFieldIndex("args", i)) + + } + for _, env := 
range step.Env { + errs = errs.Also(validateTaskNoObjectReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + } + for i, v := range step.VolumeMounts { + errs = errs.Also(validateTaskNoObjectReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoObjectReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoObjectReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + } + return errs } func validateArrayUsage(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { @@ -358,18 +524,70 @@ func validateStepArrayUsage(step Step, prefix string, vars sets.String) *apis.Fi return errs } -func validateVariables(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { +func validateVariables(ctx context.Context, steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + // We've checked param name format. 
Now, we want to check if param names are referenced correctly in each step for idx, step := range steps { - errs = errs.Also(validateStepVariables(step, prefix, vars).ViaFieldIndex("steps", idx)) + errs = errs.Also(validateStepVariables(ctx, step, prefix, vars).ViaFieldIndex("steps", idx)) + } + return errs +} + +// validateNameFormat validates that the name format of all param types follows the rules +func validateNameFormat(stringAndArrayParams sets.String, objectParams []ParamSpec) (errs *apis.FieldError) { + // checking string or array name format + // ---- + invalidStringAndArrayNames := []string{} + // Converting to sorted list here rather than just looping map keys + // because we want the order of items in vars to be deterministic for purpose of unit testing + for _, name := range stringAndArrayParams.List() { + if !stringAndArrayVariableNameFormatRegex.MatchString(name) { + invalidStringAndArrayNames = append(invalidStringAndArrayNames, name) + } + } + + if len(invalidStringAndArrayNames) != 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("The format of following array and string variable names is invalid: %s", invalidStringAndArrayNames), + Paths: []string{"params"}, + Details: "String/Array Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.)\nMust begin with a letter or an underscore (_)", + }) + } + + // checking object name and key name format + // ----- + invalidObjectNames := map[string][]string{} + for _, obj := range objectParams { + // check object param name + if !objectVariableNameFormatRegex.MatchString(obj.Name) { + invalidObjectNames[obj.Name] = []string{} + } + + // check key names + for k := range obj.Properties { + if !objectVariableNameFormatRegex.MatchString(k) { + invalidObjectNames[obj.Name] = append(invalidObjectNames[obj.Name], k) + } + } + } + + if len(invalidObjectNames) != 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("Object param name and key name 
format is invalid: %s", invalidObjectNames), + Paths: []string{"params"}, + Details: "Object Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_) \nMust begin with a letter or an underscore (_)", + }) } + return errs } -func validateStepVariables(step Step, prefix string, vars sets.String) *apis.FieldError { +func validateStepVariables(ctx context.Context, step Step, prefix string, vars sets.String) *apis.FieldError { errs := validateTaskVariable(step.Name, prefix, vars).ViaField("name") errs = errs.Also(validateTaskVariable(step.Image, prefix, vars).ViaField("image")) errs = errs.Also(validateTaskVariable(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskVariable(step.Script, prefix, vars).ViaField("script")) + if !(config.FromContextOrDefaults(ctx).FeatureFlags.EnableAPIFields == "alpha" && prefix == "params") { + errs = errs.Also(validateTaskVariable(step.Script, prefix, vars).ViaField("script")) + } for i, cmd := range step.Command { errs = errs.Also(validateTaskVariable(cmd, prefix, vars).ViaFieldIndex("command", i)) } @@ -391,6 +609,10 @@ func validateTaskVariable(value, prefix string, vars sets.String) *apis.FieldErr return substitution.ValidateVariableP(value, prefix, vars) } +func validateTaskNoObjectReferenced(value, prefix string, objectNames sets.String) *apis.FieldError { + return substitution.ValidateEntireVariableProhibitedP(value, prefix, objectNames) +} + func validateTaskNoArrayReferenced(value, prefix string, arrayNames sets.String) *apis.FieldError { return substitution.ValidateVariableProhibitedP(value, prefix, arrayNames) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_types.go new file mode 100644 index 0000000000..07aeb436d7 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_types.go @@ -0,0 +1,49 @@ +/* +Copyright 2022 The 
Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// TaskRef can be used to refer to a specific instance of a task. +type TaskRef struct { + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name,omitempty"` + // TaskKind indicates the kind of the task, namespaced or cluster scoped. + Kind TaskKind `json:"kind,omitempty"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // Bundle url reference to a Tekton Bundle. + // +optional + Bundle string `json:"bundle,omitempty"` + + // ResolverRef allows referencing a Task in a remote location + // like a git repo. This field is only supported when the alpha + // feature gate is enabled. + // +optional + ResolverRef `json:",omitempty"` +} + +// Check that Pipeline may be validated and defaulted. + +// TaskKind defines the type of Task used by the pipeline. +type TaskKind string + +const ( + // NamespacedTaskKind indicates that the task type has a namespaced scope. + NamespacedTaskKind TaskKind = "Task" + // ClusterTaskKind indicates that task type has a cluster scope. 
+ ClusterTaskKind TaskKind = "ClusterTask" +) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go index dc5e97611f..652eed6cff 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go @@ -21,81 +21,43 @@ import ( "github.com/google/go-containerregistry/pkg/name" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/version" "knative.dev/pkg/apis" ) // Validate ensures that a supplied TaskRef field is populated // correctly. No errors are returned for a nil TaskRef. func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) if ref == nil { return } - if cfg.FeatureFlags.EnableAPIFields == config.AlphaAPIFields { - errs = errs.Also(ref.validateAlphaRef(ctx)) - } else { - errs = errs.Also(ref.validateInTreeRef(ctx)) - } - return -} -// validateInTreeRef returns errors if the given taskRef is not valid for -// Pipelines' built-in resolution machinery. 
-func (ref *TaskRef) validateInTreeRef(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) - if ref.Resolver != "" { - errs = errs.Also(apis.ErrDisallowedFields("resolver")) - } - if ref.Resource != nil { - errs = errs.Also(apis.ErrDisallowedFields("resource")) - } - if ref.Name == "" { - errs = errs.Also(apis.ErrMissingField("name")) - } - if cfg.FeatureFlags.EnableTektonOCIBundles { - if ref.Bundle != "" && ref.Name == "" { - errs = errs.Also(apis.ErrMissingField("name")) + switch { + case ref.Resolver != "": + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.AlphaAPIFields).ViaField("resolver")) + if ref.Name != "" { + errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) } if ref.Bundle != "" { - if _, err := name.ParseReference(ref.Bundle); err != nil { - errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) - } - } - } else if ref.Bundle != "" { - errs = errs.Also(apis.ErrDisallowedFields("bundle")) - } - return -} - -// validateAlphaRef ensures that the user has passed either a -// valid remote resource reference or a valid in-tree resource reference, -// but not both. 
-func (ref *TaskRef) validateAlphaRef(ctx context.Context) (errs *apis.FieldError) { - hasResolver := ref.Resolver != "" - hasResource := ref.Resource != nil - hasName := ref.Name != "" - hasBundle := ref.Bundle != "" - if hasName { - if hasResolver { - errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) + errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resolver")) } - if hasResource { + case ref.Resource != nil: + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resource", config.AlphaAPIFields).ViaField("resource")) + if ref.Name != "" { errs = errs.Also(apis.ErrMultipleOneOf("name", "resource")) } - } - if hasBundle { - if hasResolver { - errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resolver")) - } - if hasResource { + if ref.Bundle != "" { errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resource")) } - } - if !hasResolver { - if hasResource { + if ref.Resolver == "" { errs = errs.Also(apis.ErrMissingField("resolver")) - } else { - errs = errs.Also(ref.validateInTreeRef(ctx)) + } + case ref.Name == "": + errs = errs.Also(apis.ErrMissingField("name")) + case ref.Bundle != "": + errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle")) + if _, err := name.ParseReference(ref.Bundle); err != nil { + errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) } } return diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go index 18530c8810..8ffd62725a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go @@ -68,9 +68,5 @@ func (trs *TaskRunSpec) SetDefaults(ctx context.Context) { // If this taskrun has an embedded task, apply the usual task defaults if trs.TaskSpec != nil { trs.TaskSpec.SetDefaults(ctx) - - if 
config.FromContextOrDefaults(ctx).FeatureFlags.EnableAPIFields == "alpha" { - trs.TaskSpec.applyImplicitParams(addContextParams(ctx, trs.Params)) - } } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go index abccc5df7a..44a0f02b81 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go @@ -77,6 +77,8 @@ type TaskRunSpec struct { // +optional // +listType=atomic SidecarOverrides []TaskRunSidecarOverride `json:"sidecarOverrides,omitempty"` + // Compute resources to use for this TaskRun + ComputeResources *corev1.ResourceRequirements `json:"computeResources,omitempty"` } // TaskRunSpecStatus defines the taskrun spec status the user can provide @@ -140,6 +142,11 @@ const ( TaskRunReasonCancelled TaskRunReason = "TaskRunCancelled" // TaskRunReasonTimedOut is the reason set when the Taskrun has timed out TaskRunReasonTimedOut TaskRunReason = "TaskRunTimeout" + // TaskRunReasonResolvingTaskRef indicates that the TaskRun is waiting for + // its taskRef to be asynchronously resolved. + TaskRunReasonResolvingTaskRef = "ResolvingTaskRef" + // TaskRunReasonImagePullFailed is the reason set when the step of a task fails due to image not being pulled + TaskRunReasonImagePullFailed TaskRunReason = "TaskRunImagePullFailed" ) func (t TaskRunReason) String() string { @@ -235,15 +242,6 @@ type TaskRunStatusFields struct { TaskSpec *TaskSpec `json:"taskSpec,omitempty"` } -// TaskRunResult used to describe the results of a task -type TaskRunResult struct { - // Name the given name - Name string `json:"name"` - - // Value the given value of the result - Value string `json:"value"` -} - // TaskRunStepOverride is used to override the values of a Step in the corresponding Task. type TaskRunStepOverride struct { // The name of the Step to override. 
@@ -418,7 +416,7 @@ func (tr *TaskRun) HasStarted() bool { // IsSuccessful returns true if the TaskRun's status indicates that it is done. func (tr *TaskRun) IsSuccessful() bool { - return tr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() + return tr != nil && tr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() } // IsCancelled returns true if the TaskRun's spec status is set to Cancelled state diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go index 761c944666..2eed5c9e60 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go @@ -23,6 +23,8 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" + "github.com/tektoncd/pipeline/pkg/apis/version" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/apis" ) @@ -56,21 +58,25 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(ts.TaskSpec.Validate(ctx).ViaField("taskSpec")) } - errs = errs.Also(validateParameters(ts.Params).ViaField("params")) - errs = errs.Also(validateWorkspaceBindings(ctx, ts.Workspaces).ViaField("workspaces")) + errs = errs.Also(ValidateParameters(ctx, ts.Params).ViaField("params")) + errs = errs.Also(ValidateWorkspaceBindings(ctx, ts.Workspaces).ViaField("workspaces")) errs = errs.Also(ts.Resources.Validate(ctx).ViaField("resources")) if ts.Debug != nil { - errs = errs.Also(ValidateEnabledAPIFields(ctx, "debug", config.AlphaAPIFields).ViaField("debug")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "debug", config.AlphaAPIFields).ViaField("debug")) errs = errs.Also(validateDebug(ts.Debug).ViaField("debug")) } if ts.StepOverrides != nil { - errs = errs.Also(ValidateEnabledAPIFields(ctx, 
"stepOverrides", config.AlphaAPIFields).ViaField("stepOverrides")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "stepOverrides", config.AlphaAPIFields).ViaField("stepOverrides")) errs = errs.Also(validateStepOverrides(ts.StepOverrides).ViaField("stepOverrides")) } if ts.SidecarOverrides != nil { - errs = errs.Also(ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.AlphaAPIFields).ViaField("sidecarOverrides")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.AlphaAPIFields).ViaField("sidecarOverrides")) errs = errs.Also(validateSidecarOverrides(ts.SidecarOverrides).ViaField("sidecarOverrides")) } + if ts.ComputeResources != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "computeResources", config.AlphaAPIFields).ViaField("computeResources")) + errs = errs.Also(validateTaskRunComputeResources(ts.ComputeResources, ts.StepOverrides)) + } if ts.Status != "" { if ts.Status != TaskRunSpecStatusCancelled { @@ -101,8 +107,8 @@ func validateDebug(db *TaskRunDebug) (errs *apis.FieldError) { return errs } -// validateWorkspaceBindings makes sure the volumes provided for the Task's declared workspaces make sense. -func validateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs *apis.FieldError) { +// ValidateWorkspaceBindings makes sure the volumes provided for the Task's declared workspaces make sense. +func ValidateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs *apis.FieldError) { var names []string for idx, w := range wb { names = append(names, w.Name) @@ -112,12 +118,18 @@ func validateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs return errs } -func validateParameters(params []Param) (errs *apis.FieldError) { +// ValidateParameters makes sure the params for the Task are valid. 
+func ValidateParameters(ctx context.Context, params []Param) (errs *apis.FieldError) { var names []string for _, p := range params { + if p.Value.Type == ParamTypeObject { + // Object type parameter is an alpha feature and will fail validation if it's used in a taskrun spec + // when the enable-api-fields feature gate is not "alpha". + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) + } names = append(names, p.Name) } - return validateNoDuplicateNames(names, false) + return errs.Also(validateNoDuplicateNames(names, false)) } func validateStepOverrides(overrides []TaskRunStepOverride) (errs *apis.FieldError) { @@ -133,6 +145,19 @@ func validateStepOverrides(overrides []TaskRunStepOverride) (errs *apis.FieldErr return errs } +// validateTaskRunComputeResources ensures that compute resources are not configured at both the step level and the task level +func validateTaskRunComputeResources(computeResources *corev1.ResourceRequirements, overrides []TaskRunStepOverride) (errs *apis.FieldError) { + for _, override := range overrides { + if override.Resources.Size() != 0 && computeResources != nil { + return apis.ErrMultipleOneOf( + "stepOverrides.resources", + "computeResources", + ) + } + } + return nil +} + func validateSidecarOverrides(overrides []TaskRunSidecarOverride) (errs *apis.FieldError) { var names []string for i, o := range overrides { @@ -159,7 +184,7 @@ func validateNoDuplicateNames(names []string, byIndex bool) (errs *apis.FieldErr errs = errs.Also(apis.ErrMultipleOneOf("name").ViaKey(n)) } } - seen.Insert(n) + seen.Insert(strings.ToLower(n)) } return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go index 0d6f170d5e..ac53da805d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go +++ 
b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go @@ -62,9 +62,12 @@ func (we *WhenExpression) applyReplacements(replacements map[string]string, arra for _, val := range we.Values { // arrayReplacements holds a list of array parameters with a pattern - params.arrayParam1 // array params are referenced using $(params.arrayParam1[*]) + // array results are referenced using $(results.resultname[*]) // check if the param exist in the arrayReplacements to replace it with a list of values if _, ok := arrayReplacements[fmt.Sprintf("%s.%s", ParamsPrefix, ArrayReference(val))]; ok { replacedValues = append(replacedValues, substitution.ApplyArrayReplacements(val, replacements, arrayReplacements)...) + } else if _, ok := arrayReplacements[ResultsArrayReference(val)]; ok { + replacedValues = append(replacedValues, substitution.ApplyArrayReplacements(val, replacements, arrayReplacements)...) } else { replacedValues = append(replacedValues, substitution.ApplyReplacements(val, replacements)) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go index 316dbf10f2..b5a0b1c8e9 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go @@ -74,17 +74,17 @@ func (wes WhenExpressions) validateTaskResultsVariables() *apis.FieldError { return nil } -func (wes WhenExpressions) validatePipelineParametersVariables(prefix string, paramNames sets.String, arrayParamNames sets.String) (errs *apis.FieldError) { +func (wes WhenExpressions) validatePipelineParametersVariables(prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for idx, we := range wes { - errs = errs.Also(validateStringVariable(we.Input, prefix, paramNames, 
arrayParamNames).ViaField("input").ViaFieldIndex("when", idx)) + errs = errs.Also(validateStringVariable(we.Input, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("input").ViaFieldIndex("when", idx)) for _, val := range we.Values { // one of the values could be a reference to an array param, such as, $(params.foo[*]) // extract the variable name from the pattern $(params.foo[*]), if the variable name matches with one of the array params // validate the param as an array variable otherwise, validate it as a string variable if arrayParamNames.Has(ArrayReference(val)) { - errs = errs.Also(validateArrayVariable(val, prefix, paramNames, arrayParamNames).ViaField("values").ViaFieldIndex("when", idx)) + errs = errs.Also(validateArrayVariable(val, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("values").ViaFieldIndex("when", idx)) } else { - errs = errs.Also(validateStringVariable(val, prefix, paramNames, arrayParamNames).ViaField("values").ViaFieldIndex("when", idx)) + errs = errs.Also(validateStringVariable(val, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("values").ViaFieldIndex("when", idx)) } } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go index 8f45bfb36c..f915fe13df 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go @@ -77,6 +77,12 @@ type WorkspaceBinding struct { // Secret represents a secret that should populate this workspace. // +optional Secret *corev1.SecretVolumeSource `json:"secret,omitempty"` + // Projected represents a projected volume that should populate this workspace. 
+ // +optional + Projected *corev1.ProjectedVolumeSource `json:"projected,omitempty"` + // CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers. + // +optional + CSI *corev1.CSIVolumeSource `json:"csi,omitempty"` } // WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun @@ -105,7 +111,8 @@ type WorkspacePipelineTaskBinding struct { // Name is the name of the workspace as declared by the task Name string `json:"name"` // Workspace is the name of the workspace declared by the pipeline - Workspace string `json:"workspace"` + // +optional + Workspace string `json:"workspace,omitempty"` // SubPath is optionally a directory on the volume which should be used // for this binding (i.e. the volume will be mounted at this sub directory). // +optional diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_validation.go index 1aff7f0440..96bb846deb 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_validation.go @@ -19,6 +19,8 @@ package v1beta1 import ( "context" + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/version" "k8s.io/apimachinery/pkg/api/equality" "knative.dev/pkg/apis" ) @@ -36,7 +38,7 @@ var allVolumeSourceFields = []string{ // Validate looks at the Volume provided in wb and makes sure that it is valid. // This means that only one VolumeSource can be specified, and also that the // supported VolumeSource is itself valid. 
-func (b *WorkspaceBinding) Validate(ctx context.Context) *apis.FieldError { +func (b *WorkspaceBinding) Validate(ctx context.Context) (errs *apis.FieldError) { if equality.Semantic.DeepEqual(b, &WorkspaceBinding{}) || b == nil { return apis.ErrMissingField(apis.CurrentField) } @@ -66,6 +68,29 @@ func (b *WorkspaceBinding) Validate(ctx context.Context) *apis.FieldError { return apis.ErrMissingField("secret.secretName") } + // The projected workspace is only supported when the alpha feature gate is enabled. + // For a Projected volume to work, you must provide at least one source. + if b.Projected != nil { + if err := version.ValidateEnabledAPIFields(ctx, "projected workspace type", config.AlphaAPIFields).ViaField("workspace"); err != nil { + return err + } + if len(b.Projected.Sources) == 0 { + return apis.ErrMissingField("projected.sources") + } + } + + // The csi workspace is only supported when the alpha feature gate is enabled. + // For a CSI to work, you must provide and have installed the driver to use. 
+ if b.CSI != nil { + errs := version.ValidateEnabledAPIFields(ctx, "csi workspace type", config.AlphaAPIFields).ViaField("workspaces") + if errs != nil { + return errs + } + if b.CSI.Driver == "" { + return apis.ErrMissingField("csi.driver") + } + } + return nil } @@ -88,5 +113,11 @@ func (b *WorkspaceBinding) numSources() int { if b.Secret != nil { n++ } + if b.Projected != nil { + n++ + } + if b.CSI != nil { + n++ + } return n } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go index 983a234151..a793eb3d63 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go @@ -38,6 +38,13 @@ func (in *ArrayOrString) DeepCopyInto(out *ArrayOrString) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.ObjectVal != nil { + in, out := &in.ObjectVal, &out.ObjectVal + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -55,17 +62,6 @@ func (in *ArrayOrString) DeepCopy() *ArrayOrString { func (in *ChildStatusReference) DeepCopyInto(out *ChildStatusReference) { *out = *in out.TypeMeta = in.TypeMeta - if in.ConditionChecks != nil { - in, out := &in.ConditionChecks, &out.ConditionChecks - *out = make([]*PipelineRunChildConditionCheckStatus, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(PipelineRunChildConditionCheckStatus) - (*in).DeepCopyInto(*out) - } - } - } if in.WhenExpressions != nil { in, out := &in.WhenExpressions, &out.WhenExpressions *out = make([]WhenExpression, len(*in)) @@ -183,69 +179,6 @@ func (in *ClusterTaskList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ConditionCheck) DeepCopyInto(out *ConditionCheck) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionCheck. -func (in *ConditionCheck) DeepCopy() *ConditionCheck { - if in == nil { - return nil - } - out := new(ConditionCheck) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConditionCheckStatus) DeepCopyInto(out *ConditionCheckStatus) { - *out = *in - in.Status.DeepCopyInto(&out.Status) - in.ConditionCheckStatusFields.DeepCopyInto(&out.ConditionCheckStatusFields) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionCheckStatus. -func (in *ConditionCheckStatus) DeepCopy() *ConditionCheckStatus { - if in == nil { - return nil - } - out := new(ConditionCheckStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConditionCheckStatusFields) DeepCopyInto(out *ConditionCheckStatusFields) { - *out = *in - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = (*in).DeepCopy() - } - if in.CompletionTime != nil { - in, out := &in.CompletionTime, &out.CompletionTime - *out = (*in).DeepCopy() - } - in.Check.DeepCopyInto(&out.Check) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionCheckStatusFields. 
-func (in *ConditionCheckStatusFields) DeepCopy() *ConditionCheckStatusFields { - if in == nil { - return nil - } - out := new(ConditionCheckStatusFields) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EmbeddedTask) DeepCopyInto(out *EmbeddedTask) { *out = *in @@ -323,6 +256,13 @@ func (in *Param) DeepCopy() *Param { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ParamSpec) DeepCopyInto(out *ParamSpec) { *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]PropertySpec, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.Default != nil { in, out := &in.Default, &out.Default *out = new(ArrayOrString) @@ -479,11 +419,6 @@ func (in *PipelineResourceRef) DeepCopy() *PipelineResourceRef { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PipelineResourceResult) DeepCopyInto(out *PipelineResourceResult) { *out = *in - if in.ResourceRef != nil { - in, out := &in.ResourceRef, &out.ResourceRef - *out = new(PipelineResourceRef) - **out = **in - } return } @@ -500,6 +435,7 @@ func (in *PipelineResourceResult) DeepCopy() *PipelineResourceResult { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PipelineResult) DeepCopyInto(out *PipelineResult) { *out = *in + in.Value.DeepCopyInto(&out.Value) return } @@ -541,44 +477,6 @@ func (in *PipelineRun) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PipelineRunChildConditionCheckStatus) DeepCopyInto(out *PipelineRunChildConditionCheckStatus) { - *out = *in - in.PipelineRunConditionCheckStatus.DeepCopyInto(&out.PipelineRunConditionCheckStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunChildConditionCheckStatus. -func (in *PipelineRunChildConditionCheckStatus) DeepCopy() *PipelineRunChildConditionCheckStatus { - if in == nil { - return nil - } - out := new(PipelineRunChildConditionCheckStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineRunConditionCheckStatus) DeepCopyInto(out *PipelineRunConditionCheckStatus) { - *out = *in - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(ConditionCheckStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunConditionCheckStatus. -func (in *PipelineRunConditionCheckStatus) DeepCopy() *PipelineRunConditionCheckStatus { - if in == nil { - return nil - } - out := new(PipelineRunConditionCheckStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PipelineRunList) DeepCopyInto(out *PipelineRunList) { *out = *in @@ -615,6 +513,7 @@ func (in *PipelineRunList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PipelineRunResult) DeepCopyInto(out *PipelineRunResult) { *out = *in + in.Value.DeepCopyInto(&out.Value) return } @@ -683,11 +582,6 @@ func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.ServiceAccountNames != nil { - in, out := &in.ServiceAccountNames, &out.ServiceAccountNames - *out = make([]PipelineRunSpecServiceAccountName, len(*in)) - copy(*out, *in) - } if in.Timeouts != nil { in, out := &in.Timeouts, &out.Timeouts *out = new(TimeoutFields) @@ -730,22 +624,6 @@ func (in *PipelineRunSpec) DeepCopy() *PipelineRunSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineRunSpecServiceAccountName) DeepCopyInto(out *PipelineRunSpecServiceAccountName) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunSpecServiceAccountName. -func (in *PipelineRunSpecServiceAccountName) DeepCopy() *PipelineRunSpecServiceAccountName { - if in == nil { - return nil - } - out := new(PipelineRunSpecServiceAccountName) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PipelineRunStatus) DeepCopyInto(out *PipelineRunStatus) { *out = *in @@ -808,7 +686,9 @@ func (in *PipelineRunStatusFields) DeepCopyInto(out *PipelineRunStatusFields) { if in.PipelineResults != nil { in, out := &in.PipelineResults, &out.PipelineResults *out = make([]PipelineRunResult, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.PipelineSpec != nil { in, out := &in.PipelineSpec, &out.PipelineSpec @@ -850,21 +730,6 @@ func (in *PipelineRunTaskRunStatus) DeepCopyInto(out *PipelineRunTaskRunStatus) *out = new(TaskRunStatus) (*in).DeepCopyInto(*out) } - if in.ConditionChecks != nil { - in, out := &in.ConditionChecks, &out.ConditionChecks - *out = make(map[string]*PipelineRunConditionCheckStatus, len(*in)) - for key, val := range *in { - var outVal *PipelineRunConditionCheckStatus - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(PipelineRunConditionCheckStatus) - (*in).DeepCopyInto(*out) - } - (*out)[key] = outVal - } - } if in.WhenExpressions != nil { in, out := &in.WhenExpressions, &out.WhenExpressions *out = make([]WhenExpression, len(*in)) @@ -915,7 +780,9 @@ func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { if in.Results != nil { in, out := &in.Results, &out.Results *out = make([]PipelineResult, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.Finally != nil { in, out := &in.Finally, &out.Finally @@ -950,13 +817,6 @@ func (in *PipelineTask) DeepCopyInto(out *PipelineTask) { *out = new(EmbeddedTask) (*in).DeepCopyInto(*out) } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]PipelineTaskCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } if in.WhenExpressions != nil { in, out := &in.WhenExpressions, &out.WhenExpressions *out = make(WhenExpressions, len(*in)) @@ -1011,36 +871,6 @@ func (in *PipelineTask) DeepCopy() *PipelineTask { 
return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineTaskCondition) DeepCopyInto(out *PipelineTaskCondition) { - *out = *in - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]Param, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]PipelineTaskInputResource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskCondition. -func (in *PipelineTaskCondition) DeepCopy() *PipelineTaskCondition { - if in == nil { - return nil - } - out := new(PipelineTaskCondition) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PipelineTaskInputResource) DeepCopyInto(out *PipelineTaskInputResource) { *out = *in @@ -1212,6 +1042,16 @@ func (in *PipelineTaskRunSpec) DeepCopyInto(out *PipelineTaskRunSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(PipelineTaskMetadata) + (*in).DeepCopyInto(*out) + } + if in.ComputeResources != nil { + in, out := &in.ComputeResources, &out.ComputeResources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } @@ -1241,6 +1081,22 @@ func (in *PipelineWorkspaceDeclaration) DeepCopy() *PipelineWorkspaceDeclaration return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropertySpec) DeepCopyInto(out *PropertySpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertySpec. 
+func (in *PropertySpec) DeepCopy() *PropertySpec { + if in == nil { + return nil + } + out := new(PropertySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResolverParam) DeepCopyInto(out *ResolverParam) { *out = *in @@ -1297,7 +1153,73 @@ func (in *ResultRef) DeepCopy() *ResultRef { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Sidecar) DeepCopyInto(out *Sidecar) { *out = *in - in.Container.DeepCopyInto(&out.Container) + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1.ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]v1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]v1.VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(v1.Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(v1.Probe) + (*in).DeepCopyInto(*out) + } + if in.StartupProbe != nil { + 
in, out := &in.StartupProbe, &out.StartupProbe + *out = new(v1.Probe) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(v1.Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } if in.Workspaces != nil { in, out := &in.Workspaces, &out.Workspaces *out = make([]WorkspaceUsage, len(*in)) @@ -1359,7 +1281,73 @@ func (in *SkippedTask) DeepCopy() *SkippedTask { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Step) DeepCopyInto(out *Step) { *out = *in - in.Container.DeepCopyInto(&out.Container) + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DeprecatedPorts != nil { + in, out := &in.DeprecatedPorts, &out.DeprecatedPorts + *out = make([]v1.ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]v1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]v1.VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.DeprecatedLivenessProbe != nil { + in, out := &in.DeprecatedLivenessProbe, &out.DeprecatedLivenessProbe + *out = 
new(v1.Probe) + (*in).DeepCopyInto(*out) + } + if in.DeprecatedReadinessProbe != nil { + in, out := &in.DeprecatedReadinessProbe, &out.DeprecatedReadinessProbe + *out = new(v1.Probe) + (*in).DeepCopyInto(*out) + } + if in.DeprecatedStartupProbe != nil { + in, out := &in.DeprecatedStartupProbe, &out.DeprecatedStartupProbe + *out = new(v1.Probe) + (*in).DeepCopyInto(*out) + } + if in.DeprecatedLifecycle != nil { + in, out := &in.DeprecatedLifecycle, &out.DeprecatedLifecycle + *out = new(v1.Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout *out = new(metav1.Duration) @@ -1370,6 +1358,16 @@ func (in *Step) DeepCopyInto(out *Step) { *out = make([]WorkspaceUsage, len(*in)) copy(*out, *in) } + if in.StdoutConfig != nil { + in, out := &in.StdoutConfig, &out.StdoutConfig + *out = new(StepOutputConfig) + **out = **in + } + if in.StderrConfig != nil { + in, out := &in.StderrConfig, &out.StderrConfig + *out = new(StepOutputConfig) + **out = **in + } return } @@ -1383,6 +1381,22 @@ func (in *Step) DeepCopy() *Step { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepOutputConfig) DeepCopyInto(out *StepOutputConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepOutputConfig. +func (in *StepOutputConfig) DeepCopy() *StepOutputConfig { + if in == nil { + return nil + } + out := new(StepOutputConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StepState) DeepCopyInto(out *StepState) { *out = *in @@ -1400,6 +1414,89 @@ func (in *StepState) DeepCopy() *StepState { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepTemplate) DeepCopyInto(out *StepTemplate) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DeprecatedPorts != nil { + in, out := &in.DeprecatedPorts, &out.DeprecatedPorts + *out = make([]v1.ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]v1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]v1.VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.DeprecatedLivenessProbe != nil { + in, out := &in.DeprecatedLivenessProbe, &out.DeprecatedLivenessProbe + *out = new(v1.Probe) + (*in).DeepCopyInto(*out) + } + if in.DeprecatedReadinessProbe != nil { + in, out := &in.DeprecatedReadinessProbe, &out.DeprecatedReadinessProbe + *out = new(v1.Probe) + (*in).DeepCopyInto(*out) + } + if in.DeprecatedStartupProbe != nil { + in, out := &in.DeprecatedStartupProbe, &out.DeprecatedStartupProbe + *out = new(v1.Probe) + (*in).DeepCopyInto(*out) + } + if in.DeprecatedLifecycle != nil { + in, out := &in.DeprecatedLifecycle, 
&out.DeprecatedLifecycle + *out = new(v1.Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepTemplate. +func (in *StepTemplate) DeepCopy() *StepTemplate { + if in == nil { + return nil + } + out := new(StepTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Task) DeepCopyInto(out *Task) { *out = *in @@ -1545,6 +1642,13 @@ func (in *TaskResources) DeepCopy() *TaskResources { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TaskResult) DeepCopyInto(out *TaskResult) { *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]PropertySpec, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -1726,6 +1830,7 @@ func (in *TaskRunResources) DeepCopy() *TaskRunResources { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TaskRunResult) DeepCopyInto(out *TaskRunResult) { *out = *in + in.Value.DeepCopyInto(&out.Value) return } @@ -1817,6 +1922,11 @@ func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ComputeResources != nil { + in, out := &in.ComputeResources, &out.ComputeResources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } @@ -1883,14 +1993,14 @@ func (in *TaskRunStatusFields) DeepCopyInto(out *TaskRunStatusFields) { if in.ResourcesResult != nil { in, out := &in.ResourcesResult, &out.ResourcesResult *out = make([]PipelineResourceResult, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + copy(*out, *in) } if in.TaskRunResults != nil { in, out := &in.TaskRunResults, &out.TaskRunResults *out = make([]TaskRunResult, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.Sidecars != nil { in, out := &in.Sidecars, &out.Sidecars @@ -1965,7 +2075,7 @@ func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { } if in.StepTemplate != nil { in, out := &in.StepTemplate, &out.StepTemplate - *out = new(v1.Container) + *out = new(StepTemplate) (*in).DeepCopyInto(*out) } if in.Sidecars != nil { @@ -1983,7 +2093,9 @@ func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { if in.Results != nil { in, out := &in.Results, &out.Results *out = make([]TaskResult, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -2100,6 +2212,16 @@ func (in *WorkspaceBinding) DeepCopyInto(out *WorkspaceBinding) { *out = new(v1.SecretVolumeSource) (*in).DeepCopyInto(*out) } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(v1.ProjectedVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(v1.CSIVolumeSource) + (*in).DeepCopyInto(*out) + } return } diff --git 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/version_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/version/version_validation.go similarity index 98% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/version_validation.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/version/version_validation.go index 4d58eeb755..bf8f6bf156 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/version_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/version/version_validation.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package version import ( "context" diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go index 0681ce1abd..0763f48419 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go @@ -22,6 +22,7 @@ import ( "fmt" "net/http" + tektonv1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1" tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" tektonv1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1" discovery "k8s.io/client-go/discovery" @@ -33,6 +34,7 @@ type Interface interface { Discovery() discovery.DiscoveryInterface TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface TektonV1beta1() tektonv1beta1.TektonV1beta1Interface + TektonV1() tektonv1.TektonV1Interface } // Clientset contains the clients for groups. 
Each group has exactly one @@ -41,6 +43,7 @@ type Clientset struct { *discovery.DiscoveryClient tektonV1alpha1 *tektonv1alpha1.TektonV1alpha1Client tektonV1beta1 *tektonv1beta1.TektonV1beta1Client + tektonV1 *tektonv1.TektonV1Client } // TektonV1alpha1 retrieves the TektonV1alpha1Client @@ -53,6 +56,11 @@ func (c *Clientset) TektonV1beta1() tektonv1beta1.TektonV1beta1Interface { return c.tektonV1beta1 } +// TektonV1 retrieves the TektonV1Client +func (c *Clientset) TektonV1() tektonv1.TektonV1Interface { + return c.tektonV1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -101,6 +109,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.tektonV1, err = tektonv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { @@ -124,6 +136,7 @@ func New(c rest.Interface) *Clientset { var cs Clientset cs.tektonV1alpha1 = tektonv1alpha1.New(c) cs.tektonV1beta1 = tektonv1beta1.New(c) + cs.tektonV1 = tektonv1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go index 757e6eb815..21c3e532df 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go @@ -19,6 +19,7 @@ limitations under the License. 
package scheme import ( + tektonv1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,6 +35,7 @@ var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ tektonv1alpha1.AddToScheme, tektonv1beta1.AddToScheme, + tektonv1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_interface.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/doc.go similarity index 67% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_interface.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/doc.go index d545dddd31..6f9b4198cf 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,13 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +// Code generated by client-gen. DO NOT EDIT. -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// TaskObject is implemented by Task and ClusterTask -type TaskObject interface { - TaskMetadata() metav1.ObjectMeta - TaskSpec() TaskSpec - Copy() TaskObject -} +// This package has the automatically generated typed clients. 
+package v1 diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/generated_expansion.go similarity index 81% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_validation.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/generated_expansion.go index 7536360d31..1d86b44247 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,4 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type TaskExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipeline_client.go new file mode 100644 index 0000000000..d2baa0206d --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipeline_client.go @@ -0,0 +1,107 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type TektonV1Interface interface { + RESTClient() rest.Interface + TasksGetter +} + +// TektonV1Client is used to interact with features provided by the tekton.dev group. +type TektonV1Client struct { + restClient rest.Interface +} + +func (c *TektonV1Client) Tasks(namespace string) TaskInterface { + return newTasks(c, namespace) +} + +// NewForConfig creates a new TektonV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*TektonV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new TektonV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*TektonV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &TektonV1Client{client}, nil +} + +// NewForConfigOrDie creates a new TektonV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *TektonV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new TektonV1Client for the given RESTClient. +func New(c rest.Interface) *TektonV1Client { + return &TektonV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *TektonV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/task.go similarity index 67% rename from vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/task.go index ba5b46d27d..d900e6f612 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/task.go @@ -16,15 +16,15 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( "context" "time" - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" @@ -38,14 +38,14 @@ type TasksGetter interface { // TaskInterface has methods to work with Task resources. 
type TaskInterface interface { - Create(ctx context.Context, task *v1alpha1.Task, opts v1.CreateOptions) (*v1alpha1.Task, error) - Update(ctx context.Context, task *v1alpha1.Task, opts v1.UpdateOptions) (*v1alpha1.Task, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Task, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.TaskList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Task, err error) + Create(ctx context.Context, task *v1.Task, opts metav1.CreateOptions) (*v1.Task, error) + Update(ctx context.Context, task *v1.Task, opts metav1.UpdateOptions) (*v1.Task, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Task, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.TaskList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Task, err error) TaskExpansion } @@ -56,7 +56,7 @@ type tasks struct { } // newTasks returns a Tasks -func newTasks(c *TektonV1alpha1Client, namespace string) *tasks { +func newTasks(c *TektonV1Client, namespace string) *tasks { return &tasks{ client: c.RESTClient(), ns: namespace, @@ -64,8 +64,8 @@ func newTasks(c *TektonV1alpha1Client, namespace string) *tasks { } // Get takes name of the task, and returns the corresponding task object, and an error if there is any. 
-func (c *tasks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Task, err error) { - result = &v1alpha1.Task{} +func (c *tasks) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Task, err error) { + result = &v1.Task{} err = c.client.Get(). Namespace(c.ns). Resource("tasks"). @@ -77,12 +77,12 @@ func (c *tasks) Get(ctx context.Context, name string, options v1.GetOptions) (re } // List takes label and field selectors, and returns the list of Tasks that match those selectors. -func (c *tasks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TaskList, err error) { +func (c *tasks) List(ctx context.Context, opts metav1.ListOptions) (result *v1.TaskList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.TaskList{} + result = &v1.TaskList{} err = c.client.Get(). Namespace(c.ns). Resource("tasks"). @@ -94,7 +94,7 @@ func (c *tasks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1 } // Watch returns a watch.Interface that watches the requested tasks. -func (c *tasks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *tasks) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -109,8 +109,8 @@ func (c *tasks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface } // Create takes the representation of a task and creates it. Returns the server's representation of the task, and an error, if there is any. 
-func (c *tasks) Create(ctx context.Context, task *v1alpha1.Task, opts v1.CreateOptions) (result *v1alpha1.Task, err error) { - result = &v1alpha1.Task{} +func (c *tasks) Create(ctx context.Context, task *v1.Task, opts metav1.CreateOptions) (result *v1.Task, err error) { + result = &v1.Task{} err = c.client.Post(). Namespace(c.ns). Resource("tasks"). @@ -122,8 +122,8 @@ func (c *tasks) Create(ctx context.Context, task *v1alpha1.Task, opts v1.CreateO } // Update takes the representation of a task and updates it. Returns the server's representation of the task, and an error, if there is any. -func (c *tasks) Update(ctx context.Context, task *v1alpha1.Task, opts v1.UpdateOptions) (result *v1alpha1.Task, err error) { - result = &v1alpha1.Task{} +func (c *tasks) Update(ctx context.Context, task *v1.Task, opts metav1.UpdateOptions) (result *v1.Task, err error) { + result = &v1.Task{} err = c.client.Put(). Namespace(c.ns). Resource("tasks"). @@ -136,7 +136,7 @@ func (c *tasks) Update(ctx context.Context, task *v1alpha1.Task, opts v1.UpdateO } // Delete takes name of the task and deletes it. Returns an error if one occurs. -func (c *tasks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *tasks) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("tasks"). @@ -147,7 +147,7 @@ func (c *tasks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) } // DeleteCollection deletes a collection of objects. 
-func (c *tasks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *tasks) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { var timeout time.Duration if listOpts.TimeoutSeconds != nil { timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second @@ -163,8 +163,8 @@ func (c *tasks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, lis } // Patch applies the patch and returns the patched task. -func (c *tasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Task, err error) { - result = &v1alpha1.Task{} +func (c *tasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Task, err error) { + result = &v1.Task{} err = c.client.Patch(pt). Namespace(c.ns). Resource("tasks"). diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go deleted file mode 100644 index 570202483b..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ClusterTasksGetter has a method to return a ClusterTaskInterface. -// A group's client should implement this interface. -type ClusterTasksGetter interface { - ClusterTasks() ClusterTaskInterface -} - -// ClusterTaskInterface has methods to work with ClusterTask resources. -type ClusterTaskInterface interface { - Create(ctx context.Context, clusterTask *v1alpha1.ClusterTask, opts v1.CreateOptions) (*v1alpha1.ClusterTask, error) - Update(ctx context.Context, clusterTask *v1alpha1.ClusterTask, opts v1.UpdateOptions) (*v1alpha1.ClusterTask, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterTask, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterTaskList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTask, err error) - ClusterTaskExpansion -} - -// clusterTasks implements ClusterTaskInterface -type clusterTasks struct { - client rest.Interface -} - -// newClusterTasks returns a ClusterTasks -func newClusterTasks(c *TektonV1alpha1Client) *clusterTasks { - return &clusterTasks{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterTask, and returns the corresponding clusterTask object, and an error if there is any. 
-func (c *clusterTasks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTask, err error) { - result = &v1alpha1.ClusterTask{} - err = c.client.Get(). - Resource("clustertasks"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterTasks that match those selectors. -func (c *clusterTasks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTaskList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterTaskList{} - err = c.client.Get(). - Resource("clustertasks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterTasks. -func (c *clusterTasks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clustertasks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterTask and creates it. Returns the server's representation of the clusterTask, and an error, if there is any. -func (c *clusterTasks) Create(ctx context.Context, clusterTask *v1alpha1.ClusterTask, opts v1.CreateOptions) (result *v1alpha1.ClusterTask, err error) { - result = &v1alpha1.ClusterTask{} - err = c.client.Post(). - Resource("clustertasks"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterTask). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterTask and updates it. 
Returns the server's representation of the clusterTask, and an error, if there is any. -func (c *clusterTasks) Update(ctx context.Context, clusterTask *v1alpha1.ClusterTask, opts v1.UpdateOptions) (result *v1alpha1.ClusterTask, err error) { - result = &v1alpha1.ClusterTask{} - err = c.client.Put(). - Resource("clustertasks"). - Name(clusterTask.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterTask). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterTask and deletes it. Returns an error if one occurs. -func (c *clusterTasks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clustertasks"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterTasks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clustertasks"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterTask. -func (c *clusterTasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTask, err error) { - result = &v1alpha1.ClusterTask{} - err = c.client.Patch(pt). - Resource("clustertasks"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/condition.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/condition.go deleted file mode 100644 index ab26eea6eb..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/condition.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ConditionsGetter has a method to return a ConditionInterface. -// A group's client should implement this interface. -type ConditionsGetter interface { - Conditions(namespace string) ConditionInterface -} - -// ConditionInterface has methods to work with Condition resources. 
-type ConditionInterface interface { - Create(ctx context.Context, condition *v1alpha1.Condition, opts v1.CreateOptions) (*v1alpha1.Condition, error) - Update(ctx context.Context, condition *v1alpha1.Condition, opts v1.UpdateOptions) (*v1alpha1.Condition, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Condition, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ConditionList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Condition, err error) - ConditionExpansion -} - -// conditions implements ConditionInterface -type conditions struct { - client rest.Interface - ns string -} - -// newConditions returns a Conditions -func newConditions(c *TektonV1alpha1Client, namespace string) *conditions { - return &conditions{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the condition, and returns the corresponding condition object, and an error if there is any. -func (c *conditions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Condition, err error) { - result = &v1alpha1.Condition{} - err = c.client.Get(). - Namespace(c.ns). - Resource("conditions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Conditions that match those selectors. 
-func (c *conditions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ConditionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ConditionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("conditions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested conditions. -func (c *conditions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("conditions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a condition and creates it. Returns the server's representation of the condition, and an error, if there is any. -func (c *conditions) Create(ctx context.Context, condition *v1alpha1.Condition, opts v1.CreateOptions) (result *v1alpha1.Condition, err error) { - result = &v1alpha1.Condition{} - err = c.client.Post(). - Namespace(c.ns). - Resource("conditions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(condition). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a condition and updates it. Returns the server's representation of the condition, and an error, if there is any. -func (c *conditions) Update(ctx context.Context, condition *v1alpha1.Condition, opts v1.UpdateOptions) (result *v1alpha1.Condition, err error) { - result = &v1alpha1.Condition{} - err = c.client.Put(). - Namespace(c.ns). - Resource("conditions"). - Name(condition.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(condition). - Do(ctx). 
- Into(result) - return -} - -// Delete takes name of the condition and deletes it. Returns an error if one occurs. -func (c *conditions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("conditions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *conditions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("conditions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched condition. -func (c *conditions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Condition, err error) { - result = &v1alpha1.Condition{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("conditions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go index 6942df0f45..40814697cf 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go @@ -18,16 +18,4 @@ limitations under the License. 
package v1alpha1 -type ClusterTaskExpansion interface{} - -type ConditionExpansion interface{} - -type PipelineExpansion interface{} - -type PipelineRunExpansion interface{} - type RunExpansion interface{} - -type TaskExpansion interface{} - -type TaskRunExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go deleted file mode 100644 index cc0a370f77..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// PipelinesGetter has a method to return a PipelineInterface. -// A group's client should implement this interface. -type PipelinesGetter interface { - Pipelines(namespace string) PipelineInterface -} - -// PipelineInterface has methods to work with Pipeline resources. 
-type PipelineInterface interface { - Create(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.CreateOptions) (*v1alpha1.Pipeline, error) - Update(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.UpdateOptions) (*v1alpha1.Pipeline, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Pipeline, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PipelineList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Pipeline, err error) - PipelineExpansion -} - -// pipelines implements PipelineInterface -type pipelines struct { - client rest.Interface - ns string -} - -// newPipelines returns a Pipelines -func newPipelines(c *TektonV1alpha1Client, namespace string) *pipelines { - return &pipelines{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the pipeline, and returns the corresponding pipeline object, and an error if there is any. -func (c *pipelines) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pipelines"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Pipelines that match those selectors. -func (c *pipelines) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PipelineList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PipelineList{} - err = c.client.Get(). 
- Namespace(c.ns). - Resource("pipelines"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pipelines. -func (c *pipelines) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pipelines"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a pipeline and creates it. Returns the server's representation of the pipeline, and an error, if there is any. -func (c *pipelines) Create(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.CreateOptions) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pipelines"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pipeline). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a pipeline and updates it. Returns the server's representation of the pipeline, and an error, if there is any. -func (c *pipelines) Update(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.UpdateOptions) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pipelines"). - Name(pipeline.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pipeline). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the pipeline and deletes it. Returns an error if one occurs. -func (c *pipelines) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pipelines"). - Name(name). - Body(&opts). - Do(ctx). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *pipelines) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("pipelines"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched pipeline. -func (c *pipelines) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("pipelines"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go index d81e4c674b..28d39482a6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go @@ -28,13 +28,7 @@ import ( type TektonV1alpha1Interface interface { RESTClient() rest.Interface - ClusterTasksGetter - ConditionsGetter - PipelinesGetter - PipelineRunsGetter RunsGetter - TasksGetter - TaskRunsGetter } // TektonV1alpha1Client is used to interact with features provided by the tekton.dev group. 
@@ -42,34 +36,10 @@ type TektonV1alpha1Client struct { restClient rest.Interface } -func (c *TektonV1alpha1Client) ClusterTasks() ClusterTaskInterface { - return newClusterTasks(c) -} - -func (c *TektonV1alpha1Client) Conditions(namespace string) ConditionInterface { - return newConditions(c, namespace) -} - -func (c *TektonV1alpha1Client) Pipelines(namespace string) PipelineInterface { - return newPipelines(c, namespace) -} - -func (c *TektonV1alpha1Client) PipelineRuns(namespace string) PipelineRunInterface { - return newPipelineRuns(c, namespace) -} - func (c *TektonV1alpha1Client) Runs(namespace string) RunInterface { return newRuns(c, namespace) } -func (c *TektonV1alpha1Client) Tasks(namespace string) TaskInterface { - return newTasks(c, namespace) -} - -func (c *TektonV1alpha1Client) TaskRuns(namespace string) TaskRunInterface { - return newTaskRuns(c, namespace) -} - // NewForConfig creates a new TektonV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go deleted file mode 100644 index cdf2a1e367..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// PipelineRunsGetter has a method to return a PipelineRunInterface. -// A group's client should implement this interface. -type PipelineRunsGetter interface { - PipelineRuns(namespace string) PipelineRunInterface -} - -// PipelineRunInterface has methods to work with PipelineRun resources. -type PipelineRunInterface interface { - Create(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.CreateOptions) (*v1alpha1.PipelineRun, error) - Update(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.UpdateOptions) (*v1alpha1.PipelineRun, error) - UpdateStatus(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.UpdateOptions) (*v1alpha1.PipelineRun, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PipelineRun, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PipelineRunList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PipelineRun, err error) - PipelineRunExpansion -} - -// pipelineRuns implements PipelineRunInterface -type pipelineRuns struct { - client rest.Interface - ns string -} - -// newPipelineRuns returns a PipelineRuns -func 
newPipelineRuns(c *TektonV1alpha1Client, namespace string) *pipelineRuns { - return &pipelineRuns{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the pipelineRun, and returns the corresponding pipelineRun object, and an error if there is any. -func (c *pipelineRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PipelineRun, err error) { - result = &v1alpha1.PipelineRun{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pipelineruns"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PipelineRuns that match those selectors. -func (c *pipelineRuns) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PipelineRunList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PipelineRunList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pipelineruns"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pipelineRuns. -func (c *pipelineRuns) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pipelineruns"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a pipelineRun and creates it. Returns the server's representation of the pipelineRun, and an error, if there is any. 
-func (c *pipelineRuns) Create(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.CreateOptions) (result *v1alpha1.PipelineRun, err error) { - result = &v1alpha1.PipelineRun{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pipelineruns"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pipelineRun). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a pipelineRun and updates it. Returns the server's representation of the pipelineRun, and an error, if there is any. -func (c *pipelineRuns) Update(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.UpdateOptions) (result *v1alpha1.PipelineRun, err error) { - result = &v1alpha1.PipelineRun{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pipelineruns"). - Name(pipelineRun.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pipelineRun). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *pipelineRuns) UpdateStatus(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.UpdateOptions) (result *v1alpha1.PipelineRun, err error) { - result = &v1alpha1.PipelineRun{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pipelineruns"). - Name(pipelineRun.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pipelineRun). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the pipelineRun and deletes it. Returns an error if one occurs. -func (c *pipelineRuns) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pipelineruns"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *pipelineRuns) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("pipelineruns"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched pipelineRun. -func (c *pipelineRuns) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PipelineRun, err error) { - result = &v1alpha1.PipelineRun{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("pipelineruns"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go deleted file mode 100644 index d3e7f1b704..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// TaskRunsGetter has a method to return a TaskRunInterface. -// A group's client should implement this interface. -type TaskRunsGetter interface { - TaskRuns(namespace string) TaskRunInterface -} - -// TaskRunInterface has methods to work with TaskRun resources. -type TaskRunInterface interface { - Create(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.CreateOptions) (*v1alpha1.TaskRun, error) - Update(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.UpdateOptions) (*v1alpha1.TaskRun, error) - UpdateStatus(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.UpdateOptions) (*v1alpha1.TaskRun, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.TaskRun, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.TaskRunList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.TaskRun, err error) - TaskRunExpansion -} - -// taskRuns implements TaskRunInterface -type taskRuns struct { - client rest.Interface - ns string -} - -// newTaskRuns returns a TaskRuns -func newTaskRuns(c *TektonV1alpha1Client, namespace string) *taskRuns { - return &taskRuns{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the taskRun, and returns the corresponding taskRun object, and an error if there is any. 
-func (c *taskRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.TaskRun, err error) { - result = &v1alpha1.TaskRun{} - err = c.client.Get(). - Namespace(c.ns). - Resource("taskruns"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of TaskRuns that match those selectors. -func (c *taskRuns) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TaskRunList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.TaskRunList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("taskruns"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested taskRuns. -func (c *taskRuns) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("taskruns"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a taskRun and creates it. Returns the server's representation of the taskRun, and an error, if there is any. -func (c *taskRuns) Create(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.CreateOptions) (result *v1alpha1.TaskRun, err error) { - result = &v1alpha1.TaskRun{} - err = c.client.Post(). - Namespace(c.ns). - Resource("taskruns"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(taskRun). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a taskRun and updates it. Returns the server's representation of the taskRun, and an error, if there is any. 
-func (c *taskRuns) Update(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.UpdateOptions) (result *v1alpha1.TaskRun, err error) { - result = &v1alpha1.TaskRun{} - err = c.client.Put(). - Namespace(c.ns). - Resource("taskruns"). - Name(taskRun.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(taskRun). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *taskRuns) UpdateStatus(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.UpdateOptions) (result *v1alpha1.TaskRun, err error) { - result = &v1alpha1.TaskRun{} - err = c.client.Put(). - Namespace(c.ns). - Resource("taskruns"). - Name(taskRun.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(taskRun). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the taskRun and deletes it. Returns an error if one occurs. -func (c *taskRuns) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("taskruns"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *taskRuns) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("taskruns"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched taskRun. 
-func (c *taskRuns) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.TaskRun, err error) { - result = &v1alpha1.TaskRun{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("taskruns"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go index 1f8c9110fb..f98d03569b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go @@ -19,19 +19,38 @@ package substitution import ( "fmt" "regexp" + "strconv" "strings" "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/apis" ) -const parameterSubstitution = `[_a-zA-Z][_a-zA-Z0-9.-]*(\[\*\])?` +const ( + parameterSubstitution = `.*?(\[\*\])?` -const braceMatchingRegex = "(\\$(\\(%s\\.(?P%s)\\)))" + // braceMatchingRegex is a regex for parameter references including dot notation, bracket notation with single and double quotes. 
+ braceMatchingRegex = "(\\$(\\(%s(\\.(?P%s)|\\[\"(?P%s)\"\\]|\\['(?P%s)'\\])\\)))" + // arrayIndexing will match all `[int]` and `[*]` for parseExpression + arrayIndexing = `\[([0-9])*\*?\]` + // paramIndex will match all `$(params.paramName[int])` expressions + paramIndexing = `\$\(params(\.[_a-zA-Z0-9.-]+|\[\'[_a-zA-Z0-9.-\/]+\'\]|\[\"[_a-zA-Z0-9.-\/]+\"\])\[[0-9]+\]\)` + // intIndex will match all `[int]` expressions + intIndex = `\[[0-9]+\]` +) + +// arrayIndexingRegex is used to match `[int]` and `[*]` +var arrayIndexingRegex = regexp.MustCompile(arrayIndexing) + +// paramIndexingRegex will match all `$(params.paramName[int])` expressions +var paramIndexingRegex = regexp.MustCompile(paramIndexing) + +// intIndexRegex will match all `[int]` for param expression +var intIndexRegex = regexp.MustCompile(intIndex) // ValidateVariable makes sure all variables in the provided string are known func ValidateVariable(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present := extractVariablesFromString(value, prefix); present { + if vs, present, _ := extractVariablesFromString(value, prefix); present { for _, v := range vs { v = strings.TrimSuffix(v, "[*]") if !vars.Has(v) { @@ -47,9 +66,16 @@ func ValidateVariable(name, value, prefix, locationName, path string, vars sets. 
// ValidateVariableP makes sure all variables for a parameter in the provided string are known func ValidateVariableP(value, prefix string, vars sets.String) *apis.FieldError { - if vs, present := extractVariablesFromString(value, prefix); present { + if vs, present, errString := extractVariablesFromString(value, prefix); present { + if errString != "" { + return &apis.FieldError{ + Message: errString, + Paths: []string{""}, + } + + } for _, v := range vs { - v = strings.TrimSuffix(v, "[*]") + v = TrimArrayIndex(v) if !vars.Has(v) { return &apis.FieldError{ Message: fmt.Sprintf("non-existent variable in %q", value), @@ -64,7 +90,7 @@ func ValidateVariableP(value, prefix string, vars sets.String) *apis.FieldError // ValidateVariableProhibited verifies that variables matching the relevant string expressions do not reference any of the names present in vars. func ValidateVariableProhibited(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present := extractVariablesFromString(value, prefix); present { + if vs, present, _ := extractVariablesFromString(value, prefix); present { for _, v := range vs { v = strings.TrimSuffix(v, "[*]") if vars.Has(v) { @@ -80,7 +106,14 @@ func ValidateVariableProhibited(name, value, prefix, locationName, path string, // ValidateVariableProhibitedP verifies that variables for a parameter matching the relevant string expressions do not reference any of the names present in vars. 
func ValidateVariableProhibitedP(value, prefix string, vars sets.String) *apis.FieldError { - if vs, present := extractVariablesFromString(value, prefix); present { + if vs, present, errString := extractVariablesFromString(value, prefix); present { + if errString != "" { + return &apis.FieldError{ + Message: errString, + Paths: []string{""}, + } + + } for _, v := range vs { v = strings.TrimSuffix(v, "[*]") if vars.Has(v) { @@ -95,9 +128,34 @@ func ValidateVariableProhibitedP(value, prefix string, vars sets.String) *apis.F return nil } +// ValidateEntireVariableProhibitedP verifies that values of object type are not used as whole. +func ValidateEntireVariableProhibitedP(value, prefix string, vars sets.String) *apis.FieldError { + vs, err := extractEntireVariablesFromString(value, prefix) + if err != nil { + return &apis.FieldError{ + Message: fmt.Sprintf("extractEntireVariablesFromString failed : %v", err), + // Empty path is required to make the `ViaField`, … work + Paths: []string{""}, + } + } + + for _, v := range vs { + v = strings.TrimSuffix(v, "[*]") + if vars.Has(v) { + return &apis.FieldError{ + Message: fmt.Sprintf("variable type invalid in %q", value), + // Empty path is required to make the `ViaField`, … work + Paths: []string{""}, + } + } + } + + return nil +} + // ValidateVariableIsolated verifies that variables matching the relevant string expressions are completely isolated if present. 
func ValidateVariableIsolated(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present := extractVariablesFromString(value, prefix); present { + if vs, present, _ := extractVariablesFromString(value, prefix); present { firstMatch, _ := extractExpressionFromString(value, prefix) for _, v := range vs { v = strings.TrimSuffix(v, "[*]") @@ -116,7 +174,14 @@ func ValidateVariableIsolated(name, value, prefix, locationName, path string, va // ValidateVariableIsolatedP verifies that variables matching the relevant string expressions are completely isolated if present. func ValidateVariableIsolatedP(value, prefix string, vars sets.String) *apis.FieldError { - if vs, present := extractVariablesFromString(value, prefix); present { + if vs, present, errString := extractVariablesFromString(value, prefix); present { + if errString != "" { + return &apis.FieldError{ + Message: errString, + Paths: []string{""}, + } + + } firstMatch, _ := extractExpressionFromString(value, prefix) for _, v := range vs { v = strings.TrimSuffix(v, "[*]") @@ -134,10 +199,33 @@ func ValidateVariableIsolatedP(value, prefix string, vars sets.String) *apis.Fie return nil } +// ValidateWholeArrayOrObjectRefInStringVariable validates if a single string field uses references to the whole array/object appropriately +// valid example: "$(params.myObject[*])" +// invalid example: "$(params.name-not-exist[*])" +func ValidateWholeArrayOrObjectRefInStringVariable(name, value, prefix string, vars sets.String) (isIsolated bool, errs *apis.FieldError) { + nameSubstitution := `[_a-zA-Z0-9.-]+\[\*\]` + + // a regex to check if the stringValue is an isolated reference to the whole array/object param without extra string literal. 
+ isolatedVariablePattern := fmt.Sprintf(fmt.Sprintf("^%s$", braceMatchingRegex), prefix, nameSubstitution, nameSubstitution, nameSubstitution) + isolatedVariableRegex, err := regexp.Compile(isolatedVariablePattern) + if err != nil { + return false, &apis.FieldError{ + Message: fmt.Sprint("Fail to parse the regex: ", err), + Paths: []string{fmt.Sprintf("%s.%s", prefix, name)}, + } + } + + if isolatedVariableRegex.MatchString(value) { + return true, ValidateVariableP(value, prefix, vars).ViaFieldKey(prefix, name) + } + + return false, nil +} + // Extract a the first full string expressions found (e.g "$(input.params.foo)"). Return // "" and false if nothing is found. func extractExpressionFromString(s, prefix string) (string, bool) { - pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution) + pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution) re := regexp.MustCompile(pattern) match := re.FindStringSubmatch(s) if match == nil { @@ -146,22 +234,68 @@ func extractExpressionFromString(s, prefix string) (string, bool) { return match[0], true } -func extractVariablesFromString(s, prefix string) ([]string, bool) { - pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution) +func extractVariablesFromString(s, prefix string) ([]string, bool, string) { + pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution) re := regexp.MustCompile(pattern) matches := re.FindAllStringSubmatch(s, -1) + errString := "" if len(matches) == 0 { - return []string{}, false + return []string{}, false, "" + } + vars := make([]string, len(matches)) + for i, match := range matches { + groups := matchGroups(match, re) + for j, v := range []string{"var1", "var2", "var3"} { + val := groups[v] + // If using the dot notation, the number of dot-separated components is restricted up to 2. 
+ // Valid Examples: + // - extract "aString" from .aString + // - extract "anObject" from .anObject.key + // Invalid Examples: + // - .foo.bar.baz.... + if j == 0 && strings.Contains(val, ".") { + if len(strings.Split(val, ".")) > 2 { + errString = fmt.Sprintf(`Invalid referencing of parameters in "%s"! Only two dot-separated components after the prefix "%s" are allowed.`, s, prefix) + return vars, true, errString + } + vars[i] = strings.SplitN(val, ".", 2)[0] + break + } + if val != "" { + vars[i] = val + break + } + } + } + return vars, true, errString +} + +func extractEntireVariablesFromString(s, prefix string) ([]string, error) { + pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution) + re, err := regexp.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("Fail to parse regex pattern: %v", err) + } + + matches := re.FindAllStringSubmatch(s, -1) + if len(matches) == 0 { + return []string{}, nil } vars := make([]string, len(matches)) for i, match := range matches { groups := matchGroups(match, re) // foo -> foo - // foo.bar -> foo - // foo.bar.baz -> foo - vars[i] = strings.SplitN(groups["var"], ".", 2)[0] + // foo.bar -> foo.bar + // foo.bar.baz -> foo.bar.baz + for _, v := range []string{"var1", "var2", "var3"} { + val := groups[v] + if val != "" { + vars[i] = val + break + } + } } - return vars, true + return vars, nil } func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string { @@ -207,3 +341,28 @@ func ApplyArrayReplacements(in string, stringReplacements map[string]string, arr // Otherwise return a size-1 array containing the input string with standard stringReplacements applied. return []string{ApplyReplacements(in, stringReplacements)} } + +// TrimArrayIndex replaces all `[i]` and `[*]` to "". 
+func TrimArrayIndex(s string) string { + return arrayIndexingRegex.ReplaceAllString(s, "") +} + +// ExtractParamsExpressions will find all `$(params.paramName[int])` expressions +func ExtractParamsExpressions(s string) []string { + return paramIndexingRegex.FindAllString(s, -1) +} + +// ExtractIndexString will find the leftmost match of `[int]` +func ExtractIndexString(s string) string { + return intIndexRegex.FindString(s) +} + +// ExtractIndex will extract int from `[int]` +func ExtractIndex(s string) (int, error) { + return strconv.Atoi(strings.TrimSuffix(strings.TrimPrefix(s, "["), "]")) +} + +// StripStarVarSubExpression strips "$(target[*])"" to get "target" +func StripStarVarSubExpression(s string) string { + return strings.TrimSuffix(strings.TrimSuffix(strings.TrimPrefix(s, "$("), ")"), "[*]") +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0893d79173..fbc214b252 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -389,18 +389,21 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/tektoncd/pipeline v0.35.1 +# github.com/tektoncd/pipeline v0.38.3 ## explicit; go 1.17 github.com/tektoncd/pipeline/pkg/apis/config github.com/tektoncd/pipeline/pkg/apis/pipeline github.com/tektoncd/pipeline/pkg/apis/pipeline/pod +github.com/tektoncd/pipeline/pkg/apis/pipeline/v1 github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1 github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1 github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1 github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1 github.com/tektoncd/pipeline/pkg/apis/validate +github.com/tektoncd/pipeline/pkg/apis/version github.com/tektoncd/pipeline/pkg/client/clientset/versioned github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme +github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1 github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1 
github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1 github.com/tektoncd/pipeline/pkg/list