feat: inject namespace from rendered manifests in post deploy hooks #9090

Merged
78 changes: 78 additions & 0 deletions integration/deploy_hooks_test.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2023 The Skaffold Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+    "fmt"
+    "testing"
+
+    "github.com/GoogleContainerTools/skaffold/v2/integration/skaffold"
+    "github.com/GoogleContainerTools/skaffold/v2/testutil"
+)
+
+func TestPostDeployHooksNamespaces(t *testing.T) {
+    MarkIntegrationTest(t, CanRunWithoutGcp)
+
+    tests := []struct {
+        description  string
+        dir          string
+        configFile   string
+        resourceFile string
+    }{
+        {
+            description: "set SKAFFOLD_NAMESPACES from manifests with kubectl deployer",
+            configFile: `apiVersion: skaffold/v4beta6
+kind: Config
+manifests:
+  rawYaml:
+    - pod.yaml
+
+deploy:
+  kubectl:
+    hooks:
+      after:
+        - host:
+            command: ["sh", "-c", "echo $SKAFFOLD_NAMESPACES"]
+`,
+            resourceFile: `apiVersion: v1
+kind: Pod
+metadata:
+  name: getting-started
+  namespace: %v
+spec:
+  containers:
+    - name: getting-started
+      image: us-central1-docker.pkg.dev/k8s-skaffold/testing/skaffold-example
+`,
+        },
+    }
+
+    for _, test := range tests {
+        t.Run(test.description, func(t *testing.T) {
+            ns, _ := SetupNamespace(t)
+            expectedOutput := fmt.Sprintf("Starting post-deploy hooks...\ndefault,%v", ns.Name)
+            resourceWithNs := fmt.Sprintf(test.resourceFile, ns.Name)
+
+            dir := testutil.NewTempDir(t)
+            dir.Write("skaffold.yaml", test.configFile)
+            dir.Write("pod.yaml", resourceWithNs)
+            out, err := skaffold.Run().InDir(dir.Root()).RunWithCombinedOutput(t)
+            testutil.CheckError(t, false, err)
+            testutil.CheckContains(t, expectedOutput, string(out))
+        })
+    }
+}
10 changes: 8 additions & 2 deletions pkg/skaffold/deploy/helm/helm.go
@@ -97,7 +97,8 @@ type Deployer struct {
     namespace  string
     configFile string
 
-    namespaces *[]string
+    namespaces          *[]string
+    manifestsNamespaces *[]string
 
     // packaging temporary directory, used for predictable test output
     pkgTmpDir string
@@ -166,6 +167,9 @@ func NewDeployer(ctx context.Context, cfg Config, labeller *label.DefaultLabelle
             RuntimeType: artifact.RuntimeType,
         })
     }
+
+    manifestsNamespaces := []string{}
+
     return &Deployer{
         configName:       configName,
         LegacyHelmDeploy: h,
@@ -177,7 +181,8 @@
         logger:        logger,
         statusMonitor: component.NewMonitor(cfg, cfg.GetKubeContext(), labeller, &namespaces, customResourceSelectors),
         syncer:        component.NewSyncer(kubectl, &namespaces, logger.GetFormatter()),
-        hookRunner:    hooks.NewDeployRunner(kubectl, h.LifecycleHooks, &namespaces, logger.GetFormatter(), hooks.NewDeployEnvOpts(labeller.GetRunID(), kubectl.KubeContext, namespaces)),
+        manifestsNamespaces: &manifestsNamespaces,
+        hookRunner:          hooks.NewDeployRunner(kubectl, h.LifecycleHooks, &namespaces, logger.GetFormatter(), hooks.NewDeployEnvOpts(labeller.GetRunID(), kubectl.KubeContext, namespaces), &manifestsNamespaces),
         originalImages: ogImages,
         kubeContext:    cfg.GetKubeContext(),
         kubeConfig:     cfg.GetKubeConfig(),
@@ -302,6 +307,7 @@ func (h *Deployer) Deploy(ctx context.Context, out io.Writer, builds []graph.Art

     h.TrackBuildArtifacts(builds, deployedImages)
     h.trackNamespaces(namespaces)
+    *h.manifestsNamespaces = namespaces
     return nil
 }
 
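The helm.go changes hinge on a pointer-sharing pattern: the hook runner is constructed before anything has been rendered, so NewDeployer hands it a pointer to an initially empty slice, and Deploy later assigns the discovered namespaces through that pointer. A minimal standalone sketch of the pattern, with illustrative names rather than Skaffold's:

package main

import "fmt"

// runner stands in for deployRunner: it keeps a pointer to the deployer's
// manifest-namespace slice rather than a copy of it.
type runner struct {
    manifestsNamespaces *[]string
}

func (r runner) postHookEnv() string {
    return fmt.Sprintf("SKAFFOLD_NAMESPACES=%v", *r.manifestsNamespaces)
}

func main() {
    // At construction time the slice is empty, as in NewDeployer.
    manifestsNamespaces := []string{}
    r := runner{manifestsNamespaces: &manifestsNamespaces}

    // Deploy later replaces the slice through the shared variable (the
    // `*h.manifestsNamespaces = namespaces` line in the diff), so the
    // runner observes namespaces that only exist after rendering.
    manifestsNamespaces = []string{"default", "test-ns"}

    fmt.Println(r.postHookEnv()) // SKAFFOLD_NAMESPACES=[default test-ns]
}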
2 changes: 1 addition & 1 deletion pkg/skaffold/deploy/helm/helm_test.go
@@ -1393,7 +1393,7 @@ func TestHelmHooks(t *testing.T) {
     for _, test := range tests {
         testutil.Run(t, test.description, func(t *testutil.T) {
             t.Override(&util.DefaultExecCommand, testutil.CmdRunWithOutput("helm version --client", version31))
-            t.Override(&hooks.NewDeployRunner, func(*ctl.CLI, latest.DeployHooks, *[]string, logger.Formatter, hooks.DeployEnvOpts) hooks.Runner {
+            t.Override(&hooks.NewDeployRunner, func(*ctl.CLI, latest.DeployHooks, *[]string, logger.Formatter, hooks.DeployEnvOpts, *[]string) hooks.Runner {
                 return test.runner
             })
 
81 changes: 43 additions & 38 deletions pkg/skaffold/deploy/kubectl/kubectl.go
@@ -58,25 +58,26 @@ type Deployer struct {

     *latest.KubectlDeploy
 
-    accessor           access.Accessor
-    imageLoader        loader.ImageLoader
-    logger             k8slogger.Logger
-    debugger           debug.Debugger
-    statusMonitor      kstatus.Monitor
-    syncer             sync.Syncer
-    hookRunner         hooks.Runner
-    originalImages     []graph.Artifact // the set of images parsed from the Deployer's manifest set
-    localImages        []graph.Artifact // the set of images marked as "local" by the Runner
-    podSelector        *kubernetes.ImageList
-    hydratedManifests  []string
-    workingDir         string
-    globalConfig       string
-    defaultRepo        *string
-    multiLevelRepo     *bool
-    kubectl            CLI
-    insecureRegistries map[string]bool
-    labeller           *label.DefaultLabeller
-    namespaces         *[]string
+    accessor            access.Accessor
+    imageLoader         loader.ImageLoader
+    logger              k8slogger.Logger
+    debugger            debug.Debugger
+    statusMonitor       kstatus.Monitor
+    syncer              sync.Syncer
+    hookRunner          hooks.Runner
+    originalImages      []graph.Artifact // the set of images parsed from the Deployer's manifest set
+    localImages         []graph.Artifact // the set of images marked as "local" by the Runner
+    podSelector         *kubernetes.ImageList
+    hydratedManifests   []string
+    workingDir          string
+    globalConfig        string
+    defaultRepo         *string
+    multiLevelRepo      *bool
+    kubectl             CLI
+    insecureRegistries  map[string]bool
+    labeller            *label.DefaultLabeller
+    namespaces          *[]string
+    manifestsNamespaces *[]string
 
     transformableAllowlist map[apimachinery.GroupKind]latest.ResourceFilter
     transformableDenylist  map[apimachinery.GroupKind]latest.ResourceFilter
@@ -121,26 +122,29 @@ func NewDeployer(cfg Config, labeller *label.DefaultLabeller, d *latest.KubectlD
         })
     }
 
+    manifestsNamespaces := []string{}
+
     return &Deployer{
-        originalImages:     ogImages,
-        configName:         configName,
-        KubectlDeploy:      d,
-        podSelector:        podSelector,
-        namespaces:         &namespaces,
-        accessor:           component.NewAccessor(cfg, cfg.GetKubeContext(), kubectl.CLI, podSelector, labeller, &namespaces),
-        debugger:           component.NewDebugger(cfg.Mode(), podSelector, &namespaces, cfg.GetKubeContext()),
-        imageLoader:        component.NewImageLoader(cfg, kubectl.CLI),
-        logger:             logger,
-        statusMonitor:      component.NewMonitor(cfg, cfg.GetKubeContext(), labeller, &namespaces, customResourceSelectors),
-        syncer:             component.NewSyncer(kubectl.CLI, &namespaces, logger.GetFormatter()),
-        hookRunner:         hooks.NewDeployRunner(kubectl.CLI, d.LifecycleHooks, &namespaces, logger.GetFormatter(), hooks.NewDeployEnvOpts(labeller.GetRunID(), kubectl.KubeContext, namespaces)),
-        workingDir:         cfg.GetWorkingDir(),
-        globalConfig:       cfg.GlobalConfig(),
-        defaultRepo:        cfg.DefaultRepo(),
-        multiLevelRepo:     cfg.MultiLevelRepo(),
-        kubectl:            kubectl,
-        insecureRegistries: cfg.GetInsecureRegistries(),
-        labeller:           labeller,
+        originalImages:      ogImages,
+        configName:          configName,
+        KubectlDeploy:       d,
+        podSelector:         podSelector,
+        namespaces:          &namespaces,
+        accessor:            component.NewAccessor(cfg, cfg.GetKubeContext(), kubectl.CLI, podSelector, labeller, &namespaces),
+        debugger:            component.NewDebugger(cfg.Mode(), podSelector, &namespaces, cfg.GetKubeContext()),
+        imageLoader:         component.NewImageLoader(cfg, kubectl.CLI),
+        logger:              logger,
+        statusMonitor:       component.NewMonitor(cfg, cfg.GetKubeContext(), labeller, &namespaces, customResourceSelectors),
+        syncer:              component.NewSyncer(kubectl.CLI, &namespaces, logger.GetFormatter()),
+        manifestsNamespaces: &manifestsNamespaces,
+        hookRunner:          hooks.NewDeployRunner(kubectl.CLI, d.LifecycleHooks, &namespaces, logger.GetFormatter(), hooks.NewDeployEnvOpts(labeller.GetRunID(), kubectl.KubeContext, namespaces), &manifestsNamespaces),
+        workingDir:          cfg.GetWorkingDir(),
+        globalConfig:        cfg.GlobalConfig(),
+        defaultRepo:         cfg.DefaultRepo(),
+        multiLevelRepo:      cfg.MultiLevelRepo(),
+        kubectl:             kubectl,
+        insecureRegistries:  cfg.GetInsecureRegistries(),
+        labeller:            labeller,
         // hydratedManifests refers to the DIR in the `skaffold apply DIR`. Used in both v1 and v2.
         hydratedManifests:      cfg.HydratedManifests(),
         transformableAllowlist: transformableAllowlist,
@@ -270,6 +274,7 @@ func (k *Deployer) Deploy(ctx context.Context, out io.Writer, builds []graph.Art
     k.statusMonitor.RegisterDeployManifests(manifests)
     endTrace()
     k.trackNamespaces(namespaces)
+    *k.manifestsNamespaces = namespaces
     return nil
 }
 
37 changes: 22 additions & 15 deletions pkg/skaffold/hooks/deploy.go
@@ -20,11 +20,11 @@ import (
"context"
"fmt"
"io"
"strings"
"sync"

corev1 "k8s.io/api/core/v1"

deployutil "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/deploy/util"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/kubectl"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/kubernetes/logger"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output"
@@ -37,8 +37,8 @@ var (
     NewCloudRunDeployRunner = newCloudRunDeployRunner
 )
 
-func newDeployRunner(cli *kubectl.CLI, d latest.DeployHooks, namespaces *[]string, formatter logger.Formatter, opts DeployEnvOpts) Runner {
-    return deployRunner{d, cli, namespaces, formatter, opts, new(sync.Map)}
+func newDeployRunner(cli *kubectl.CLI, d latest.DeployHooks, namespaces *[]string, formatter logger.Formatter, opts DeployEnvOpts, manifestsNamespaces *[]string) Runner {
+    return deployRunner{d, cli, namespaces, manifestsNamespaces, formatter, opts, new(sync.Map)}
 }
 
 func newCloudRunDeployRunner(d latest.CloudRunDeployHooks, opts DeployEnvOpts) Runner {
@@ -69,38 +69,45 @@ func NewDeployEnvOpts(runID string, kubeContext string, namespaces []string) Dep
     return DeployEnvOpts{
         RunID:       runID,
         KubeContext: kubeContext,
-        Namespaces:  strings.Join(namespaces, ","),
+        Namespaces:  namespaces,
     }
 }
 
 type deployRunner struct {
     latest.DeployHooks
-    cli               *kubectl.CLI
-    namespaces        *[]string
-    formatter         logger.Formatter
-    opts              DeployEnvOpts
-    visitedContainers *sync.Map // maintain a list of previous iteration containers, so that they can be skipped
+    cli                 *kubectl.CLI
+    namespaces          *[]string
+    manifestsNamespaces *[]string
+    formatter           logger.Formatter
+    opts                DeployEnvOpts
+    visitedContainers   *sync.Map // maintain a list of previous iteration containers, so that they can be skipped
 }
 
 func (r deployRunner) RunPreHooks(ctx context.Context, out io.Writer) error {
-    return r.run(ctx, out, r.PreHooks, phases.PreDeploy)
+    return r.run(ctx, out, r.PreHooks, phases.PreDeploy, nil)
 }
 
 func (r deployRunner) RunPostHooks(ctx context.Context, out io.Writer) error {
-    return r.run(ctx, out, r.PostHooks, phases.PostDeploy)
+    return r.run(ctx, out, r.PostHooks, phases.PostDeploy, r.manifestsNamespaces)
 }
 
-func (r deployRunner) getEnv() []string {
+func (r deployRunner) getEnv(manifestsNs *[]string) []string {
+    mergedOpts := r.opts
+
+    if manifestsNs != nil {
+        mergedOpts.Namespaces = deployutil.ConsolidateNamespaces(r.opts.Namespaces, *manifestsNs)
+    }
+
     common := getEnv(staticEnvOpts)
-    deploy := getEnv(r.opts)
+    deploy := getEnv(mergedOpts)
     return append(common, deploy...)
 }
 
-func (r deployRunner) run(ctx context.Context, out io.Writer, hooks []latest.DeployHookItem, phase phase) error {
+func (r deployRunner) run(ctx context.Context, out io.Writer, hooks []latest.DeployHookItem, phase phase, manifestsNs *[]string) error {
     if len(hooks) > 0 {
         output.Default.Fprintln(out, fmt.Sprintf("Starting %s hooks...", phase))
     }
-    env := r.getEnv()
+    env := r.getEnv(manifestsNs)
     for _, h := range hooks {
         if h.HostHook != nil {
             hook := hostHook{*h.HostHook, env}
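deployutil.ConsolidateNamespaces is called here but its body is not part of this diff. A plausible reading, consistent with the integration test expecting "default,<ns>", is a merge that drops duplicates and empties and returns a stable order; the helper below is a hypothetical stand-in, not Skaffold's implementation:

package main

import (
    "fmt"
    "sort"
)

// consolidateNamespaces is a hypothetical stand-in for
// deployutil.ConsolidateNamespaces: merge both namespace lists, drop
// empty entries and duplicates, and return a sorted result.
func consolidateNamespaces(original, updated []string) []string {
    seen := map[string]bool{}
    var merged []string
    for _, list := range [][]string{original, updated} {
        for _, ns := range list {
            if ns == "" || seen[ns] {
                continue
            }
            seen[ns] = true
            merged = append(merged, ns)
        }
    }
    sort.Strings(merged)
    return merged
}

func main() {
    // Mirrors the integration test: the deployer's "default" namespace is
    // merged with the namespace parsed from the rendered pod.yaml.
    fmt.Println(consolidateNamespaces([]string{"default"}, []string{"test-ns", "default"}))
    // prints [default test-ns]
}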
2 changes: 1 addition & 1 deletion pkg/skaffold/hooks/deploy_test.go
@@ -95,7 +95,7 @@ func TestDeployHooks(t *testing.T) {
namespaces := []string{"np1", "np2"}
opts := NewDeployEnvOpts("run_id", testKubeContext, namespaces)
formatter := func(corev1.Pod, corev1.ContainerStatus, func() bool) log.Formatter { return mockLogFormatter{} }
runner := NewDeployRunner(&kubectl.CLI{KubeContext: testKubeContext}, deployer.LifecycleHooks, &namespaces, formatter, opts)
runner := NewDeployRunner(&kubectl.CLI{KubeContext: testKubeContext}, deployer.LifecycleHooks, &namespaces, formatter, opts, nil)

t.Override(&util.DefaultExecCommand,
testutil.CmdRunWithOutput("kubectl --context context1 exec pod1 --namespace np1 -c container1 -- foo pre-hook", preContainerHookOut).
8 changes: 7 additions & 1 deletion pkg/skaffold/hooks/env.go
@@ -59,11 +59,17 @@ type RenderEnvOpts struct {
     Namespaces string
 }
 
+type Namespaces []string
+
+func (ns Namespaces) String() string {
+    return strings.Join(ns, ",")
+}
+
 // DeployEnvOpts contains the environment variables to be set in a deploy type lifecycle hook executor.
 type DeployEnvOpts struct {
     RunID       string
     KubeContext string
-    Namespaces  string
+    Namespaces  Namespaces
 }
 
 type Config interface {
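The new Namespaces type is what lets DeployEnvOpts carry the raw slice (so post-deploy hooks can consolidate it with the manifest namespaces) while hook scripts still receive the comma-joined SKAFFOLD_NAMESPACES value they always have, assuming the env serializer formats fields through fmt's Stringer interface. A self-contained reproduction:

package main

import (
    "fmt"
    "strings"
)

// Namespaces reproduces the type added in env.go: a []string that still
// renders as the comma-joined form hook scripts already expect.
type Namespaces []string

func (ns Namespaces) String() string {
    return strings.Join(ns, ",")
}

func main() {
    ns := Namespaces{"np1", "np2", "np3"}
    // %v invokes String(), so the env value keeps its old shape.
    fmt.Printf("SKAFFOLD_NAMESPACES=%v\n", ns) // SKAFFOLD_NAMESPACES=np1,np2,np3
}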
2 changes: 1 addition & 1 deletion pkg/skaffold/hooks/env_test.go
@@ -135,7 +135,7 @@ func TestGetEnv(t *testing.T) {
             input: DeployEnvOpts{
                 RunID:       "1234",
                 KubeContext: "minikube",
-                Namespaces:  "np1,np2,np3",
+                Namespaces:  Namespaces{"np1", "np2", "np3"},
             },
             expected: []string{
                 "SKAFFOLD_RUN_ID=1234",