
Merge branch 'main' into NET-6466-remove-access-to-secrets-for-termgw
jm96441n authored Apr 19, 2024
2 parents 4e61f68 + 6dfbadf commit d5c39d9
Showing 28 changed files with 999 additions and 19 deletions.
6 changes: 6 additions & 0 deletions .changelog/3685.txt
@@ -0,0 +1,6 @@
```release-note:bug
helm: corrected the Consul server URLs set during automation for the Datadog OpenMetrics and Consul checks integrations to use the full Consul deployment release name
```
```release-note:bug
helm: fixed omission of `prometheus.io` annotations when using the Datadog integration with OpenMetrics/Prometheus and Consul integration checks
```
3 changes: 3 additions & 0 deletions .changelog/3918.txt
@@ -0,0 +1,3 @@
```release-note:bug
api-gateway: Fix the order of initialization for creating the ACL role/policy to avoid error logs in Consul when upgrading between versions.
```
3 changes: 3 additions & 0 deletions acceptance/framework/config/config.go
@@ -70,6 +70,9 @@ type TestConfig struct {
    EnableEnterprise  bool
    EnterpriseLicense string

    SkipDataDogTests        bool
    DatadogHelmChartVersion string

    EnableOpenshift bool

    EnablePodSecurityPolicies bool
1 change: 0 additions & 1 deletion acceptance/framework/consul/helm_cluster.go
@@ -158,7 +158,6 @@ func (h *HelmCluster) Create(t *testing.T) {
    if h.ChartPath != "" {
        chartName = h.ChartPath
    }

    // Retry the install in case previous tests have not finished cleaning up.
    retry.RunWith(&retry.Counter{Wait: 2 * time.Second, Count: 30}, t, func(r *retry.R) {
        err := helm.InstallE(r, h.helmOptions, chartName, h.releaseName)
190 changes: 190 additions & 0 deletions acceptance/framework/datadog/datadog.go
@@ -0,0 +1,190 @@
package datadog

import (
    "context"
    "fmt"
    "testing"
    "time"

    "github.com/gruntwork-io/terratest/modules/helm"
    terratestk8s "github.com/gruntwork-io/terratest/modules/k8s"
    terratestLogger "github.com/gruntwork-io/terratest/modules/logger"
    "github.com/hashicorp/consul-k8s/acceptance/framework/config"
    "github.com/hashicorp/consul-k8s/acceptance/framework/environment"
    "github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
    "github.com/hashicorp/consul-k8s/acceptance/framework/k8s"
    "github.com/hashicorp/consul-k8s/acceptance/framework/logger"
    "github.com/hashicorp/consul/sdk/testutil/retry"
    "github.com/stretchr/testify/require"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

const (
    releaseLabel            = "app.kubernetes.io/name"
    OperatorReleaseName     = "datadog-operator"
    DefaultHelmChartVersion = "1.4.0"
    datadogSecretName       = "datadog-secret"
    datadogAPIKey           = "api-key"
    datadogAppKey           = "app-key"
    datadogFakeAPIKey       = "DD_FAKEAPIKEY"
    datadogFakeAPPKey       = "DD_FAKEAPPKEY"
)

type DatadogCluster struct {
    ctx environment.TestContext

    helmOptions *helm.Options
    releaseName string

    kubectlOptions *terratestk8s.KubectlOptions

    kubernetesClient kubernetes.Interface

    noCleanupOnFailure bool
    noCleanup          bool
    debugDirectory     string
    logger             terratestLogger.TestLogger
}

// releaseLabelSelector returns the label selector that selects all pods
// from a Datadog installation.
func (d *DatadogCluster) releaseLabelSelector() string {
    return fmt.Sprintf("%s=%s", releaseLabel, d.releaseName)
}

func NewDatadogCluster(t *testing.T, ctx environment.TestContext, cfg *config.TestConfig, releaseName string, releaseNamespace string, helmValues map[string]string) *DatadogCluster {
    logger := terratestLogger.New(logger.TestLogger{})

    configureNamespace(t, ctx.KubernetesClient(t), cfg, releaseNamespace)

    createOrUpdateDatadogSecret(t, ctx.KubernetesClient(t), cfg, releaseNamespace)

    kopts := ctx.KubectlOptionsForNamespace(releaseNamespace)

    values := defaultHelmValues()

    ddHelmChartVersion := DefaultHelmChartVersion
    if cfg.DatadogHelmChartVersion != "" {
        ddHelmChartVersion = cfg.DatadogHelmChartVersion
    }

    helpers.MergeMaps(values, helmValues)
    datadogHelmOpts := &helm.Options{
        SetValues:      values,
        KubectlOptions: kopts,
        Logger:         logger,
        Version:        ddHelmChartVersion,
    }

    helm.AddRepo(t, datadogHelmOpts, "datadog", "https://helm.datadoghq.com")
    // Ignore the error from `helm repo update`: it can fail due to a stale cache or
    // unreachable servers, and we assert a chart version on install, which would
    // fail in an obvious way if the update did not succeed.
    _, err := helm.RunHelmCommandAndGetOutputE(t, &helm.Options{}, "repo", "update")
    if err != nil {
        logger.Logf(t, "Unable to update helm repository, proceeding anyway: %s.", err)
    }

    return &DatadogCluster{
        ctx:                ctx,
        helmOptions:        datadogHelmOpts,
        kubectlOptions:     kopts,
        kubernetesClient:   ctx.KubernetesClient(t),
        noCleanupOnFailure: cfg.NoCleanupOnFailure,
        noCleanup:          cfg.NoCleanup,
        debugDirectory:     cfg.DebugDirectory,
        logger:             logger,
        releaseName:        releaseName,
    }
}

func (d *DatadogCluster) Create(t *testing.T) {
    t.Helper()

    helpers.Cleanup(t, d.noCleanupOnFailure, d.noCleanup, func() {
        d.Destroy(t)
    })

    helm.Install(t, d.helmOptions, "datadog/datadog-operator", d.releaseName)
    // Wait for the datadog-operator pods to become ready.
    k8s.WaitForAllPodsToBeReady(t, d.kubernetesClient, d.helmOptions.KubectlOptions.Namespace, d.releaseLabelSelector())
}

func (d *DatadogCluster) Destroy(t *testing.T) {
    t.Helper()

    k8s.WritePodsDebugInfoIfFailed(t, d.kubectlOptions, d.debugDirectory, d.releaseLabelSelector())
    // Ignore the error returned by the helm delete so that cleanup of cluster
    // resources stays idempotent.
    _ = helm.DeleteE(t, d.helmOptions, d.releaseName, true)
}

func defaultHelmValues() map[string]string {
    return map[string]string{
        "replicaCount":     "1",
        "image.tag":        DefaultHelmChartVersion,
        "image.repository": "gcr.io/datadoghq/operator",
    }
}

func configureNamespace(t *testing.T, client kubernetes.Interface, cfg *config.TestConfig, namespace string) {
    ctx := context.Background()

    ns := &corev1.Namespace{
        ObjectMeta: metav1.ObjectMeta{
            Name:   namespace,
            Labels: map[string]string{},
        },
    }
    if cfg.EnableRestrictedPSAEnforcement {
        ns.ObjectMeta.Labels["pod-security.kubernetes.io/enforce"] = "restricted"
        ns.ObjectMeta.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"
    }

    _, createErr := client.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
    if createErr == nil {
        logger.Logf(t, "Created namespace %s", namespace)
        return
    }

    _, updateErr := client.CoreV1().Namespaces().Update(ctx, ns, metav1.UpdateOptions{})
    if updateErr == nil {
        logger.Logf(t, "Updated namespace %s", namespace)
        return
    }

    require.Failf(t, "Failed to create or update namespace", "Namespace=%s, CreateError=%s, UpdateError=%s", namespace, createErr, updateErr)
}

func createOrUpdateDatadogSecret(t *testing.T, client kubernetes.Interface, cfg *config.TestConfig, namespace string) {
    secretMap := map[string]string{
        datadogAPIKey: datadogFakeAPIKey,
        datadogAppKey: datadogFakeAPPKey,
    }
    createMultiKeyK8sSecret(t, client, cfg, namespace, datadogSecretName, secretMap)
}

func createMultiKeyK8sSecret(t *testing.T, client kubernetes.Interface, cfg *config.TestConfig, namespace, secretName string, secretMap map[string]string) {
    retry.RunWith(&retry.Counter{Wait: 2 * time.Second, Count: 15}, t, func(r *retry.R) {
        _, err := client.CoreV1().Secrets(namespace).Get(context.Background(), secretName, metav1.GetOptions{})
        if errors.IsNotFound(err) {
            _, err := client.CoreV1().Secrets(namespace).Create(context.Background(), &corev1.Secret{
                ObjectMeta: metav1.ObjectMeta{
                    Name: secretName,
                },
                StringData: secretMap,
                Type:       corev1.SecretTypeOpaque,
            }, metav1.CreateOptions{})
            require.NoError(r, err)
        } else {
            require.NoError(r, err)
        }
    })

    helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
        _ = client.CoreV1().Secrets(namespace).Delete(context.Background(), secretName, metav1.DeleteOptions{})
    })
}
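
Taken together, datadog.go gives acceptance tests a disposable datadog-operator install: NewDatadogCluster prepares the namespace and the fake-credential secret, and Create installs the chart and blocks until its pods are ready, registering Destroy for cleanup. A minimal sketch of how a test might drive this API; the test name, the "datadog" namespace, and the `suite` accessors are illustrative assumptions, not part of this commit:

```go
func TestDatadogOperatorInstalls(t *testing.T) {
    cfg := suite.Config()                        // hypothetical suite accessor
    ctx := suite.Environment().DefaultContext(t) // hypothetical as well

    // Stand up the operator in its own namespace; Create registers Destroy
    // via helpers.Cleanup, so no explicit teardown is needed here.
    dd := datadog.NewDatadogCluster(t, ctx, cfg, datadog.OperatorReleaseName, "datadog", nil)
    dd.Create(t)
}
```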
9 changes: 7 additions & 2 deletions acceptance/framework/flags/flags.go
@@ -25,6 +25,8 @@ type TestFlags struct {

    flagEnableOpenshift bool

    flagSkipDatadogTests bool

    flagEnablePodSecurityPolicies bool

    flagEnableCNI bool
@@ -155,6 +157,9 @@ func (t *TestFlags) init() {
    flag.BoolVar(&t.flagDisablePeering, "disable-peering", false,
        "If true, the peering tests will not run.")

    flag.BoolVar(&t.flagSkipDatadogTests, "skip-datadog", false,
        "If true, datadog acceptance tests will not run.")

    if t.flagEnterpriseLicense == "" {
        t.flagEnterpriseLicense = os.Getenv("CONSUL_ENT_LICENSE")
    }
@@ -198,11 +203,9 @@ func (t *TestFlags) TestConfigFromFlags() *config.TestConfig {
    // if the Version is empty consulVersion will be nil
    consulVersion, _ := version.NewVersion(t.flagConsulVersion)
    consulDataplaneVersion, _ := version.NewVersion(t.flagConsulDataplaneVersion)
    //vaultserverVersion, _ := version.NewVersion(t.flagVaultServerVersion)
    kubeEnvs := config.NewKubeTestConfigList(t.flagKubeconfigs, t.flagKubecontexts, t.flagKubeNamespaces)

    c := &config.TestConfig{

        EnableEnterprise:  t.flagEnableEnterprise,
        EnterpriseLicense: t.flagEnterpriseLicense,

@@ -211,6 +214,8 @@ func (t *TestFlags) TestConfigFromFlags() *config.TestConfig {

        EnableOpenshift: t.flagEnableOpenshift,

        SkipDataDogTests: t.flagSkipDatadogTests,

        EnablePodSecurityPolicies: t.flagEnablePodSecurityPolicies,

        EnableCNI: t.flagEnableCNI,
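
The new flag threads through end to end: `-skip-datadog` sets `flagSkipDatadogTests`, which lands in `TestConfig.SkipDataDogTests`. A short sketch of how a datadog acceptance test might gate on it; the test name, body, and the `suite` variable are assumptions, not part of this commit:

```go
func TestDatadogDogstatsD(t *testing.T) {
    cfg := suite.Config() // hypothetical suite accessor
    if cfg.SkipDataDogTests {
        t.Skipf("skipping datadog tests because -skip-datadog is set")
    }
    // ... exercise the Datadog integration ...
}
```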