From bf7e8be995b1f316f6f660842cda57abce8ceddd Mon Sep 17 00:00:00 2001
From: Travis Raines <571832+rainest@users.noreply.github.com>
Date: Thu, 10 Feb 2022 09:41:55 -0800
Subject: [PATCH 1/9] fix(admission) honor updated certificate files
If certificate files change after start, use the latest keypair, not the
keypair originally present at startup.
---
CHANGELOG.md | 3 +
go.mod | 2 +-
go.sum | 11 +-
internal/admission/server.go | 52 ++++----
internal/manager/setup.go | 4 +-
test/e2e/all_in_one_test.go | 9 +-
test/e2e/features_test.go | 238 +++++++++++++++++++++++++++++++++++
7 files changed, 278 insertions(+), 41 deletions(-)
create mode 100644 test/e2e/features_test.go
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d40ef9461c..39f524dcb3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -53,6 +53,9 @@
recreate configuration upon gaining leadership while populating their
Kubernetes object cache.
[#2255](https://github.com/Kong/kubernetes-ingress-controller/pull/2255)
+- The admission webhook now watches its certificate files for changes and
+  reloads them, so it serves the updated certificate when the Secret changes.
+ [#2258](https://github.com/Kong/kubernetes-ingress-controller/pull/2258)
## [2.2.0]
diff --git a/go.mod b/go.mod
index b7e8b5c111..fdfa85d192 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
github.com/google/uuid v1.3.0
github.com/kong/deck v1.10.0
github.com/kong/go-kong v0.28.0
- github.com/kong/kubernetes-testing-framework v0.12.1
+ github.com/kong/kubernetes-testing-framework v0.13.0
github.com/lithammer/dedent v1.1.0
github.com/miekg/dns v1.1.46
github.com/mitchellh/mapstructure v1.4.3
diff --git a/go.sum b/go.sum
index 791513b88b..eb57d79cc7 100644
--- a/go.sum
+++ b/go.sum
@@ -42,7 +42,6 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.2.0 h1:EKki8sSdvDU0OO9mAXGwPXOTOgPz2l08R0/IutDH11I=
cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw=
-cloud.google.com/go/container v1.0.0/go.mod h1:EQLhTDFhzVXTX6TmjfNqa3/ikbCzjGlZcGY67exUnlY=
cloud.google.com/go/container v1.1.0 h1:iSWANLBNSwLjJum69+FlA96vFBTSASCiS7+ZGeJu8K0=
cloud.google.com/go/container v1.1.0/go.mod h1:+BsZKG08AmEAL+cJhSCZM5l3v6/HfktjrN2CXrxz6po=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
@@ -898,7 +897,7 @@ github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxy
github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jetstack/cert-manager v1.7.0/go.mod h1:xj0TPp31HE0Jub5mNOnF3Fp3XvhIsiP+tsPZVOmU/Qs=
+github.com/jetstack/cert-manager v1.7.1/go.mod h1:xj0TPp31HE0Jub5mNOnF3Fp3XvhIsiP+tsPZVOmU/Qs=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -939,11 +938,10 @@ github.com/kong/deck v1.10.0 h1:lSGbl+B/mE75TYtI92qIsg0R9fKFC1moXL52bKmocjk=
github.com/kong/deck v1.10.0/go.mod h1:bk8MZu2kQvNzYJeQ6uF+BaUXO5ECDP1S4nFE7pP5yt0=
github.com/kong/go-kong v0.25.1/go.mod h1:8Dl/eA8SVH3aJNOkS91J8CPf5oUHZ2+XSYvacrF5PBc=
github.com/kong/go-kong v0.25.1/go.mod h1:8Dl/eA8SVH3aJNOkS91J8CPf5oUHZ2+XSYvacrF5PBc=
-github.com/kong/go-kong v0.26.0/go.mod h1:W1rTawoOanrC1j4KO3p1X1Xuwm3mQXrKby5boBUQTso=
github.com/kong/go-kong v0.28.0 h1:xmcuzi2N16VX1tAYP1rn4zQp4AQUmVPZvYP663xbAjc=
github.com/kong/go-kong v0.28.0/go.mod h1:bbC56mBqvnYZmzAWujmUqONXhloPWLs8r4A07uT8EzE=
-github.com/kong/kubernetes-testing-framework v0.12.1 h1:j8HTQTP4nkKGkt2+YtpsAPinrOmT0uiNP8mASSzhSl0=
-github.com/kong/kubernetes-testing-framework v0.12.1/go.mod h1:fupa1x3EVLT+CwQNmbBaMPaEPrG7N3JlM1/4kgcxt+s=
+github.com/kong/kubernetes-testing-framework v0.13.0 h1:sJgE1u6Ixdz1RI8pkt+D7oHepH5OqSWQzVTglJxYT2I=
+github.com/kong/kubernetes-testing-framework v0.13.0/go.mod h1:72ywlPzfpsfIYOGFIzKIjFl55mB8VYwa7lgFHmesEPM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -1337,7 +1335,6 @@ github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/tidwall/gjson v1.13.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.14.0 h1:6aeJ0bzojgWLa82gDQHcx3S0Lr/O51I9bJ5nv6JFx5w=
github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
@@ -1632,7 +1629,6 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -2037,7 +2033,6 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
diff --git a/internal/admission/server.go b/internal/admission/server.go
index 38b3671242..afd8134e6d 100644
--- a/internal/admission/server.go
+++ b/internal/admission/server.go
@@ -7,7 +7,6 @@ import (
"fmt"
"io"
"net/http"
- "os"
"github.com/sirupsen/logrus"
admission "k8s.io/api/admission/v1"
@@ -15,6 +14,7 @@ import (
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
+ "sigs.k8s.io/controller-runtime/pkg/certwatcher"
gatewayv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2"
configuration "github.com/kong/kubernetes-ingress-controller/v2/pkg/apis/configuration/v1"
@@ -40,20 +40,8 @@ type ServerConfig struct {
Key string
}
-func readKeyPairFiles(certPath, keyPath string) ([]byte, []byte, error) {
- cert, err := os.ReadFile(certPath)
- if err != nil {
- return nil, nil, fmt.Errorf("read cert from file %q: %w", certPath, err)
- }
- key, err := os.ReadFile(keyPath)
- if err != nil {
- return nil, nil, fmt.Errorf("read key from file %q: %w", keyPath, err)
- }
-
- return cert, key, nil
-}
-
-func (sc *ServerConfig) toTLSConfig() (*tls.Config, error) {
+func (sc *ServerConfig) toTLSConfig(ctx context.Context, log logrus.FieldLogger) (*tls.Config, error) {
+ var watcher *certwatcher.CertWatcher
var cert, key []byte
switch {
case sc.CertPath == "" && sc.KeyPath == "" && sc.Cert != "" && sc.Key != "":
@@ -61,34 +49,46 @@ func (sc *ServerConfig) toTLSConfig() (*tls.Config, error) {
case sc.CertPath != "" && sc.KeyPath != "" && sc.Cert == "" && sc.Key == "":
var err error
- cert, key, err = readKeyPairFiles(sc.CertPath, sc.KeyPath)
+ watcher, err = certwatcher.New(sc.CertPath, sc.KeyPath)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to create CertWatcher: %w", err)
}
case sc.CertPath == "" && sc.KeyPath == "" && sc.Cert == "" && sc.Key == "":
var err error
- cert, key, err = readKeyPairFiles(DefaultAdmissionWebhookCertPath, DefaultAdmissionWebhookKeyPath)
+ watcher, err = certwatcher.New(DefaultAdmissionWebhookCertPath, DefaultAdmissionWebhookKeyPath)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to create CertWatcher: %w", err)
}
default:
return nil, fmt.Errorf("either cert/key files OR cert/key values must be provided, or none")
}
- keyPair, err := tls.X509KeyPair(cert, key)
- if err != nil {
- return nil, fmt.Errorf("X509KeyPair error: %w", err)
+ if watcher == nil {
+ keyPair, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ return nil, fmt.Errorf("X509KeyPair error: %w", err)
+ }
+ return &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ Certificates: []tls.Certificate{keyPair},
+ }, nil
}
+ go func() {
+ if err := watcher.Start(ctx); err != nil {
+ log.WithError(err).Error("certificate watcher error")
+ }
+ }()
return &tls.Config{
- MinVersion: tls.VersionTLS12,
- Certificates: []tls.Certificate{keyPair},
+ MinVersion: tls.VersionTLS12,
+ GetCertificate: watcher.GetCertificate,
}, nil
}
-func MakeTLSServer(config *ServerConfig, handler http.Handler) (*http.Server, error) {
- tlsConfig, err := config.toTLSConfig()
+func MakeTLSServer(ctx context.Context, config *ServerConfig, handler http.Handler,
+ log logrus.FieldLogger) (*http.Server, error) {
+ tlsConfig, err := config.toTLSConfig(ctx, log)
if err != nil {
return nil, err
}
diff --git a/internal/manager/setup.go b/internal/manager/setup.go
index 7f9cf60dc8..d7bfe34230 100644
--- a/internal/manager/setup.go
+++ b/internal/manager/setup.go
@@ -193,7 +193,7 @@ func setupAdmissionServer(ctx context.Context, managerConfig *Config, managerCli
if err != nil {
return err
}
- srv, err := admission.MakeTLSServer(&managerConfig.AdmissionServer, &admission.RequestHandler{
+ srv, err := admission.MakeTLSServer(ctx, &managerConfig.AdmissionServer, &admission.RequestHandler{
Validator: admission.NewKongHTTPValidator(
kongclient.Consumers,
kongclient.Plugins,
@@ -202,7 +202,7 @@ func setupAdmissionServer(ctx context.Context, managerConfig *Config, managerCli
managerConfig.IngressClassName,
),
Logger: logger,
- })
+ }, log)
if err != nil {
return err
}
diff --git a/test/e2e/all_in_one_test.go b/test/e2e/all_in_one_test.go
index 635f585dae..ce53a75a0f 100644
--- a/test/e2e/all_in_one_test.go
+++ b/test/e2e/all_in_one_test.go
@@ -442,7 +442,7 @@ func TestDeployAllInOnePostgresWithMultipleReplicas(t *testing.T) {
t.Log("confirming the second replica is not the leader and is not pushing configuration")
forwardCtx, cancel := context.WithCancel(context.Background())
defer cancel()
- startPortForwarder(forwardCtx, t, env, secondary, "9777", "cmetrics")
+ startPortForwarder(forwardCtx, t, env, secondary.Namespace, secondary.Name, "9777", "cmetrics")
require.Never(t, func() bool {
req, err := http.NewRequest("GET", "http://localhost:9777/metrics", nil)
require.NoError(t, err)
@@ -881,7 +881,7 @@ func verifyPostgres(ctx context.Context, t *testing.T, env environments.Environm
// startPortForwarder runs "kubectl port-forward" in the background. It stops the forward when the provided context
// ends
-func startPortForwarder(ctx context.Context, t *testing.T, env environments.Environment, pod corev1.Pod, localPort,
+func startPortForwarder(ctx context.Context, t *testing.T, env environments.Environment, namespace, name, localPort,
targetPort string) {
kubeconfig, err := generators.NewKubeConfigForRestConfig(env.Name(), env.Cluster().Config())
require.NoError(t, err)
@@ -892,8 +892,9 @@ func startPortForwarder(ctx context.Context, t *testing.T, env environments.Envi
written, err := kubeconfigFile.Write(kubeconfig)
require.NoError(t, err)
require.Equal(t, len(kubeconfig), written)
- cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfigFile.Name(), "port-forward", "-n", pod.Namespace, pod.Name, "9777:cmetrics") //nolint:gosec
- t.Logf("forwarding port %s to %s/%s:%s", localPort, pod.Namespace, pod.Name, targetPort)
+ cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfigFile.Name(), "port-forward", "-n", namespace,
+ name, fmt.Sprintf("%s:%s", localPort, targetPort)) //nolint:gosec
+ t.Logf("forwarding port %s to %s/%s:%s", localPort, namespace, name, targetPort)
if startErr := cmd.Start(); startErr != nil {
startOutput, outputErr := cmd.Output()
assert.NoError(t, outputErr)
diff --git a/test/e2e/features_test.go b/test/e2e/features_test.go
new file mode 100644
index 0000000000..ba1f619033
--- /dev/null
+++ b/test/e2e/features_test.go
@@ -0,0 +1,238 @@
+//go:build e2e_tests
+// +build e2e_tests
+
+package e2e
+
+import (
+ "context"
+ "crypto/tls"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/blang/semver/v4"
+ "github.com/kong/kubernetes-testing-framework/pkg/clusters"
+ "github.com/kong/kubernetes-testing-framework/pkg/clusters/addons/loadimage"
+ "github.com/kong/kubernetes-testing-framework/pkg/clusters/addons/metallb"
+ "github.com/kong/kubernetes-testing-framework/pkg/clusters/types/kind"
+ "github.com/kong/kubernetes-testing-framework/pkg/environments"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// TLSPair is a PEM certificate+key pair
+type TLSPair struct {
+ Key, Cert string
+}
+
+var (
+ tlsPairs = []TLSPair{
+ {
+ Cert: `-----BEGIN CERTIFICATE-----
+MIICTDCCAdKgAwIBAgIUOe9HN8v1eedsZXur5uXAwJkOSG4wCgYIKoZIzj0EAwIw
+XTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGElu
+dGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEWMBQGA1UEAwwNZmlyc3QuZXhhbXBsZTAe
+Fw0yMjAyMTAxOTIzNDhaFw0zMjAyMDgxOTIzNDhaMF0xCzAJBgNVBAYTAkFVMRMw
+EQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0
+eSBMdGQxFjAUBgNVBAMMDWZpcnN0LmV4YW1wbGUwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAR2pbLcSQhX4gD6IyPJiRN7lxZ8aPbi6qyPyjvoTJc6DPjMuJuJgkdSC8wy
+e1XFsI295WGl5gbqJsXQyJOqU6pHg6mjTEeyRxN9HbfEpH+Zp7GZ2KuTTGzi3wnh
+CPqzic6jUzBRMB0GA1UdDgQWBBTPOtLEjQvk5/iy4/dhxIWWEoSJbTAfBgNVHSME
+GDAWgBTPOtLEjQvk5/iy4/dhxIWWEoSJbTAPBgNVHRMBAf8EBTADAQH/MAoGCCqG
+SM49BAMCA2gAMGUCMQC7rKXFcTAfoTSw5m2/ALseXru/xZC5t3Y7yQ+zSaneFMvQ
+KvXcO0/RGYeqLmS58C4CMGoJva3Ad5LaZ7qgMkahhLdopePb0U/GAQqIsWhHfjOT
+Il2dwxMvntBECtd0uXeKHQ==
+-----END CERTIFICATE-----`,
+ Key: `-----BEGIN PRIVATE KEY-----
+MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDAA9OHUgH4O/xF0/qyQ
+t3ZSX0/6IDilnyM1ayoUSUOfNcELUd2UZVAuZgP10f6cMUWhZANiAAR2pbLcSQhX
+4gD6IyPJiRN7lxZ8aPbi6qyPyjvoTJc6DPjMuJuJgkdSC8wye1XFsI295WGl5gbq
+JsXQyJOqU6pHg6mjTEeyRxN9HbfEpH+Zp7GZ2KuTTGzi3wnhCPqzic4=
+-----END PRIVATE KEY-----`,
+ },
+ {
+ Cert: `-----BEGIN CERTIFICATE-----
+MIICTzCCAdSgAwIBAgIUOOTCdVckt76c9OSeGHyf+OrLU+YwCgYIKoZIzj0EAwIw
+XjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGElu
+dGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEXMBUGA1UEAwwOc2Vjb25kLmV4YW1wbGUw
+HhcNMjIwMjEwMTkyNTMwWhcNMzIwMjA4MTkyNTMwWjBeMQswCQYDVQQGEwJBVTET
+MBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ
+dHkgTHRkMRcwFQYDVQQDDA5zZWNvbmQuZXhhbXBsZTB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABHCTYbqp3P2v5aDuhkO+1rVNAidb0UcnCdtyoZx0+Oqz35Auq/GNaLvZ
+RYsyW6SHVGaRWhPh3jQ8zFnc28TCGwmAMnzYPs5RHYbvBm2BSP9YWPXhc6h+lkma
+HNNCu1tu56NTMFEwHQYDVR0OBBYEFEG94gMq4SvGtTs48Nw5BzVnPK69MB8GA1Ud
+IwQYMBaAFEG94gMq4SvGtTs48Nw5BzVnPK69MA8GA1UdEwEB/wQFMAMBAf8wCgYI
+KoZIzj0EAwIDaQAwZgIxAPRJkWfSdIQMr2R77RgCicR+adD/mMxZra2SoL7qSMyq
+3iXLIXauNP9ar3tt1uZE8wIxAM4C6G4uoQ0dydhcgQVhlgB6GaqO18AEDYPzQjir
+dV2Bs8EBkYBx87PmZ+e/S7g9Ug==
+-----END CERTIFICATE-----`,
+ Key: `-----BEGIN PRIVATE KEY-----
+MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDBVtvjDBFke/k2Skezl
+h63g1q5IHCQM7wr1T43m5ACKZQt0ZDE1jfm1BYKk1omNpeChZANiAARwk2G6qdz9
+r+Wg7oZDvta1TQInW9FHJwnbcqGcdPjqs9+QLqvxjWi72UWLMlukh1RmkVoT4d40
+PMxZ3NvEwhsJgDJ82D7OUR2G7wZtgUj/WFj14XOofpZJmhzTQrtbbuc=
+-----END PRIVATE KEY-----`,
+ },
+ }
+)
+
+// webhookKINDConfig is a KIND cluster configuration used by TestWebhookUpdate. When KIND runs in GitHub Actions,
+// Secret updates do not propagate to container filesystems in a reasonable amount of time (>10m) while the full
+// test suite is running, even though the default sync frequency/update propagation interval should be 1m. This
+// configuration makes the kubelet fetch Secrets directly from the API server and sync container filesystems much
+// more often. The shorter sync interval degrades performance elsewhere and makes Pods slower to start, but once
+// they are running, Secret updates appear quickly enough for the test to complete in time.
+const webhookKINDConfig = `kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ kubeadmConfigPatches:
+ - |
+ kind: KubeletConfiguration
+ configMapAndSecretChangeDetectionStrategy: Get
+ syncFrequency: 3s
+`
+
+// TestWebhookUpdate checks that the webhook updates the certificate indicated by --admission-webhook-cert-file when
+// the mounted Secret updates. This requires E2E because we can't mount Secrets with the locally-run integration
+// test controller instance.
+func TestWebhookUpdate(t *testing.T) {
+ t.Log("configuring all-in-one-dbless.yaml manifest test")
+ t.Parallel()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ t.Log("building test cluster and environment")
+ configFile, err := os.CreateTemp(os.TempDir(), "webhook-kind-config-")
+ require.NoError(t, err)
+ defer os.Remove(configFile.Name())
+ defer configFile.Close()
+ written, err := configFile.Write([]byte(webhookKINDConfig))
+ require.NoError(t, err)
+ require.Equal(t, len(webhookKINDConfig), written)
+
+ clusterBuilder := kind.NewBuilder()
+ clusterBuilder.WithConfig(configFile.Name())
+ if clusterVersionStr != "" {
+ clusterVersion, err := semver.ParseTolerant(clusterVersionStr)
+ require.NoError(t, err)
+ clusterBuilder.WithClusterVersion(clusterVersion)
+ }
+ cluster, err := clusterBuilder.Build(ctx)
+ require.NoError(t, err)
+ addons := []clusters.Addon{}
+ addons = append(addons, metallb.New())
+ if b, err := loadimage.NewBuilder().WithImage(imageLoad); err == nil {
+ addons = append(addons, b.Build())
+ }
+ builder := environments.NewBuilder().WithExistingCluster(cluster).WithAddons(addons...)
+ env, err := builder.Build(ctx)
+ require.NoError(t, err)
+ defer func() {
+ assert.NoError(t, env.Cleanup(ctx))
+ }()
+
+ t.Log("deploying kong components")
+ manifest, err := getTestManifest(t, dblessPath)
+ require.NoError(t, err)
+ deployment := deployKong(ctx, t, env, manifest)
+
+ firstCertificate := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "admission-cert",
+ },
+ Type: corev1.SecretTypeTLS,
+ Data: map[string][]byte{
+ "tls.crt": []byte(tlsPairs[0].Cert),
+ "tls.key": []byte(tlsPairs[0].Key),
+ },
+ }
+
+ secondCertificate := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "admission-cert",
+ },
+ Type: corev1.SecretTypeTLS,
+ Data: map[string][]byte{
+ "tls.crt": []byte(tlsPairs[1].Cert),
+ "tls.key": []byte(tlsPairs[1].Key),
+ },
+ }
+
+ _, err = env.Cluster().Client().CoreV1().Secrets("kong").Create(ctx, firstCertificate, metav1.CreateOptions{})
+ assert.NoError(t, err)
+
+ t.Log("exposing admission service to the test environment")
+ admission, err := env.Cluster().Client().CoreV1().Services("kong").Get(ctx, "kong-validation-webhook",
+ metav1.GetOptions{})
+ assert.NoError(t, err)
+ admission.Spec.Type = corev1.ServiceTypeLoadBalancer
+ _, err = env.Cluster().Client().CoreV1().Services("kong").Update(ctx, admission, metav1.UpdateOptions{})
+ assert.NoError(t, err)
+ var admissionAddress string
+ require.Eventually(t, func() bool {
+ admission, err = env.Cluster().Client().CoreV1().Services("kong").Get(ctx, "kong-validation-webhook",
+ metav1.GetOptions{})
+ if err != nil {
+ return false
+ }
+ if len(admission.Status.LoadBalancer.Ingress) > 0 {
+ admissionAddress = admission.Status.LoadBalancer.Ingress[0].IP
+ return true
+ }
+ return false
+ }, time.Minute, time.Second)
+
+ t.Log("updating kong deployment to use admission certificate")
+ for i, container := range deployment.Spec.Template.Spec.Containers {
+ if container.Name == "ingress-controller" {
+ deployment.Spec.Template.Spec.Containers[i].Env = append(deployment.Spec.Template.Spec.Containers[i].Env,
+ corev1.EnvVar{Name: "CONTROLLER_ADMISSION_WEBHOOK_CERT_FILE", Value: "/admission-webhook/tls.crt"},
+ corev1.EnvVar{Name: "CONTROLLER_ADMISSION_WEBHOOK_KEY_FILE", Value: "/admission-webhook/tls.key"},
+ corev1.EnvVar{Name: "CONTROLLER_ADMISSION_WEBHOOK_LISTEN", Value: ":8080"})
+
+ deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes,
+ corev1.Volume{
+ Name: "admission-cert",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "admission-cert"},
+ },
+ })
+
+ deployment.Spec.Template.Spec.Containers[i].VolumeMounts = append(
+ deployment.Spec.Template.Spec.Containers[i].VolumeMounts,
+ corev1.VolumeMount{Name: "admission-cert", MountPath: "/admission-webhook"})
+ }
+ }
+
+ deployment, err = env.Cluster().Client().AppsV1().Deployments(deployment.Namespace).Update(ctx,
+ deployment, metav1.UpdateOptions{})
+ require.NoError(t, err)
+
+ t.Log("checking initial certificate")
+ require.Eventually(t, func() bool {
+ conn, err := tls.Dial("tcp", admissionAddress+":443",
+ &tls.Config{MinVersion: tls.VersionTLS13, InsecureSkipVerify: true}) // nolint:gosec
+ if err != nil {
+ return false
+ }
+ return conn.ConnectionState().PeerCertificates[0].Subject.CommonName == "first.example"
+ }, time.Minute*2, time.Second)
+
+ t.Log("changing certificate")
+ _, err = env.Cluster().Client().CoreV1().Secrets("kong").Update(ctx, secondCertificate, metav1.UpdateOptions{})
+ assert.NoError(t, err)
+
+ t.Log("checking second certificate")
+ require.Eventually(t, func() bool {
+ conn, err := tls.Dial("tcp", admissionAddress+":443",
+ &tls.Config{MinVersion: tls.VersionTLS13, InsecureSkipVerify: true}) // nolint:gosec
+ if err != nil {
+ return false
+ }
+ return conn.ConnectionState().PeerCertificates[0].Subject.CommonName == "second.example"
+ }, time.Minute*10, time.Second)
+}
From efd5cc8c8a5923e9084e84d2f788b084220dc6e0 Mon Sep 17 00:00:00 2001
From: Travis Raines <571832+rainest@users.noreply.github.com>
Date: Fri, 11 Feb 2022 15:02:18 -0800
Subject: [PATCH 2/9] chore(tests) reorganize E2E
Move E2E helper and utility functions to dedicated files.
Move several all-in-one tests that weren't in the correct category to
feature tests.
---
test/e2e/all_in_one_test.go | 726 ------------------------------------
test/e2e/features_test.go | 96 +++++
test/e2e/helpers_test.go | 387 +++++++++++++++++++
test/e2e/utils_test.go | 280 ++++++++++++++
4 files changed, 763 insertions(+), 726 deletions(-)
create mode 100644 test/e2e/helpers_test.go
diff --git a/test/e2e/all_in_one_test.go b/test/e2e/all_in_one_test.go
index ce53a75a0f..cfe05db675 100644
--- a/test/e2e/all_in_one_test.go
+++ b/test/e2e/all_in_one_test.go
@@ -4,52 +4,28 @@
package e2e
import (
- "bytes"
"context"
- "encoding/json"
"fmt"
"io"
- "io/ioutil"
- "net"
"net/http"
- "net/url"
"os"
- "os/exec"
- "path/filepath"
- "sort"
"strings"
"testing"
"time"
"github.com/blang/semver/v4"
- "github.com/google/uuid"
"github.com/kong/kubernetes-testing-framework/pkg/clusters"
"github.com/kong/kubernetes-testing-framework/pkg/clusters/addons/kong"
"github.com/kong/kubernetes-testing-framework/pkg/clusters/addons/loadimage"
"github.com/kong/kubernetes-testing-framework/pkg/clusters/addons/metallb"
"github.com/kong/kubernetes-testing-framework/pkg/environments"
- "github.com/kong/kubernetes-testing-framework/pkg/utils/kubernetes/generators"
- "github.com/sethvargo/go-password/password"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
corev1 "k8s.io/api/core/v1"
- kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- //"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/intstr"
- gatewayv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2"
- gatewayclient "sigs.k8s.io/gateway-api/pkg/client/clientset/gateway/versioned"
- "sigs.k8s.io/kustomize/api/krusty"
- "sigs.k8s.io/kustomize/kyaml/filesys"
-
- "github.com/kong/kubernetes-ingress-controller/v2/internal/annotations"
- "github.com/kong/kubernetes-ingress-controller/v2/internal/controllers/gateway"
"github.com/kong/kubernetes-ingress-controller/v2/internal/metrics"
- kongv1 "github.com/kong/kubernetes-ingress-controller/v2/pkg/apis/configuration/v1"
- "github.com/kong/kubernetes-ingress-controller/v2/pkg/clientset"
)
// -----------------------------------------------------------------------------
@@ -98,59 +74,6 @@ const (
dblessURL = "https://raw.githubusercontent.com/Kong/kubernetes-ingress-controller/%v.%v.x/deploy/single/all-in-one-dbless.yaml"
)
-func TestDeployAllInOneDBLESSGateway(t *testing.T) {
- t.Log("configuring all-in-one-dbless.yaml manifest test for Gateway")
- t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- t.Log("building test cluster and environment")
- addons := []clusters.Addon{}
- addons = append(addons, metallb.New())
- if b, err := loadimage.NewBuilder().WithImage(imageLoad); err == nil {
- addons = append(addons, b.Build())
- }
- builder := environments.NewBuilder().WithAddons(addons...)
- if clusterVersionStr != "" {
- clusterVersion, err := semver.ParseTolerant(clusterVersionStr)
- require.NoError(t, err)
- builder.WithKubernetesVersion(clusterVersion)
- }
- env, err := builder.Build(ctx)
- require.NoError(t, err)
-
- defer func() {
- t.Logf("cleaning up environment for cluster %s", env.Cluster().Name())
- assert.NoError(t, env.Cleanup(ctx))
- }()
-
- t.Logf("deploying Gateway APIs CRDs from %s", gatewayCRDsURL)
- require.NoError(t, clusters.KustomizeDeployForCluster(ctx, env.Cluster(), gatewayCRDsURL))
-
- t.Log("deploying kong components")
- manifest, err := getTestManifest(t, dblessPath)
- require.NoError(t, err)
- deployment := deployKong(ctx, t, env, manifest)
-
- t.Log("updating kong deployment to enable Gateway feature gate")
- for i, container := range deployment.Spec.Template.Spec.Containers {
- if container.Name == "ingress-controller" {
- deployment.Spec.Template.Spec.Containers[i].Env = append(deployment.Spec.Template.Spec.Containers[i].Env,
- corev1.EnvVar{Name: "CONTROLLER_FEATURE_GATES", Value: "Gateway=true"})
- }
- }
-
- _, err = env.Cluster().Client().AppsV1().Deployments(deployment.Namespace).Update(ctx,
- deployment, metav1.UpdateOptions{})
- require.NoError(t, err)
-
- t.Log("verifying controller updates associated Gateway resoures")
- gw := deployGateway(ctx, t, env)
- verifyGateway(ctx, t, env, gw)
- deployHTTPRoute(ctx, t, env, gw)
- verifyHTTPRoute(ctx, t, env)
-}
-
func TestDeployAllInOneDBLESS(t *testing.T) {
t.Log("configuring all-in-one-dbless.yaml manifest test")
t.Parallel()
@@ -235,41 +158,6 @@ func TestDeployAndUpgradeAllInOneDBLESS(t *testing.T) {
verifyIngress(ctx, t, env)
}
-// Unsatisfied LoadBalancers have special handling, see
-// https://github.com/Kong/kubernetes-ingress-controller/issues/2001
-func TestDeployAllInOneDBLESSNoLoadBalancer(t *testing.T) {
- t.Log("configuring all-in-one-dbless.yaml manifest test")
- t.Parallel()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- t.Log("building test cluster and environment")
- addons := []clusters.Addon{}
- if b, err := loadimage.NewBuilder().WithImage(imageLoad); err == nil {
- addons = append(addons, b.Build())
- }
- builder := environments.NewBuilder().WithAddons(addons...)
- if clusterVersionStr != "" {
- clusterVersion, err := semver.ParseTolerant(clusterVersionStr)
- require.NoError(t, err)
- builder.WithKubernetesVersion(clusterVersion)
- }
- env, err := builder.Build(ctx)
- require.NoError(t, err)
- defer func() {
- assert.NoError(t, env.Cleanup(ctx))
- }()
-
- t.Log("deploying kong components")
- manifest, err := getTestManifest(t, dblessPath)
- require.NoError(t, err)
- _ = deployKong(ctx, t, env, manifest)
-
- t.Log("running ingress tests to verify all-in-one deployed ingress controller and proxy are functional")
- deployIngress(ctx, t, env)
- verifyIngress(ctx, t, env)
-}
-
const entDBLESSPath = "../../deploy/single/all-in-one-dbless-k4k8s-enterprise.yaml"
func TestDeployAllInOneEnterpriseDBLESS(t *testing.T) {
@@ -525,617 +413,3 @@ func TestDeployAllInOneEnterprisePostgres(t *testing.T) {
verifyEnterprise(ctx, t, env, adminPassword)
verifyEnterpriseWithPostgres(ctx, t, env, adminPassword)
}
-
-// -----------------------------------------------------------------------------
-// Private Functions - Test Helpers
-// -----------------------------------------------------------------------------
-
-const (
- httpBinImage = "kennethreitz/httpbin"
- ingressClass = "kong"
- namespace = "kong"
- adminServiceName = "kong-admin"
-)
-
-func deployKong(ctx context.Context, t *testing.T, env environments.Environment, manifest io.Reader, additionalSecrets ...*corev1.Secret) *appsv1.Deployment {
- t.Log("creating a tempfile for kubeconfig")
- kubeconfig, err := generators.NewKubeConfigForRestConfig(env.Name(), env.Cluster().Config())
- require.NoError(t, err)
- kubeconfigFile, err := os.CreateTemp(os.TempDir(), "manifest-tests-kubeconfig-")
- require.NoError(t, err)
- defer os.Remove(kubeconfigFile.Name())
- defer kubeconfigFile.Close()
-
- t.Log("dumping kubeconfig to tempfile")
- written, err := kubeconfigFile.Write(kubeconfig)
- require.NoError(t, err)
- require.Equal(t, len(kubeconfig), written)
- kubeconfigFilename := kubeconfigFile.Name()
-
- t.Log("waiting for testing environment to be ready")
- require.NoError(t, <-env.WaitForReady(ctx))
-
- t.Log("creating the kong namespace")
- ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "kong"}}
- _, err = env.Cluster().Client().CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
- if !kerrors.IsAlreadyExists(err) {
- require.NoError(t, err)
- }
-
- t.Logf("deploying any supplemental secrets (found: %d)", len(additionalSecrets))
- for _, secret := range additionalSecrets {
- _, err := env.Cluster().Client().CoreV1().Secrets("kong").Create(ctx, secret, metav1.CreateOptions{})
- if !kerrors.IsAlreadyExists(err) {
- require.NoError(t, err)
- }
- }
-
- t.Log("deploying the manifest to the cluster")
- stdout, stderr := new(bytes.Buffer), new(bytes.Buffer)
- cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfigFilename, "apply", "-f", "-")
- cmd.Stdout = stdout
- cmd.Stderr = stderr
- cmd.Stdin = manifest
- require.NoError(t, cmd.Run(), fmt.Sprintf("STDOUT=(%s), STDERR=(%s)", stdout.String(), stderr.String()))
-
- t.Log("waiting for kong to be ready")
- var deployment *appsv1.Deployment
- require.Eventually(t, func() bool {
- deployment, err = env.Cluster().Client().AppsV1().Deployments(namespace).Get(ctx, "ingress-kong", metav1.GetOptions{})
- require.NoError(t, err)
- return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas
- }, kongComponentWait, time.Second)
- return deployment
-}
-
-func deployIngress(ctx context.Context, t *testing.T, env environments.Environment) {
- c, err := clientset.NewForConfig(env.Cluster().Config())
- assert.NoError(t, err)
- t.Log("deploying an HTTP service to test the ingress controller and proxy")
- container := generators.NewContainer("httpbin", httpBinImage, 80)
- deployment := generators.NewDeploymentForContainer(container)
- deployment, err = env.Cluster().Client().AppsV1().Deployments(corev1.NamespaceDefault).Create(ctx, deployment, metav1.CreateOptions{})
- require.NoError(t, err)
-
- t.Logf("exposing deployment %s via service", deployment.Name)
- service := generators.NewServiceForDeployment(deployment, corev1.ServiceTypeLoadBalancer)
- _, err = env.Cluster().Client().CoreV1().Services(corev1.NamespaceDefault).Create(ctx, service, metav1.CreateOptions{})
- require.NoError(t, err)
-
- getString := "GET"
- king := &kongv1.KongIngress{
- ObjectMeta: metav1.ObjectMeta{
- Name: "testki",
- Namespace: corev1.NamespaceDefault,
- Annotations: map[string]string{
- annotations.IngressClassKey: ingressClass,
- },
- },
- Route: &kongv1.KongIngressRoute{
- Methods: []*string{&getString},
- },
- }
- _, err = c.ConfigurationV1().KongIngresses(corev1.NamespaceDefault).Create(ctx, king, metav1.CreateOptions{})
- require.NoError(t, err)
- t.Logf("creating an ingress for service %s with ingress.class %s", service.Name, ingressClass)
- kubernetesVersion, err := env.Cluster().Version()
- require.NoError(t, err)
- ingress := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, "/httpbin", map[string]string{
- annotations.IngressClassKey: ingressClass,
- "konghq.com/strip-path": "true",
- "konghq.com/override": "testki",
- }, service)
- require.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), corev1.NamespaceDefault, ingress))
-}
-
-func verifyIngress(ctx context.Context, t *testing.T, env environments.Environment) {
- t.Log("finding the kong proxy service ip")
- svc, err := env.Cluster().Client().CoreV1().Services(namespace).Get(ctx, "kong-proxy", metav1.GetOptions{})
- require.NoError(t, err)
- proxyIP := getKongProxyIP(ctx, t, env, svc)
-
- t.Logf("waiting for route from Ingress to be operational at http://%s/httpbin", proxyIP)
- httpc := http.Client{Timeout: time.Second * 10}
- require.Eventually(t, func() bool {
- resp, err := httpc.Get(fmt.Sprintf("http://%s/httpbin", proxyIP))
- if err != nil {
- return false
- }
- defer resp.Body.Close()
- if resp.StatusCode == http.StatusOK {
- b := new(bytes.Buffer)
- n, err := b.ReadFrom(resp.Body)
- require.NoError(t, err)
- require.True(t, n > 0)
-  if !strings.Contains(b.String(), "httpbin.org") {
- return false
- }
- }
- // verify the KongIngress method restriction
- fakeData := url.Values{}
- fakeData.Set("foo", "bar")
- resp, err = httpc.PostForm(fmt.Sprintf("http://%s/httpbin", proxyIP), fakeData)
- if err != nil {
- return false
- }
- defer resp.Body.Close()
- return resp.StatusCode == http.StatusNotFound
- }, ingressWait, time.Second)
-}
-
-func deployGateway(ctx context.Context, t *testing.T, env environments.Environment) *gatewayv1alpha2.Gateway {
- gc, err := gatewayclient.NewForConfig(env.Cluster().Config())
- require.NoError(t, err)
-
- t.Log("deploying a supported gatewayclass to the test cluster")
- supportedGatewayClass := &gatewayv1alpha2.GatewayClass{
- ObjectMeta: metav1.ObjectMeta{
- Name: uuid.NewString(),
- },
- Spec: gatewayv1alpha2.GatewayClassSpec{
- ControllerName: gateway.ControllerName,
- },
- }
- supportedGatewayClass, err = gc.GatewayV1alpha2().GatewayClasses().Create(ctx, supportedGatewayClass, metav1.CreateOptions{})
- require.NoError(t, err)
-
- t.Log("deploying a gateway to the test cluster using unmanaged gateway mode")
- gw := &gatewayv1alpha2.Gateway{
- ObjectMeta: metav1.ObjectMeta{
- Name: "kong",
- Annotations: map[string]string{
- annotations.AnnotationPrefix + annotations.GatewayUnmanagedAnnotation: "true", // trigger the unmanaged gateway mode
- },
- },
- Spec: gatewayv1alpha2.GatewaySpec{
- GatewayClassName: gatewayv1alpha2.ObjectName(supportedGatewayClass.Name),
- Listeners: []gatewayv1alpha2.Listener{{
- Name: "http",
- Protocol: gatewayv1alpha2.HTTPProtocolType,
- Port: gatewayv1alpha2.PortNumber(80),
- }},
- },
- }
- gw, err = gc.GatewayV1alpha2().Gateways(corev1.NamespaceDefault).Create(ctx, gw, metav1.CreateOptions{})
- require.NoError(t, err)
- return gw
-}
-
-func verifyGateway(ctx context.Context, t *testing.T, env environments.Environment, gw *gatewayv1alpha2.Gateway) {
- gc, err := gatewayclient.NewForConfig(env.Cluster().Config())
- require.NoError(t, err)
-
- t.Log("verifying that the gateway receives a final ready condition once reconciliation completes")
- require.Eventually(t, func() bool {
- gw, err = gc.GatewayV1alpha2().Gateways(corev1.NamespaceDefault).Get(ctx, gw.Name, metav1.GetOptions{})
- require.NoError(t, err)
- for _, cond := range gw.Status.Conditions {
- if cond.Reason == string(gatewayv1alpha2.GatewayReasonReady) {
- return true
- }
- }
- return false
- }, gatewayUpdateWaitTime, time.Second)
-}
-
-func deployHTTPRoute(ctx context.Context, t *testing.T, env environments.Environment, gw *gatewayv1alpha2.Gateway) {
- gc, err := gatewayclient.NewForConfig(env.Cluster().Config())
- assert.NoError(t, err)
- t.Log("deploying an HTTP service to test the ingress controller and proxy")
- container := generators.NewContainer("httpbin", httpBinImage, 80)
- deployment := generators.NewDeploymentForContainer(container)
- deployment, err = env.Cluster().Client().AppsV1().Deployments(corev1.NamespaceDefault).Create(ctx, deployment, metav1.CreateOptions{})
- require.NoError(t, err)
-
- t.Logf("exposing deployment %s via service", deployment.Name)
- service := generators.NewServiceForDeployment(deployment, corev1.ServiceTypeLoadBalancer)
- _, err = env.Cluster().Client().CoreV1().Services(corev1.NamespaceDefault).Create(ctx, service, metav1.CreateOptions{})
- require.NoError(t, err)
-
- t.Logf("creating an HTTPRoute for service %s with Gateway %s", service.Name, gw.Name)
- pathMatchPrefix := gatewayv1alpha2.PathMatchPathPrefix
- path := "/httpbin"
- httpPort := gatewayv1alpha2.PortNumber(80)
- httproute := &gatewayv1alpha2.HTTPRoute{
- ObjectMeta: metav1.ObjectMeta{
- Name: uuid.NewString(),
- },
- Spec: gatewayv1alpha2.HTTPRouteSpec{
- CommonRouteSpec: gatewayv1alpha2.CommonRouteSpec{
- ParentRefs: []gatewayv1alpha2.ParentRef{{
- Name: gatewayv1alpha2.ObjectName(gw.Name),
- }},
- },
- Rules: []gatewayv1alpha2.HTTPRouteRule{{
- Matches: []gatewayv1alpha2.HTTPRouteMatch{{
- Path: &gatewayv1alpha2.HTTPPathMatch{
- Type: &pathMatchPrefix,
- Value: &path,
- },
- }},
- BackendRefs: []gatewayv1alpha2.HTTPBackendRef{{
- BackendRef: gatewayv1alpha2.BackendRef{
- BackendObjectReference: gatewayv1alpha2.BackendObjectReference{
- Name: gatewayv1alpha2.ObjectName(service.Name),
- Port: &httpPort,
- },
- },
- }},
- }},
- },
- }
- _, err = gc.GatewayV1alpha2().HTTPRoutes(corev1.NamespaceDefault).Create(ctx, httproute, metav1.CreateOptions{})
- require.NoError(t, err)
-}
-
-// verifyHTTPRoute verifies an HTTPRoute exposes a route at /httpbin
-// TODO this is not actually specific to HTTPRoutes. It is verifyIngress with the KongIngress removed
-// Once we support HTTPMethod HTTPRouteMatch handling, we can combine the two into a single generic function
-func verifyHTTPRoute(ctx context.Context, t *testing.T, env environments.Environment) {
- t.Log("finding the kong proxy service ip")
- svc, err := env.Cluster().Client().CoreV1().Services(namespace).Get(ctx, "kong-proxy", metav1.GetOptions{})
- require.NoError(t, err)
- proxyIP := getKongProxyIP(ctx, t, env, svc)
-
- t.Logf("waiting for route from Ingress to be operational at http://%s/httpbin", proxyIP)
- httpc := http.Client{Timeout: time.Second * 10}
- require.Eventually(t, func() bool {
- resp, err := httpc.Get(fmt.Sprintf("http://%s/httpbin", proxyIP))
- if err != nil {
- return false
- }
- defer resp.Body.Close()
- if resp.StatusCode == http.StatusOK {
- b := new(bytes.Buffer)
- n, err := b.ReadFrom(resp.Body)
- require.NoError(t, err)
- require.True(t, n > 0)
- return strings.Contains(b.String(), "httpbin.org")
- }
- return false
- }, ingressWait, time.Second)
-}
-
-// verifyEnterprise performs some basic tests of the Kong Admin API in the provided
-// environment to ensure that the Admin API that responds is in fact the enterprise
-// version of Kong.
-func verifyEnterprise(ctx context.Context, t *testing.T, env environments.Environment, adminPassword string) {
- t.Log("finding the ip address for the admin API")
- service, err := env.Cluster().Client().CoreV1().Services(namespace).Get(ctx, adminServiceName, metav1.GetOptions{})
- require.NoError(t, err)
- require.Equal(t, 1, len(service.Status.LoadBalancer.Ingress))
- adminIP := service.Status.LoadBalancer.Ingress[0].IP
-
- t.Log("building a GET request to gather admin api information")
- req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("http://%s/", adminIP), nil)
- require.NoError(t, err)
- req.Header.Set("Kong-Admin-Token", adminPassword)
-
- t.Log("pulling the admin api information")
- adminOutput := struct {
- Version string `json:"version"`
- }{}
- httpc := http.Client{Timeout: time.Second * 10}
- require.Eventually(t, func() bool {
- // at the time of writing it was seen that the admin API had
- // brief timing windows where it could respond 200 OK but
- // the API version data would not be populated and the JSON
- // decode would fail. Thus this check actually waits until
- // the response body is fully decoded with a non-empty value
- // before considering this complete.
- resp, err := httpc.Do(req)
- if err != nil {
- return false
- }
- defer resp.Body.Close()
- body, err := io.ReadAll(resp.Body)
- if err != nil {
- return false
- }
- if resp.StatusCode != http.StatusOK {
- return false
- }
- if err := json.Unmarshal(body, &adminOutput); err != nil {
- return false
- }
- return adminOutput.Version != ""
- }, adminAPIWait, time.Second)
- require.True(t, strings.Contains(adminOutput.Version, "enterprise-edition"))
-}
-
-func verifyEnterpriseWithPostgres(ctx context.Context, t *testing.T, env environments.Environment, adminPassword string) {
- t.Log("finding the ip address for the admin API")
- service, err := env.Cluster().Client().CoreV1().Services(namespace).Get(ctx, adminServiceName, metav1.GetOptions{})
- require.NoError(t, err)
- require.Equal(t, 1, len(service.Status.LoadBalancer.Ingress))
- adminIP := service.Status.LoadBalancer.Ingress[0].IP
-
- t.Log("building a POST request to create a new kong workspace")
- form := url.Values{"name": {"kic-e2e-tests"}}
- req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s/workspaces", adminIP), strings.NewReader(form.Encode()))
- require.NoError(t, err)
- req.Header.Set("Kong-Admin-Token", adminPassword)
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-
- t.Log("creating a workspace to validate enterprise functionality")
- httpc := http.Client{Timeout: time.Second * 10}
- resp, err := httpc.Do(req)
- require.NoError(t, err)
- defer resp.Body.Close()
- body, err := io.ReadAll(resp.Body)
- require.NoError(t, err)
- require.Equal(t, http.StatusCreated, resp.StatusCode, fmt.Sprintf("STATUS=(%s), BODY=(%s)", resp.Status, string(body)))
-}
-
-func verifyPostgres(ctx context.Context, t *testing.T, env environments.Environment) {
- t.Log("verifying that postgres pod was deployed and is running")
- postgresPod, err := env.Cluster().Client().CoreV1().Pods(namespace).Get(ctx, "postgres-0", metav1.GetOptions{})
- require.NoError(t, err)
- require.Equal(t, corev1.PodRunning, postgresPod.Status.Phase)
-
- t.Log("verifying that all migrations ran properly")
- migrationJob, err := env.Cluster().Client().BatchV1().Jobs(namespace).Get(ctx, "kong-migrations", metav1.GetOptions{})
- require.NoError(t, err)
- require.GreaterOrEqual(t, migrationJob.Status.Succeeded, int32(1))
-}
-
-// startPortForwarder runs "kubectl port-forward" in the background. It stops the forward when the provided context
-// ends
-func startPortForwarder(ctx context.Context, t *testing.T, env environments.Environment, namespace, name, localPort,
- targetPort string) {
- kubeconfig, err := generators.NewKubeConfigForRestConfig(env.Name(), env.Cluster().Config())
- require.NoError(t, err)
- kubeconfigFile, err := os.CreateTemp(os.TempDir(), "portforward-tests-kubeconfig-")
- require.NoError(t, err)
- defer os.Remove(kubeconfigFile.Name())
- defer kubeconfigFile.Close()
- written, err := kubeconfigFile.Write(kubeconfig)
- require.NoError(t, err)
- require.Equal(t, len(kubeconfig), written)
- cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfigFile.Name(), "port-forward", "-n", namespace,
- name, fmt.Sprintf("%s:%s", localPort, targetPort)) //nolint:gosec
- t.Logf("forwarding port %s to %s/%s:%s", localPort, namespace, name, targetPort)
- if startErr := cmd.Start(); startErr != nil {
- startOutput, outputErr := cmd.Output()
- assert.NoError(t, outputErr)
- require.NoError(t, startErr, string(startOutput))
- }
- require.Eventually(t, func() bool {
- conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%s", localPort))
- if err == nil {
- conn.Close()
- return true
- }
- return false
- }, kongComponentWait, time.Second)
-}
-
-// -----------------------------------------------------------------------------
-// Private Functions - Utilities
-// -----------------------------------------------------------------------------
-
-const (
- // adminPasswordSecretName is the name of the secret which will house the admin
- // API admin password.
- adminPasswordSecretName = "kong-enterprise-superuser-password"
-)
-
-func generateAdminPasswordSecret() (string, *corev1.Secret, error) {
- adminPassword, err := password.Generate(64, 10, 10, false, false)
- if err != nil {
- return "", nil, err
- }
-
- return adminPassword, &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: adminPasswordSecretName,
- },
- Type: corev1.SecretTypeOpaque,
- Data: map[string][]byte{
- "password": []byte(adminPassword),
- },
- }, nil
-}
-
-// exposeAdminAPI will override the KONG_ADMIN_LISTEN for the cluster's proxy to expose the
-// Admin API via a service. Some deployments only expose this on localhost by default as there's
-// no authentication, so note that this is only for testing environment purposes.
-func exposeAdminAPI(ctx context.Context, t *testing.T, env environments.Environment) *corev1.Service {
- t.Log("updating the proxy container KONG_ADMIN_LISTEN to expose the admin api")
- deployment, err := env.Cluster().Client().AppsV1().Deployments(namespace).Get(ctx, "ingress-kong", metav1.GetOptions{})
- require.NoError(t, err)
- for i, containerSpec := range deployment.Spec.Template.Spec.Containers {
- if containerSpec.Name == "proxy" {
- for j, envVar := range containerSpec.Env {
- if envVar.Name == "KONG_ADMIN_LISTEN" {
- deployment.Spec.Template.Spec.Containers[i].Env[j].Value = "0.0.0.0:8001, 0.0.0.0:8444 ssl"
- }
- }
- }
- }
- deployment, err = env.Cluster().Client().AppsV1().Deployments(namespace).Update(ctx, deployment, metav1.UpdateOptions{})
- require.NoError(t, err)
-
- t.Log("creating a loadbalancer service for the admin API")
- svcPorts := []corev1.ServicePort{{
- Name: "proxy",
- Protocol: corev1.ProtocolTCP,
- TargetPort: intstr.IntOrString{IntVal: 8001},
- Port: 80,
- }}
- service := &corev1.Service{
- ObjectMeta: metav1.ObjectMeta{
- Name: adminServiceName,
- },
- Spec: corev1.ServiceSpec{
- Type: corev1.ServiceTypeLoadBalancer,
- Selector: deployment.Spec.Selector.MatchLabels,
- Ports: svcPorts,
- },
- }
- service, err = env.Cluster().Client().CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{})
- require.NoError(t, err)
-
- t.Log("waiting for loadbalancer ip to provision")
- require.Eventually(t, func() bool {
- service, err = env.Cluster().Client().CoreV1().Services(namespace).Get(ctx, service.Name, metav1.GetOptions{})
- require.NoError(t, err)
- return len(service.Status.LoadBalancer.Ingress) == 1
- }, time.Minute, time.Second)
-
- return service
-}
-
-// getTestManifest checks if a controller image override is set. If not, it returns the original provided path.
-// If an override is set, it runs a kustomize patch that replaces the controller image with the override image and
-// returns the modified manifest path. If there is any issue patching the manifest, it will log the issue and return
-// the original provided path
-func getTestManifest(t *testing.T, baseManifestPath string) (io.Reader, error) {
- var imagetag string
- if imageLoad != "" {
- imagetag = imageLoad
- } else {
- imagetag = imageOverride
- }
- if imagetag == "" {
- return os.Open(baseManifestPath)
- }
- split := strings.Split(imagetag, ":")
- if len(split) != 2 {
- t.Logf("could not parse override image '%v', using default manifest %v", imagetag, baseManifestPath)
- return os.Open(baseManifestPath)
- }
- modified, err := patchControllerImage(baseManifestPath, split[0], split[1])
- if err != nil {
- t.Logf("failed patching override image '%v' (%v), using default manifest %v", imagetag, err, baseManifestPath)
- return os.Open(baseManifestPath)
- }
- t.Logf("using modified %v manifest", baseManifestPath)
- return modified, nil
-}
-
-const imageKustomizationContents = `resources:
-- base.yaml
-images:
-- name: kong/kubernetes-ingress-controller
- newName: %v
- newTag: '%v'
-`
-
-// patchControllerImage takes a manifest, image, and tag and runs kustomize to replace the
-// kong/kubernetes-ingress-controller image with the provided image. It returns the location of kustomize's output
-func patchControllerImage(baseManifestPath string, image string, tag string) (io.Reader, error) {
- workDir, err := os.MkdirTemp("", "kictest.")
- if err != nil {
- return nil, err
- }
- defer os.RemoveAll(workDir)
- orig, err := ioutil.ReadFile(baseManifestPath)
- if err != nil {
- return nil, err
- }
- err = ioutil.WriteFile(filepath.Join(workDir, "base.yaml"), orig, 0600)
- if err != nil {
- return nil, err
- }
- kustomization := []byte(fmt.Sprintf(imageKustomizationContents, image, tag))
- err = os.WriteFile(filepath.Join(workDir, "kustomization.yaml"), kustomization, 0600)
- if err != nil {
- return nil, err
- }
- kustomized, err := kustomizeManifest(workDir)
- if err != nil {
- return nil, err
- }
- return bytes.NewReader(kustomized), nil
-}
-
-// kustomizeManifest runs kustomize on a path and returns the YAML output
-func kustomizeManifest(path string) ([]byte, error) {
- k := krusty.MakeKustomizer(krusty.MakeDefaultOptions())
- m, err := k.Run(filesys.MakeFsOnDisk(), path)
- if err != nil {
- return []byte{}, err
- }
- return m.AsYaml()
-}
-
-func getCurrentGitTag(path string) (semver.Version, error) {
- cmd := exec.Command("git", "describe", "--tags")
- cmd.Dir = path
- tagBytes, _ := cmd.Output()
- tag, err := semver.ParseTolerant(string(tagBytes))
- if err != nil {
- return semver.Version{}, err
- }
- return tag, nil
-}
-
-func getPreviousGitTag(path string, cur semver.Version) (semver.Version, error) {
- var tags []semver.Version
- cmd := exec.Command("git", "tag")
- cmd.Dir = path
- tagsBytes, err := cmd.Output()
- if err != nil {
- return semver.Version{}, err
- }
- foo := strings.Split(string(tagsBytes), "\n")
- for _, tag := range foo {
- ver, err := semver.ParseTolerant(tag)
- if err == nil {
- tags = append(tags, ver)
- }
- }
- sort.Slice(tags, func(i, j int) bool { return tags[i].LT(tags[j]) })
- curIndex := sort.Search(len(tags), func(i int) bool { return tags[i].EQ(cur) })
- if curIndex == 0 {
- return tags[curIndex], nil
- }
- return tags[curIndex-1], nil
-}
-
-// getKongProxyIP takes a Service with Kong proxy ports and returns and its IP, or fails the test if it cannot
-func getKongProxyIP(ctx context.Context, t *testing.T, env environments.Environment, svc *corev1.Service) string {
- proxyIP := ""
- require.NotEqual(t, svc.Spec.Type, svc.Spec.ClusterIP)
- if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
- if len(svc.Status.LoadBalancer.Ingress) > 0 {
- proxyIP = svc.Status.LoadBalancer.Ingress[0].IP
- t.Logf("found loadbalancer IP for the Kong Proxy: %s", proxyIP)
- }
- }
- // the above failed to find an address. either the LB didn't provision or we're using a NodePort
- if proxyIP == "" {
- var port int32
- for _, sport := range svc.Spec.Ports {
- if sport.Name == "kong-proxy" || sport.Name == "proxy" {
- port = sport.NodePort
- }
- }
- var extAddrs []string
- var intAddrs []string
- nodes, err := env.Cluster().Client().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
- require.NoError(t, err)
- for _, node := range nodes.Items {
- for _, naddr := range node.Status.Addresses {
- if naddr.Type == corev1.NodeExternalIP {
- extAddrs = append(extAddrs, naddr.Address)
- }
- if naddr.Type == corev1.NodeInternalIP {
- extAddrs = append(intAddrs, naddr.Address)
- }
- }
- }
- // local clusters (KIND, minikube) typically provide no external addresses, but their internal addresses are
- // routeable from their host. We prefer external addresses if they're available, but fall back to internal
- // in their absence
- if len(extAddrs) > 0 {
- proxyIP = fmt.Sprintf("%v:%v", extAddrs[0], port)
- } else if len(intAddrs) > 0 {
- proxyIP = fmt.Sprintf("%v:%v", intAddrs[0], port)
- } else {
- assert.Fail(t, "both extAddrs and intAddrs are empty")
- }
- }
- return proxyIP
-}
diff --git a/test/e2e/features_test.go b/test/e2e/features_test.go
index ba1f619033..e84fc43420 100644
--- a/test/e2e/features_test.go
+++ b/test/e2e/features_test.go
@@ -22,6 +22,14 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
+// -----------------------------------------------------------------------------
+// E2E feature tests
+//
+// These tests test features that are not easily testable using integration
+// tests due to environment requirements (e.g. needing to mount volumes) or
+// conflicts with the integration configuration.
+// -----------------------------------------------------------------------------
+
// TLSPair is a PEM certificate+key pair
type TLSPair struct {
Key, Cert string
@@ -236,3 +244,91 @@ func TestWebhookUpdate(t *testing.T) {
return conn.ConnectionState().PeerCertificates[0].Subject.CommonName == "second.example"
}, time.Minute*10, time.Second)
}
+
+func TestDeployAllInOneDBLESSGateway(t *testing.T) {
+ t.Log("configuring all-in-one-dbless.yaml manifest test for Gateway")
+ t.Parallel()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ t.Log("building test cluster and environment")
+ addons := []clusters.Addon{}
+ addons = append(addons, metallb.New())
+ if b, err := loadimage.NewBuilder().WithImage(imageLoad); err == nil {
+ addons = append(addons, b.Build())
+ }
+ builder := environments.NewBuilder().WithAddons(addons...)
+ if clusterVersionStr != "" {
+ clusterVersion, err := semver.ParseTolerant(clusterVersionStr)
+ require.NoError(t, err)
+ builder.WithKubernetesVersion(clusterVersion)
+ }
+ env, err := builder.Build(ctx)
+ require.NoError(t, err)
+
+ defer func() {
+ t.Logf("cleaning up environment for cluster %s", env.Cluster().Name())
+ assert.NoError(t, env.Cleanup(ctx))
+ }()
+
+ t.Logf("deploying Gateway APIs CRDs from %s", gatewayCRDsURL)
+ require.NoError(t, clusters.KustomizeDeployForCluster(ctx, env.Cluster(), gatewayCRDsURL))
+
+ t.Log("deploying kong components")
+ manifest, err := getTestManifest(t, dblessPath)
+ require.NoError(t, err)
+ deployment := deployKong(ctx, t, env, manifest)
+
+ t.Log("updating kong deployment to enable Gateway feature gate")
+ for i, container := range deployment.Spec.Template.Spec.Containers {
+ if container.Name == "ingress-controller" {
+ deployment.Spec.Template.Spec.Containers[i].Env = append(deployment.Spec.Template.Spec.Containers[i].Env,
+ corev1.EnvVar{Name: "CONTROLLER_FEATURE_GATES", Value: "Gateway=true"})
+ }
+ }
+
+ _, err = env.Cluster().Client().AppsV1().Deployments(deployment.Namespace).Update(ctx,
+ deployment, metav1.UpdateOptions{})
+ require.NoError(t, err)
+
+ t.Log("verifying controller updates associated Gateway resources")
+ gw := deployGateway(ctx, t, env)
+ verifyGateway(ctx, t, env, gw)
+ deployHTTPRoute(ctx, t, env, gw)
+ verifyHTTPRoute(ctx, t, env)
+}
+
+// Unsatisfied LoadBalancers have special handling, see
+// https://github.com/Kong/kubernetes-ingress-controller/issues/2001
+func TestDeployAllInOneDBLESSNoLoadBalancer(t *testing.T) {
+ t.Log("configuring all-in-one-dbless.yaml manifest test")
+ t.Parallel()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ t.Log("building test cluster and environment")
+ addons := []clusters.Addon{}
+ if b, err := loadimage.NewBuilder().WithImage(imageLoad); err == nil {
+ addons = append(addons, b.Build())
+ }
+ builder := environments.NewBuilder().WithAddons(addons...)
+ if clusterVersionStr != "" {
+ clusterVersion, err := semver.ParseTolerant(clusterVersionStr)
+ require.NoError(t, err)
+ builder.WithKubernetesVersion(clusterVersion)
+ }
+ env, err := builder.Build(ctx)
+ require.NoError(t, err)
+ defer func() {
+ assert.NoError(t, env.Cleanup(ctx))
+ }()
+
+ t.Log("deploying kong components")
+ manifest, err := getTestManifest(t, dblessPath)
+ require.NoError(t, err)
+ _ = deployKong(ctx, t, env, manifest)
+
+ t.Log("running ingress tests to verify all-in-one deployed ingress controller and proxy are functional")
+ deployIngress(ctx, t, env)
+ verifyIngress(ctx, t, env)
+}
diff --git a/test/e2e/helpers_test.go b/test/e2e/helpers_test.go
new file mode 100644
index 0000000000..9580edd55d
--- /dev/null
+++ b/test/e2e/helpers_test.go
@@ -0,0 +1,387 @@
+//go:build e2e_tests
+// +build e2e_tests
+
+package e2e
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/kong/kubernetes-testing-framework/pkg/clusters"
+ "github.com/kong/kubernetes-testing-framework/pkg/environments"
+ "github.com/kong/kubernetes-testing-framework/pkg/utils/kubernetes/generators"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ kerrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ gatewayv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2"
+ gatewayclient "sigs.k8s.io/gateway-api/pkg/client/clientset/gateway/versioned"
+
+ "github.com/kong/kubernetes-ingress-controller/v2/internal/annotations"
+ "github.com/kong/kubernetes-ingress-controller/v2/internal/controllers/gateway"
+ kongv1 "github.com/kong/kubernetes-ingress-controller/v2/pkg/apis/configuration/v1"
+ "github.com/kong/kubernetes-ingress-controller/v2/pkg/clientset"
+)
+
+const (
+	// httpBinImage is the backend container image used by test services.
+	httpBinImage = "kennethreitz/httpbin"
+	// ingressClass is the ingress class used on test Ingress resources.
+	ingressClass = "kong"
+	// namespace is the namespace the all-in-one manifests install Kong into.
+	namespace = "kong"
+	// adminServiceName is the LoadBalancer Service created to expose the admin API.
+	adminServiceName = "kong-admin"
+)
+
+// deployKong writes the environment's kubeconfig to a tempfile, applies the
+// provided Kong manifest to the cluster with kubectl, and waits for the
+// resulting "ingress-kong" Deployment to report all replicas ready. Any
+// additionalSecrets are created in the "kong" namespace before the manifest
+// is applied so the deployment can consume them at startup. It returns the
+// ready Deployment.
+func deployKong(ctx context.Context, t *testing.T, env environments.Environment, manifest io.Reader, additionalSecrets ...*corev1.Secret) *appsv1.Deployment {
+	t.Log("creating a tempfile for kubeconfig")
+	kubeconfig, err := generators.NewKubeConfigForRestConfig(env.Name(), env.Cluster().Config())
+	require.NoError(t, err)
+	kubeconfigFile, err := os.CreateTemp(os.TempDir(), "manifest-tests-kubeconfig-")
+	require.NoError(t, err)
+	defer os.Remove(kubeconfigFile.Name())
+	defer kubeconfigFile.Close()
+
+	t.Log("dumping kubeconfig to tempfile")
+	written, err := kubeconfigFile.Write(kubeconfig)
+	require.NoError(t, err)
+	require.Equal(t, len(kubeconfig), written)
+	kubeconfigFilename := kubeconfigFile.Name()
+
+	t.Log("waiting for testing environment to be ready")
+	require.NoError(t, <-env.WaitForReady(ctx))
+
+	t.Log("creating the kong namespace")
+	ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "kong"}}
+	_, err = env.Cluster().Client().CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
+	// tolerate AlreadyExists so repeated deploys into one cluster are idempotent
+	if !kerrors.IsAlreadyExists(err) {
+		require.NoError(t, err)
+	}
+
+	t.Logf("deploying any supplemental secrets (found: %d)", len(additionalSecrets))
+	for _, secret := range additionalSecrets {
+		_, err := env.Cluster().Client().CoreV1().Secrets("kong").Create(ctx, secret, metav1.CreateOptions{})
+		if !kerrors.IsAlreadyExists(err) {
+			require.NoError(t, err)
+		}
+	}
+
+	t.Log("deploying the manifest to the cluster")
+	stdout, stderr := new(bytes.Buffer), new(bytes.Buffer)
+	cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfigFilename, "apply", "-f", "-")
+	cmd.Stdout = stdout
+	cmd.Stderr = stderr
+	cmd.Stdin = manifest
+	require.NoError(t, cmd.Run(), fmt.Sprintf("STDOUT=(%s), STDERR=(%s)", stdout.String(), stderr.String()))
+
+	t.Log("waiting for kong to be ready")
+	var deployment *appsv1.Deployment
+	require.Eventually(t, func() bool {
+		deployment, err = env.Cluster().Client().AppsV1().Deployments(namespace).Get(ctx, "ingress-kong", metav1.GetOptions{})
+		require.NoError(t, err)
+		return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas
+	}, kongComponentWait, time.Second)
+	return deployment
+}
+
+// deployIngress deploys an httpbin Deployment and LoadBalancer Service into
+// the default namespace, plus a KongIngress that restricts the route to GET
+// requests and an Ingress routing /httpbin to the service. verifyIngress
+// checks both the route and the method restriction.
+func deployIngress(ctx context.Context, t *testing.T, env environments.Environment) {
+	c, err := clientset.NewForConfig(env.Cluster().Config())
+	assert.NoError(t, err)
+	t.Log("deploying an HTTP service to test the ingress controller and proxy")
+	container := generators.NewContainer("httpbin", httpBinImage, 80)
+	deployment := generators.NewDeploymentForContainer(container)
+	deployment, err = env.Cluster().Client().AppsV1().Deployments(corev1.NamespaceDefault).Create(ctx, deployment, metav1.CreateOptions{})
+	require.NoError(t, err)
+
+	t.Logf("exposing deployment %s via service", deployment.Name)
+	service := generators.NewServiceForDeployment(deployment, corev1.ServiceTypeLoadBalancer)
+	_, err = env.Cluster().Client().CoreV1().Services(corev1.NamespaceDefault).Create(ctx, service, metav1.CreateOptions{})
+	require.NoError(t, err)
+
+	// the KongIngress limits the route to GET only; verifyIngress relies on
+	// this by expecting a 404 for POST requests
+	getString := "GET"
+	king := &kongv1.KongIngress{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testki",
+			Namespace: corev1.NamespaceDefault,
+			Annotations: map[string]string{
+				annotations.IngressClassKey: ingressClass,
+			},
+		},
+		Route: &kongv1.KongIngressRoute{
+			Methods: []*string{&getString},
+		},
+	}
+	_, err = c.ConfigurationV1().KongIngresses(corev1.NamespaceDefault).Create(ctx, king, metav1.CreateOptions{})
+	require.NoError(t, err)
+	t.Logf("creating an ingress for service %s with ingress.class %s", service.Name, ingressClass)
+	// the cluster version determines which Ingress API version the generator emits
+	kubernetesVersion, err := env.Cluster().Version()
+	require.NoError(t, err)
+	ingress := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, "/httpbin", map[string]string{
+		annotations.IngressClassKey: ingressClass,
+		"konghq.com/strip-path":     "true",
+		"konghq.com/override":       "testki",
+	}, service)
+	require.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), corev1.NamespaceDefault, ingress))
+}
+
+// verifyIngress waits until the route deployed by deployIngress is functional:
+// a GET to /httpbin must return 200 with an httpbin body, and a POST to the
+// same path must return 404 (rejected by the KongIngress GET-only method
+// restriction).
+func verifyIngress(ctx context.Context, t *testing.T, env environments.Environment) {
+	t.Log("finding the kong proxy service ip")
+	svc, err := env.Cluster().Client().CoreV1().Services(namespace).Get(ctx, "kong-proxy", metav1.GetOptions{})
+	require.NoError(t, err)
+	proxyIP := getKongProxyIP(ctx, t, env, svc)
+
+	t.Logf("waiting for route from Ingress to be operational at http://%s/httpbin", proxyIP)
+	httpc := http.Client{Timeout: time.Second * 10}
+	require.Eventually(t, func() bool {
+		resp, err := httpc.Get(fmt.Sprintf("http://%s/httpbin", proxyIP))
+		if err != nil {
+			return false
+		}
+		// defers accumulate until the closure returns; each captures the body
+		// current at the time the defer statement executes
+		defer resp.Body.Close()
+		if resp.StatusCode == http.StatusOK {
+			b := new(bytes.Buffer)
+			n, err := b.ReadFrom(resp.Body)
+			require.NoError(t, err)
+			require.True(t, n > 0)
+			if !strings.Contains(b.String(), "httpbin.org") {
+				return false
+			}
+		}
+		// verify the KongIngress method restriction
+		fakeData := url.Values{}
+		fakeData.Set("foo", "bar")
+		resp, err = httpc.PostForm(fmt.Sprintf("http://%s/httpbin", proxyIP), fakeData)
+		if err != nil {
+			return false
+		}
+		defer resp.Body.Close()
+		return resp.StatusCode == http.StatusNotFound
+	}, ingressWait, time.Second)
+}
+
+// deployGateway creates a GatewayClass (with a random name) whose controller
+// name matches this controller, then creates a Gateway named "kong" in the
+// default namespace annotated for unmanaged mode, listening for HTTP on port
+// 80. It returns the created Gateway.
+func deployGateway(ctx context.Context, t *testing.T, env environments.Environment) *gatewayv1alpha2.Gateway {
+	gc, err := gatewayclient.NewForConfig(env.Cluster().Config())
+	require.NoError(t, err)
+
+	t.Log("deploying a supported gatewayclass to the test cluster")
+	supportedGatewayClass := &gatewayv1alpha2.GatewayClass{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: uuid.NewString(),
+		},
+		Spec: gatewayv1alpha2.GatewayClassSpec{
+			ControllerName: gateway.ControllerName,
+		},
+	}
+	supportedGatewayClass, err = gc.GatewayV1alpha2().GatewayClasses().Create(ctx, supportedGatewayClass, metav1.CreateOptions{})
+	require.NoError(t, err)
+
+	t.Log("deploying a gateway to the test cluster using unmanaged gateway mode")
+	gw := &gatewayv1alpha2.Gateway{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "kong",
+			Annotations: map[string]string{
+				annotations.AnnotationPrefix + annotations.GatewayUnmanagedAnnotation: "true", // trigger the unmanaged gateway mode
+			},
+		},
+		Spec: gatewayv1alpha2.GatewaySpec{
+			GatewayClassName: gatewayv1alpha2.ObjectName(supportedGatewayClass.Name),
+			Listeners: []gatewayv1alpha2.Listener{{
+				Name:     "http",
+				Protocol: gatewayv1alpha2.HTTPProtocolType,
+				Port:     gatewayv1alpha2.PortNumber(80),
+			}},
+		},
+	}
+	gw, err = gc.GatewayV1alpha2().Gateways(corev1.NamespaceDefault).Create(ctx, gw, metav1.CreateOptions{})
+	require.NoError(t, err)
+	return gw
+}
+
+// verifyGateway polls the given Gateway until one of its status conditions
+// carries the Ready reason, failing the test after gatewayUpdateWaitTime.
+func verifyGateway(ctx context.Context, t *testing.T, env environments.Environment, gw *gatewayv1alpha2.Gateway) {
+	gc, err := gatewayclient.NewForConfig(env.Cluster().Config())
+	require.NoError(t, err)
+
+	t.Log("verifying that the gateway receives a final ready condition once reconciliation completes")
+	require.Eventually(t, func() bool {
+		// re-fetch on every attempt so status updates made by the controller
+		// are observed
+		gw, err = gc.GatewayV1alpha2().Gateways(corev1.NamespaceDefault).Get(ctx, gw.Name, metav1.GetOptions{})
+		require.NoError(t, err)
+		for _, cond := range gw.Status.Conditions {
+			if cond.Reason == string(gatewayv1alpha2.GatewayReasonReady) {
+				return true
+			}
+		}
+		return false
+	}, gatewayUpdateWaitTime, time.Second)
+}
+
+// deployHTTPRoute deploys an httpbin Deployment and LoadBalancer Service into
+// the default namespace and an HTTPRoute (random name) that attaches to the
+// given Gateway and prefix-matches /httpbin to the service on port 80.
+func deployHTTPRoute(ctx context.Context, t *testing.T, env environments.Environment, gw *gatewayv1alpha2.Gateway) {
+	gc, err := gatewayclient.NewForConfig(env.Cluster().Config())
+	assert.NoError(t, err)
+	t.Log("deploying an HTTP service to test the ingress controller and proxy")
+	container := generators.NewContainer("httpbin", httpBinImage, 80)
+	deployment := generators.NewDeploymentForContainer(container)
+	deployment, err = env.Cluster().Client().AppsV1().Deployments(corev1.NamespaceDefault).Create(ctx, deployment, metav1.CreateOptions{})
+	require.NoError(t, err)
+
+	t.Logf("exposing deployment %s via service", deployment.Name)
+	service := generators.NewServiceForDeployment(deployment, corev1.ServiceTypeLoadBalancer)
+	_, err = env.Cluster().Client().CoreV1().Services(corev1.NamespaceDefault).Create(ctx, service, metav1.CreateOptions{})
+	require.NoError(t, err)
+
+	t.Logf("creating an HTTPRoute for service %s with Gateway %s", service.Name, gw.Name)
+	// the Gateway API spec takes pointers for these match fields, so they need
+	// addressable locals
+	pathMatchPrefix := gatewayv1alpha2.PathMatchPathPrefix
+	path := "/httpbin"
+	httpPort := gatewayv1alpha2.PortNumber(80)
+	httproute := &gatewayv1alpha2.HTTPRoute{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: uuid.NewString(),
+		},
+		Spec: gatewayv1alpha2.HTTPRouteSpec{
+			CommonRouteSpec: gatewayv1alpha2.CommonRouteSpec{
+				ParentRefs: []gatewayv1alpha2.ParentRef{{
+					Name: gatewayv1alpha2.ObjectName(gw.Name),
+				}},
+			},
+			Rules: []gatewayv1alpha2.HTTPRouteRule{{
+				Matches: []gatewayv1alpha2.HTTPRouteMatch{{
+					Path: &gatewayv1alpha2.HTTPPathMatch{
+						Type:  &pathMatchPrefix,
+						Value: &path,
+					},
+				}},
+				BackendRefs: []gatewayv1alpha2.HTTPBackendRef{{
+					BackendRef: gatewayv1alpha2.BackendRef{
+						BackendObjectReference: gatewayv1alpha2.BackendObjectReference{
+							Name: gatewayv1alpha2.ObjectName(service.Name),
+							Port: &httpPort,
+						},
+					},
+				}},
+			}},
+		},
+	}
+	_, err = gc.GatewayV1alpha2().HTTPRoutes(corev1.NamespaceDefault).Create(ctx, httproute, metav1.CreateOptions{})
+	require.NoError(t, err)
+}
+
+// verifyHTTPRoute verifies an HTTPRoute exposes a route at /httpbin
+// TODO this is not actually specific to HTTPRoutes. It is verifyIngress with the KongIngress removed
+// Once we support HTTPMethod HTTPRouteMatch handling, we can combine the two into a single generic function
+func verifyHTTPRoute(ctx context.Context, t *testing.T, env environments.Environment) {
+	t.Log("finding the kong proxy service ip")
+	svc, err := env.Cluster().Client().CoreV1().Services(namespace).Get(ctx, "kong-proxy", metav1.GetOptions{})
+	require.NoError(t, err)
+	proxyIP := getKongProxyIP(ctx, t, env, svc)
+
+	t.Logf("waiting for route from Ingress to be operational at http://%s/httpbin", proxyIP)
+	httpc := http.Client{Timeout: time.Second * 10}
+	require.Eventually(t, func() bool {
+		resp, err := httpc.Get(fmt.Sprintf("http://%s/httpbin", proxyIP))
+		if err != nil {
+			return false
+		}
+		defer resp.Body.Close()
+		// success requires a 200 with a non-empty body that mentions
+		// httpbin.org, proving the request reached the httpbin backend
+		if resp.StatusCode == http.StatusOK {
+			b := new(bytes.Buffer)
+			n, err := b.ReadFrom(resp.Body)
+			require.NoError(t, err)
+			require.True(t, n > 0)
+			return strings.Contains(b.String(), "httpbin.org")
+		}
+		return false
+	}, ingressWait, time.Second)
+}
+
+// verifyEnterprise performs some basic tests of the Kong Admin API in the provided
+// environment to ensure that the Admin API that responds is in fact the enterprise
+// version of Kong.
+func verifyEnterprise(ctx context.Context, t *testing.T, env environments.Environment, adminPassword string) {
+	t.Log("finding the ip address for the admin API")
+	service, err := env.Cluster().Client().CoreV1().Services(namespace).Get(ctx, adminServiceName, metav1.GetOptions{})
+	require.NoError(t, err)
+	require.Equal(t, 1, len(service.Status.LoadBalancer.Ingress))
+	adminIP := service.Status.LoadBalancer.Ingress[0].IP
+
+	t.Log("building a GET request to gather admin api information")
+	// the request has no body, so reusing it across Eventually attempts is safe
+	req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("http://%s/", adminIP), nil)
+	require.NoError(t, err)
+	req.Header.Set("Kong-Admin-Token", adminPassword)
+
+	t.Log("pulling the admin api information")
+	adminOutput := struct {
+		Version string `json:"version"`
+	}{}
+	httpc := http.Client{Timeout: time.Second * 10}
+	require.Eventually(t, func() bool {
+		// at the time of writing it was seen that the admin API had
+		// brief timing windows where it could respond 200 OK but
+		// the API version data would not be populated and the JSON
+		// decode would fail. Thus this check actually waits until
+		// the response body is fully decoded with a non-empty value
+		// before considering this complete.
+		resp, err := httpc.Do(req)
+		if err != nil {
+			return false
+		}
+		defer resp.Body.Close()
+		body, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return false
+		}
+		if resp.StatusCode != http.StatusOK {
+			return false
+		}
+		if err := json.Unmarshal(body, &adminOutput); err != nil {
+			return false
+		}
+		return adminOutput.Version != ""
+	}, adminAPIWait, time.Second)
+	// enterprise builds report versions like "x.y.z.w-enterprise-edition"
+	require.True(t, strings.Contains(adminOutput.Version, "enterprise-edition"))
+}
+
+// verifyEnterpriseWithPostgres validates enterprise-only, database-backed
+// functionality by creating a Kong workspace ("kic-e2e-tests") through the
+// admin API and requiring a 201 Created response.
+func verifyEnterpriseWithPostgres(ctx context.Context, t *testing.T, env environments.Environment, adminPassword string) {
+	t.Log("finding the ip address for the admin API")
+	service, err := env.Cluster().Client().CoreV1().Services(namespace).Get(ctx, adminServiceName, metav1.GetOptions{})
+	require.NoError(t, err)
+	require.Equal(t, 1, len(service.Status.LoadBalancer.Ingress))
+	adminIP := service.Status.LoadBalancer.Ingress[0].IP
+
+	t.Log("building a POST request to create a new kong workspace")
+	form := url.Values{"name": {"kic-e2e-tests"}}
+	req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s/workspaces", adminIP), strings.NewReader(form.Encode()))
+	require.NoError(t, err)
+	req.Header.Set("Kong-Admin-Token", adminPassword)
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+	t.Log("creating a workspace to validate enterprise functionality")
+	httpc := http.Client{Timeout: time.Second * 10}
+	resp, err := httpc.Do(req)
+	require.NoError(t, err)
+	defer resp.Body.Close()
+	body, err := io.ReadAll(resp.Body)
+	require.NoError(t, err)
+	// include status and body in the failure message for debuggability
+	require.Equal(t, http.StatusCreated, resp.StatusCode, fmt.Sprintf("STATUS=(%s), BODY=(%s)", resp.Status, string(body)))
+}
+
+// verifyPostgres confirms that the postgres-0 pod is Running and that the
+// kong-migrations Job completed at least once.
+func verifyPostgres(ctx context.Context, t *testing.T, env environments.Environment) {
+	t.Log("verifying that postgres pod was deployed and is running")
+	postgresPod, err := env.Cluster().Client().CoreV1().Pods(namespace).Get(ctx, "postgres-0", metav1.GetOptions{})
+	require.NoError(t, err)
+	require.Equal(t, corev1.PodRunning, postgresPod.Status.Phase)
+
+	t.Log("verifying that all migrations ran properly")
+	migrationJob, err := env.Cluster().Client().BatchV1().Jobs(namespace).Get(ctx, "kong-migrations", metav1.GetOptions{})
+	require.NoError(t, err)
+	require.GreaterOrEqual(t, migrationJob.Status.Succeeded, int32(1))
+}
diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go
index 6d4ed3b3b4..0aa6437d6c 100644
--- a/test/e2e/utils_test.go
+++ b/test/e2e/utils_test.go
@@ -4,9 +4,32 @@
package e2e
import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
"net/http"
"os"
+ "os/exec"
+ "path/filepath"
+ "sort"
+ "strings"
+ "testing"
"time"
+
+ "github.com/blang/semver/v4"
+ "github.com/kong/kubernetes-testing-framework/pkg/environments"
+ "github.com/sethvargo/go-password/password"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "sigs.k8s.io/kustomize/api/krusty"
+ "sigs.k8s.io/kustomize/kyaml/filesys"
)
var (
@@ -20,3 +43,260 @@ var (
// timeout instead of the longer default provided by the http stdlib.
httpc = http.Client{Timeout: time.Second * 10}
)
+
+const (
+ // adminPasswordSecretName is the name of the secret which will house the admin
+ // API admin password.
+ adminPasswordSecretName = "kong-enterprise-superuser-password"
+)
+
+// generateAdminPasswordSecret generates a random 64-character admin password
+// and returns it along with an Opaque Secret (named adminPasswordSecretName)
+// carrying it under the "password" key, ready to be created in the cluster.
+func generateAdminPasswordSecret() (string, *corev1.Secret, error) {
+	// 64 chars total, with 10 digits and 10 symbols; no uppercase restriction
+	adminPassword, err := password.Generate(64, 10, 10, false, false)
+	if err != nil {
+		return "", nil, err
+	}
+
+	return adminPassword, &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: adminPasswordSecretName,
+		},
+		Type: corev1.SecretTypeOpaque,
+		Data: map[string][]byte{
+			"password": []byte(adminPassword),
+		},
+	}, nil
+}
+
+// exposeAdminAPI will override the KONG_ADMIN_LISTEN for the cluster's proxy to expose the
+// Admin API via a service. Some deployments only expose this on localhost by default as there's
+// no authentication, so note that this is only for testing environment purposes.
+func exposeAdminAPI(ctx context.Context, t *testing.T, env environments.Environment) *corev1.Service {
+	t.Log("updating the proxy container KONG_ADMIN_LISTEN to expose the admin api")
+	deployment, err := env.Cluster().Client().AppsV1().Deployments(namespace).Get(ctx, "ingress-kong", metav1.GetOptions{})
+	require.NoError(t, err)
+	for i, containerSpec := range deployment.Spec.Template.Spec.Containers {
+		if containerSpec.Name == "proxy" {
+			for j, envVar := range containerSpec.Env {
+				if envVar.Name == "KONG_ADMIN_LISTEN" {
+					// mutate via indices: containerSpec/envVar are copies
+					deployment.Spec.Template.Spec.Containers[i].Env[j].Value = "0.0.0.0:8001, 0.0.0.0:8444 ssl"
+				}
+			}
+		}
+	}
+	deployment, err = env.Cluster().Client().AppsV1().Deployments(namespace).Update(ctx, deployment, metav1.UpdateOptions{})
+	require.NoError(t, err)
+
+	t.Log("creating a loadbalancer service for the admin API")
+	// service port 80 maps to the plaintext admin listen (8001) configured above
+	svcPorts := []corev1.ServicePort{{
+		Name:       "proxy",
+		Protocol:   corev1.ProtocolTCP,
+		TargetPort: intstr.IntOrString{IntVal: 8001},
+		Port:       80,
+	}}
+	service := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: adminServiceName,
+		},
+		Spec: corev1.ServiceSpec{
+			Type:     corev1.ServiceTypeLoadBalancer,
+			Selector: deployment.Spec.Selector.MatchLabels,
+			Ports:    svcPorts,
+		},
+	}
+	service, err = env.Cluster().Client().CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{})
+	require.NoError(t, err)
+
+	t.Log("waiting for loadbalancer ip to provision")
+	require.Eventually(t, func() bool {
+		service, err = env.Cluster().Client().CoreV1().Services(namespace).Get(ctx, service.Name, metav1.GetOptions{})
+		require.NoError(t, err)
+		return len(service.Status.LoadBalancer.Ingress) == 1
+	}, time.Minute, time.Second)
+
+	return service
+}
+
+// getTestManifest checks if a controller image override is set. If not, it returns the original provided path.
+// If an override is set, it runs a kustomize patch that replaces the controller image with the override image and
+// returns the modified manifest path. If there is any issue patching the manifest, it will log the issue and return
+// the original provided path
+func getTestManifest(t *testing.T, baseManifestPath string) (io.Reader, error) {
+	var imagetag string
+	// imageLoad (an image loaded directly into the cluster) takes precedence
+	// over imageOverride (an image pulled from a registry)
+	if imageLoad != "" {
+		imagetag = imageLoad
+	} else {
+		imagetag = imageOverride
+	}
+	if imagetag == "" {
+		return os.Open(baseManifestPath)
+	}
+	split := strings.Split(imagetag, ":")
+	if len(split) != 2 {
+		t.Logf("could not parse override image '%v', using default manifest %v", imagetag, baseManifestPath)
+		return os.Open(baseManifestPath)
+	}
+	modified, err := patchControllerImage(baseManifestPath, split[0], split[1])
+	if err != nil {
+		t.Logf("failed patching override image '%v' (%v), using default manifest %v", imagetag, err, baseManifestPath)
+		return os.Open(baseManifestPath)
+	}
+	t.Logf("using modified %v manifest", baseManifestPath)
+	return modified, nil
+}
+
+const imageKustomizationContents = `resources:
+- base.yaml
+images:
+- name: kong/kubernetes-ingress-controller
+ newName: %v
+ newTag: '%v'
+`
+
+// patchControllerImage takes a manifest, image, and tag and runs kustomize to replace the
+// kong/kubernetes-ingress-controller image with the provided image. It returns the location of kustomize's output
+func patchControllerImage(baseManifestPath string, image string, tag string) (io.Reader, error) {
+	// the temp workdir holds base.yaml plus a kustomization.yaml and is
+	// removed once the kustomized output has been buffered
+	workDir, err := os.MkdirTemp("", "kictest.")
+	if err != nil {
+		return nil, err
+	}
+	defer os.RemoveAll(workDir)
+	// NOTE(review): these two calls use deprecated io/ioutil while the
+	// kustomization write below uses os.WriteFile — prefer os.ReadFile and
+	// os.WriteFile for consistency (requires dropping the io/ioutil import)
+	orig, err := ioutil.ReadFile(baseManifestPath)
+	if err != nil {
+		return nil, err
+	}
+	err = ioutil.WriteFile(filepath.Join(workDir, "base.yaml"), orig, 0600)
+	if err != nil {
+		return nil, err
+	}
+	kustomization := []byte(fmt.Sprintf(imageKustomizationContents, image, tag))
+	err = os.WriteFile(filepath.Join(workDir, "kustomization.yaml"), kustomization, 0600)
+	if err != nil {
+		return nil, err
+	}
+	kustomized, err := kustomizeManifest(workDir)
+	if err != nil {
+		return nil, err
+	}
+	return bytes.NewReader(kustomized), nil
+}
+
+// kustomizeManifest runs kustomize on a path and returns the YAML output
+func kustomizeManifest(path string) ([]byte, error) {
+	// krusty runs kustomize in-process against the on-disk filesystem; no
+	// external kustomize binary is required
+	k := krusty.MakeKustomizer(krusty.MakeDefaultOptions())
+	m, err := k.Run(filesys.MakeFsOnDisk(), path)
+	if err != nil {
+		return []byte{}, err
+	}
+	return m.AsYaml()
+}
+
+func getCurrentGitTag(path string) (semver.Version, error) {
+ cmd := exec.Command("git", "describe", "--tags")
+ cmd.Dir = path
+ tagBytes, _ := cmd.Output()
+ tag, err := semver.ParseTolerant(string(tagBytes))
+ if err != nil {
+ return semver.Version{}, err
+ }
+ return tag, nil
+}
+
+func getPreviousGitTag(path string, cur semver.Version) (semver.Version, error) {
+ var tags []semver.Version
+ cmd := exec.Command("git", "tag")
+ cmd.Dir = path
+ tagsBytes, err := cmd.Output()
+ if err != nil {
+ return semver.Version{}, err
+ }
+ foo := strings.Split(string(tagsBytes), "\n")
+ for _, tag := range foo {
+ ver, err := semver.ParseTolerant(tag)
+ if err == nil {
+ tags = append(tags, ver)
+ }
+ }
+ sort.Slice(tags, func(i, j int) bool { return tags[i].LT(tags[j]) })
+ curIndex := sort.Search(len(tags), func(i int) bool { return tags[i].EQ(cur) })
+ if curIndex == 0 {
+ return tags[curIndex], nil
+ }
+ return tags[curIndex-1], nil
+}
+
+// getKongProxyIP takes a Service with Kong proxy ports and returns its address:
+// the bare LoadBalancer IP when one has provisioned, otherwise "nodeIP:nodePort"
+// via the NodePort fallback. It fails the test if no address can be determined.
+func getKongProxyIP(ctx context.Context, t *testing.T, env environments.Environment, svc *corev1.Service) string {
+	proxyIP := ""
+	require.NotEqual(t, svc.Spec.Type, svc.Spec.ClusterIP)
+	if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
+		if len(svc.Status.LoadBalancer.Ingress) > 0 {
+			proxyIP = svc.Status.LoadBalancer.Ingress[0].IP
+			t.Logf("found loadbalancer IP for the Kong Proxy: %s", proxyIP)
+		}
+	}
+	// the above failed to find an address. either the LB didn't provision or we're using a NodePort
+	if proxyIP == "" {
+		var port int32
+		for _, sport := range svc.Spec.Ports {
+			if sport.Name == "kong-proxy" || sport.Name == "proxy" {
+				port = sport.NodePort
+			}
+		}
+		var extAddrs []string
+		var intAddrs []string
+		nodes, err := env.Cluster().Client().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+		require.NoError(t, err)
+		for _, node := range nodes.Items {
+			for _, naddr := range node.Status.Addresses {
+				if naddr.Type == corev1.NodeExternalIP {
+					extAddrs = append(extAddrs, naddr.Address)
+				}
+				if naddr.Type == corev1.NodeInternalIP {
+					// BUGFIX: this previously assigned append(intAddrs, ...) to
+					// extAddrs, leaving intAddrs permanently empty and mixing
+					// internal addresses into the external list
+					intAddrs = append(intAddrs, naddr.Address)
+				}
+			}
+		}
+		// local clusters (KIND, minikube) typically provide no external addresses, but their internal addresses are
+		// routeable from their host. We prefer external addresses if they're available, but fall back to internal
+		// in their absence
+		if len(extAddrs) > 0 {
+			proxyIP = fmt.Sprintf("%v:%v", extAddrs[0], port)
+		} else if len(intAddrs) > 0 {
+			proxyIP = fmt.Sprintf("%v:%v", intAddrs[0], port)
+		} else {
+			assert.Fail(t, "both extAddrs and intAddrs are empty")
+		}
+	}
+	return proxyIP
+}
+
+// startPortForwarder runs "kubectl port-forward" in the background. It stops the forward when the provided context
+// ends
+func startPortForwarder(ctx context.Context, t *testing.T, env environments.Environment, namespace, name, localPort,
+	targetPort string) {
+	kubeconfig, err := generators.NewKubeConfigForRestConfig(env.Name(), env.Cluster().Config())
+	require.NoError(t, err)
+	kubeconfigFile, err := os.CreateTemp(os.TempDir(), "portforward-tests-kubeconfig-")
+	require.NoError(t, err)
+	defer os.Remove(kubeconfigFile.Name())
+	defer kubeconfigFile.Close()
+	written, err := kubeconfigFile.Write(kubeconfig)
+	require.NoError(t, err)
+	require.Equal(t, len(kubeconfig), written)
+	cmd := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeconfigFile.Name(), "port-forward", "-n", namespace,
+		name, fmt.Sprintf("%s:%s", localPort, targetPort)) //nolint:gosec
+	t.Logf("forwarding port %s to %s/%s:%s", localPort, namespace, name, targetPort)
+	if startErr := cmd.Start(); startErr != nil {
+		// NOTE(review): calling Output() on a Cmd whose Start() already ran is
+		// dubious (exec reports "already started"); consider capturing
+		// stdout/stderr via buffers before Start instead — verify
+		startOutput, outputErr := cmd.Output()
+		assert.NoError(t, outputErr)
+		require.NoError(t, startErr, string(startOutput))
+	}
+	// wait until the local end of the forward accepts TCP connections
+	require.Eventually(t, func() bool {
+		conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%s", localPort))
+		if err == nil {
+			conn.Close()
+			return true
+		}
+		return false
+	}, kongComponentWait, time.Second)
+}
From a89e65043fefd2f02077c72a1063908bbadea849 Mon Sep 17 00:00:00 2001
From: Travis Raines <571832+rainest@users.noreply.github.com>
Date: Fri, 18 Feb 2022 11:05:17 -0800
Subject: [PATCH 3/9] test(e2e) add log retrieval utility
---
test/e2e/utils_test.go | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go
index 0aa6437d6c..3ee964cf35 100644
--- a/test/e2e/utils_test.go
+++ b/test/e2e/utils_test.go
@@ -21,6 +21,7 @@ import (
"github.com/blang/semver/v4"
"github.com/kong/kubernetes-testing-framework/pkg/environments"
+ "github.com/kong/kubernetes-testing-framework/pkg/utils/kubernetes/generators"
"github.com/sethvargo/go-password/password"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -300,3 +301,24 @@ func startPortForwarder(ctx context.Context, t *testing.T, env environments.Envi
return false
}, kongComponentWait, time.Second)
}
+
+// getKubernetesLogs returns the logs of all containers of the named resource
+// in the given namespace via "kubectl logs --all-containers", using a
+// kubeconfig generated from the test environment.
+func getKubernetesLogs(t *testing.T, env environments.Environment, namespace, name string) (string, error) {
+	kubeconfig, err := generators.NewKubeConfigForRestConfig(env.Name(), env.Cluster().Config())
+	require.NoError(t, err)
+	kubeconfigFile, err := os.CreateTemp(os.TempDir(), "deploy-logs-tests-kubeconfig-")
+	require.NoError(t, err)
+	defer os.Remove(kubeconfigFile.Name())
+	defer kubeconfigFile.Close()
+	written, err := kubeconfigFile.Write(kubeconfig)
+	require.NoError(t, err)
+	require.Equal(t, len(kubeconfig), written)
+	stderr := new(bytes.Buffer)
+	cmd := exec.Command("kubectl", "--kubeconfig", kubeconfigFile.Name(), "logs", "-n", namespace, name,
+		"--all-containers")
+	cmd.Stderr = stderr
+	out, err := cmd.Output()
+	if err != nil {
+		// keep the underlying exec error in the chain rather than discarding
+		// it in favor of stderr alone
+		return "", fmt.Errorf("getting logs: %w (stderr: %s)", err, stderr.String())
+	}
+	return string(out), nil
+}
From d1749e708ffaa9bd2e130397a18d65f2976238b0 Mon Sep 17 00:00:00 2001
From: Travis Raines <571832+rainest@users.noreply.github.com>
Date: Fri, 18 Feb 2022 11:20:02 -0800
Subject: [PATCH 4/9] fix(tests) support ports in override images
Support override images that include the Docker repository when the
Docker repository includes a port. Instead of requiring that the image
string split into exactly 2 items, use the last item as the tag and join
the rest back together.
---
test/e2e/utils_test.go | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go
index 3ee964cf35..7a3c05f919 100644
--- a/test/e2e/utils_test.go
+++ b/test/e2e/utils_test.go
@@ -132,11 +132,12 @@ func getTestManifest(t *testing.T, baseManifestPath string) (io.Reader, error) {
return os.Open(baseManifestPath)
}
split := strings.Split(imagetag, ":")
- if len(split) != 2 {
+ if len(split) < 2 {
t.Logf("could not parse override image '%v', using default manifest %v", imagetag, baseManifestPath)
return os.Open(baseManifestPath)
}
- modified, err := patchControllerImage(baseManifestPath, split[0], split[1])
+ modified, err := patchControllerImage(baseManifestPath, strings.Join(split[0:len(split)-1], ":"),
+ split[len(split)-1])
if err != nil {
t.Logf("failed patching override image '%v' (%v), using default manifest %v", imagetag, err, baseManifestPath)
return os.Open(baseManifestPath)
From 8edb14cd8e03941b9ea56899cf682bcb4ecdc2c5 Mon Sep 17 00:00:00 2001
From: Travis Raines <571832+rainest@users.noreply.github.com>
Date: Fri, 18 Feb 2022 12:45:37 -0800
Subject: [PATCH 5/9] fix(ci) load local images into KIND
The Action registry is not exposed to the KIND network, so we cannot
actually use it. Load the image directly into KIND instead.
---
.github/workflows/e2e_targeted.yaml | 31 ++++++++++++++++++-----------
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/.github/workflows/e2e_targeted.yaml b/.github/workflows/e2e_targeted.yaml
index 36b1c635eb..f37bdd9e88 100644
--- a/.github/workflows/e2e_targeted.yaml
+++ b/.github/workflows/e2e_targeted.yaml
@@ -12,9 +12,9 @@ on:
required: true
default: 'v1.12.2'
controller-image:
- description: 'KIC Docker image to test with. The default "localhost:5000/kong/kubernetes-ingress-controller:ci" builds an image from the dispatch branch'
+ description: 'KIC Docker image to test with. The default "kong/kubernetes-ingress-controller:ci" builds an image from the dispatch branch'
required: true
- default: 'localhost:5000/kong/kubernetes-ingress-controller:ci'
+ default: 'kong/kubernetes-ingress-controller:ci'
include-integration:
description: 'Set to "true" to run integration tests also'
required: true
@@ -24,11 +24,6 @@ jobs:
e2e-tests:
environment: "Configure ci"
runs-on: ubuntu-latest
- services:
- registry:
- image: registry:2
- ports:
- - 5000:5000
steps:
- name: setup golang
uses: actions/setup-go@v2
@@ -49,28 +44,29 @@ jobs:
fetch-depth: 0
- name: Set up QEMU
- if: ${{ github.event.inputs.controller-image == 'localhost:5000/kong/kubernetes-ingress-controller:ci' }}
+ if: ${{ github.event.inputs.controller-image == 'kong/kubernetes-ingress-controller:ci' }}
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
- if: ${{ github.event.inputs.controller-image == 'localhost:5000/kong/kubernetes-ingress-controller:ci' }}
+ if: ${{ github.event.inputs.controller-image == 'kong/kubernetes-ingress-controller:ci' }}
uses: docker/setup-buildx-action@v1
with:
driver-opts: network=host
- name: Build and push to local registry
- if: ${{ github.event.inputs.controller-image == 'localhost:5000/kong/kubernetes-ingress-controller:ci' }}
+ if: ${{ github.event.inputs.controller-image == 'kong/kubernetes-ingress-controller:ci' }}
id: docker_build
uses: docker/build-push-action@v2
with:
context: .
- push: true
+ load: true
file: Dockerfile
- tags: localhost:5000/kong/kubernetes-ingress-controller:ci
+ tags: kong/kubernetes-ingress-controller:ci
target: distroless
- name: run e2e tests
run: make test.e2e
+ if: ${{ github.event.inputs.controller-image != 'kong/kubernetes-ingress-controller:ci' }}
env:
TEST_KONG_CONTROLLER_IMAGE_OVERRIDE: ${{ github.event.inputs.controller-image }}
KONG_LICENSE_DATA: ${{ secrets.KONG_LICENSE_DATA }}
@@ -79,6 +75,17 @@ jobs:
NCPU: 1 # it was found that github actions (specifically) did not seem to perform well when spawning
# multiple kind clusters within a single job, so only 1 is allowed at a time.
+ - name: run e2e tests (local image)
+ run: make test.e2e
+ if: ${{ github.event.inputs.controller-image == 'kong/kubernetes-ingress-controller:ci' }}
+ env:
+ TEST_KONG_CONTROLLER_IMAGE_LOAD: ${{ github.event.inputs.controller-image }}
+ TEST_KONG_CONTROLLER_IMAGE_OVERRIDE: ${{ github.event.inputs.controller-image }}
+ KONG_LICENSE_DATA: ${{ secrets.KONG_LICENSE_DATA }}
+ KONG_CLUSTER_VERSION: ${{ github.event.inputs.kubernetes-version }}
+ ISTIO_VERSION: ${{ github.event.inputs.istio-version }}
+ NCPU: 1
+
integration-tests:
if: ${{ github.event.inputs.include-integration == 'true' }}
environment: "Configure ci"
From 25a6347bc54def9f669fbbbd1adaac32991f0ba2 Mon Sep 17 00:00:00 2001
From: Travis Raines <571832+rainest@users.noreply.github.com>
Date: Tue, 22 Feb 2022 14:32:39 -0800
Subject: [PATCH 6/9] Apply suggestions from code review
Co-authored-by: Shane Utt
---
internal/admission/server.go | 5 ++++-
test/e2e/features_test.go | 8 ++++----
test/e2e/utils_test.go | 1 -
3 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/internal/admission/server.go b/internal/admission/server.go
index afd8134e6d..b2c333e6f4 100644
--- a/internal/admission/server.go
+++ b/internal/admission/server.go
@@ -44,9 +44,11 @@ func (sc *ServerConfig) toTLSConfig(ctx context.Context, log logrus.FieldLogger)
var watcher *certwatcher.CertWatcher
var cert, key []byte
switch {
+ // the caller provided certificates via the ENV (certwatcher can't be used here)
case sc.CertPath == "" && sc.KeyPath == "" && sc.Cert != "" && sc.Key != "":
cert, key = []byte(sc.Cert), []byte(sc.Key)
+ // the caller provided explicit file paths to the certs, enable certwatcher for these paths
case sc.CertPath != "" && sc.KeyPath != "" && sc.Cert == "" && sc.Key == "":
var err error
watcher, err = certwatcher.New(sc.CertPath, sc.KeyPath)
@@ -54,7 +56,8 @@ func (sc *ServerConfig) toTLSConfig(ctx context.Context, log logrus.FieldLogger)
return nil, fmt.Errorf("failed to create CertWatcher: %w", err)
}
- case sc.CertPath == "" && sc.KeyPath == "" && sc.Cert == "" && sc.Key == "":
+ // the caller provided no certificate configuration, assume the default paths and enable certwatcher for them
var err error
watcher, err = certwatcher.New(DefaultAdmissionWebhookCertPath, DefaultAdmissionWebhookKeyPath)
if err != nil {
diff --git a/test/e2e/features_test.go b/test/e2e/features_test.go
index e84fc43420..aa91666ccb 100644
--- a/test/e2e/features_test.go
+++ b/test/e2e/features_test.go
@@ -170,15 +170,15 @@ func TestWebhookUpdate(t *testing.T) {
}
_, err = env.Cluster().Client().CoreV1().Secrets("kong").Create(ctx, firstCertificate, metav1.CreateOptions{})
- assert.NoError(t, err)
+ require.NoError(t, err)
t.Log("exposing admission service to the test environment")
admission, err := env.Cluster().Client().CoreV1().Services("kong").Get(ctx, "kong-validation-webhook",
metav1.GetOptions{})
- assert.NoError(t, err)
+ require.NoError(t, err)
admission.Spec.Type = corev1.ServiceTypeLoadBalancer
_, err = env.Cluster().Client().CoreV1().Services("kong").Update(ctx, admission, metav1.UpdateOptions{})
- assert.NoError(t, err)
+ require.NoError(t, err)
var admissionAddress string
require.Eventually(t, func() bool {
admission, err = env.Cluster().Client().CoreV1().Services("kong").Get(ctx, "kong-validation-webhook",
@@ -232,7 +232,7 @@ func TestWebhookUpdate(t *testing.T) {
t.Log("changing certificate")
_, err = env.Cluster().Client().CoreV1().Secrets("kong").Update(ctx, secondCertificate, metav1.UpdateOptions{})
- assert.NoError(t, err)
+ require.NoError(t, err)
t.Log("checking second certificate")
require.Eventually(t, func() bool {
diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go
index 7a3c05f919..e987e200d6 100644
--- a/test/e2e/utils_test.go
+++ b/test/e2e/utils_test.go
@@ -27,7 +27,6 @@ import (
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/kustomize/api/krusty"
"sigs.k8s.io/kustomize/kyaml/filesys"
From 0dcb69624db7424b0fb571c1259ce6672115384c Mon Sep 17 00:00:00 2001
From: Travis Raines <571832+rainest@users.noreply.github.com>
Date: Tue, 22 Feb 2022 15:30:13 -0800
Subject: [PATCH 7/9] pr: rework test constants
---
test/e2e/features_test.go | 49 +++++++++++++++++++++------------------
1 file changed, 26 insertions(+), 23 deletions(-)
diff --git a/test/e2e/features_test.go b/test/e2e/features_test.go
index aa91666ccb..81e69dd3da 100644
--- a/test/e2e/features_test.go
+++ b/test/e2e/features_test.go
@@ -35,7 +35,27 @@ type TLSPair struct {
Key, Cert string
}
-var (
+const (
+ // webhookKINDConfig is a KIND configuration used for TestWebhookUpdate. KIND, when running in GitHub Actions, is
+ // a bit wonky with handling Secret updates, and they do not propagate to container filesystems in a reasonable
+ // amount of time (>10m) when running this in the complete test suite, even though the actual sync frequency/update
+ // propagation should be 1m by default. These changes force Secret updates to go directly to the API server and
+ // update containers much more often. The latter causes significant performance degradation elsewhere, and Pods take
+ // much longer to start, but once they do Secret updates show up more quickly, enough for the test to complete in time.
+ webhookKINDConfig = `kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ kubeadmConfigPatches:
+ - |
+ kind: KubeletConfiguration
+ configMapAndSecretChangeDetectionStrategy: Get
+ syncFrequency: 3s
+`
+ validationWebhookName = "kong-validation-webhook"
+ kongNamespace = "kong"
+ // openssl req -new -x509 -nodes -newkey ec:<(openssl ecparam -name secp384r1) -keyout cert.key -out cert.crt -days 3650 -subj '/CN=first.example/'
+ // openssl req -new -x509 -nodes -newkey ec:<(openssl ecparam -name secp384r1) -keyout cert.key -out cert.crt -days 3650 -subj '/CN=second.example/'
tlsPairs = []TLSPair{
{
Cert: `-----BEGIN CERTIFICATE-----
@@ -86,23 +106,6 @@ PMxZ3NvEwhsJgDJ82D7OUR2G7wZtgUj/WFj14XOofpZJmhzTQrtbbuc=
}
)
-// webhookKINDConfig is a KIND configuration used for TestWebhookUpdate. KIND, when running in GitHub Actions, is
-// a bit wonky with handling Secret updates, and they do not propagate to container filesystems in a reasonable
-// amount of time (>10m) when running this in the complete test suite, even though the actual sync frequency/update
-// propagation should be 1m by default. These changes force Secret updates to go directly to the API server and
-// update containers much more often. The latter causes significant performance degradation elsewhere, and Pods take
-// much longer to start, but once they do Secret updates show up more quickly, enough for the test to complete in time.
-const webhookKINDConfig = `kind: Cluster
-apiVersion: kind.x-k8s.io/v1alpha4
-nodes:
-- role: control-plane
- kubeadmConfigPatches:
- - |
- kind: KubeletConfiguration
- configMapAndSecretChangeDetectionStrategy: Get
- syncFrequency: 3s
-`
-
// TestWebhookUpdate checks that the webhook updates the certificate indicated by --admission-webhook-cert-file when
// the mounted Secret updates. This requires E2E because we can't mount Secrets with the locally-run integration
// test controller instance.
@@ -169,19 +172,19 @@ func TestWebhookUpdate(t *testing.T) {
},
}
- _, err = env.Cluster().Client().CoreV1().Secrets("kong").Create(ctx, firstCertificate, metav1.CreateOptions{})
+ _, err = env.Cluster().Client().CoreV1().Secrets(kongNamespace).Create(ctx, firstCertificate, metav1.CreateOptions{})
require.NoError(t, err)
t.Log("exposing admission service to the test environment")
- admission, err := env.Cluster().Client().CoreV1().Services("kong").Get(ctx, "kong-validation-webhook",
+ admission, err := env.Cluster().Client().CoreV1().Services(kongNamespace).Get(ctx, validationWebhookName,
metav1.GetOptions{})
require.NoError(t, err)
admission.Spec.Type = corev1.ServiceTypeLoadBalancer
- _, err = env.Cluster().Client().CoreV1().Services("kong").Update(ctx, admission, metav1.UpdateOptions{})
+ _, err = env.Cluster().Client().CoreV1().Services(kongNamespace).Update(ctx, admission, metav1.UpdateOptions{})
require.NoError(t, err)
var admissionAddress string
require.Eventually(t, func() bool {
- admission, err = env.Cluster().Client().CoreV1().Services("kong").Get(ctx, "kong-validation-webhook",
+ admission, err = env.Cluster().Client().CoreV1().Services(kongNamespace).Get(ctx, validationWebhookName,
metav1.GetOptions{})
if err != nil {
return false
@@ -231,7 +234,7 @@ func TestWebhookUpdate(t *testing.T) {
}, time.Minute*2, time.Second)
t.Log("changing certificate")
- _, err = env.Cluster().Client().CoreV1().Secrets("kong").Update(ctx, secondCertificate, metav1.UpdateOptions{})
+ _, err = env.Cluster().Client().CoreV1().Secrets(kongNamespace).Update(ctx, secondCertificate, metav1.UpdateOptions{})
require.NoError(t, err)
t.Log("checking second certificate")
From 37ade6526b617d261eaa136014547a1b49249470 Mon Sep 17 00:00:00 2001
From: Travis Raines <571832+rainest@users.noreply.github.com>
Date: Tue, 22 Feb 2022 15:33:43 -0800
Subject: [PATCH 8/9] pr: move return upward
---
internal/admission/server.go | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/internal/admission/server.go b/internal/admission/server.go
index b2c333e6f4..85528b69f0 100644
--- a/internal/admission/server.go
+++ b/internal/admission/server.go
@@ -47,6 +47,14 @@ func (sc *ServerConfig) toTLSConfig(ctx context.Context, log logrus.FieldLogger)
// the caller provided certificates via the ENV (certwatcher can't be used here)
case sc.CertPath == "" && sc.KeyPath == "" && sc.Cert != "" && sc.Key != "":
cert, key = []byte(sc.Cert), []byte(sc.Key)
+ keyPair, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ return nil, fmt.Errorf("X509KeyPair error: %w", err)
+ }
+ return &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ Certificates: []tls.Certificate{keyPair},
+ }, nil
// the caller provided explicit file paths to the certs, enable certwatcher for these paths
case sc.CertPath != "" && sc.KeyPath != "" && sc.Cert == "" && sc.Key == "":
@@ -68,16 +76,6 @@ func (sc *ServerConfig) toTLSConfig(ctx context.Context, log logrus.FieldLogger)
return nil, fmt.Errorf("either cert/key files OR cert/key values must be provided, or none")
}
- if watcher == nil {
- keyPair, err := tls.X509KeyPair(cert, key)
- if err != nil {
- return nil, fmt.Errorf("X509KeyPair error: %w", err)
- }
- return &tls.Config{
- MinVersion: tls.VersionTLS12,
- Certificates: []tls.Certificate{keyPair},
- }, nil
- }
go func() {
if err := watcher.Start(ctx); err != nil {
log.WithError(err).Error("certificate watcher error")
From d1bb0bf875cdff648c07fcfc1ec778bc2dc05e3f Mon Sep 17 00:00:00 2001
From: Travis Raines <571832+rainest@users.noreply.github.com>
Date: Tue, 22 Feb 2022 15:38:04 -0800
Subject: [PATCH 9/9] pr: oops, that's a slice
---
test/e2e/features_test.go | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/test/e2e/features_test.go b/test/e2e/features_test.go
index 81e69dd3da..c414f92dcb 100644
--- a/test/e2e/features_test.go
+++ b/test/e2e/features_test.go
@@ -54,8 +54,11 @@ nodes:
`
validationWebhookName = "kong-validation-webhook"
kongNamespace = "kong"
+)
+
+var (
// openssl req -new -x509 -nodes -newkey ec:<(openssl ecparam -name secp384r1) -keyout cert.key -out cert.crt -days 3650 -subj '/CN=first.example/'
// openssl req -new -x509 -nodes -newkey ec:<(openssl ecparam -name secp384r1) -keyout cert.key -out cert.crt -days 3650 -subj '/CN=second.example/'
tlsPairs = []TLSPair{
{
Cert: `-----BEGIN CERTIFICATE-----