diff --git a/CHANGELOG.md b/CHANGELOG.md index d55292f61ef..bf9563db3ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ Here is an overview of all **stable** additions: - **General**: Introduce new ArangoDB Scaler ([#4000](https://github.com/kedacore/keda/issues/4000)) - **Prometheus Metrics**: Introduce scaler activity in Prometheus metrics ([#4114](https://github.com/kedacore/keda/issues/4114)) - **Prometheus Metrics**: Introduce scaler latency in Prometheus metrics ([#4037](https://github.com/kedacore/keda/issues/4037)) +- **Prometheus Scaler**: Extend Prometheus Scaler to support Azure managed service for Prometheus ([#4153](https://github.com/kedacore/keda/issues/4153)) Here is an overview of all new **experimental** features: diff --git a/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper.go b/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper.go new file mode 100644 index 00000000000..b3bd400d798 --- /dev/null +++ b/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper.go @@ -0,0 +1,82 @@ +package azure + +import ( + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + az "github.com/Azure/go-autorest/autorest/azure" + + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + "github.com/kedacore/keda/v2/pkg/util" +) + +var azureManagedPrometheusResourceURLInCloud = map[string]string{ + "AZUREPUBLICCLOUD": "https://prometheus.monitor.azure.com/.default", + "AZUREUSGOVERNMENTCLOUD": "https://prometheus.monitor.azure.us/.default", + "AZURECHINACLOUD": "https://prometheus.monitor.azure.cn/.default", +} + +type azureManagedPrometheusHTTPRoundTripper struct { + chainedCredential *azidentity.ChainedTokenCredential + next http.RoundTripper + resourceURL string +} + +// Tries to get a round tripper. +// If the pod identity represents azure auth, it creates a round tripper and returns that. 
Returns error if fails to create one. +// If its not azure auth, then this becomes a no-op. Neither returns round tripper nor error. +func TryAndGetAzureManagedPrometheusHTTPRoundTripper(podIdentity kedav1alpha1.AuthPodIdentity, triggerMetadata map[string]string) (http.RoundTripper, error) { + switch podIdentity.Provider { + case kedav1alpha1.PodIdentityProviderAzureWorkload, kedav1alpha1.PodIdentityProviderAzure: + + if triggerMetadata == nil { + return nil, fmt.Errorf("trigger metadata cannot be nil") + } + + chainedCred, err := NewChainedCredential(podIdentity.IdentityID, podIdentity.Provider) + if err != nil { + return nil, err + } + + azureManagedPrometheusResourceURLProvider := func(env az.Environment) (string, error) { + if resource, ok := azureManagedPrometheusResourceURLInCloud[strings.ToUpper(env.Name)]; ok { + return resource, nil + } + + return "", fmt.Errorf("azure managed prometheus is not available in cloud %s", env.Name) + } + + resourceURLBasedOnCloud, err := ParseEnvironmentProperty(triggerMetadata, "azureManagedPrometheusResourceURL", azureManagedPrometheusResourceURLProvider) + if err != nil { + return nil, err + } + + transport := util.CreateHTTPTransport(false) + rt := &azureManagedPrometheusHTTPRoundTripper{ + next: transport, + chainedCredential: chainedCred, + resourceURL: resourceURLBasedOnCloud, + } + return rt, nil + } + + // Not azure managed prometheus. Don't create a round tripper and don't return error. 
+ return nil, nil +} + +// Sets Authorization header for requests +func (rt *azureManagedPrometheusHTTPRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + token, err := rt.chainedCredential.GetToken(req.Context(), policy.TokenRequestOptions{Scopes: []string{rt.resourceURL}}) + + if err != nil { + return nil, err + } + + bearerAccessToken := "Bearer " + token.Token + req.Header.Set("Authorization", bearerAccessToken) + + return rt.next.RoundTrip(req) +} diff --git a/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper_test.go b/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper_test.go new file mode 100644 index 00000000000..951deed17b4 --- /dev/null +++ b/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper_test.go @@ -0,0 +1,94 @@ +package azure + +import ( + "testing" + + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" +) + +type testTryAndGetAzureManagedPrometheusHTTPRoundTripperData struct { + testName string + podIdentityProvider kedav1alpha1.PodIdentityProvider + isError bool +} + +type testAzureManagedPrometheusResourceURLData struct { + testName string + podIdentityProvider kedav1alpha1.PodIdentityProvider + metadata map[string]string + resourceURL string + isError bool +} + +var testTryAndGetAzureManagedPrometheusHTTPRoundTripperTestData = []testTryAndGetAzureManagedPrometheusHTTPRoundTripperData{ + {"test azure WI trigger metadata absent", kedav1alpha1.PodIdentityProviderAzureWorkload, true}, + {"test azure pod identity trigger metadata absent", kedav1alpha1.PodIdentityProviderAzure, true}, + {"test not azure identity", kedav1alpha1.PodIdentityProviderNone, false}, +} + +var testAzureManagedPrometheusResourceURLTestData = []testAzureManagedPrometheusResourceURLData{ + // workload identity + + // with default cloud + {"test default azure cloud with WI", kedav1alpha1.PodIdentityProviderAzureWorkload, map[string]string{"serverAddress": "http://dummy-azure-monitor-workspace", "metricName": 
"http_requests_total", "threshold": "100", "query": "up"}, "https://prometheus.monitor.azure.com/.default", false}, + // with public cloud + {"test azure public cloud with WI", kedav1alpha1.PodIdentityProviderAzureWorkload, map[string]string{"serverAddress": "http://dummy-azure-monitor-workspace", "metricName": "http_requests_total", "threshold": "100", "query": "up", "cloud": "AZUREPUBLICCLOUD"}, "https://prometheus.monitor.azure.com/.default", false}, + // with china cloud + {"test azure china cloud with WI", kedav1alpha1.PodIdentityProviderAzureWorkload, map[string]string{"serverAddress": "http://dummy-azure-monitor-workspace", "metricName": "http_requests_total", "threshold": "100", "query": "up", "cloud": "AZURECHINACLOUD"}, "https://prometheus.monitor.azure.cn/.default", false}, + // with US GOV cloud + {"test azure US GOV cloud with WI", kedav1alpha1.PodIdentityProviderAzureWorkload, map[string]string{"serverAddress": "http://dummy-azure-monitor-workspace", "metricName": "http_requests_total", "threshold": "100", "query": "up", "cloud": "AZUREUSGOVERNMENTCLOUD"}, "https://prometheus.monitor.azure.us/.default", false}, + // with private cloud success + {"test azure private cloud with WI", kedav1alpha1.PodIdentityProviderAzureWorkload, map[string]string{"serverAddress": "http://dummy-azure-monitor-workspace", "metricName": "http_requests_total", "threshold": "100", "query": "up", "cloud": "PRIVATE", "azureManagedPrometheusResourceURL": "blah-blah-resourceURL"}, "blah-blah-resourceURL", false}, + // with private cloud failure + {"test default azure cloud with WI", kedav1alpha1.PodIdentityProviderAzureWorkload, map[string]string{"serverAddress": "http://dummy-azure-monitor-workspace", "metricName": "http_requests_total", "threshold": "100", "query": "up", "cloud": "PRIVATE"}, "", true}, + + // pod identity + + // with default cloud + {"test default azure cloud with WI", kedav1alpha1.PodIdentityProviderAzure, map[string]string{"serverAddress": 
"http://dummy-azure-monitor-workspace", "metricName": "http_requests_total", "threshold": "100", "query": "up"}, "https://prometheus.monitor.azure.com/.default", false}, + // with public cloud + {"test azure public cloud with WI", kedav1alpha1.PodIdentityProviderAzure, map[string]string{"serverAddress": "http://dummy-azure-monitor-workspace", "metricName": "http_requests_total", "threshold": "100", "query": "up", "cloud": "AZUREPUBLICCLOUD"}, "https://prometheus.monitor.azure.com/.default", false}, + // with china cloud + {"test azure china cloud with WI", kedav1alpha1.PodIdentityProviderAzure, map[string]string{"serverAddress": "http://dummy-azure-monitor-workspace", "metricName": "http_requests_total", "threshold": "100", "query": "up", "cloud": "AZURECHINACLOUD"}, "https://prometheus.monitor.azure.cn/.default", false}, + // with US GOV cloud + {"test azure US GOV cloud with WI", kedav1alpha1.PodIdentityProviderAzure, map[string]string{"serverAddress": "http://dummy-azure-monitor-workspace", "metricName": "http_requests_total", "threshold": "100", "query": "up", "cloud": "AZUREUSGOVERNMENTCLOUD"}, "https://prometheus.monitor.azure.us/.default", false}, + // with private cloud success + {"test azure private cloud with WI", kedav1alpha1.PodIdentityProviderAzure, map[string]string{"serverAddress": "http://dummy-azure-monitor-workspace", "metricName": "http_requests_total", "threshold": "100", "query": "up", "cloud": "PRIVATE", "azureManagedPrometheusResourceURL": "blah-blah-resourceURL"}, "blah-blah-resourceURL", false}, + // with private cloud failure + {"test default azure cloud with WI", kedav1alpha1.PodIdentityProviderAzure, map[string]string{"serverAddress": "http://dummy-azure-monitor-workspace", "metricName": "http_requests_total", "threshold": "100", "query": "up", "cloud": "PRIVATE"}, "", true}, +} + +func TestTryAndGetAzureManagedPrometheusHTTPRoundTripperForTriggerMetadataAbsent(t *testing.T) { + for _, testData := range 
testTryAndGetAzureManagedPrometheusHTTPRoundTripperTestData { + _, err := TryAndGetAzureManagedPrometheusHTTPRoundTripper(kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentityProvider}, nil) + if testData.isError { + if err == nil { + t.Errorf("Test: %v; Expected error but got success. testData: %v", testData.testName, testData) + } + } else if err != nil { + t.Errorf("Test: %v; Expected success but got error: %v", testData.testName, err) + } + } +} + +func TestTryAndGetAzureManagedPrometheusHTTPRoundTripperWithTriggerForResourceURL(t *testing.T) { + for _, testData := range testAzureManagedPrometheusResourceURLTestData { + rt, err := TryAndGetAzureManagedPrometheusHTTPRoundTripper(kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentityProvider}, testData.metadata) + if testData.isError { + if err == nil { + t.Errorf("Test: %v; Expected error but got success. testData: %v", testData.testName, testData) + } + } else { + if err != nil { + t.Errorf("Test: %v; Expected success but got error: %v", testData.testName, err) + } else { + azureRT := rt.(*azureManagedPrometheusHTTPRoundTripper) + if azureRT == nil { + t.Errorf("Test: %v; Expected azure round tripper but got nil", testData.testName) + } else if azureRT.resourceURL != testData.resourceURL { + t.Errorf("Test: %v; Expected resourceURL %v but got %v", testData.testName, testData.resourceURL, azureRT.resourceURL) + } + } + } + } +} diff --git a/pkg/scalers/prometheus_scaler.go b/pkg/scalers/prometheus_scaler.go index f027851d2d8..5c10b6bcde7 100644 --- a/pkg/scalers/prometheus_scaler.go +++ b/pkg/scalers/prometheus_scaler.go @@ -16,6 +16,7 @@ import ( "k8s.io/metrics/pkg/apis/external_metrics" "github.com/kedacore/keda/v2/pkg/scalers/authentication" + "github.com/kedacore/keda/v2/pkg/scalers/azure" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -92,17 +93,33 @@ func NewPrometheusScaler(config *ScalerConfig) (Scaler, error) { httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, 
meta.unsafeSsl) - if meta.prometheusAuth != nil && (meta.prometheusAuth.CA != "" || meta.prometheusAuth.EnableTLS) { - // create http.RoundTripper with auth settings from ScalerConfig - transport, err := authentication.CreateHTTPRoundTripper( - authentication.NetHTTP, - meta.prometheusAuth, - ) + if meta.prometheusAuth != nil { + if meta.prometheusAuth.CA != "" || meta.prometheusAuth.EnableTLS { + // create http.RoundTripper with auth settings from ScalerConfig + transport, err := authentication.CreateHTTPRoundTripper( + authentication.NetHTTP, + meta.prometheusAuth, + ) + if err != nil { + logger.V(1).Error(err, "init Prometheus client http transport") + return nil, err + } + httpClient.Transport = transport + } + } else { + // could be the case of azure managed prometheus. Try and get the roundtripper. + // If its not the case of azure managed prometheus, we will get both transport and err as nil and proceed assuming no auth. + transport, err := azure.TryAndGetAzureManagedPrometheusHTTPRoundTripper(config.PodIdentity, config.TriggerMetadata) + if err != nil { - logger.V(1).Error(err, "init Prometheus client http transport") + logger.V(1).Error(err, "error while init Azure Managed Prometheus client http transport") return nil, err } - httpClient.Transport = transport + + // transport should not be nil if its a case of azure managed prometheus + if transport != nil { + httpClient.Transport = transport + } } return &prometheusScaler{ @@ -195,14 +212,27 @@ func parsePrometheusMetadata(config *ScalerConfig) (meta *prometheusMetadata, er meta.scalerIndex = config.ScalerIndex + err = parseAuthConfig(config, meta) + if err != nil { + return nil, err + } + + return meta, nil +} + +func parseAuthConfig(config *ScalerConfig, meta *prometheusMetadata) error { // parse auth configs from ScalerConfig auth, err := authentication.GetAuthConfigs(config.TriggerMetadata, config.AuthParams) if err != nil { - return nil, err + return err + } + + if auth != nil && 
config.PodIdentity.Provider != "" { + return fmt.Errorf("pod identity cannot be enabled with other auth types") } meta.prometheusAuth = auth - return meta, nil + return nil } func (s *prometheusScaler) Close(context.Context) error { diff --git a/pkg/scalers/prometheus_scaler_test.go b/pkg/scalers/prometheus_scaler_test.go index a0aef8f3a6d..109ab6bf501 100644 --- a/pkg/scalers/prometheus_scaler_test.go +++ b/pkg/scalers/prometheus_scaler_test.go @@ -9,6 +9,8 @@ import ( "github.com/go-logr/logr" "github.com/stretchr/testify/assert" + + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" ) type parsePrometheusMetadataTestData struct { @@ -60,38 +62,47 @@ var prometheusMetricIdentifiers = []prometheusMetricIdentifier{ } type prometheusAuthMetadataTestData struct { - metadata map[string]string - authParams map[string]string - isError bool + metadata map[string]string + authParams map[string]string + podIdentityProvider kedav1alpha1.PodIdentityProvider + isError bool } var testPrometheusAuthMetadata = []prometheusAuthMetadataTestData{ // success TLS - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls"}, map[string]string{"ca": "caaa", "cert": "ceert", "key": "keey"}, false}, + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls"}, map[string]string{"ca": "caaa", "cert": "ceert", "key": "keey"}, "", false}, // TLS, ca is optional - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls"}, map[string]string{"cert": "ceert", "key": "keey"}, false}, + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls"}, map[string]string{"cert": "ceert", "key": "keey"}, "", false}, // fail 
TLS, key not given - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls"}, map[string]string{"ca": "caaa", "cert": "ceert"}, true}, + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls"}, map[string]string{"ca": "caaa", "cert": "ceert"}, "", true}, // fail TLS, cert not given - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls"}, map[string]string{"ca": "caaa", "key": "keey"}, true}, + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls"}, map[string]string{"ca": "caaa", "key": "keey"}, "", true}, // success bearer default - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "bearer"}, map[string]string{"bearerToken": "tooooken"}, false}, + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "bearer"}, map[string]string{"bearerToken": "tooooken"}, "", false}, // fail bearerAuth with no token - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "bearer"}, map[string]string{}, true}, + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "bearer"}, map[string]string{}, "", true}, // success basicAuth - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "basic"}, map[string]string{"username": "user", "password": 
"pass"}, false}, + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "basic"}, map[string]string{"username": "user", "password": "pass"}, "", false}, // fail basicAuth with no username - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "basic"}, map[string]string{}, true}, + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "basic"}, map[string]string{}, "", true}, - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls, basic"}, map[string]string{"ca": "caaa", "cert": "ceert", "key": "keey", "username": "user", "password": "pass"}, false}, + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls, basic"}, map[string]string{"ca": "caaa", "cert": "ceert", "key": "keey", "username": "user", "password": "pass"}, "", false}, - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls,basic"}, map[string]string{"username": "user", "password": "pass"}, true}, + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls,basic"}, map[string]string{"username": "user", "password": "pass"}, "", true}, // success custom auth - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "custom"}, map[string]string{"customAuthHeader": "header", "customAuthValue": "value"}, false}, + {map[string]string{"serverAddress": 
"http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "custom"}, map[string]string{"customAuthHeader": "header", "customAuthValue": "value"}, "", false}, // fail custom auth with no customAuthHeader - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "custom"}, map[string]string{"customAuthHeader": ""}, true}, + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "custom"}, map[string]string{"customAuthHeader": ""}, "", true}, // fail custom auth with no customAuthValue - {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "custom"}, map[string]string{"customAuthValue": ""}, true}, + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "custom"}, map[string]string{"customAuthValue": ""}, "", true}, + + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "tls,basic"}, map[string]string{"username": "user", "password": "pass"}, "", true}, + // pod identity and other auth modes enabled together + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "authModes": "basic"}, map[string]string{"username": "user", "password": "pass"}, "azure-workload", true}, + // azure workload identity + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up"}, nil, "azure-workload", false}, + // azure pod identity + {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", 
"threshold": "100", "query": "up"}, nil, "azure", false}, } func TestPrometheusParseMetadata(t *testing.T) { @@ -127,7 +138,7 @@ func TestPrometheusGetMetricSpecForScaling(t *testing.T) { func TestPrometheusScalerAuthParams(t *testing.T) { for _, testData := range testPrometheusAuthMetadata { - meta, err := parsePrometheusMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) + meta, err := parsePrometheusMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams, PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentityProvider}}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) @@ -137,11 +148,13 @@ func TestPrometheusScalerAuthParams(t *testing.T) { } if err == nil { - if (meta.prometheusAuth.EnableBearerAuth && !strings.Contains(testData.metadata["authModes"], "bearer")) || - (meta.prometheusAuth.EnableBasicAuth && !strings.Contains(testData.metadata["authModes"], "basic")) || - (meta.prometheusAuth.EnableTLS && !strings.Contains(testData.metadata["authModes"], "tls")) || - (meta.prometheusAuth.EnableCustomAuth && !strings.Contains(testData.metadata["authModes"], "custom")) { - t.Error("wrong auth mode detected") + if meta.prometheusAuth != nil { + if (meta.prometheusAuth.EnableBearerAuth && !strings.Contains(testData.metadata["authModes"], "bearer")) || + (meta.prometheusAuth.EnableBasicAuth && !strings.Contains(testData.metadata["authModes"], "basic")) || + (meta.prometheusAuth.EnableTLS && !strings.Contains(testData.metadata["authModes"], "tls")) || + (meta.prometheusAuth.EnableCustomAuth && !strings.Contains(testData.metadata["authModes"], "custom")) { + t.Error("wrong auth mode detected") + } } } } diff --git a/tests/.env b/tests/.env index 95bdbe5b268..1b9e5e0e054 100644 --- a/tests/.env +++ b/tests/.env @@ -16,6 +16,7 @@ AZURE_DEVOPS_PROJECT= TF_AZURE_EVENTHBUS_MANAGEMENT_CONNECTION_STRING= TF_AZURE_KEYVAULT_URI= 
TF_AZURE_LOG_ANALYTICS_WORKSPACE_ID= +TF_AZURE_MANAGED_PROMETHEUS_QUERY_ENDPOINT= TF_AZURE_RESOURCE_GROUP= TF_AZURE_SERVICE_BUS_CONNECTION_STRING= TF_AZURE_SP_APP_ID= diff --git a/tests/scalers/azure/azure_managed_prometheus/azure_managed_prometheus_aad_pod_identity/azure_managed_prometheus_aad_pod_identity_test.go b/tests/scalers/azure/azure_managed_prometheus/azure_managed_prometheus_aad_pod_identity/azure_managed_prometheus_aad_pod_identity_test.go new file mode 100644 index 00000000000..35672792db4 --- /dev/null +++ b/tests/scalers/azure/azure_managed_prometheus/azure_managed_prometheus_aad_pod_identity/azure_managed_prometheus_aad_pod_identity_test.go @@ -0,0 +1,53 @@ +//go:build e2e +// +build e2e + +package azure_managed_prometheus_aad_pod_identity_test + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/joho/godotenv" + + . "github.com/kedacore/keda/v2/tests/scalers/azure/azure_managed_prometheus/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testNamePodIdentity = "amp-pi-test" +) + +// Pod Identity test vars +var ( + randomNumberPod = rand.Int() + testNamespacePod = fmt.Sprintf("%s-ns-%d", testNamePodIdentity, randomNumberPod) + deploymentNamePod = fmt.Sprintf("%s-deployment-%d", testNamePodIdentity, randomNumberPod) + monitoredAppNamePod = fmt.Sprintf("%s-monitored-app-%d", testNamePodIdentity, randomNumberPod) + publishDeploymentNamePod = fmt.Sprintf("%s-publish-%d", testNamePodIdentity, randomNumberPod) + scaledObjectNamePod = fmt.Sprintf("%s-so-%d", testNamePodIdentity, randomNumberPod) + podIdentityProvider = "azure" +) + +// TestAzureManagedPrometheusScalerWithPodIdentity creates deployments - there are two deployments - both using the same image but one deployment +// is directly tied to the KEDA HPA while the other is isolated that can be used for metrics +// even when the KEDA deployment is at zero - the service points to both deployments +func 
TestAzureManagedPrometheusScalerWithPodIdentity(t *testing.T) { + TestAzureManagedPrometheusScaler(t, getTemplateDataForPodIdentityTest()) +} + +func getTemplateDataForPodIdentityTest() TemplateData { + return TemplateData{ + TestNamespace: testNamespacePod, + DeploymentName: deploymentNamePod, + PublishDeploymentName: publishDeploymentNamePod, + ScaledObjectName: scaledObjectNamePod, + MonitoredAppName: monitoredAppNamePod, + PodIdentityProvider: podIdentityProvider, + PrometheusQueryEndpoint: PrometheusQueryEndpoint, + MinReplicaCount: MinReplicaCount, + MaxReplicaCount: MaxReplicaCount, + } +} diff --git a/tests/scalers/azure/azure_managed_prometheus/azure_managed_prometheus_aad_workload_identity/azure_managed_prometheus_aad_workload_identity_test.go b/tests/scalers/azure/azure_managed_prometheus/azure_managed_prometheus_aad_workload_identity/azure_managed_prometheus_aad_workload_identity_test.go new file mode 100644 index 00000000000..81a20732208 --- /dev/null +++ b/tests/scalers/azure/azure_managed_prometheus/azure_managed_prometheus_aad_workload_identity/azure_managed_prometheus_aad_workload_identity_test.go @@ -0,0 +1,53 @@ +//go:build e2e +// +build e2e + +package azure_managed_prometheus_aad_workload_identity_test + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/joho/godotenv" + + . 
"github.com/kedacore/keda/v2/tests/scalers/azure/azure_managed_prometheus/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testNameWorkloadIdentity = "amp-wi-test" +) + +// Workload Identity test vars +var ( + randomNumberWI = rand.Int() + testNamespaceWI = fmt.Sprintf("%s-ns-%d", testNameWorkloadIdentity, randomNumberWI) + deploymentNameWI = fmt.Sprintf("%s-deployment-%d", testNameWorkloadIdentity, randomNumberWI) + monitoredAppNameWI = fmt.Sprintf("%s-monitored-app-%d", testNameWorkloadIdentity, randomNumberWI) + publishDeploymentNameWI = fmt.Sprintf("%s-publish-%d", testNameWorkloadIdentity, randomNumberWI) + scaledObjectNameWI = fmt.Sprintf("%s-so-%d", testNameWorkloadIdentity, randomNumberWI) + workloadIdentityProvider = "azure-workload" +) + +// TestAzureManagedPrometheusScalerWithWorkloadIdentity creates deployments - there are two deployments - both using the same image but one deployment +// is directly tied to the KEDA HPA while the other is isolated that can be used for metrics +// even when the KEDA deployment is at zero - the service points to both deployments +func TestAzureManagedPrometheusScalerWithWorkloadIdentity(t *testing.T) { + TestAzureManagedPrometheusScaler(t, getTemplateDataForWorkloadIdentityTest()) +} + +func getTemplateDataForWorkloadIdentityTest() TemplateData { + return TemplateData{ + TestNamespace: testNamespaceWI, + DeploymentName: deploymentNameWI, + PublishDeploymentName: publishDeploymentNameWI, + ScaledObjectName: scaledObjectNameWI, + MonitoredAppName: monitoredAppNameWI, + PodIdentityProvider: workloadIdentityProvider, + PrometheusQueryEndpoint: PrometheusQueryEndpoint, + MinReplicaCount: MinReplicaCount, + MaxReplicaCount: MaxReplicaCount, + } +} diff --git a/tests/scalers/azure/azure_managed_prometheus/helper/helper.go b/tests/scalers/azure/azure_managed_prometheus/helper/helper.go new file mode 100644 index 00000000000..ec0672f970d --- /dev/null +++ 
b/tests/scalers/azure/azure_managed_prometheus/helper/helper.go @@ -0,0 +1,269 @@ +//go:build e2e +// +build e2e + +package helper + +import ( + "os" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/kubernetes" + + "github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +// Common for pod and workload identity tests +var ( + PrometheusQueryEndpoint = os.Getenv("TF_AZURE_MANAGED_PROMETHEUS_QUERY_ENDPOINT") + MinReplicaCount = 0 + MaxReplicaCount = 2 +) + +type TemplateData struct { + TestNamespace string + DeploymentName string + MonitoredAppName string + PublishDeploymentName string + ScaledObjectName string + PodIdentityProvider string + PrometheusQueryEndpoint string + MinReplicaCount int + MaxReplicaCount int +} + +const ( + deploymentTemplate = `apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: test-app + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} +spec: + replicas: 0 + selector: + matchLabels: + app: test-app + template: + metadata: + labels: + app: test-app + type: keda-testing + spec: + containers: + - name: prom-test-app + image: ghcr.io/kedacore/tests-prometheus:latest + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault +--- +` + + monitoredAppDeploymentTemplate = `apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{.MonitoredAppName}} + name: {{.MonitoredAppName}} + namespace: {{.TestNamespace}} +spec: + replicas: 1 + selector: + matchLabels: + app: {{.MonitoredAppName}} + template: + metadata: + labels: + app: {{.MonitoredAppName}} + type: {{.MonitoredAppName}} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + containers: + - name: prom-test-app + image: 
ghcr.io/kedacore/tests-prometheus:latest + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault +--- +` + + monitoredAppServiceTemplate = `apiVersion: v1 +kind: Service +metadata: + labels: + app: {{.MonitoredAppName}} + name: {{.MonitoredAppName}} + namespace: {{.TestNamespace}} + annotations: + prometheus.io/scrape: "true" +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8080 + selector: + type: {{.MonitoredAppName}} +` + + azureManagedPrometheusAuthTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: azure-managed-prometheus-trigger-auth + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: {{.PodIdentityProvider}} +` + + scaledObjectTemplate = `apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + pollingInterval: 3 + cooldownPeriod: 1 + triggers: + - type: prometheus + metadata: + serverAddress: {{.PrometheusQueryEndpoint}} + metricName: http_requests_total + threshold: '20' + activationThreshold: '20' + query: sum(rate(http_requests_total{app="{{.MonitoredAppName}}"}[2m])) + authenticationRef: + name: azure-managed-prometheus-trigger-auth +` + + generateLowLevelLoadJobTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: generate-low-level-requests-job + namespace: {{.TestNamespace}} +spec: + template: + spec: + containers: + - image: quay.io/zroubalik/hey + name: test + command: ["/bin/sh"] + args: ["-c", "for i in $(seq 1 60);do echo $i;/hey -c 5 -n 30 http://{{.MonitoredAppName}}.{{.TestNamespace}}.svc;sleep 1;done"] + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + capabilities: + drop: + - ALL + seccompProfile: + type: 
RuntimeDefault + restartPolicy: Never + activeDeadlineSeconds: 100 + backoffLimit: 2 + ` + + generateLoadJobTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: generate-requests-job + namespace: {{.TestNamespace}} +spec: + template: + spec: + containers: + - image: quay.io/zroubalik/hey + name: test + command: ["/bin/sh"] + args: ["-c", "for i in $(seq 1 60);do echo $i;/hey -c 5 -n 80 http://{{.MonitoredAppName}}.{{.TestNamespace}}.svc;sleep 1;done"] + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + activeDeadlineSeconds: 100 + backoffLimit: 2 +` +) + +func TestAzureManagedPrometheusScaler(t *testing.T, data TemplateData) { + require.NotEmpty(t, PrometheusQueryEndpoint, "TF_AZURE_MANAGED_PROMETHEUS_QUERY_ENDPOINT env variable is required for azure managed prometheus tests") + + kc := helper.GetKubernetesClient(t) + + // Create kubernetes resources for testing + helper.CreateNamespace(t, kc, data.TestNamespace) + + templates := getTemplates() + helper.KubectlApplyMultipleWithTemplate(t, data, templates) + assert.True(t, helper.WaitForDeploymentReplicaReadyCount(t, kc, data.MonitoredAppName, data.TestNamespace, 1, 60, 3), + "replica count should be %d after 3 minutes", MinReplicaCount) + assert.True(t, helper.WaitForDeploymentReplicaReadyCount(t, kc, data.DeploymentName, data.TestNamespace, MinReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", MinReplicaCount) + + testActivation(t, kc, data) + testScaleOut(t, kc, data) + testScaleIn(t, kc, data) + + // cleanup + helper.KubectlDeleteMultipleWithTemplate(t, data, templates) + helper.DeleteNamespace(t, kc, data.TestNamespace) +} + +func testActivation(t *testing.T, kc *kubernetes.Clientset, data TemplateData) { + t.Log("--- testing activation ---") + helper.KubectlApplyWithTemplate(t, data, "generateLowLevelLoadJobTemplate", generateLowLevelLoadJobTemplate) + + 
helper.AssertReplicaCountNotChangeDuringTimePeriod(t, kc, data.DeploymentName, data.TestNamespace, MinReplicaCount, 60) +} + +func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data TemplateData) { + t.Log("--- testing scale out ---") + helper.KubectlApplyWithTemplate(t, data, "generateLoadJobTemplate", generateLoadJobTemplate) + + assert.True(t, helper.WaitForDeploymentReplicaReadyCount(t, kc, data.DeploymentName, data.TestNamespace, MaxReplicaCount, 144, 5), + "replica count should be %d after 12 minutes", MaxReplicaCount) +} + +func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data TemplateData) { + t.Log("--- testing scale in ---") + assert.True(t, helper.WaitForDeploymentReplicaReadyCount(t, kc, data.DeploymentName, data.TestNamespace, MinReplicaCount, 144, 5), + "replica count should be %d after 12 minutes", MinReplicaCount) +} + +func getTemplates() []helper.Template { + return []helper.Template{ + {Name: "monitoredAppDeploymentTemplate", Config: monitoredAppDeploymentTemplate}, + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "monitoredAppServiceTemplate", Config: monitoredAppServiceTemplate}, + {Name: "azureManagedPrometheusAuthTemplate", Config: azureManagedPrometheusAuthTemplate}, + {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, + } +} diff --git a/tests/utils/cleanup_test.go b/tests/utils/cleanup_test.go index 219dc2159c9..14766cb886e 100644 --- a/tests/utils/cleanup_test.go +++ b/tests/utils/cleanup_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" . 
"github.com/kedacore/keda/v2/tests/helper" + "github.com/kedacore/keda/v2/tests/utils/helper" ) func TestRemoveKEDA(t *testing.T) { @@ -84,3 +85,7 @@ func TestRemoveCertManager(t *testing.T) { DeleteNamespace(t, KubeClient, CertManagerNamespace) } + +func TestRemoveAzureManagedPrometheusComponents(t *testing.T) { + KubectlDeleteWithTemplate(t, helper.EmptyTemplateData{}, "azureManagedPrometheusConfigMapTemplate", helper.AzureManagedPrometheusConfigMapTemplate) +} diff --git a/tests/utils/helper/helper.go b/tests/utils/helper/helper.go new file mode 100644 index 00000000000..29f4498927f --- /dev/null +++ b/tests/utils/helper/helper.go @@ -0,0 +1,49 @@ +package helper + +type EmptyTemplateData struct{} + +const ( + AzureManagedPrometheusConfigMapTemplate = `apiVersion: v1 +kind: ConfigMap +metadata: + name: ama-metrics-prometheus-config + namespace: kube-system +data: + prometheus-config: |- + global: + evaluation_interval: 1m + scrape_interval: 1m + scrape_timeout: 10s + scrape_configs: + - job_name: kubernetes-pods + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: true + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_scrape + - action: replace + regex: (.+) + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_path + target_label: __metrics_path__ + - action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + source_labels: + - __address__ + - __meta_kubernetes_pod_annotation_prometheus_io_port + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: kubernetes_namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: kubernetes_pod_name +` +) diff --git a/tests/utils/setup_test.go b/tests/utils/setup_test.go index 1d4da933756..8dd0ac42749 100644 --- a/tests/utils/setup_test.go +++ b/tests/utils/setup_test.go @@ -15,6 +15,7 @@ import ( 
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	. "github.com/kedacore/keda/v2/tests/helper"
+	"github.com/kedacore/keda/v2/tests/utils/helper"
)

func TestVerifyCommands(t *testing.T) {
@@ -55,6 +56,12 @@ func TestSetupHelm(t *testing.T) {
	require.NoErrorf(t, err, "cannot get helm version - %s", err)
}

+// Runs early in the test sequence so that the config map update has time to take effect before the azure tests are executed.
+func TestSetupAzureManagedPrometheusComponents(t *testing.T) {
+	// this will install the config map in the kube-system namespace, as needed by the azure managed prometheus collector agent
+	KubectlApplyWithTemplate(t, helper.EmptyTemplateData{}, "azureManagedPrometheusConfigMapTemplate", helper.AzureManagedPrometheusConfigMapTemplate)
+}
+
func TestSetupWorkloadIdentityComponents(t *testing.T) {
	if AzureRunWorkloadIdentityTests == "" || AzureRunWorkloadIdentityTests == StringFalse {
		t.Skip("skipping as workload identity tests are disabled")