
Merge pull request #541 from kinvolk/surajssd/grafana-add-google-oauth
grafana: Add secret_env variable
surajssd authored Jun 18, 2020
2 parents 0cbadba + 3208834 commit b87dcd5
Showing 7 changed files with 134 additions and 9 deletions.
6 changes: 6 additions & 0 deletions ci/aks/aks-cluster.lokocfg.envsubst
@@ -82,6 +82,12 @@ component "prometheus-operator" {
      "k8s-app" = "kube-dns",
    }
  }

  grafana {
    secret_env = {
      "LOKOMOTIVE_VERY_SECRET_PASSWORD" = "VERY_VERY_SECRET"
    }
  }
}

component "contour" {
8 changes: 7 additions & 1 deletion ci/aws/aws-cluster.lokocfg.envsubst
@@ -79,7 +79,13 @@ component "openebs-storage-class" {
  }
}

component "prometheus-operator" {}
component "prometheus-operator" {
  grafana {
    secret_env = {
      "LOKOMOTIVE_VERY_SECRET_PASSWORD" = "VERY_VERY_SECRET"
    }
  }
}

component "contour" {
  ingress_hosts = ["dex.$CLUSTER_ID.$AWS_DNS_ZONE", "gangway.$CLUSTER_ID.$AWS_DNS_ZONE"]
8 changes: 7 additions & 1 deletion ci/packet/packet-cluster.lokocfg.envsubst
@@ -72,7 +72,13 @@ component "openebs-storage-class" {
  }
}

component "prometheus-operator" {}
component "prometheus-operator" {
  grafana {
    secret_env = {
      "LOKOMOTIVE_VERY_SECRET_PASSWORD" = "VERY_VERY_SECRET"
    }
  }
}

component "contour" {
  ingress_hosts = ["dex.$CLUSTER_ID.$AWS_DNS_ZONE", "gangway.$CLUSTER_ID.$AWS_DNS_ZONE"]
4 changes: 4 additions & 0 deletions
@@ -33,6 +33,9 @@ component "prometheus-operator" {
  grafana {
    admin_password = "foobar"
    secret_env = { # This might contain sensitive information; declare a variable and define it in `lokocfg.vars`.
      "KEY" = "VERY_SECRET"
    }
    ingress {
      host = "grafana.mydomain.net"
      class = "contour"
@@ -116,6 +119,7 @@ Example:
|-------- |--------------|:-------:|:--------:|
| `namespace` | Namespace to deploy the Prometheus Operator. | `monitoring` | false |
| `grafana.admin_password` | Password for `admin` user in Grafana. If not provided it is auto generated and stored in secret `prometheus-operator-grafana`. | - | false |
| `grafana.secret_env` | Sensitive environment variables passed to Grafana pod and stored as secret. Read more on manipulating `grafana.ini` using env var [here](https://grafana.com/docs/grafana/latest/installation/configuration/#configure-with-environment-variables). | - | false |
| `grafana.ingress.host` | Ingress URL host to expose Grafana over the internet. **NOTE:** When running on Packet, a DNS entry pointing at the ingress controller needs to be created. | - | true |
| `grafana.ingress.class` | Ingress class to use for Grafana ingress. | `contour` | false |
| `grafana.ingress.certmanager_cluster_issuer` | `ClusterIssuer` to be used by cert-manager while issuing TLS certificates. Supported values: `letsencrypt-production`, `letsencrypt-staging`. | `letsencrypt-production` | false |
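Keys in `secret_env` reach the Grafana container as ordinary environment variables, so `grafana.ini` settings can be overridden by following Grafana's documented `GF_<SectionName>_<KeyName>` naming convention (see the linked configuration docs). The sketch below is illustrative only and not part of this commit; `grafanaEnvVar` is a hypothetical helper showing how a section/key pair maps to the variable name you would put in `secret_env`.

package main

import (
	"fmt"
	"strings"
)

// grafanaEnvVar builds the environment variable name Grafana reads for a
// given grafana.ini section and key: dots in the section become underscores
// and everything is upper-cased, prefixed with GF_.
func grafanaEnvVar(section, key string) string {
	section = strings.ReplaceAll(section, ".", "_")
	return "GF_" + strings.ToUpper(section) + "_" + strings.ToUpper(key)
}

func main() {
	// [auth.google] client_secret -- the Google OAuth use case hinted at by
	// the PR branch name -- becomes GF_AUTH_GOOGLE_CLIENT_SECRET.
	fmt.Println(grafanaEnvVar("auth.google", "client_secret"))

	// [security] admin_password becomes GF_SECURITY_ADMIN_PASSWORD.
	fmt.Println(grafanaEnvVar("security", "admin_password"))
}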
5 changes: 3 additions & 2 deletions pkg/components/prometheus-operator/component.go
@@ -49,8 +49,9 @@ type CoreDNS struct {

// Grafana object collects sub component grafana related information.
type Grafana struct {
	AdminPassword string         `hcl:"admin_password,optional"`
	Ingress       *types.Ingress `hcl:"ingress,block"`
	AdminPassword string            `hcl:"admin_password,optional"`
	SecretEnv     map[string]string `hcl:"secret_env,optional"`
	Ingress       *types.Ingress    `hcl:"ingress,block"`
}

type component struct {
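For context on how the new field is populated: Lokomotive's `.lokocfg` files are HCL v2, so an attribute tagged `hcl:"secret_env,optional"` decodes straight into the `map[string]string`. Below is a minimal standalone sketch, assuming the upstream `hclparse` and `gohcl` packages; the `Grafana` and `config` structs here only mirror the fields above (the `Ingress` field and the surrounding `component` block are omitted) and are not the component's actual configuration path.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclparse"
)

// Grafana mirrors the two attribute fields added in component.go.
type Grafana struct {
	AdminPassword string            `hcl:"admin_password,optional"`
	SecretEnv     map[string]string `hcl:"secret_env,optional"`
}

// config wraps the grafana block for this sketch.
type config struct {
	Grafana *Grafana `hcl:"grafana,block"`
}

const src = `
grafana {
  admin_password = "foobar"
  secret_env = {
    "LOKOMOTIVE_VERY_SECRET_PASSWORD" = "VERY_VERY_SECRET"
  }
}
`

func main() {
	// Parse the HCL snippet and decode it into the Go struct via the tags.
	file, diags := hclparse.NewParser().ParseHCL([]byte(src), "example.lokocfg")
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	var c config
	if diags := gohcl.DecodeBody(file.Body, nil, &c); diags.HasErrors() {
		log.Fatal(diags)
	}

	// Prints VERY_VERY_SECRET.
	fmt.Println(c.Grafana.SecretEnv["LOKOMOTIVE_VERY_SECRET_PASSWORD"])
}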
6 changes: 6 additions & 0 deletions pkg/components/prometheus-operator/template.go
@@ -57,6 +57,12 @@ grafana:
  rbac:
    pspUseAppArmor: false
  adminPassword: {{.Grafana.AdminPassword}}
{{- if .Grafana.SecretEnv }}
  envRenderSecret:
{{ range $key, $value := .Grafana.SecretEnv }}
    {{ $key }}: {{ $value }}
{{ end }}
{{- end }}
{{ if .Grafana.Ingress }}
  ingress:
    enabled: true
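To see what the added `envRenderSecret` branch produces, here is a minimal standalone sketch that executes a comparable snippet with Go's `text/template`. The `Grafana` struct and the snippet are trimmed to just the fields involved and the indentation is illustrative, so this is not the component's actual values template.

package main

import (
	"os"
	"text/template"
)

// Grafana carries only the two fields the snippet needs; the real component
// struct also has an Ingress block.
type Grafana struct {
	AdminPassword string
	SecretEnv     map[string]string
}

// snippet mirrors the shape of the envRenderSecret stanza added above.
const snippet = `grafana:
  adminPassword: {{.Grafana.AdminPassword}}
{{- if .Grafana.SecretEnv }}
  envRenderSecret:
{{ range $key, $value := .Grafana.SecretEnv }}
    {{ $key }}: {{ $value }}
{{ end }}
{{- end }}
`

func main() {
	tmpl := template.Must(template.New("values").Parse(snippet))

	data := struct{ Grafana Grafana }{
		Grafana: Grafana{
			AdminPassword: "foobar",
			SecretEnv: map[string]string{
				"LOKOMOTIVE_VERY_SECRET_PASSWORD": "VERY_VERY_SECRET",
			},
		},
	}

	// Renders the Helm values YAML to stdout with the secret env vars placed
	// under grafana.envRenderSecret.
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}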
106 changes: 101 additions & 5 deletions test/components/prometheus-operator/prometheus_operator_test.go
@@ -18,27 +18,38 @@
package prometheusoperator

import (
	"bytes"
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/remotecommand"

	testutil "github.com/kinvolk/lokomotive/test/components/util"
)

const (
	retryInterval = time.Second * 5
	timeout       = time.Minute * 10
	retryInterval     = time.Second * 5
	timeout           = time.Minute * 10
	namespace         = "monitoring"
	grafanaDeployment = "prometheus-operator-grafana"
)

func TestPrometheusOperatorDeployment(t *testing.T) {
	namespace := "monitoring"

	client := testutil.CreateKubeClient(t)

	deployments := []string{
		"prometheus-operator-operator",
		"prometheus-operator-kube-state-metrics",
		"prometheus-operator-grafana",
		grafanaDeployment,
	}

	for _, deployment := range deployments {
@@ -67,3 +78,88 @@ func TestPrometheusOperatorDeployment(t *testing.T) {

	testutil.WaitForDaemonSet(t, client, namespace, "prometheus-operator-prometheus-node-exporter", retryInterval, timeout)
}

// nolint:funlen
func TestGrafanaLoadsEnvVars(t *testing.T) {
	kubeconfig := testutil.KubeconfigPath(t)

	t.Logf("using KUBECONFIG=%s", kubeconfig)

	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		t.Fatalf("failed building rest client: %v", err)
	}

	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		t.Fatalf("failed creating new clientset: %v", err)
	}

	// We will wait until the Grafana Pods are up and running so we don't have to reimplement wait logic again.
	testutil.WaitForDeployment(t, client, namespace, grafanaDeployment, retryInterval, timeout)

	// Get grafana deployment object so that we can get the corresponding pod.
	deploy, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), grafanaDeployment, metav1.GetOptions{})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			t.Fatalf("deployment %s not found", grafanaDeployment)
		}

		t.Fatalf("error looking up for deployment %s: %v", grafanaDeployment, err)
	}

	podList, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{
		LabelSelector: metav1.FormatLabelSelector(deploy.Spec.Selector),
	})
	if err != nil {
		t.Fatalf("could not list pods for the deployment %q: %v", grafanaDeployment, err)
	}

	if len(podList.Items) == 0 {
		t.Fatalf("grafana pods not found")
	}

	// Exec into the pod.
	pod := podList.Items[0]
	containerName := "grafana"
	searchEnvVar := "LOKOMOTIVE_VERY_SECRET_PASSWORD"

	req := client.CoreV1().RESTClient().Post().
		Resource("pods").
		Name(pod.Name).
		Namespace(namespace).
		SubResource("exec").Param("container", containerName)

	req.VersionedParams(&v1.PodExecOptions{
		Command:   []string{"env"},
		Stdin:     false,
		Stdout:    true,
		Stderr:    true,
		TTY:       true,
		Container: containerName,
	}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		t.Fatalf("could not exec: %v", err)
	}

	var stdout, stderr bytes.Buffer

	if err = exec.Stream(remotecommand.StreamOptions{
		Stdout: &stdout,
		Stderr: &stderr,
	}); err != nil {
		t.Fatalf("exec stream failed: %v", err)
	}

	containerErr := strings.TrimSpace(stderr.String())
	if len(containerErr) > 0 {
		t.Fatalf("error from container: %v", containerErr)
	}

	containerOutput := strings.TrimSpace(stdout.String())
	if !strings.Contains(containerOutput, searchEnvVar) {
		t.Fatalf("required env var %q not found in following env vars:\n\n%s\n", searchEnvVar, containerOutput)
	}
}
