NETOBSERV-1343: generate dashboards from metrics API
- Add dashboard config to metrics API
- Use this API internally for predefined dashboards
- Allow using SingleStats
- New dedicated buckets for latency histograms
jotak committed Apr 3, 2024
1 parent 1888d8a commit 7d96d0f
Showing 13 changed files with 715 additions and 543 deletions.
37 changes: 37 additions & 0 deletions apis/flowmetrics/v1alpha1/flowmetric_types.go
@@ -109,6 +109,43 @@ type FlowMetricSpec struct {
// A list of buckets to use when `type` is "Histogram". The list must be parseable as floats. Prometheus default buckets will be used if unset.
// +optional
Buckets []string `json:"buckets,omitempty"`

// When non-zero, scale factor (divider) of the value. Metric value = Flow value / Divider.
// +optional
Divider float64 `json:"divider"`

// Charts configuration
// +optional
Charts []Chart `json:"charts,omitempty"`
}

type Unit string
type ChartType string

const (
UnitBytes Unit = "bytes"
UnitSeconds Unit = "seconds"
UnitBPS Unit = "Bps"
UnitPPS Unit = "pps"
ChartTypeSingleStat ChartType = "SingleStat"
ChartTypeLine ChartType = "Line"
ChartTypeStackArea ChartType = "StackArea"
)

// Configures charts / dashboard generation associated with a metric
type Chart struct {
DashboardName string `json:"dashboardName"`
SectionName string `json:"sectionName"`
Title string `json:"title"`
Unit Unit `json:"unit"`
Type ChartType `json:"type"`
Queries []Query `json:"queries"`
}

// Configures PromQL queries
type Query struct {
PromQL string `json:"promQL"`
Legend string `json:"legend"`
}

// FlowMetricStatus defines the observed state of FlowMetric
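For illustration, here is a minimal sketch of a FlowMetric spec using the new Divider and Charts fields, built only from the types added above. The metric name, divider value, PromQL expressions, label, and dashboard/section titles are hypothetical, not taken from this commit; only the field names, units, and chart types come from the API.

package main

import (
	"fmt"

	metricsv1alpha1 "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1"
)

// Hypothetical example: a metric whose flow value is divided by 8 (bits to bytes),
// charted as a stacked-area rate panel plus a single-stat total.
var exampleSpec = metricsv1alpha1.FlowMetricSpec{
	MetricName: "cluster_egress_bytes_total", // hypothetical metric name
	Divider:    8,                            // metric value = flow value / 8 in this example
	Charts: []metricsv1alpha1.Chart{
		{
			DashboardName: "NetObserv / Main", // hypothetical dashboard
			SectionName:   "Egress",           // hypothetical section
			Title:         "Egress traffic rate",
			Unit:          metricsv1alpha1.UnitBPS,
			Type:          metricsv1alpha1.ChartTypeStackArea,
			Queries: []metricsv1alpha1.Query{{
				PromQL: "sum(rate(netobserv_cluster_egress_bytes_total[2m])) by (namespace)", // hypothetical query and label
				Legend: "{{namespace}}",
			}},
		},
		{
			DashboardName: "NetObserv / Main",
			SectionName:   "Egress",
			Title:         "Total egress (24h)",
			Unit:          metricsv1alpha1.UnitBytes,
			Type:          metricsv1alpha1.ChartTypeSingleStat,
			Queries: []metricsv1alpha1.Query{{
				PromQL: "sum(increase(netobserv_cluster_egress_bytes_total[24h]))", // hypothetical query
			}},
		},
	},
}

func main() {
	fmt.Println(exampleSpec.MetricName)
}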
35 changes: 14 additions & 21 deletions controllers/flp/flp_pipeline_builder.go
@@ -165,23 +165,23 @@ func (b *PipelineBuilder) AddProcessorStages() error {
}

// obtain encode_prometheus stage from metrics_definitions
- names := metrics.GetIncludeList(b.desired)
- promMetrics := metrics.GetDefinitions(names)
+ allMetrics := metrics.MergePredefined(b.flowMetrics.Items, b.desired)

- for i := range b.flowMetrics.Items {
- fm := &b.flowMetrics.Items[i]
+ var flpMetrics []api.MetricsItem
+ for i := range allMetrics {
+ fm := &allMetrics[i]
m, err := flowMetricToFLP(&fm.Spec)
if err != nil {
return fmt.Errorf("error reading FlowMetric definition '%s': %w", fm.Name, err)
}
- promMetrics = append(promMetrics, *m)
+ flpMetrics = append(flpMetrics, *m)
}

- if len(promMetrics) > 0 {
+ if len(flpMetrics) > 0 {
// prometheus stage (encode) configuration
promEncode := api.PromEncode{
Prefix: "netobserv_",
- Metrics: promMetrics,
+ Metrics: flpMetrics,
}
enrichedStage.EncodePrometheus("prometheus", promEncode)
}
@@ -192,23 +192,16 @@ func (b *PipelineBuilder) AddProcessorStages() error {

func flowMetricToFLP(flowMetric *metricslatest.FlowMetricSpec) (*api.MetricsItem, error) {
m := &api.MetricsItem{
- Name:     flowMetric.MetricName,
- Type:     api.MetricEncodeOperationEnum(strings.ToLower(string(flowMetric.Type))),
- Filters:  []api.MetricsFilter{},
- Labels:   flowMetric.Labels,
- ValueKey: flowMetric.ValueField,
+ Name:       flowMetric.MetricName,
+ Type:       api.MetricEncodeOperationEnum(strings.ToLower(string(flowMetric.Type))),
+ Filters:    []api.MetricsFilter{},
+ Labels:     flowMetric.Labels,
+ ValueKey:   flowMetric.ValueField,
+ ValueScale: flowMetric.Divider,
}
- for _, f := range flowMetric.Filters {
+ for _, f := range metrics.GetFilters(flowMetric) {
m.Filters = append(m.Filters, api.MetricsFilter{Key: f.Field, Value: f.Value, Type: api.MetricFilterEnum(conversion.PascalToLower(string(f.MatchType), '_'))})
}
- if !flowMetric.IncludeDuplicates {
- m.Filters = append(m.Filters, api.MetricsFilter{Key: "Duplicate", Value: "true", Type: api.MetricFilterNotEqual})
- }
- if flowMetric.Direction == metricslatest.Egress {
- m.Filters = append(m.Filters, api.MetricsFilter{Key: "FlowDirection", Value: "1|2", Type: api.MetricFilterRegex})
- } else if flowMetric.Direction == metricslatest.Ingress {
- m.Filters = append(m.Filters, api.MetricsFilter{Key: "FlowDirection", Value: "0|2", Type: api.MetricFilterRegex})
- }
for _, b := range flowMetric.Buckets {
f, err := strconv.ParseFloat(b, 64)
if err != nil {
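The duplicate and flow-direction handling deleted above does not disappear: the rewritten loop delegates to metrics.GetFilters. That helper is not part of the excerpt shown here, so the following is only a hedged sketch of where the removed branches plausibly end up; the real function's name, signature, and return type may differ, and the flowlogs-pipeline api import path is assumed. It reuses the FLP api types and constants that appear in the removed lines.

package flp

import (
	"github.com/netobserv/flowlogs-pipeline/pkg/api"
	metricslatest "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1"
)

// defaultFilters is a hypothetical stand-in for the factored-out logic: keep only
// non-duplicate flows unless duplicates are requested, and restrict the flow
// direction when one is set on the FlowMetric.
func defaultFilters(fm *metricslatest.FlowMetricSpec) []api.MetricsFilter {
	var filters []api.MetricsFilter
	if !fm.IncludeDuplicates {
		filters = append(filters, api.MetricsFilter{Key: "Duplicate", Value: "true", Type: api.MetricFilterNotEqual})
	}
	switch fm.Direction {
	case metricslatest.Egress:
		filters = append(filters, api.MetricsFilter{Key: "FlowDirection", Value: "1|2", Type: api.MetricFilterRegex})
	case metricslatest.Ingress:
		filters = append(filters, api.MetricsFilter{Key: "FlowDirection", Value: "0|2", Type: api.MetricFilterRegex})
	}
	return filters
}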
30 changes: 26 additions & 4 deletions controllers/monitoring/monitoring_controller.go
@@ -9,9 +9,13 @@ import (
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2"
metricslatest "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1"
"github.com/netobserv/network-observability-operator/controllers/constants"
"github.com/netobserv/network-observability-operator/controllers/reconcilers"
"github.com/netobserv/network-observability-operator/pkg/helper"
"github.com/netobserv/network-observability-operator/pkg/manager"
@@ -21,8 +25,9 @@

type Reconciler struct {
client.Client
- mgr    *manager.Manager
- status status.Instance
+ mgr              *manager.Manager
+ status           status.Instance
+ currentNamespace string
}

func Start(ctx context.Context, mgr *manager.Manager) error {
@@ -37,6 +42,15 @@ func Start(ctx context.Context, mgr *manager.Manager) error {
For(&flowslatest.FlowCollector{}, reconcilers.IgnoreStatusChange).
Named("monitoring").
Owns(&corev1.Namespace{}).
Watches(
&metricslatest.FlowMetric{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request {
if o.GetNamespace() == r.currentNamespace {
return []reconcile.Request{{NamespacedName: constants.FlowCollectorName}}
}
return []reconcile.Request{}
}),
).
Complete(&r)
}

@@ -74,6 +88,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result

func (r *Reconciler) reconcile(ctx context.Context, clh *helper.Client, desired *flowslatest.FlowCollector) error {
ns := helper.GetNamespace(&desired.Spec)
r.currentNamespace = ns

// If namespace does not exist, we create it
nsExist, err := r.namespaceExist(ctx, ns)
Expand Down Expand Up @@ -104,8 +119,14 @@ func (r *Reconciler) reconcile(ctx context.Context, clh *helper.Client, desired
}

if r.mgr.HasSvcMonitor() {
- names := metrics.GetIncludeList(&desired.Spec)
- desiredFlowDashboardCM, del, err := buildFlowMetricsDashboard(ns, names)
+ // List custom metrics
+ fm := metricslatest.FlowMetricList{}
+ if err := r.Client.List(ctx, &fm, &client.ListOptions{Namespace: ns}); err != nil {
+ return r.status.Error("CantListFlowMetrics", err)
+ }
+
+ allMetrics := metrics.MergePredefined(fm.Items, &desired.Spec)
+ desiredFlowDashboardCM, del, err := buildFlowMetricsDashboard(allMetrics)
if err != nil {
return err
} else if err = reconcilers.ReconcileConfigMap(ctx, clh, desiredFlowDashboardCM, del); err != nil {
@@ -119,6 +140,7 @@
return err
}
}

return nil
}

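Both the FLP pipeline builder and this monitoring reconciler now funnel metrics through metrics.MergePredefined, which replaces the former GetIncludeList/GetDefinitions pair. Its implementation is outside the excerpt shown here; the sketch below is only a hedged reading of what it plausibly does (the predefinedAsFlowMetrics helper is hypothetical): convert the operator's built-in metric definitions enabled by the FlowCollector spec into FlowMetric form, then append the user-defined FlowMetrics unchanged.

package metrics

import (
	flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2"
	metricslatest "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1"
)

// mergePredefined is a hedged sketch, not the actual implementation.
func mergePredefined(custom []metricslatest.FlowMetric, spec *flowslatest.FlowCollectorSpec) []metricslatest.FlowMetric {
	merged := predefinedAsFlowMetrics(spec) // hypothetical: built-in definitions enabled by the spec, as FlowMetric objects
	return append(merged, custom...)        // user-defined FlowMetrics are appended as-is
}

// predefinedAsFlowMetrics is a hypothetical stand-in for however the predefined
// definitions (previously returned by metrics.GetDefinitions) are exposed as FlowMetrics.
func predefinedAsFlowMetrics(spec *flowslatest.FlowCollectorSpec) []metricslatest.FlowMetric {
	_ = spec // selection logic not shown in this commit excerpt
	return nil
}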
8 changes: 3 additions & 5 deletions controllers/monitoring/monitoring_objects.go
@@ -1,6 +1,7 @@
package monitoring

import (
metricslatest "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1"
"github.com/netobserv/network-observability-operator/controllers/constants"
"github.com/netobserv/network-observability-operator/pkg/dashboards"
corev1 "k8s.io/api/core/v1"
@@ -74,11 +75,8 @@ func buildRoleBindingMonitoringReader(ns string) *rbacv1.ClusterRoleBinding {
}
}

- func buildFlowMetricsDashboard(namespace string, metrics []string) (*corev1.ConfigMap, bool, error) {
- dashboard, err := dashboards.CreateFlowMetricsDashboard(namespace, metrics)
- if err != nil {
- return nil, false, err
- }
+ func buildFlowMetricsDashboard(metrics []metricslatest.FlowMetric) (*corev1.ConfigMap, bool, error) {
+ dashboard := dashboards.CreateFlowMetricsDashboards(metrics)

configMap := corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
(Diff for the remaining 9 changed files not shown.)
