# Default values for opentelemetry-collector.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
nameOverride: ""
fullnameOverride: ""
# Valid values are "daemonset", "deployment", and "statefulset".
mode: ""
# Specify which namespace should be used to deploy the resources into
namespaceOverride: ""
# Handles basic configuration of components that
# also require k8s modifications to work correctly.
# .Values.config can be used to modify/add to a preset
# component configuration, but CANNOT be used to remove
# preset configuration. If you require removal of any
# sections of a preset configuration, you cannot use
# the preset. Instead, configure the component manually in
# .Values.config and use the other fields supplied in the
# values.yaml to configure k8s as necessary.
presets:
  # Configures the collector to collect logs.
  # Adds the filelog receiver to the logs pipeline
  # and adds the necessary volumes and volume mounts.
  # Best used with mode = daemonset.
  # See https://opentelemetry.io/docs/kubernetes/collector/components/#filelog-receiver for details on the receiver.
  logsCollection:
    enabled: false
    includeCollectorLogs: false
    # Enabling this writes checkpoints to the /var/lib/otelcol/ host directory.
    # Note this changes the collector's user to root so that it can write to the host directory.
    storeCheckpoints: false
    # The maximum byte size of the recombined field.
    # Once the size exceeds the limit, all received entries of the source will be combined and flushed.
    maxRecombineLogSize: 102400
  # Configures the collector to collect host metrics.
  # Adds the hostmetrics receiver to the metrics pipeline
  # and adds the necessary volumes and volume mounts.
  # Best used with mode = daemonset.
  # See https://opentelemetry.io/docs/kubernetes/collector/components/#host-metrics-receiver for details on the receiver.
  hostMetrics:
    enabled: false
  # Configures the Kubernetes Attributes Processor to add Kubernetes metadata.
  # Adds the k8sattributes processor to all the pipelines
  # and adds the necessary rules to the ClusterRole.
  # Best used with mode = daemonset.
  # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor for details on the processor.
  kubernetesAttributes:
    enabled: false
    # When enabled, the processor will extract all labels of the associated pod and add them as resource attributes.
    # The label's exact name will be the key.
    extractAllPodLabels: false
    # When enabled, the processor will extract all annotations of the associated pod and add them as resource attributes.
    # The annotation's exact name will be the key.
    extractAllPodAnnotations: false
  # Configures the collector to collect node, pod, and container metrics from the API server on a kubelet.
  # Adds the kubeletstats receiver to the metrics pipeline
  # and adds the necessary rules to the ClusterRole.
  # Best used with mode = daemonset.
  # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubeletstats-receiver for details on the receiver.
  kubeletMetrics:
    enabled: false
  # Configures the collector to collect Kubernetes events.
  # Adds the k8sobjects receiver to the logs pipeline
  # and collects Kubernetes events by default.
  # Best used with mode = deployment or statefulset.
  # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-objects-receiver for details on the receiver.
  kubernetesEvents:
    enabled: false
  # Configures the Kubernetes Cluster Receiver to collect cluster-level metrics.
  # Adds the k8s_cluster receiver to the metrics pipeline
  # and adds the necessary rules to the ClusterRole.
  # Best used with mode = deployment or statefulset.
  # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-cluster-receiver for details on the receiver.
  clusterMetrics:
    enabled: false
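# A minimal, commented-out sketch of enabling some of the presets above, e.g. in a values
# override file passed to `helm install -f overrides.yaml`. The combination shown is
# illustrative, not a recommendation; remember that preset configuration can be extended
# via .Values.config but not removed.
# presets:
#   logsCollection:
#     enabled: true
#   kubeletMetrics:
#     enabled: true
#   kubernetesAttributes:
#     enabled: true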
configMap:
  # Specifies whether a configMap should be created (true by default)
  create: true
# Base collector configuration.
# Supports templating. To escape existing instances of {{ }}, use {{` <original content> `}}.
# For example, {{ <original content> }} becomes {{` {{ <original content> }} `}}.
config:
  exporters:
    debug: {}
    # Will be removed in a future release.
    # Use the debug exporter instead.
    logging: {}
  extensions:
    # The health_check extension is mandatory for this chart.
    # Without the health_check extension the collector will fail the readiness and liveness probes.
    # The health_check extension can be modified, but should never be removed.
    health_check: {}
    memory_ballast: {}
  processors:
    batch: {}
    # If set to null, will be overridden with values based on k8s resource limits
    memory_limiter: null
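    # A commented sketch of what an explicit memory_limiter override could look like if you
    # prefer not to rely on the chart's limit-derived defaults; the numbers are illustrative.
    # memory_limiter:
    #   check_interval: 5s
    #   limit_percentage: 80
    #   spike_limit_percentage: 25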
  receivers:
    jaeger:
      protocols:
        grpc:
          endpoint: ${env:MY_POD_IP}:14250
        thrift_http:
          endpoint: ${env:MY_POD_IP}:14268
        thrift_compact:
          endpoint: ${env:MY_POD_IP}:6831
    otlp:
      protocols:
        grpc:
          endpoint: ${env:MY_POD_IP}:4317
        http:
          endpoint: ${env:MY_POD_IP}:4318
    prometheus:
      config:
        scrape_configs:
          - job_name: opentelemetry-collector
            scrape_interval: 10s
            static_configs:
              - targets:
                  - ${env:MY_POD_IP}:8888
    zipkin:
      endpoint: ${env:MY_POD_IP}:9411
  service:
    telemetry:
      metrics:
        address: ${env:MY_POD_IP}:8888
    extensions:
      - health_check
      - memory_ballast
    pipelines:
      logs:
        exporters:
          - debug
        processors:
          - memory_limiter
          - batch
        receivers:
          - otlp
      metrics:
        exporters:
          - debug
        processors:
          - memory_limiter
          - batch
        receivers:
          - otlp
          - prometheus
      traces:
        exporters:
          - debug
        processors:
          - memory_limiter
          - batch
        receivers:
          - otlp
          - jaeger
          - zipkin
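# A commented example of extending the base config above from a values override file.
# Helm merges override maps into .Values.config, so adding an exporter and referencing it
# in a pipeline is usually enough; note that list values (such as a pipeline's exporters)
# are replaced rather than appended, so restate any defaults you want to keep.
# The otlphttp endpoint below is a placeholder.
# config:
#   exporters:
#     otlphttp:
#       endpoint: https://collector.example.com:4318
#   service:
#     pipelines:
#       traces:
#         exporters:
#           - otlphttp
#           - debug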
image:
  # If you want to use the core image `otel/opentelemetry-collector`, you also need to change the `command.name` value to `otelcol`.
  repository: otel/opentelemetry-collector-contrib
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""
  # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value).
  digest: ""
imagePullSecrets: []
# OpenTelemetry Collector executable
command:
  name: otelcol-contrib
  extraArgs: []
serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
clusterRole:
  # Specifies whether a clusterRole should be created.
  # Some presets also trigger the creation of a cluster role and cluster role binding.
  # If using one of those presets, this field is a no-op.
  create: false
  # Annotations to add to the clusterRole
  # Can be used in combination with presets that create a cluster role.
  annotations: {}
  # The name of the clusterRole to use.
  # If not set, a name is generated using the fullname template.
  # Can be used in combination with presets that create a cluster role.
  name: ""
  # A set of rules as documented here: https://kubernetes.io/docs/reference/access-authn-authz/rbac/
  # Can be used in combination with presets that create a cluster role to add additional rules.
  rules: []
  # - apiGroups:
  #     - ''
  #   resources:
  #     - 'pods'
  #     - 'nodes'
  #   verbs:
  #     - 'get'
  #     - 'list'
  #     - 'watch'
  clusterRoleBinding:
    # Annotations to add to the clusterRoleBinding
    # Can be used in combination with presets that create a cluster role binding.
    annotations: {}
    # The name of the clusterRoleBinding to use.
    # If not set, a name is generated using the fullname template.
    # Can be used in combination with presets that create a cluster role binding.
    name: ""
podSecurityContext: {}
securityContext: {}
nodeSelector: {}
tolerations: []
affinity: {}
topologySpreadConstraints: []
# Allows for pod scheduler prioritisation
priorityClassName: ""
extraEnvs: []
extraEnvsFrom: []
extraVolumes: []
extraVolumeMounts: []
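# A commented example of the extra* hooks above; all names and values are illustrative.
# extraEnvs:
#   - name: MY_API_KEY
#     valueFrom:
#       secretKeyRef:
#         name: my-secret
#         key: api-key
# extraVolumes:
#   - name: extra-config
#     configMap:
#       name: my-extra-config
# extraVolumeMounts:
#   - name: extra-config
#     mountPath: /etc/extra-config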
# Configuration for ports
# nodePort is also allowed
ports:
  otlp:
    enabled: true
    containerPort: 4317
    servicePort: 4317
    hostPort: 4317
    protocol: TCP
    # nodePort: 30317
    appProtocol: grpc
  otlp-http:
    enabled: true
    containerPort: 4318
    servicePort: 4318
    hostPort: 4318
    protocol: TCP
  jaeger-compact:
    enabled: true
    containerPort: 6831
    servicePort: 6831
    hostPort: 6831
    protocol: UDP
  jaeger-thrift:
    enabled: true
    containerPort: 14268
    servicePort: 14268
    hostPort: 14268
    protocol: TCP
  jaeger-grpc:
    enabled: true
    containerPort: 14250
    servicePort: 14250
    hostPort: 14250
    protocol: TCP
  zipkin:
    enabled: true
    containerPort: 9411
    servicePort: 9411
    hostPort: 9411
    protocol: TCP
  metrics:
    # The metrics port is disabled by default. However, you need to enable the port
    # in order to use the ServiceMonitor (serviceMonitor.enabled) or PodMonitor (podMonitor.enabled).
    enabled: false
    containerPort: 8888
    servicePort: 8888
    protocol: TCP
# Resource limits & requests. Update according to your own use case as these values might be too low for a typical deployment.
resources: {}
# resources:
#   limits:
#     cpu: 250m
#     memory: 512Mi
podAnnotations: {}
podLabels: {}
# Host networking requested for this pod. Use the host's network namespace.
hostNetwork: false
# Pod DNS policy: ClusterFirst, ClusterFirstWithHostNet, Default, or None
dnsPolicy: ""
# Custom DNS config. Required when DNS policy is None.
dnsConfig: {}
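# A commented example for the case described above (dnsPolicy set to "None"); the
# nameserver and search domain are placeholders.
# dnsPolicy: "None"
# dnsConfig:
#   nameservers:
#     - 10.0.0.10
#   searches:
#     - my-namespace.svc.cluster.local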
# only used with deployment mode
replicaCount: 1
# only used with deployment mode
revisionHistoryLimit: 10
annotations: {}
# List of extra sidecars to add
extraContainers: []
# extraContainers:
#   - name: test
#     command:
#       - cp
#     args:
#       - /bin/sleep
#       - /test/sleep
#     image: busybox:latest
#     volumeMounts:
#       - name: test
#         mountPath: /test
# List of init container specs, e.g. for copying a binary to be executed as a lifecycle hook.
# Another use of init containers is initializing filesystem permissions for the OpenTelemetry Collector
# user `10001`, e.g. when you are using persistence and the volume produces a "permission denied" error
# for the collector container.
initContainers: []
# initContainers:
#   - name: test
#     image: busybox:latest
#     command:
#       - cp
#     args:
#       - /bin/sleep
#       - /test/sleep
#     volumeMounts:
#       - name: test
#         mountPath: /test
#   - name: init-fs
#     image: busybox:latest
#     command:
#       - sh
#       - '-c'
#       - 'chown -R 10001: /var/lib/storage/otc' # use the path given as per `extensions.file_storage.directory` & `extraVolumeMounts[x].mountPath`
#     volumeMounts:
#       - name: opentelemetry-collector-data # use the name of the volume used for persistence
#         mountPath: /var/lib/storage/otc # use the path given as per `extensions.file_storage.directory` & `extraVolumeMounts[x].mountPath`
# Pod lifecycle policies.
lifecycleHooks: {}
# lifecycleHooks:
#   preStop:
#     exec:
#       command:
#         - /test/sleep
#         - "5"
# liveness probe configuration
# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
##
livenessProbe:
  # Number of seconds after the container has started before startup, liveness or readiness probes are initiated.
  # initialDelaySeconds: 1
  # How often (in seconds) to perform the probe.
  # periodSeconds: 10
  # Number of seconds after which the probe times out.
  # timeoutSeconds: 1
  # Minimum consecutive failures for the probe to be considered failed after having succeeded.
  # failureThreshold: 1
  # Duration in seconds the pod needs to terminate gracefully upon probe failure.
  # terminationGracePeriodSeconds: 10
  httpGet:
    port: 13133
    path: /
# readiness probe configuration
# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
##
readinessProbe:
  # Number of seconds after the container has started before startup, liveness or readiness probes are initiated.
  # initialDelaySeconds: 1
  # How often (in seconds) to perform the probe.
  # periodSeconds: 10
  # Number of seconds after which the probe times out.
  # timeoutSeconds: 1
  # Minimum consecutive successes for the probe to be considered successful after having failed.
  # successThreshold: 1
  # Minimum consecutive failures for the probe to be considered failed after having succeeded.
  # failureThreshold: 1
  httpGet:
    port: 13133
    path: /
service:
  # Enable the creation of a Service.
  # By default, it is enabled on mode != daemonset.
  # However, to enable it on mode = daemonset, its creation must be explicitly enabled:
  # enabled: true
  type: ClusterIP
  # type: LoadBalancer
  # loadBalancerIP: 1.2.3.4
  # loadBalancerSourceRanges: []

  # By default, a Service of type 'LoadBalancer' will be created with 'externalTrafficPolicy: Cluster'
  # unless another value is explicitly set.
  # Possible values are Cluster or Local (https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip)
  # externalTrafficPolicy: Cluster
  annotations: {}

  # By default, the Service will be created with 'internalTrafficPolicy: Local' on mode = daemonset
  # unless another value is explicitly set.
  # Setting 'internalTrafficPolicy: Cluster' on a daemonset is not recommended.
  # internalTrafficPolicy: Cluster
ingress:
  enabled: false
  # annotations: {}
  # ingressClassName: nginx
  # hosts:
  #   - host: collector.example.com
  #     paths:
  #       - path: /
  #         pathType: Prefix
  #         port: 4318
  # tls:
  #   - secretName: collector-tls
  #     hosts:
  #       - collector.example.com

  # Additional ingresses - only created if ingress.enabled is true
  # Useful for when differently annotated ingress services are required
  # Each additional ingress needs key "name" set to something unique
  additionalIngresses: []
  # - name: cloudwatch
  #   ingressClassName: nginx
  #   annotations: {}
  #   hosts:
  #     - host: collector.example.com
  #       paths:
  #         - path: /
  #           pathType: Prefix
  #           port: 4318
  #   tls:
  #     - secretName: collector-tls
  #       hosts:
  #         - collector.example.com
podMonitor:
  # The pod monitor by default scrapes the metrics port.
  # The metrics port needs to be enabled as well.
  enabled: false
  metricsEndpoints:
    - port: metrics
      # interval: 15s
  # additional labels for the PodMonitor
  extraLabels: {}
  #   release: kube-prometheus-stack
serviceMonitor:
  # The service monitor by default scrapes the metrics port.
  # The metrics port needs to be enabled as well.
  enabled: false
  metricsEndpoints:
    - port: metrics
      # interval: 15s
  # additional labels for the ServiceMonitor
  extraLabels: {}
  #   release: kube-prometheus-stack
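# A commented sketch tying the settings above together: the PodMonitor/ServiceMonitor only
# work when the collector's metrics port is also enabled, e.g. in a values override file.
# The `release` label is an assumption about how your Prometheus Operator selects monitors.
# ports:
#   metrics:
#     enabled: true
# serviceMonitor:
#   enabled: true
#   extraLabels:
#     release: kube-prometheus-stack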
# PodDisruptionBudget is used only if deployment is enabled
podDisruptionBudget:
  enabled: false
  # minAvailable: 2
  # maxUnavailable: 1
# autoscaling is used only if deployment is enabled
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 10
  behavior: {}
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80
rollout:
  rollingUpdate: {}
  # When 'mode: daemonset', maxSurge cannot be used when hostPort is set for any of the ports
  # maxSurge: 25%
  # maxUnavailable: 0
  strategy: RollingUpdate
prometheusRule:
  enabled: false
  groups: []
  # Create default rules for monitoring the collector
  defaultRules:
    enabled: false
  # additional labels for the PrometheusRule
  extraLabels: {}
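  # A commented sketch of a custom rule group for the `groups` field above; the alert name,
  # expression, and duration are illustrative.
  # groups:
  #   - name: opentelemetry-collector
  #     rules:
  #       - alert: OtelCollectorDown
  #         expr: up{job="opentelemetry-collector"} == 0
  #         for: 5m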
statefulset:
  # volumeClaimTemplates for a statefulset
  volumeClaimTemplates: []
  podManagementPolicy: "Parallel"
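  # A commented sketch of a volume claim template for the `volumeClaimTemplates` field above,
  # e.g. for persisting file_storage data; the volume name, access mode, and size follow the
  # initContainers example earlier in this file and are illustrative.
  # volumeClaimTemplates:
  #   - metadata:
  #       name: opentelemetry-collector-data
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: 1Gi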
networkPolicy:
  enabled: false
  # Annotations to add to the NetworkPolicy
  annotations: {}
  # Configure the 'from' clause of the NetworkPolicy.
  # By default this will restrict traffic to ports enabled for the Collector. If
  # you wish to further restrict traffic to other hosts or specific namespaces,
  # see the standard NetworkPolicy 'spec.ingress.from' definition for more info:
  # https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/
  allowIngressFrom: []
  # # Allow traffic from any pod in any namespace, but not external hosts
  # - namespaceSelector: {}
  # # Allow external access from a specific cidr block
  # - ipBlock:
  #     cidr: 192.168.1.64/32
  # # Allow access from pods in specific namespaces
  # - namespaceSelector:
  #     matchExpressions:
  #       - key: kubernetes.io/metadata.name
  #         operator: In
  #         values:
  #           - "cats"
  #           - "dogs"

  # Add additional ingress rules to specific ports
  # Useful to allow external hosts/services to access specific ports
  # An example is allowing an external prometheus server to scrape metrics
  #
  # See the standard NetworkPolicy 'spec.ingress' definition for more info:
  # https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/
  extraIngressRules: []
  # - ports:
  #     - port: metrics
  #       protocol: TCP
  #   from:
  #     - ipBlock:
  #         cidr: 192.168.1.64/32

  # Restrict egress traffic from the OpenTelemetry collector pod
  # See the standard NetworkPolicy 'spec.egress' definition for more info:
  # https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/
  egressRules: []
  # - to:
  #     - namespaceSelector: {}
  #     - ipBlock:
  #         cidr: 192.168.10.10/24
  #   ports:
  #     - port: 1234
  #       protocol: TCP
# When enabled, the chart will set the GOMEMLIMIT env var to 80% of the configured
# resources.limits.memory and remove the memory ballast extension.
# If no resources.limits.memory is defined, enabling this does nothing.
# In a future release this setting will be enabled by default.
# See https://github.com/open-telemetry/opentelemetry-helm-charts/issues/891
# for more details.
useGOMEMLIMIT: false
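# A commented sketch of the override described above: useGOMEMLIMIT only has an effect when a
# memory limit is set, so the two are shown together; the limit value is illustrative.
# resources:
#   limits:
#     memory: 512Mi
# useGOMEMLIMIT: true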