#################################################################################################################
# Create the rook operator and necessary security context constraints for running
# Rook in an OpenShift cluster.
# For example, to create the rook-ceph cluster:
# oc create -f crds.yaml -f common.yaml -f operator-openshift.yaml
# oc create -f cluster.yaml
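#
# To verify the operator is running afterwards, you can use, for example:
#   oc -n rook-ceph get pod -l app=rook-ceph-operator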
#################################################################################################################
# scc for the Rook and Ceph daemons
kind: SecurityContextConstraints
apiVersion: security.openshift.io/v1
metadata:
  name: rook-ceph
allowPrivilegedContainer: true
allowHostDirVolumePlugin: true
allowHostPID: false
# set to true if running rook with host networking enabled
allowHostNetwork: false
# set to true if running rook with the provider as host
allowHostPorts: false
priority:
allowedCapabilities: ["MKNOD"]
allowHostIPC: true
readOnlyRootFilesystem: false
# drop all default privileges
requiredDropCapabilities: ["All"]
defaultAddCapabilities: []
runAsUser:
  type: RunAsAny
seLinuxContext:
  type: MustRunAs
fsGroup:
  type: MustRunAs
supplementalGroups:
  type: RunAsAny
volumes:
  - configMap
  - downwardAPI
  - emptyDir
  - hostPath
  - persistentVolumeClaim
  - projected
  - secret
users:
  # A user needs to be added for each rook service account.
  # This assumes running in the default sample "rook-ceph" namespace.
  # If other namespaces or service accounts are configured, they need to be updated here.
  - system:serviceaccount:rook-ceph:rook-ceph-system # serviceaccount:namespace:operator
  - system:serviceaccount:rook-ceph:rook-ceph-default # serviceaccount:namespace:cluster
  - system:serviceaccount:rook-ceph:rook-ceph-mgr # serviceaccount:namespace:cluster
  - system:serviceaccount:rook-ceph:rook-ceph-osd # serviceaccount:namespace:cluster
  - system:serviceaccount:rook-ceph:rook-ceph-rgw # serviceaccount:namespace:cluster
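  # For example, if the cluster were deployed in a hypothetical "my-cluster" namespace,
  # each entry above would change accordingly:
  # - system:serviceaccount:my-cluster:rook-ceph-osd # serviceaccount:namespace:cluster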
---
# scc for the CSI driver
kind: SecurityContextConstraints
apiVersion: security.openshift.io/v1
metadata:
  name: rook-ceph-csi
# To allow running privileged containers
allowPrivilegedContainer: true
# The CSI daemonset pods need host networking
allowHostNetwork: true
# This needs to be set to true as we use HostPath volumes
allowHostDirVolumePlugin: true
priority:
# SYS_ADMIN is needed for rbd to execute the rbd map command
allowedCapabilities: ["SYS_ADMIN"]
# Needed as we run the liveness container on daemonset pods
allowHostPorts: true
# Needed as we set this in the RBD plugin pod
allowHostPID: true
# Required for encryption
allowHostIPC: true
# Set to false as we write to the root filesystem inside csi containers
readOnlyRootFilesystem: false
runAsUser:
  type: RunAsAny
seLinuxContext:
  type: RunAsAny
fsGroup:
  type: RunAsAny
supplementalGroups:
  type: RunAsAny
# The types of volumes that may be mounted by csi pods
volumes:
  - configMap
  - projected
  - emptyDir
  - hostPath
users:
  # A user needs to be added for each rook service account.
  # This assumes running in the default sample "rook-ceph" namespace.
  # If other namespaces or service accounts are configured, they need to be updated here.
  - system:serviceaccount:rook-ceph:rook-csi-rbd-plugin-sa # serviceaccount:namespace:operator
  - system:serviceaccount:rook-ceph:rook-csi-rbd-provisioner-sa # serviceaccount:namespace:operator
  - system:serviceaccount:rook-ceph:rook-csi-cephfs-plugin-sa # serviceaccount:namespace:operator
  - system:serviceaccount:rook-ceph:rook-csi-cephfs-provisioner-sa # serviceaccount:namespace:operator
  - system:serviceaccount:rook-ceph:rook-csi-nfs-plugin-sa # serviceaccount:namespace:operator
  - system:serviceaccount:rook-ceph:rook-csi-nfs-provisioner-sa # serviceaccount:namespace:operator
---
# Rook Ceph Operator Config
# Use this ConfigMap to override operator configurations.
# If an environment variable is also set for the same setting, this config takes precedence.
kind: ConfigMap
apiVersion: v1
metadata:
  name: rook-ceph-operator-config
  # should be in the namespace of the operator
  namespace: rook-ceph # namespace:operator
data:
  # The logging level for the operator: ERROR | WARNING | INFO | DEBUG
  ROOK_LOG_LEVEL: "INFO"
  # Allow using loop devices for osds in test clusters.
  ROOK_CEPH_ALLOW_LOOP_DEVICES: "false"
  # Enable the CSI CephFS driver.
  # To run a non-default version of the CSI driver, see the override-able image properties below.
  ROOK_CSI_ENABLE_CEPHFS: "true"
  # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see the image properties below.
  ROOK_CSI_ENABLE_RBD: "true"
  # Enable the CSI NFS driver. To start another version of the CSI driver, see the image properties below.
  ROOK_CSI_ENABLE_NFS: "false"
  # Disable the CSI driver.
  ROOK_CSI_DISABLE_DRIVER: "false"
  # Set to true to enable Ceph CSI pvc encryption support.
  CSI_ENABLE_ENCRYPTION: "false"
  # Set to true to enable adding volume metadata on the CephFS subvolumes and RBD images.
  # CSI_ENABLE_METADATA: "true"
  # Cluster name identifier to set as metadata on the CephFS subvolumes and RBD images. This is useful
  # when, for example, two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster.
  # CSI_CLUSTER_NAME: "my-prod-cluster"
  # Set to true to enable host networking for the CSI CephFS and RBD nodeplugins. This may be necessary
  # in some network configurations where the SDN does not provide access to an external cluster or
  # there is a significant drop in read/write performance.
  # CSI_ENABLE_HOST_NETWORK: "true"
  # Deprecation note: Rook uses "holder" pods to allow CSI to connect to the multus public network
  # without needing the hosts to be connected to that network. Holder pods are being removed. See the
  # issue for details: https://github.com/rook/rook/issues/13055. New Rook deployments should set this to "true".
  CSI_DISABLE_HOLDER_PODS: "true"
  # Set the logging level for the cephCSI containers maintained by cephCSI.
  # Supported values are 0 to 5: 0 for general useful logs, 5 for trace-level verbosity.
  # CSI_LOG_LEVEL: "0"
  # Set the logging level for the Kubernetes-csi sidecar containers.
  # Supported values are 0 to 5: 0 for general useful logs, 5 for trace-level verbosity.
  # CSI_SIDECAR_LOG_LEVEL: "0"
  # Set the number of replicas for the csi provisioner deployment.
  CSI_PROVISIONER_REPLICAS: "2"
  # The OMAP generator generates the omap mapping between the PV name and the RBD image,
  # which helps CSI identify the rbd images for CSI operations.
  # CSI_ENABLE_OMAP_GENERATOR needs to be enabled when the rbd mirroring feature is in use.
  # By default the OMAP generator is disabled; when enabled, it will be deployed as a
  # sidecar with the CSI provisioner pod. To enable it, set it to true.
  # CSI_ENABLE_OMAP_GENERATOR: "true"
  # Set to false to disable deployment of the snapshotter container in the CephFS provisioner pod.
  CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true"
  # Set to false to disable deployment of the snapshotter container in the NFS provisioner pod.
  CSI_ENABLE_NFS_SNAPSHOTTER: "true"
  # Set to false to disable deployment of the snapshotter container in the RBD provisioner pod.
  CSI_ENABLE_RBD_SNAPSHOTTER: "true"
  # Enable Ceph kernel clients on kernels < 4.17 that support quotas for CephFS.
  # If you disable the kernel client, your application may be disrupted during upgrade.
  # See the upgrade guide: https://rook.io/docs/rook/latest/ceph-upgrade.html
  CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true"
  # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  CSI_RBD_FSGROUPPOLICY: "File"
  # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  CSI_CEPHFS_FSGROUPPOLICY: "File"
  # (Optional) policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  CSI_NFS_FSGROUPPOLICY: "File"
  # (Optional) Allow starting an unsupported ceph-csi image.
  ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false"
  # (Optional) Control the host mount of /etc/selinux for the csi plugin pods.
  CSI_PLUGIN_ENABLE_SELINUX_HOST_MOUNT: "false"
  # The default version of CSI supported by Rook will be started. To change the version
  # of the CSI driver to something other than what is officially supported, change
  # these images to the desired release of the CSI driver.
  # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.11.0"
  # ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1"
  # ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.10.1"
  # ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v4.0.1"
  # ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v7.0.2"
  # ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.5.1"
  # (Optional) Set a user-created priorityclassName for the csi plugin pods.
  CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"
  # (Optional) Set a user-created priorityclassName for the csi provisioner pods.
  CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical"
  # CSI CephFS plugin daemonset update strategy. Supported values are OnDelete and RollingUpdate;
  # the default value is RollingUpdate.
  # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
  # The maxUnavailable parameter of the CSI CephFS plugin daemonset update strategy.
  # The default value is 1.
  # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY_MAX_UNAVAILABLE: "1"
  # CSI RBD plugin daemonset update strategy. Supported values are OnDelete and RollingUpdate;
  # the default value is RollingUpdate.
  # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete"
  # The maxUnavailable parameter of the CSI RBD plugin daemonset update strategy.
  # The default value is 1.
  # CSI_RBD_PLUGIN_UPDATE_STRATEGY_MAX_UNAVAILABLE: "1"
  # CSI NFS plugin daemonset update strategy. Supported values are OnDelete and RollingUpdate;
  # the default value is RollingUpdate.
  # CSI_NFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
  # kubelet directory path, if kubelet is configured to use a path other than /var/lib/kubelet.
  # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet"
  # Labels to add to the CSI CephFS Deployment and DaemonSet Pods.
  # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2"
  # Labels to add to the CSI RBD Deployment and DaemonSet Pods.
  # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2"
  # Labels to add to the CSI NFS Deployment and DaemonSet Pods.
  # ROOK_CSI_NFS_POD_LABELS: "key1=value1,key2=value2"
  # (Optional) CephCSI CephFS plugin Volumes
  # CSI_CEPHFS_PLUGIN_VOLUME: |
  #   - name: lib-modules
  #     hostPath:
  #       path: /run/current-system/kernel-modules/lib/modules/
  #   - name: host-nix
  #     hostPath:
  #       path: /nix
  # (Optional) CephCSI CephFS plugin Volume mounts
  # CSI_CEPHFS_PLUGIN_VOLUME_MOUNT: |
  #   - name: host-nix
  #     mountPath: /nix
  #     readOnly: true
  # (Optional) CephCSI RBD plugin Volumes
  # CSI_RBD_PLUGIN_VOLUME: |
  #   - name: lib-modules
  #     hostPath:
  #       path: /run/current-system/kernel-modules/lib/modules/
  #   - name: host-nix
  #     hostPath:
  #       path: /nix
  # (Optional) CephCSI RBD plugin Volume mounts
  # CSI_RBD_PLUGIN_VOLUME_MOUNT: |
  #   - name: host-nix
  #     mountPath: /nix
  #     readOnly: true
  # (Optional) CephCSI provisioner NodeAffinity (applied to both the CephFS and RBD provisioners).
  # CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
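  # In this affinity string format, each "key=value" pair requires a node label; pairs are
  # separated by ";" and multiple accepted values for one key by ",". The example above is
  # assumed to match nodes labeled role=storage-node whose storage label is rook or ceph.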
  # (Optional) CephCSI provisioner tolerations list (applied to both the CephFS and RBD provisioners).
  # Put here the list of taints you want to tolerate in YAML format.
  # The CSI provisioners are best started on the same nodes as the other ceph daemons.
  # CSI_PROVISIONER_TOLERATIONS: |
  #   - effect: NoSchedule
  #     key: node-role.kubernetes.io/control-plane
  #     operator: Exists
  #   - effect: NoExecute
  #     key: node-role.kubernetes.io/etcd
  #     operator: Exists
  # (Optional) CephCSI plugin NodeAffinity (applied to both the CephFS and RBD plugins).
  # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
  # (Optional) CephCSI plugin tolerations list (applied to both the CephFS and RBD plugins).
  # Put here the list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_PLUGIN_TOLERATIONS: |
  #   - effect: NoSchedule
  #     key: node-role.kubernetes.io/control-plane
  #     operator: Exists
  #   - effect: NoExecute
  #     key: node-role.kubernetes.io/etcd
  #     operator: Exists
  # (Optional) CephCSI RBD provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
  # CSI_RBD_PROVISIONER_NODE_AFFINITY: "role=rbd-node"
  # (Optional) CephCSI RBD provisioner tolerations list (if specified, overrides CSI_PROVISIONER_TOLERATIONS).
  # Put here the list of taints you want to tolerate in YAML format.
  # The CSI provisioners are best started on the same nodes as the other ceph daemons.
  # CSI_RBD_PROVISIONER_TOLERATIONS: |
  #   - key: node.rook.io/rbd
  #     operator: Exists
  # (Optional) CephCSI RBD plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
  # CSI_RBD_PLUGIN_NODE_AFFINITY: "role=rbd-node"
  # (Optional) CephCSI RBD plugin tolerations list (if specified, overrides CSI_PLUGIN_TOLERATIONS).
  # Put here the list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_RBD_PLUGIN_TOLERATIONS: |
  #   - key: node.rook.io/rbd
  #     operator: Exists
  # (Optional) CephCSI CephFS provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
  # CSI_CEPHFS_PROVISIONER_NODE_AFFINITY: "role=cephfs-node"
  # (Optional) CephCSI CephFS provisioner tolerations list (if specified, overrides CSI_PROVISIONER_TOLERATIONS).
  # Put here the list of taints you want to tolerate in YAML format.
  # The CSI provisioners are best started on the same nodes as the other ceph daemons.
  # CSI_CEPHFS_PROVISIONER_TOLERATIONS: |
  #   - key: node.rook.io/cephfs
  #     operator: Exists
  # (Optional) CephCSI CephFS plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
  # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: "role=cephfs-node"
  # (Optional) CephCSI CephFS plugin tolerations list (if specified, overrides CSI_PLUGIN_TOLERATIONS).
  # Put here the list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_CEPHFS_PLUGIN_TOLERATIONS: |
  #   - key: node.rook.io/cephfs
  #     operator: Exists
  # (Optional) CephCSI NFS provisioner NodeAffinity (overrides CSI_PROVISIONER_NODE_AFFINITY).
  # CSI_NFS_PROVISIONER_NODE_AFFINITY: "role=nfs-node"
  # (Optional) CephCSI NFS provisioner tolerations list (overrides CSI_PROVISIONER_TOLERATIONS).
  # Put here the list of taints you want to tolerate in YAML format.
  # The CSI provisioners are best started on the same nodes as the other ceph daemons.
  # CSI_NFS_PROVISIONER_TOLERATIONS: |
  #   - key: node.rook.io/nfs
  #     operator: Exists
  # (Optional) CephCSI NFS plugin NodeAffinity (overrides CSI_PLUGIN_NODE_AFFINITY).
  # CSI_NFS_PLUGIN_NODE_AFFINITY: "role=nfs-node"
  # (Optional) CephCSI NFS plugin tolerations list (overrides CSI_PLUGIN_TOLERATIONS).
  # Put here the list of taints you want to tolerate in YAML format.
  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # CSI_NFS_PLUGIN_TOLERATIONS: |
  #   - key: node.rook.io/nfs
  #     operator: Exists
  # (Optional) Ceph CSI RBD provisioner resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the provisioner pod.
  # CSI_RBD_PROVISIONER_RESOURCE: |
  #   - name: csi-provisioner
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #   - name: csi-resizer
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #   - name: csi-attacher
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #   - name: csi-snapshotter
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #   - name: csi-rbdplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #   - name: csi-omap-generator
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #   - name: liveness-prometheus
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  # (Optional) Ceph CSI RBD plugin resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the plugin pod.
  # CSI_RBD_PLUGIN_RESOURCE: |
  #   - name: driver-registrar
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  #   - name: csi-rbdplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #   - name: liveness-prometheus
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  # (Optional) Ceph CSI CephFS provisioner resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the provisioner pod.
  # CSI_CEPHFS_PROVISIONER_RESOURCE: |
  #   - name: csi-provisioner
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #   - name: csi-resizer
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #   - name: csi-attacher
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #   - name: csi-snapshotter
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #   - name: csi-cephfsplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #   - name: liveness-prometheus
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  # (Optional) Ceph CSI CephFS plugin resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the plugin pod.
  # CSI_CEPHFS_PLUGIN_RESOURCE: |
  #   - name: driver-registrar
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  #   - name: csi-cephfsplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #   - name: liveness-prometheus
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  # (Optional) Ceph CSI NFS provisioner resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the provisioner pod.
  # CSI_NFS_PROVISIONER_RESOURCE: |
  #   - name: csi-provisioner
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  #   - name: csi-nfsplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  #   - name: csi-attacher
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 100m
  #       limits:
  #         memory: 256Mi
  # (Optional) Ceph CSI NFS plugin resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the plugin pod.
  # CSI_NFS_PLUGIN_RESOURCE: |
  #   - name: driver-registrar
  #     resource:
  #       requests:
  #         memory: 128Mi
  #         cpu: 50m
  #       limits:
  #         memory: 256Mi
  #   - name: csi-nfsplugin
  #     resource:
  #       requests:
  #         memory: 512Mi
  #         cpu: 250m
  #       limits:
  #         memory: 1Gi
  # Set to true to enable the Ceph CSI liveness container.
  CSI_ENABLE_LIVENESS: "false"
  # Configure the CSI CephFS liveness metrics port.
  # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081"
  # Configure the CSI RBD liveness metrics port.
  # CSI_RBD_LIVENESS_METRICS_PORT: "9080"
  # CSIADDONS_PORT: "9070"
  # Set the CephFS kernel mount options to use; see https://docs.ceph.com/en/latest/man/8/mount.ceph/#options
  # Set to "ms_mode=secure" when connections.encrypted is enabled in the CephCluster CR.
  # CSI_CEPHFS_KERNEL_MOUNT_OPTIONS: "ms_mode=secure"
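  # For reference, a minimal sketch of the matching CephCluster CR network encryption
  # setting (verify the field names against your Rook version's CephCluster CRD):
  #   network:
  #     connections:
  #       encryption:
  #         enabled: true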
  # Whether the OBC provisioner should watch the operator namespace or not. If not, the namespace of the cluster will be used.
  ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
  # Custom prefix value for the OBC provisioner instead of the ceph cluster namespace. Do not set this on an existing cluster.
  # ROOK_OBC_PROVISIONER_NAME_PREFIX: "custom-prefix"
  # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
  # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
  ROOK_ENABLE_DISCOVERY_DAEMON: "false"
  # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it defaults to 15.
  ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15"
  # Enable the csi addons sidecar.
  CSI_ENABLE_CSIADDONS: "false"
  # Enable watch for faster recovery from RBD RWO node loss.
  ROOK_WATCH_FOR_NODE_FAILURE: "true"
  # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.8.0"
  # The CSI gRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it defaults to 150.
  CSI_GRPC_TIMEOUT_SECONDS: "150"
  # Set to false to disable the volume group snapshot feature. This feature is
  # enabled by default as long as the necessary CRDs are available in the cluster.
  CSI_ENABLE_VOLUME_GROUP_SNAPSHOT: "true"
  # Enable topology based provisioning.
  CSI_ENABLE_TOPOLOGY: "false"
  # Domain labels define which node labels to use as domains
  # for CSI nodeplugins to advertise their domains.
  # NOTE: the value here serves as an example and needs to be
  # updated with node labels that define domains of interest.
  # CSI_TOPOLOGY_DOMAIN_LABELS: "kubernetes.io/hostname,topology.kubernetes.io/zone,topology.rook.io/rack"
  # Whether to skip any attach operation altogether for CephCSI PVCs.
  # See more details [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
  # If set to false it skips the volume attachments and makes the creation of pods using the CephCSI PVC fast.
  # **WARNING** It's highly discouraged to use this for RWO volumes: for RBD PVCs it can cause data corruption,
  # and csi-addons operations like ReclaimSpace and PVC key rotation will also not be supported if set to false,
  # since there will be no VolumeAttachments to determine which node the PVC is mounted on.
  # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
  CSI_CEPHFS_ATTACH_REQUIRED: "true"
  CSI_RBD_ATTACH_REQUIRED: "true"
  CSI_NFS_ATTACH_REQUIRED: "true"
  # (Optional) Duration in seconds that non-leader candidates will wait to force acquire leadership. Defaults to 137 seconds.
  # CSI_LEADER_ELECTION_LEASE_DURATION: "137s"
  # (Optional) Deadline in seconds that the acting leader will retry refreshing leadership before giving up. Defaults to 107 seconds.
  # CSI_LEADER_ELECTION_RENEW_DEADLINE: "107s"
  # (Optional) Retry period in seconds the LeaderElector clients should wait between tries of actions. Defaults to 26 seconds.
  # CSI_LEADER_ELECTION_RETRY_PERIOD: "26s"
  # CSI driver name prefix for cephfs, rbd and nfs. If not specified, the default
  # will be the name of the namespace where the rook-ceph operator is deployed.
  # If set, search for `# csi-provisioner-name` in the storageclass and
  # volumesnapshotclass manifests and update the name accordingly.
  # CSI_DRIVER_NAME_PREFIX: "rook-ceph"
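  # For example, with the prefix above the drivers would register as
  # "rook-ceph.rbd.csi.ceph.com", "rook-ceph.cephfs.csi.ceph.com" and
  # "rook-ceph.nfs.csi.ceph.com". Note that existing PVs reference the driver name,
  # so changing the prefix on a cluster with existing volumes is expected to break them.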
  # (Optional) Rook Discover tolerations list. Put here the list of taints you want to tolerate in YAML format.
  # DISCOVER_TOLERATIONS: |
  #   - effect: NoSchedule
  #     key: node-role.kubernetes.io/control-plane
  #     operator: Exists
  #   - effect: NoExecute
  #     key: node-role.kubernetes.io/etcd
  #     operator: Exists
  # (Optional) Rook Discover priority class name to set on the pod(s).
  # DISCOVER_PRIORITY_CLASS_NAME: "<PriorityClassName>"
  # (Optional) Discover Agent NodeAffinity.
  # DISCOVER_AGENT_NODE_AFFINITY: |
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #       - matchExpressions:
  #           - key: myKey
  #             operator: DoesNotExist
  # (Optional) Discover Agent Pod Labels.
  # DISCOVER_AGENT_POD_LABELS: "key1=value1,key2=value2"
  # Disable automatic orchestration when new devices are discovered.
  ROOK_DISABLE_DEVICE_HOTPLUG: "false"
  # The duration between discovering devices in the rook-discover daemonset.
  ROOK_DISCOVER_DEVICES_INTERVAL: "60m"
  # (Optional) Discover daemon resource requests and limits.
  # DISCOVER_DAEMON_RESOURCES: |
  #   - name: DISCOVER_DAEMON_RESOURCES
  #     resources:
  #       limits:
  #         memory: 512Mi
  #       requests:
  #         cpu: 200m
  #         memory: 128Mi
  # (Optional) Burst to use while communicating with the kubernetes apiserver.
  # CSI_KUBE_API_BURST: "10"
  # (Optional) QPS to use while communicating with the kubernetes apiserver.
  # CSI_KUBE_API_QPS: "5.0"
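  # The operator picks up changes to this ConfigMap at runtime. For example, to raise the
  # log level on a running cluster (a sketch, assuming the default "rook-ceph" namespace):
  #   oc -n rook-ceph patch configmap rook-ceph-operator-config --type merge \
  #     -p '{"data":{"ROOK_LOG_LEVEL":"DEBUG"}}'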
---
# The deployment for the rook operator
# OLM: BEGIN OPERATOR DEPLOYMENT
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-operator
  namespace: rook-ceph # namespace:operator
  labels:
    operator: rook
    storage-backend: ceph
    app.kubernetes.io/name: rook-ceph
    app.kubernetes.io/instance: rook-ceph
    app.kubernetes.io/component: rook-ceph-operator
    app.kubernetes.io/part-of: rook-ceph-operator
spec:
  selector:
    matchLabels:
      app: rook-ceph-operator
  strategy:
    type: Recreate
  replicas: 1
  template:
    metadata:
      labels:
        app: rook-ceph-operator
    spec:
      tolerations:
        - effect: NoExecute
          key: node.kubernetes.io/unreachable
          operator: Exists
          tolerationSeconds: 5
      serviceAccountName: rook-ceph-system
      containers:
        - name: rook-ceph-operator
          image: rook/ceph:master
          args: ["ceph", "operator"]
          securityContext:
            runAsNonRoot: true
            runAsUser: 2016
            runAsGroup: 2016
          volumeMounts:
            - mountPath: /var/lib/rook
              name: rook-config
            - mountPath: /etc/ceph
              name: default-config-dir
          env:
            - name: ROOK_CURRENT_NAMESPACE_ONLY
              value: "false"
            # Whether to start pods as privileged when they mount a host path. This includes the Ceph mon
            # and osd pods, and the csi provisioners (if log rotation is on).
            # Set this to true if SELinux is enabled (e.g. OpenShift) to work around the anyuid issues.
            # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
            - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
              value: "true"
            # Provide customised regexes as comma-separated values. For example, the regex for
            # rbd-based volumes is "(?i)rbd[0-9]+". In case of more than one regex, use a comma
            # to separate them. The default regex is "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+".
            # Add a regex after a comma to blacklist a disk.
            # If the value is empty, the default regex will be used.
            - name: DISCOVER_DAEMON_UDEV_BLACKLIST
              value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
            # Whether to start the machineDisruptionBudget and machineLabel controllers to watch for the osd pods and MDBs.
            - name: ROOK_ENABLE_MACHINE_DISRUPTION_BUDGET
              value: "false"
            # - name: DISCOVER_DAEMON_RESOURCES
            #   value: |
            #     resources:
            #       limits:
            #         memory: 512Mi
            #       requests:
            #         cpu: 100m
            #         memory: 128Mi
            # Time to wait until the node controller will move Rook pods to other
            # nodes after detecting an unreachable node.
            # Pods affected by this setting are:
            # mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and the ceph toolbox.
            # The value used in this variable replaces the default value of 300 secs
            # added automatically by k8s as a toleration for
            # <node.kubernetes.io/unreachable>.
            # The total amount of time it takes to reschedule Rook pods on healthy nodes
            # after detecting a <not ready node> condition will be the sum of:
            # --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag)
            # --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds
            - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS
              value: "5"
            # The name of the node to pass with the downward API
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # The pod name to pass with the downward API
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # The pod namespace to pass with the downward API
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            # Uncomment to run the lib bucket provisioner in multithreaded mode
            # - name: LIB_BUCKET_PROVISIONER_THREADS
            #   value: "5"
          # Recommended resource requests and limits, if desired
          # resources:
          #   limits:
          #     memory: 512Mi
          #   requests:
          #     cpu: 100m
          #     memory: 128Mi
      volumes:
        - name: rook-config
          emptyDir: {}
        - name: default-config-dir
          emptyDir: {}
# OLM: END OPERATOR DEPLOYMENT