feat: add zookeeper cluster mode #36

Merged · 4 commits · Dec 10, 2024
12 changes: 12 additions & 0 deletions deployment/zookeeper-cluster/kustomization.yml
@@ -0,0 +1,12 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

commonAnnotations:
  app.kubernetes.io/managed-by: kustomization

resources:
  - service-client.yml
  - service-server.yml
  - zookeeper.yml
  - pdb.yml
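
With the kustomization tying the four manifests together, the whole ZooKeeper cluster can be applied in one step. A minimal usage sketch, assuming the repository root as the working directory:

    kubectl apply -k deployment/zookeeper-cluster/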
9 changes: 9 additions & 0 deletions deployment/zookeeper-cluster/pdb.yml
@@ -0,0 +1,9 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: zookeeper
  maxUnavailable: 1
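
The budget allows at most one voluntary disruption at a time, so a node drain can never take down two of the three ZooKeeper pods at once and quorum (2 of 3) is preserved. A sketch for checking the budget after deployment:

    kubectl get pdb zk-pdb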
13 changes: 13 additions & 0 deletions deployment/zookeeper-cluster/service-client.yml
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-client
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/part-of: nifi
spec:
  ports:
    - port: 2181
      name: client
  selector:
    app.kubernetes.io/name: zookeeper
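
This ClusterIP Service is what clients such as NiFi use to reach the ensemble on port 2181. A hedged connectivity check using ZooKeeper's four-letter-word protocol, assuming those commands are enabled in this ZooKeeper build and that a throwaway pod (the name zk-smoke is arbitrary) may be run in the same namespace:

    kubectl run zk-smoke --rm -it --image=busybox --restart=Never -- \
      sh -c 'echo ruok | nc zookeeper-client 2181'
    # a healthy server answers: imok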
16 changes: 16 additions & 0 deletions deployment/zookeeper-cluster/service-server.yml
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-server
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/part-of: nifi
spec:
  ports:
    - port: 2888
      name: server
    - port: 3888
      name: leader-election
  clusterIP: None
  selector:
    app.kubernetes.io/name: zookeeper
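
Because clusterIP is None, this is a headless Service: it exists to give each StatefulSet pod a stable DNS identity for quorum (2888) and leader-election (3888) traffic rather than to load-balance. With serviceName set to zookeeper-server and three replicas, the per-pod names resolve as follows (namespace default assumed for illustration):

    zookeeper-0.zookeeper-server.default.svc.cluster.local
    zookeeper-1.zookeeper-server.default.svc.cluster.local
    zookeeper-2.zookeeper-server.default.svc.cluster.local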
104 changes: 104 additions & 0 deletions deployment/zookeeper-cluster/zookeeper.yml
@@ -0,0 +1,104 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: zookeeper
  serviceName: zookeeper-server
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app.kubernetes.io/name: zookeeper
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app.kubernetes.io/name"
                    operator: In
                    values:
                      - zookeeper
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: zookeeper
          imagePullPolicy: Always
          image: "registry.k8s.io/kubernetes-zookeeper:1.0-3.4.10"
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: server
            - containerPort: 3888
              name: leader-election
          command:
            - sh
            - -c
            - "start-zookeeper \
              --servers=3 \
              --data_dir=/var/lib/zookeeper/data \
              --data_log_dir=/var/lib/zookeeper/data/log \
              --conf_dir=/opt/zookeeper/conf \
              --client_port=2181 \
              --election_port=3888 \
              --server_port=2888 \
              --tick_time=2000 \
              --init_limit=10 \
              --sync_limit=5 \
              --heap=512M \
              --max_client_cnxns=60 \
              --snap_retain_count=3 \
              --purge_interval=12 \
              --max_session_timeout=40000 \
              --min_session_timeout=4000 \
              --log_level=INFO"
          readinessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          livenessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          resources:
            requests:
              cpu: 200m
              memory: 500Mi
            limits:
              cpu: 200m
              memory: 500Mi
          securityContext:
            allowPrivilegeEscalation: false
            privileged: false
            readOnlyRootFilesystem: true
            capabilities:
              drop:
                - ALL
          volumeMounts:
            - name: data
              mountPath: /var/lib/zookeeper
              subPath: zookeeper
            - name: data
              mountPath: /opt/zookeeper/conf
              subPath: conf
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
      volumes:
        - name: data
          emptyDir:
            sizeLimit: 2Gi
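
Two points worth noting: the required pod anti-affinity means the cluster needs at least three schedulable nodes, and because the volume is an emptyDir, ZooKeeper state does not survive a pod being rescheduled to another node. A sketch for verifying the rollout, reusing the image's own zookeeper-ready script that the probes already call:

    kubectl rollout status statefulset/zookeeper
    for i in 0 1 2; do kubectl exec "zookeeper-$i" -- zookeeper-ready 2181; done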