This is an experimental environment to learn about, test, and automate Strimzi Operator features.
terraform init
terraform apply --auto-approve
terraform destroy --auto-approve
Name | Version |
---|---|
aws | >= 5.0 |
helm | ~> 2.0 |
kubectl | ~> 1.14 |
kubernetes | ~> 2.0 |
tls | ~> 3.1.0 |
Name | Version |
---|---|
aws | 5.35.0 |
helm | 2.12.1 |
kubectl | 1.14.0 |
kubernetes | 2.25.2 |
time | 0.10.0 |
tls | 3.1.0 |
No modules.
Name | Description | Type | Default | Required |
---|---|---|---|---|
addon_cni_version | Specifies the version of the AWS VPC CNI (Container Network Interface) plugin to use, which manages the network interfaces for pod networking. | string |
"v1.14.1-eksbuild.1" |
no |
addon_coredns_version | Defines the version of CoreDNS to use, a DNS server/forwarder that is integral to internal Kubernetes DNS resolution. | string |
"v1.11.1-eksbuild.4" |
no |
addon_csi_version | Indicates the version of the Container Storage Interface (CSI) driver to use for managing storage volumes in Kubernetes. | string |
"v1.26.1-eksbuild.1" |
no |
addon_kubeproxy_version | Sets the version of Kubeproxy to be used, which handles Kubernetes network services like forwarding the requests to correct containers. | string |
"v1.29.0-eksbuild.1" |
no |
aws_region | AWS region where the EKS cluster will be deployed. This should be set to the region where you want your Kubernetes resources to reside. | string |
"us-east-1" |
no |
cluster_name | The name of the Amazon EKS cluster. This is a unique identifier for your EKS cluster within the AWS region. | string |
"eks-kafka" |
no |
cluster_private_zone | The private DNS zone name for the EKS cluster in AWS Route53. This zone is used for internal DNS resolution within the cluster. | string |
"k8s.cluster" |
no |
default_tags | A map of default tags to apply to all resources. These tags can help with identifying and organizing resources within the AWS environment. | map(string) |
{ |
no |
general_instances_sizes | A list of EC2 instance types to use for the EKS worker nodes. These instance types should balance between cost, performance, and resource requirements for your workload. | list |
[ |
no |
general_scale_options | Configuration for the EKS cluster auto-scaling. It includes the minimum (min), maximum (max), and desired (desired) number of worker nodes. | map |
{ |
no |
k8s_version | The version of Kubernetes to use for the EKS cluster. This version should be compatible with the AWS EKS service and other infrastructure components. | string |
"1.29" |
no |
kafka_desired_replicas | The desired number of Kafka broker replicas in the Strimzi-managed cluster. | number |
4 |
no |
kafka_enable_cross_zone_load_balancing | Controls whether cross-zone load balancing is enabled for the Network Load Balancer (NLB) associated with the Kafka brokers, allowing even traffic distribution across all availability zones. | bool |
false |
no |
kafka_instances_sizes | A list of EC2 instance types to use for the Kafka broker worker nodes. These instance types should balance between cost, performance, and resource requirements for the Kafka workload. | list |
[ |
no |
kafka_limit_cpu | Specifies the maximum CPU limit for each Kafka broker pod, preventing the pod from consuming more CPU resources than this limit. | string |
"2" |
no |
kafka_limit_memory | Sets the maximum memory limit for each Kafka broker pod, ensuring that the pod does not exceed this amount to maintain cluster stability. | string |
"16Gi" |
no |
kafka_min_insync_replicas | The minimum number of in-sync replicas (min.insync.replicas) required to acknowledge a produce request, balancing durability against availability. | number |
2 |
no |
kafka_nlb_ingress_enable_termination_protection | Determines if termination protection is enabled for the Network Load Balancer (NLB) associated with the Kafka brokers, preventing accidental deletion. | bool |
false |
no |
kafka_nlb_ingress_internal | Indicates whether the Network Load Balancer (NLB) for the Kafka brokers should be internal ('true') or external ('false'), controlling the scope of access to within the AWS network or from the internet, respectively. | string |
"true" |
no |
kafka_nlb_ingress_type | Specifies the type of ingress to be used for Kafka, such as 'network', determining how the NLB (Network Load Balancer) handles incoming traffic to the Kafka brokers. | string |
"network" |
no |
kafka_replication_factor | The default replication factor for Kafka topics, determining how many copies of each partition are kept across the brokers. | number |
3 |
no |
kafka_request_cpu | Determines the CPU request for each Kafka broker pod, reserving the specified amount of CPU resources for optimal performance. | string |
"2" |
no |
kafka_request_max_bytes | The maximum size, in bytes, of a request that the Kafka brokers will accept. | number |
2147483647 |
no |
kafka_request_memory | Defines the memory request for each Kafka broker pod, which guarantees the specified amount of memory for proper operation. | string |
"8Gi" |
no |
kafka_scale_options | Scaling configuration for the Kafka node group. It includes the minimum (min), maximum (max), and desired (desired) number of Kafka worker nodes. | map |
{ |
no |
kafka_storage_class | The Kubernetes storage class to use for Kafka persistent volumes, impacting the performance and availability of broker storage. | string |
"gp2" |
no |
kafka_storage_size | Allocates the size of the persistent storage for each Kafka broker, ensuring sufficient space for data storage and log retention. | string |
"20Gi" |
no |
kafka_xms | The initial JVM heap size (-Xms) for the Kafka broker processes. | string |
"4g" |
no |
kafka_xmx | The maximum JVM heap size (-Xmx) for the Kafka broker processes. | string |
"6g" |
no |
observability_instances_sizes | A list of EC2 instance types to use for the observability (monitoring) worker nodes. These instance types should balance between cost, performance, and resource requirements for the observability stack. | list |
[ |
no |
observability_scale_options | Scaling configuration for the observability node group. It includes the minimum (min), maximum (max), and desired (desired) number of observability worker nodes. | map |
{ |
no |
prometheus_enable_cross_zone_load_balancing | Controls whether cross-zone load balancing is enabled for the Network Load Balancer (NLB) associated with the Prometheus server, allowing even traffic distribution across all availability zones. | bool |
false |
no |
prometheus_nlb_ingress_enable_termination_protection | Determines if termination protection is enabled for the Network Load Balancer (NLB) associated with the Prometheus server, preventing accidental deletion. | bool |
false |
no |
prometheus_nlb_ingress_internal | Indicates whether the Network Load Balancer (NLB) for the Prometheus server should be internal ('true') or external ('false'), controlling the scope of access to within the AWS network or from the internet, respectively. | string |
"false" |
no |
prometheus_nlb_ingress_type | Defines the type of ingress to be used for Prometheus, such as 'network', determining how the NLB (Network Load Balancer) handles incoming traffic to the Prometheus server. | string |
"network" |
no |
zookeeper_desired_replicas | Specifies the desired number of Zookeeper replicas for handling distributed coordination tasks efficiently and reliably. | number |
3 |
no |
zookeeper_instances_sizes | Specifies the sizes of EC2 instances for Zookeeper nodes within the EKS cluster. Optimal instance types are chosen based on the balance between cost, performance, and resource requirements for Zookeeper. | list |
[ |
no |
zookeeper_limit_cpu | Specifies the maximum CPU limit for each Zookeeper pod, preventing the pod from consuming more CPU resources than this limit. | string |
"2" |
no |
zookeeper_limit_memory | Sets the maximum memory limit for each Zookeeper pod, ensuring that the pod does not exceed this amount to maintain cluster stability. | string |
"16Gi" |
no |
zookeeper_request_cpu | Determines the CPU request for each Zookeeper pod, reserving the specified amount of CPU resources for optimal performance. | string |
"2" |
no |
zookeeper_request_memory | Defines the memory request for each Zookeeper pod, which guarantees the specified amount of memory for proper operation. | string |
"8Gi" |
no |
zookeeper_scale_options | Defines the scaling options for the Zookeeper nodes in the EKS cluster, including the minimum, maximum, and desired number of nodes to ensure consistent performance and fault tolerance. | map |
{ |
no |
zookeeper_storage_class | Defines the storage class to be used for Zookeeper persistent storage, impacting the performance and availability of the storage. | string |
"gp2" |
no |
zookeeper_storage_size | Allocates the size of the persistent storage for each Zookeeper node, ensuring sufficient space for data storage and log retention. | string |
"20Gi" |
no |
Name | Description |
---|---|
bootstrap_servers_plaintext | The plaintext (port 9092) bootstrap servers endpoint for connecting clients to the Kafka cluster. |
grafana_default_pass | The default password for the Grafana admin login. |
grafana_default_user | The default username for the Grafana admin login. |
grafana_url | The URL where the Grafana dashboard is exposed. |
kubectl get nodes -l NodeGroupType=kafka -o wide
kubectl get nodes -l NodeGroupType=observability -o wide
kubectl get nodes -l NodeGroupType=general -o wide
kubectl get nodes -l NodeGroupType=zookeeper -o wide
kubectl get strimzipodsets -n strimzi
kubectl get kafka -n strimzi
/usr/local/bin/kafka-stress --bootstrap-servers eks-kafka-kafka-1dfc54296e6bfac8.elb.us-east-1.amazonaws.com:9092 --events 300000 --topic kafka-stress
/usr/local/bin/kafka-stress --bootstrap-servers eks-kafka-kafka-1dfc54296e6bfac8.elb.us-east-1.amazonaws.com:9092 --test-mode consumer --topic kafka-stress --consumer-group teste
kubectl -n strimzi run --restart=Never --image=quay.io/strimzi/kafka:0.38.0-kafka-3.6.0 kafka-cli -- /bin/sh -c "exec tail -f /dev/null"
kubectl -n strimzi exec -it kafka-cli -- bin/kafka-topics.sh \
--describe \
--topic kafka-stress \
--bootstrap-server cluster-kafka-plain-bootstrap:9092