Add a terraform configuration to start a Kubernetes cluster in EKS (e…
jsoriano committed May 7, 2020
1 parent cbbca88 commit 46a3d35
Showing 7 changed files with 197 additions and 0 deletions.
6 changes: 6 additions & 0 deletions metricbeat/module/kubernetes/_meta/README.md
@@ -21,3 +21,9 @@ a local cluster inside of your local docker to perform the integration tests inside. A
`kind` cluster will be created and destroyed before and after the test. If you would like to
keep the `kind` cluster running after the test has finished, you can set `KIND_SKIP_DELETE=1`
in your environment.


## Starting Kubernetes clusters in cloud providers

The `terraform` directory contains Terraform configurations to start Kubernetes
clusters in cloud providers.
20 changes: 20 additions & 0 deletions metricbeat/module/kubernetes/_meta/terraform/eks/README.md
@@ -0,0 +1,20 @@
Terraform scenario to start a Kubernetes cluster in AWS EKS.

`kubectl` will be configured to use this cluster if `awscli >= 1.18` is
available. You can find a `requirements.txt` file in this directory to prepare
a virtual environment with `awscli` installed.
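
For example, a minimal sketch of preparing that virtual environment (the
`venv` directory name is an arbitrary choice):

```
$ python3 -m venv venv
$ . venv/bin/activate
$ pip install -r requirements.txt
$ aws --version
```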

To start this scenario:

```
$ terraform init
$ terraform apply
```

It will ask for a cluster name.
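
The prompt can also be skipped by passing the variable on the command line,
for example with a hypothetical cluster name:

```
$ terraform apply -var 'cluster_name=metricbeat-test'
```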

Remember to destroy the scenario once you no longer need it:

```
$ terraform destroy
```
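
`terraform destroy` prompts for the same `cluster_name` variable; as above, it
can be passed with `-var`, for example:

```
$ terraform destroy -var 'cluster_name=metricbeat-test'
```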
10 changes: 10 additions & 0 deletions metricbeat/module/kubernetes/_meta/terraform/eks/aws.tf
@@ -0,0 +1,10 @@
provider "aws" {
region = "eu-central-1"
}

data "aws_region" "current" {}

data "aws_availability_zones" "available" {
state = "available"
}
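
The provider block above pins the region to `eu-central-1` and does not
configure credentials, so Terraform relies on the usual AWS credential
mechanisms. A sketch, assuming plain environment variables (an `AWS_PROFILE`
or shared credentials file works as well; the values below are placeholders):

```
$ export AWS_ACCESS_KEY_ID=AKIA...
$ export AWS_SECRET_ACCESS_KEY=...
$ terraform init
```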

62 changes: 62 additions & 0 deletions metricbeat/module/kubernetes/_meta/terraform/eks/cluster.tf
@@ -0,0 +1,62 @@
variable "cluster_name" {
type = string
}

resource "aws_eks_cluster" "example" {
name = var.cluster_name
role_arn = aws_iam_role.example.arn

vpc_config {
subnet_ids = aws_subnet.test_eks.*.id
}

# Ensure that IAM Role permissions are created before and deleted after EKS Cluster handling.
# Otherwise, EKS will not be able to properly delete EKS managed EC2 infrastructure such as Security Groups.
depends_on = [
aws_iam_role_policy_attachment.eks_cluster_policy,
aws_iam_role_policy_attachment.eks_service_policy,
]

# Manage kubectl configuration
provisioner "local-exec" {
command = "aws eks --region ${data.aws_region.current.name} update-kubeconfig --name ${self.name}"
}

provisioner "local-exec" {
when = destroy
command = "kubectl config unset users.${self.arn}"
}
provisioner "local-exec" {
when = destroy
command = "kubectl config unset clusters.${self.arn}"
}
provisioner "local-exec" {
when = destroy
command = "kubectl config unset contexts.${self.arn}"
}
}

resource "aws_iam_role" "example" {
name = "${var.cluster_name}-eks"

assume_role_policy = jsonencode({
Statement = [{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "eks.amazonaws.com"
}
}]
Version = "2012-10-17"
})
}

resource "aws_iam_role_policy_attachment" "eks_cluster_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = aws_iam_role.example.name
}

resource "aws_iam_role_policy_attachment" "eks_service_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = aws_iam_role.example.name
}
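
The `local-exec` provisioner above runs `aws eks update-kubeconfig`, so once
`terraform apply` finishes, the new cluster should be the active kubectl
context (and the destroy-time provisioners remove those kubeconfig entries
again). A quick check, assuming `kubectl` is installed locally:

```
$ kubectl config current-context
$ kubectl get nodes
```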
37 changes: 37 additions & 0 deletions metricbeat/module/kubernetes/_meta/terraform/eks/network.tf
@@ -0,0 +1,37 @@
resource "aws_vpc" "test_eks" {
cidr_block = "10.0.0.0/16"
}

resource "aws_subnet" "test_eks" {
count = 2

vpc_id = aws_vpc.test_eks.id
cidr_block = "10.0.${count.index}.0/24"
availability_zone = data.aws_availability_zones.available.names[count.index]

map_public_ip_on_launch = true

tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
}
}

resource "aws_internet_gateway" "gateway" {
vpc_id = aws_vpc.test_eks.id
}

resource "aws_route_table" "internet_access" {
vpc_id = aws_vpc.test_eks.id

route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gateway.id
}
}

resource "aws_route_table_association" "internet_access" {
count = length(aws_subnet.test_eks)

subnet_id = aws_subnet.test_eks[count.index].id
route_table_id = aws_route_table.internet_access.id
}
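
The subnets are tagged with `kubernetes.io/cluster/<cluster_name>` so EKS can
discover them. One way to verify the tag after apply, using a hypothetical
cluster name:

```
$ aws ec2 describe-subnets \
    --filters "Name=tag-key,Values=kubernetes.io/cluster/metricbeat-test"
```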
12 changes: 12 additions & 0 deletions metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt
@@ -0,0 +1,12 @@
awscli==1.18.48
botocore==1.15.48
colorama==0.4.3
docutils==0.15.2
jmespath==0.9.5
pyasn1==0.4.8
python-dateutil==2.8.1
PyYAML==5.3.1
rsa==3.4.2
s3transfer==0.3.3
six==1.14.0
urllib3==1.25.9
50 changes: 50 additions & 0 deletions metricbeat/module/kubernetes/_meta/terraform/eks/workers.tf
@@ -0,0 +1,50 @@
resource "aws_eks_node_group" "example" {
cluster_name = aws_eks_cluster.example.name
node_group_name = "workers"
node_role_arn = aws_iam_role.example_workers.arn
subnet_ids = aws_subnet.test_eks.*.id

scaling_config {
desired_size = 3
max_size = 6
min_size = 2
}

# Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
# Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
depends_on = [
aws_iam_role_policy_attachment.eks_worker_node,
aws_iam_role_policy_attachment.eks_cni,
aws_iam_role_policy_attachment.registry_read_only,
]
}

resource "aws_iam_role" "example_workers" {
name = "${var.cluster_name}-eks-node-group"

assume_role_policy = jsonencode({
Statement = [{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "ec2.amazonaws.com"
}
}]
Version = "2012-10-17"
})
}

resource "aws_iam_role_policy_attachment" "eks_worker_node" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.example_workers.name
}

resource "aws_iam_role_policy_attachment" "eks_cni" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = aws_iam_role.example_workers.name
}

resource "aws_iam_role_policy_attachment" "registry_read_only" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.example_workers.name
}
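
After apply, the managed node group should bring up `desired_size` (3) worker
nodes. A couple of quick checks, again with a hypothetical cluster name:

```
$ aws eks describe-nodegroup --cluster-name metricbeat-test --nodegroup-name workers
$ kubectl get nodes
```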
