Add a specific mixin environment for doing CAPI mgmt clusters (#37)
* Add a specific mixin inventory for doing CAPI mgmt clusters

* Remove unnecessary variable

* Updates to Kubernetes versions + flavor detection

* azimuth-ops pr merged to main
mkjpryor committed Sep 27, 2023
1 parent b4052e5 commit dc1e147
Showing 8 changed files with 169 additions and 1 deletion.
9 changes: 9 additions & 0 deletions environments/capi-mgmt-example/ansible.cfg
@@ -0,0 +1,9 @@
[defaults]
# Layer the inventories for a CAPI mgmt cluster deployment
# For a single node deployment, replace the HA inventory with the singlenode one
inventory = ../base/inventory,../ha/inventory,../capi-mgmt/inventory,./inventory
roles_path = ../../.ansible/roles
collections_path = ../../.ansible/collections

# Disable host key checking as hosts are dynamically replaced
host_key_checking = False
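For a single-node deployment, the comment above says to swap the HA inventory for the singlenode one. A minimal sketch of that variant of ansible.cfg, assuming the mixin directory is named `singlenode`:

```ini
[defaults]
# Layer the inventories for a single-node CAPI mgmt cluster deployment
inventory = ../base/inventory,../singlenode/inventory,../capi-mgmt/inventory,./inventory
roles_path = ../../.ansible/roles
collections_path = ../../.ansible/collections

# Disable host key checking as hosts are dynamically replaced
host_key_checking = False
```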
15 changes: 15 additions & 0 deletions environments/capi-mgmt-example/clouds.yaml
@@ -0,0 +1,15 @@
# This clouds.yaml is used to connect to the OpenStack project for the environment
# It should contain an application credential
#
# WARNING: This file should be encrypted

clouds:
  openstack:
    auth:
      auth_url: "<keystone auth url>"
      application_credential_id: "<app credential id>"
      application_credential_secret: "<app credential secret>"
    region_name: "RegionOne"
    interface: "public"
    identity_api_version: 3
    auth_type: "v3applicationcredential"
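The placeholders above come from an application credential, which can be created with the OpenStack CLI. A sketch (the credential name `capi-mgmt` is illustrative):

```sh
# Prints the credential id and secret to paste into clouds.yaml;
# the secret cannot be retrieved again later
openstack application credential create capi-mgmt
```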
@@ -0,0 +1,8 @@
#####
# This file contains environment-specific secrets for a CAPI mgmt cluster deployment
#
# It should be encrypted if stored in version control
#####

# The Slack webhook URL for monitoring alerts (optional)
# alertmanager_config_slack_webhook_url: https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX
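Since both this file and clouds.yaml carry credentials, one way to honour the encryption warnings is Ansible Vault; a sketch, with the secrets file path written as a placeholder because it is not shown in this view:

```sh
# Encrypt the credential-bearing files in place before committing
ansible-vault encrypt environments/capi-mgmt-example/clouds.yaml
ansible-vault encrypt <path-to-this-secrets-file>
```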
@@ -0,0 +1,50 @@
#####
# Configuration for the seed node (HA) or single node
#####

# The CIDR of the subnet that should be created
# infra_network_cidr: 192.168.100.0/24

# The ID of the external network to connect to via a router
# By default, if there is exactly one external network then it will be used
# infra_external_network_id: "<external network id>"

# The image id of an Ubuntu 20.04 image to use for the node
# By default, a suitable image is uploaded to the target project
# infra_image_id: "<image id>"

# The id of the flavor to use for the node
# For a seed node for an HA cluster, 8GB RAM is fine (maybe even 4GB)
# For a single node deployment, >= 8GB RAM is recommended
# By default, the first flavor matching these constraints is selected
# infra_flavor_id: "<flavor id>"

# The size in GB for the data volume
# This will hold all cluster data, including Kubernetes resources
# infra_data_volume_size: 50

#####
# Configuration for the HA cluster
#####

# The ID of the image that will be used for the nodes of the HA cluster
# By default, a suitable image is uploaded to the target project
# capi_cluster_machine_image_id: "<image id>"

# The Kubernetes version that will be used for the HA cluster
# This should match the Kubernetes version of the specified image
# capi_cluster_kubernetes_version: 1.23.8

# The name of the flavor to use for control plane nodes
# At least 2 CPUs and 8GB RAM is required
# By default, the first flavor that matches this requirement is used
# capi_cluster_control_plane_flavor: "<flavor name>"

# The name of the flavor to use for worker nodes
# At least 2 CPUs and 8GB RAM is required
# By default, the first flavor that matches this requirement is used
# capi_cluster_worker_flavor: "<flavor name>"

# The number of worker nodes
# Defaults to 3
# capi_cluster_worker_count: 3
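As a worked example, a small deployment might uncomment and pin a handful of these variables. All values below are illustrative placeholders, not recommendations:

```yaml
infra_network_cidr: 192.168.100.0/24
infra_data_volume_size: 100
capi_cluster_kubernetes_version: 1.27.5  # must match the machine image
capi_cluster_control_plane_flavor: m1.large
capi_cluster_worker_flavor: m1.large
capi_cluster_worker_count: 3
```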
2 changes: 2 additions & 0 deletions environments/capi-mgmt-example/inventory/hosts
@@ -0,0 +1,2 @@
[terraform_provision]
localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
77 changes: 77 additions & 0 deletions environments/capi-mgmt/inventory/group_vars/all.yml
@@ -0,0 +1,77 @@
# Give the seed node a name corresponding to the environment
infra_name: "capi-mgmt-{{ azimuth_environment }}{{ '-seed' if install_mode == 'ha' else '' }}"
# Give the HA cluster a name corresponding to the environment
capi_cluster_release_name: "capi-mgmt-{{ azimuth_environment }}"

# The size in GB for the data volume
# This will hold all cluster data, including Kubernetes resources
infra_data_volume_size: 50

# Pick a flavor with at least 20GB disk, 2 CPUs and 8GB RAM from the available flavors
infra_flavor_id: >-
  {{-
    lookup('pipe', 'openstack flavor list -f json') |
      from_json |
      selectattr('Disk', '>=', 20) |
      selectattr('VCPUs', '>=', 2) |
      selectattr('RAM', '>=', 8192) |
      sort(attribute = 'RAM') |
      first |
      default(undef(hint = 'Unable to determine a suitable flavor')) |
      json_query('ID')
  }}
# Upload the Kubernetes image we need for the HA cluster as a private image
# By default, we get the image from the azimuth-images version
community_images_default:
  kube_1_27:
    name: "{{ community_images_azimuth_images_manifest['kubernetes-1-27-jammy'].name }}"
    source_url: "{{ community_images_azimuth_images_manifest['kubernetes-1-27-jammy'].url }}"
    checksum: "{{ community_images_azimuth_images_manifest['kubernetes-1-27-jammy'].checksum }}"
    source_disk_format: "qcow2"
    container_format: "bare"
    kubernetes_version: "{{ community_images_azimuth_images_manifest['kubernetes-1-27-jammy'].kubernetes_version }}"
community_images_default_visibility: private
community_images_update_existing_visibility: false

capi_cluster_kubernetes_version: >-
  {{-
    community_images.kube_1_27.kubernetes_version
    if community_images is defined and 'kube_1_27' in community_images
    else undef(hint = 'capi_cluster_kubernetes_version is required')
  }}
capi_cluster_machine_image_id: >-
  {{-
    community_images_image_ids.kube_1_27
    if (
      community_images_image_ids is defined and
      'kube_1_27' in community_images_image_ids
    )
    else undef(hint = 'capi_cluster_machine_image_id is required')
  }}
# Flavors for the HA cluster
capi_cluster_control_plane_flavor: >-
  {{-
    lookup('pipe', 'openstack flavor show -f json ' ~ infra_flavor_id) |
      from_json |
      json_query('name')
  }}
capi_cluster_worker_flavor: "{{ capi_cluster_control_plane_flavor }}"

# By default, provision the cluster with a FIP on the API server load-balancer
# so that the Kubernetes API can be reached from outside
capi_cluster_apiserver_floating_ip: true

# By default, don't worry about failure domains
capi_cluster_control_plane_omit_failure_domain: true
capi_cluster_worker_failure_domain: ~

# We don't need the ingress controller for a CAPI mgmt cluster
ingress_controller_enabled: no

# cert-manager is always required
certmanager_enabled: yes

# Harbor is never required
harbor_enabled: no
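The `infra_flavor_id` expression above shells out to `openstack flavor list -f json` and filters the result in Jinja. The same selection can be reproduced by hand with `jq` to confirm which flavor a deployment will pick (a sketch mirroring the constraints in this file):

```sh
# >= 20GB disk, >= 2 vCPUs, >= 8192MB RAM, smallest RAM wins
openstack flavor list -f json \
  | jq -r 'map(select(.Disk >= 20 and .VCPUs >= 2 and .RAM >= 8192))
           | sort_by(.RAM) | .[0].ID'
```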
7 changes: 7 additions & 0 deletions environments/capi-mgmt/inventory/hosts
@@ -0,0 +1,7 @@
# This file exists so that Ansible treats the directory as an inventory

[terraform_provision]
# This group should contain the hosts that are used to provision the infrastructure
# onto which Azimuth will be deployed
# E.g.:
# localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
2 changes: 1 addition & 1 deletion requirements.yml
@@ -3,7 +3,7 @@
collections:
  - name: https://github.com/stackhpc/ansible-collection-azimuth-ops.git
    type: git
-   version: a8a088865bceedb5f35f2c3bb239e31cf07ad491
+   version: 208684f84fe632d5d14b19e79205d777096e5a12
# For local development
# - type: dir
#   source: ../ansible-collection-azimuth-ops
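With `roles_path` and `collections_path` already set in the example ansible.cfg, the pinned collection can be installed in the usual way (pass `-p` explicitly if the configured path is not picked up):

```sh
# Install the azimuth-ops collection at the pinned commit
ansible-galaxy collection install -r requirements.yml
```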
