From dc1e147e8a9530b989a9bb2880a0fd7fae611c90 Mon Sep 17 00:00:00 2001 From: Matt Pryor Date: Wed, 27 Sep 2023 16:52:17 +0100 Subject: [PATCH] Add a specific mixin environment for doing CAPI mgmt clusters (#37) * Add a specific mixin inventory for doing CAPI mgmt clusters * Remove unnecessary variable * Updates to Kubernetes versions + flavor detection * azimuth-ops pr merged to main --- environments/capi-mgmt-example/ansible.cfg | 9 +++ environments/capi-mgmt-example/clouds.yaml | 15 ++++ .../inventory/group_vars/all/secrets.yml | 8 ++ .../inventory/group_vars/all/variables.yml | 50 ++++++++++++ .../capi-mgmt-example/inventory/hosts | 2 + .../capi-mgmt/inventory/group_vars/all.yml | 77 +++++++++++++++++++ environments/capi-mgmt/inventory/hosts | 7 ++ requirements.yml | 2 +- 8 files changed, 169 insertions(+), 1 deletion(-) create mode 100644 environments/capi-mgmt-example/ansible.cfg create mode 100644 environments/capi-mgmt-example/clouds.yaml create mode 100644 environments/capi-mgmt-example/inventory/group_vars/all/secrets.yml create mode 100644 environments/capi-mgmt-example/inventory/group_vars/all/variables.yml create mode 100644 environments/capi-mgmt-example/inventory/hosts create mode 100644 environments/capi-mgmt/inventory/group_vars/all.yml create mode 100644 environments/capi-mgmt/inventory/hosts diff --git a/environments/capi-mgmt-example/ansible.cfg b/environments/capi-mgmt-example/ansible.cfg new file mode 100644 index 00000000..e88c9e01 --- /dev/null +++ b/environments/capi-mgmt-example/ansible.cfg @@ -0,0 +1,9 @@ +[defaults] +# Layer the inventories for a CAPI mgmt cluster deployment +# For a single node deployment, replace the HA inventory with the singlenode one +inventory = ../base/inventory,../ha/inventory,../capi-mgmt/inventory,./inventory +roles_path = ../../.ansible/roles +collections_path = ../../.ansible/collections + +# Disable host key checking as hosts are dynamically replaced +host_key_checking = False diff --git 
a/environments/capi-mgmt-example/clouds.yaml b/environments/capi-mgmt-example/clouds.yaml new file mode 100644 index 00000000..994973bd --- /dev/null +++ b/environments/capi-mgmt-example/clouds.yaml @@ -0,0 +1,15 @@ +# This clouds.yaml is used to connect to the OpenStack project for the environment +# It should contain an application credential +# +# WARNING: This file should be encrypted + +clouds: + openstack: + auth: + auth_url: "" + application_credential_id: "" + application_credential_secret: "" + region_name: "RegionOne" + interface: "public" + identity_api_version: 3 + auth_type: "v3applicationcredential" diff --git a/environments/capi-mgmt-example/inventory/group_vars/all/secrets.yml b/environments/capi-mgmt-example/inventory/group_vars/all/secrets.yml new file mode 100644 index 00000000..1d655439 --- /dev/null +++ b/environments/capi-mgmt-example/inventory/group_vars/all/secrets.yml @@ -0,0 +1,8 @@ +##### +# This file contains environment-specific secrets for a CAPI mgmt cluster deployment +# +# It should be encrypted if stored in version control +##### + +# The Slack webhook URL for monitoring alerts (optional) +# alertmanager_config_slack_webhook_url: https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX diff --git a/environments/capi-mgmt-example/inventory/group_vars/all/variables.yml b/environments/capi-mgmt-example/inventory/group_vars/all/variables.yml new file mode 100644 index 00000000..12314941 --- /dev/null +++ b/environments/capi-mgmt-example/inventory/group_vars/all/variables.yml @@ -0,0 +1,50 @@ +##### +# Configuration for the seed node (HA) or single node +##### + +# The CIDR of the subnet that should be created +# infra_network_cidr: 192.168.100.0/24 + +# The ID of the external network to connect to via a router +# By default, if there is exactly one external network then it will be used +# infra_external_network_id: "" + +# The image id of an Ubuntu 20.04 image to use for the node +# By default, a suitable image is 
uploaded to the target project +# infra_image_id: "" + +# The id of the flavor to use for the node +# For a seed node for an HA cluster, 8GB RAM is fine (maybe even 4GB) +# For a single node deployment, >= 8GB RAM is recommended +# By default, the first flavor matching these constraints is selected +# infra_flavor_id: "" + +# The size in GB for the data volume +# This will hold all cluster data, including Kubernetes resources +# infra_data_volume_size: 50 + +##### +# Configuration for the HA cluster +##### + +# The ID of the image that will be used for the nodes of the HA cluster +# By default, a suitable image is uploaded to the target project +# capi_cluster_machine_image_id: "" + +# The Kubernetes version that will be used for the HA cluster +# This should match the specified image +# capi_cluster_kubernetes_version: 1.23.8 + +# The name of the flavor to use for control plane nodes +# At least 2 CPUs and 8GB RAM is required +# By default, the first flavor that matches this requirement is used +# capi_cluster_control_plane_flavor: "" + +# The name of the flavor to use for worker nodes +# At least 2 CPUs and 8GB RAM is required +# By default, the first flavor that matches this requirement is used +# capi_cluster_worker_flavor: "" + +# The number of worker nodes +# Defaults to 3 +# capi_cluster_worker_count: 3 diff --git a/environments/capi-mgmt-example/inventory/hosts b/environments/capi-mgmt-example/inventory/hosts new file mode 100644 index 00000000..9dcf1df6 --- /dev/null +++ b/environments/capi-mgmt-example/inventory/hosts @@ -0,0 +1,2 @@ +[terraform_provision] +localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/environments/capi-mgmt/inventory/group_vars/all.yml b/environments/capi-mgmt/inventory/group_vars/all.yml new file mode 100644 index 00000000..89dc4d0f --- /dev/null +++ b/environments/capi-mgmt/inventory/group_vars/all.yml @@ -0,0 +1,77 @@ +# Give the seed node a name corresponding to the 
environment +infra_name: "capi-mgmt-{{ azimuth_environment }}{{ '-seed' if install_mode == 'ha' else '' }}" +# Give the HA cluster a name corresponding to the environment +capi_cluster_release_name: "capi-mgmt-{{ azimuth_environment }}" + +# The size in GB for the data volume +# This will hold all cluster data, including Kubernetes resources +infra_data_volume_size: 50 + +# Pick a flavor with at least 20GB disk, 2 CPUs and 8GB RAM from the available flavors +infra_flavor_id: >- + {{- + lookup('pipe', 'openstack flavor list -f json') | + from_json | + selectattr('Disk', '>=', 20) | + selectattr('VCPUs', '>=', 2) | + selectattr('RAM', '>=', 8192) | + sort(attribute = 'RAM') | + first | + default(undef(hint = 'Unable to determine a suitable flavor')) | + json_query('ID') + }} + +# Upload the Kubernetes image we need for the HA cluster as a private image +# By default, we get the image from the azimuth-images version +community_images_default: + kube_1_27: + name: "{{ community_images_azimuth_images_manifest['kubernetes-1-27-jammy'].name }}" + source_url: "{{ community_images_azimuth_images_manifest['kubernetes-1-27-jammy'].url }}" + checksum: "{{ community_images_azimuth_images_manifest['kubernetes-1-27-jammy'].checksum }}" + source_disk_format: "qcow2" + container_format: "bare" + kubernetes_version: "{{ community_images_azimuth_images_manifest['kubernetes-1-27-jammy'].kubernetes_version }}" +community_images_default_visibility: private +community_images_update_existing_visibility: false + +capi_cluster_kubernetes_version: >- + {{- + community_images.kube_1_27.kubernetes_version + if community_images is defined and 'kube_1_27' in community_images + else undef(hint = 'capi_cluster_kubernetes_version is required') + }} +capi_cluster_machine_image_id: >- + {{- + community_images_image_ids.kube_1_27 + if ( + community_images_image_ids is defined and + 'kube_1_27' in community_images_image_ids + ) + else undef(hint = 'capi_cluster_machine_image_id is required') + }} + +# 
Flavors for the HA cluster +capi_cluster_control_plane_flavor: >- + {{- + lookup('pipe', 'openstack flavor show -f json ' ~ infra_flavor_id) | + from_json | + json_query('name') + }} +capi_cluster_worker_flavor: "{{ capi_cluster_control_plane_flavor }}" + +# By default, provision the cluster with a FIP on the API server load-balancer +# so that the Kubernetes API can be reached from outside +capi_cluster_apiserver_floating_ip: true + +# By default, don't worry about failure domains +capi_cluster_control_plane_omit_failure_domain: true +capi_cluster_worker_failure_domain: ~ + +# We don't need the ingress controller for a CAPI mgmt cluster +ingress_controller_enabled: no + +# cert-manager is always required +certmanager_enabled: yes + +# Harbor is never required +harbor_enabled: no diff --git a/environments/capi-mgmt/inventory/hosts b/environments/capi-mgmt/inventory/hosts new file mode 100644 index 00000000..17e8ed2f --- /dev/null +++ b/environments/capi-mgmt/inventory/hosts @@ -0,0 +1,7 @@ +# This file exists so that Ansible treats the directory as an inventory + +[terraform_provision] +# This group should contain the hosts that are used to provision the infrastructure +# onto which Azimuth will be deployed +# E.g.: +# localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/requirements.yml b/requirements.yml index 764c5420..b42df382 100644 --- a/requirements.yml +++ b/requirements.yml @@ -3,7 +3,7 @@ collections: - name: https://github.com/stackhpc/ansible-collection-azimuth-ops.git type: git - version: a8a088865bceedb5f35f2c3bb239e31cf07ad491 + version: 208684f84fe632d5d14b19e79205d777096e5a12 # For local development # - type: dir # source: ../ansible-collection-azimuth-ops