diff --git a/.README.html b/.README.html
index 1cd3197..051a7b2 100644
--- a/.README.html
+++ b/.README.html
@@ -147,6 +147,8 @@
defaults/main.yml
ha_cluster_export_configuration
ha_cluster_enable_repos
See below
@@ -372,6 +384,12 @@
defaults/main.yml
ha_cluster_export_configuration
boolean, default: false
Export existing cluster configuration. See Variables Exported by the Role for details.
+ha_cluster_enable_repos
boolean, default: true
RHEL and CentOS only, enable repositories containing needed packages
@@ -424,7 +442,8 @@
boolean, default: true
If set to true, HA cluster will be configured on the hosts according to
other variables. If set to false, all HA cluster configuration will be
purged from target hosts. If set to null, HA cluster configuration will
not be changed.
ha_cluster_start_on_boot
boolean, default: true
ha_cluster_qnetd
Note that you cannot run qnetd on a cluster node as fencing would disrupt qnetd operation.
+If you set ha_cluster_qnetd: null, then qnetd host configuration will
+not be changed.
You may take a look at an example.
/dev/disk/by-id/).
+The role contains the ha_cluster_info module, which exports the current
+cluster configuration in a dictionary matching the structure of this
+role's variables. If the role is run with these variables, it recreates
+the same cluster.
+Note that the dictionary of variables may not be complete and manual
+modification of it is expected. Most notably, you need to set
+ha_cluster_hacluster_password.
+Note that depending on the pcs version installed on managed nodes,
+certain variables may not be present in the export.
+The following variables are present in the export:
+ha_cluster_cluster_present
ha_cluster_start_on_boot
ha_cluster_cluster_name
ha_cluster_transport
ha_cluster_totem
ha_cluster_quorum
ha_cluster_node_options
+- currently only node_name, corosync_addresses and pcs_address are
+present
+The following variables are never present in the export (consult the
+role documentation for the impact of these variables being missing when
+running the role):
+ha_cluster_hacluster_password
+- This is a mandatory variable for the role but it cannot be extracted
+from existing clusters.
+ha_cluster_corosync_key_src, ha_cluster_pacemaker_key_src
+and ha_cluster_fence_virt_key_src
+- These are supposed to contain paths to files with the keys. Since the
+keys themselves are not exported, these variables are not present in the
+export either. Corosync and pacemaker keys are supposed to be unique for
+each cluster.
+ha_cluster_regenerate_keys
+- It is your responsibility to decide whether you want to use existing
+keys or generate new ones.
+To export the current cluster configuration and store it in the
+ha_cluster_facts variable, run the role with
+ha_cluster_export_configuration: true. This triggers the export once the
+role finishes configuring a cluster or a qnetd host. If you want to
+trigger the export without modifying the existing configuration, run the
+role like this:
- hosts: node1
+ vars:
+ ha_cluster_cluster_present: null
+ ha_cluster_qnetd: null
+ ha_cluster_export_configuration: true
+
+ roles:
+ - linux-system-roles.ha_cluster
+Note: By default, ha_cluster_cluster_present is set to true and
+ha_cluster_qnetd.present is set to false. If you do not set the
+variables as shown in the example above, the role will reconfigure your
+cluster on the specified hosts, remove qnetd configuration from the
+specified hosts, and then export configuration.
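+For instance, a minimal sketch (reusing the placeholder cluster name and
+password from the other examples) where the role configures a two-node
+cluster and exports the resulting configuration in the same run:
- hosts: node1 node2
  vars:
    ha_cluster_cluster_name: my-new-cluster
    ha_cluster_hacluster_password: password
    ha_cluster_export_configuration: true

  roles:
    - linux-system-roles.ha_cluster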
+You may use the ha_cluster_facts variable in your playbook depending on
+your needs.
+If you just want to see the content of the variable, use the ansible
+debug module like this:
+- hosts: node1
+ vars:
+ ha_cluster_cluster_present: null
+ ha_cluster_qnetd: null
+ ha_cluster_export_configuration: true
+
+ roles:
+ - linux-system-roles.ha_cluster
+
+ tasks:
+ - name: Print ha_cluster_facts variable
+ debug:
+ var: ha_cluster_facts
+Or you may want to save the configuration to a file on your controller
+node in YAML format with a task similar to this one, so that you can
+write a playbook around it:
+- hosts: node1
+ vars:
+ ha_cluster_cluster_present: null
+ ha_cluster_qnetd: null
+ ha_cluster_export_configuration: true
+
+ roles:
+ - linux-system-roles.ha_cluster
+
+ tasks:
+ - name: Save current cluster configuration to a file
+ delegate_to: localhost
+ copy:
+ content: "{{ ha_cluster_facts | to_nice_yaml(sort_keys=false) }}"
+ dest: /path/to/file
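+Once saved, the exported file can be fed back to the role. The following
+is a minimal sketch, not taken from the role documentation: it assumes
+the export was written to /path/to/file as above and supplies the
+mandatory ha_cluster_hacluster_password separately, since the password
+is never part of the export:
- hosts: node1 node2
  vars_files:
    # exported configuration saved by the previous task
    - /path/to/file
  vars:
    ha_cluster_hacluster_password: password

  roles:
    - linux-system-roles.ha_cluster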
The following examples show what the structure of the role variables
looks like. They are not guides or best practices for configuring a
cluster.
@@ -1872,772 +2005,772 @@
true in your playbooks using the ha_cluster role.
-- name: Manage HA cluster and firewall and selinux
- hosts: node1 node2
- vars:
- ha_cluster_manage_firewall: true
- ha_cluster_manage_selinux: true
-
- roles:
- - linux-system-roles.ha_cluster
- name: Manage HA cluster and firewall and selinux
+ hosts: node1 node2
+ vars:
+ ha_cluster_manage_firewall: true
+ ha_cluster_manage_selinux: true
+
+ roles:
+ - linux-system-roles.ha_cluster
certificate role
This example creates self-signed pcsd certificate and private key files
in /var/lib/pcsd with the file names FILENAME.crt and FILENAME.key,
respectively.
-- name: Manage HA cluster with certificates
- hosts: node1 node2
- vars:
- ha_cluster_pcsd_certificates:
- - name: FILENAME
- common_name: "{{ ansible_hostname }}"
- ca: self-sign
- roles:
- - linux-system-roles.ha_cluster
- name: Manage HA cluster with certificates
+ hosts: node1 node2
+ vars:
+ ha_cluster_pcsd_certificates:
+ - name: FILENAME
+ common_name: "{{ ansible_hostname }}"
+ ca: self-sign
+ roles:
+ - linux-system-roles.ha_cluster
- name: Manage HA cluster with no resources
- hosts: node1 node2
- vars:
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
-
- roles:
- - linux-system-roles.ha_cluster
- name: Manage HA cluster with no resources
+ hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+
+ roles:
+ - linux-system-roles.ha_cluster
- name: Manage HA cluster with Corosync options
- hosts: node1 node2
- vars:
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
- ha_cluster_transport:
- type: knet
- options:
- - name: ip_version
- value: ipv4-6
- - name: link_mode
- value: active
- links:
- -
- - name: linknumber
- value: 1
- - name: link_priority
- value: 5
- -
- - name: linknumber
- value: 0
- - name: link_priority
- value: 10
- compression:
- - name: level
- value: 5
- - name: model
- value: zlib
- crypto:
- - name: cipher
- value: none
- - name: hash
- value: none
- ha_cluster_totem:
- options:
- - name: block_unlisted_ips
- value: 'yes'
- - name: send_join
- value: 0
- ha_cluster_quorum:
- options:
- - name: auto_tie_breaker
- value: 1
- - name: wait_for_all
- value: 1
-
- roles:
- - linux-system-roles.ha_cluster
- name: Manage HA cluster with Corosync options
+ hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+ ha_cluster_transport:
+ type: knet
+ options:
+ - name: ip_version
+ value: ipv4-6
+ - name: link_mode
+ value: active
+ links:
+ -
+ - name: linknumber
+ value: 1
+ - name: link_priority
+ value: 5
+ -
+ - name: linknumber
+ value: 0
+ - name: link_priority
+ value: 10
+ compression:
+ - name: level
+ value: 5
+ - name: model
+ value: zlib
+ crypto:
+ - name: cipher
+ value: none
+ - name: hash
+ value: none
+ ha_cluster_totem:
+ options:
+ - name: block_unlisted_ips
+ value: 'yes'
+ - name: send_join
+ value: 0
+ ha_cluster_quorum:
+ options:
+ - name: auto_tie_breaker
+ value: 1
+ - name: wait_for_all
+ value: 1
+
+ roles:
+ - linux-system-roles.ha_cluster
ha_cluster_node_options
variable
- hosts: node1 node2
- vars:
- my_sbd_devices:
- # This variable is not used by the role directly.
- # Its purpose is to define SBD devices once so they don't need
- # to be repeated several times in the role variables.
- # Instead, variables directly used by the role refer to this variable.
- - /dev/disk/by-id/000001
- - /dev/disk/by-id/000002
- - /dev/disk/by-id/000003
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
- ha_cluster_sbd_enabled: true
- ha_cluster_sbd_options:
- - name: delay-start
- value: 'no'
- - name: startmode
- value: always
- - name: timeout-action
- value: 'flush,reboot'
- - name: watchdog-timeout
- value: 30
- ha_cluster_node_options:
- - node_name: node1
- sbd_watchdog_modules:
- - iTCO_wdt
- sbd_watchdog_modules_blocklist:
- - ipmi_watchdog
- sbd_watchdog: /dev/watchdog1
- sbd_devices: "{{ my_sbd_devices }}"
- - node_name: node2
- sbd_watchdog_modules:
- - iTCO_wdt
- sbd_watchdog_modules_blocklist:
- - ipmi_watchdog
- sbd_watchdog: /dev/watchdog1
- sbd_devices: "{{ my_sbd_devices }}"
- # Best practice for setting SBD timeouts:
- # watchdog-timeout * 2 = msgwait-timeout (set automatically)
- # msgwait-timeout * 1.2 = stonith-timeout
- ha_cluster_cluster_properties:
- - attrs:
- - name: stonith-timeout
- value: 72
- ha_cluster_resource_primitives:
- - id: fence_sbd
- agent: 'stonith:fence_sbd'
- instance_attrs:
- - attrs:
- - name: devices
- value: "{{ my_sbd_devices | join(',') }}"
- - name: pcmk_delay_base
- value: 30
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
+ vars:
+ my_sbd_devices:
+ # This variable is not used by the role directly.
+ # Its purpose is to define SBD devices once so they don't need
+ # to be repeated several times in the role variables.
+ # Instead, variables directly used by the role refer to this variable.
+ - /dev/disk/by-id/000001
+ - /dev/disk/by-id/000002
+ - /dev/disk/by-id/000003
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+ ha_cluster_sbd_enabled: true
+ ha_cluster_sbd_options:
+ - name: delay-start
+ value: 'no'
+ - name: startmode
+ value: always
+ - name: timeout-action
+ value: 'flush,reboot'
+ - name: watchdog-timeout
+ value: 30
+ ha_cluster_node_options:
+ - node_name: node1
+ sbd_watchdog_modules:
+ - iTCO_wdt
+ sbd_watchdog_modules_blocklist:
+ - ipmi_watchdog
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices: "{{ my_sbd_devices }}"
+ - node_name: node2
+ sbd_watchdog_modules:
+ - iTCO_wdt
+ sbd_watchdog_modules_blocklist:
+ - ipmi_watchdog
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices: "{{ my_sbd_devices }}"
+ # Best practice for setting SBD timeouts:
+ # watchdog-timeout * 2 = msgwait-timeout (set automatically)
+ # msgwait-timeout * 1.2 = stonith-timeout
+ ha_cluster_cluster_properties:
+ - attrs:
+ - name: stonith-timeout
+ value: 72
+ ha_cluster_resource_primitives:
+ - id: fence_sbd
+ agent: 'stonith:fence_sbd'
+ instance_attrs:
+ - attrs:
+ - name: devices
+ value: "{{ my_sbd_devices | join(',') }}"
+ - name: pcmk_delay_base
+ value: 30
+
+ roles:
+ - linux-system-roles.ha_cluster
ha_cluster
variable
The same result can be achieved by specifying node-specific options in
inventory like this:
-all:
- hosts:
- node1:
- ha_cluster:
- sbd_watchdog_modules:
- - iTCO_wdt
- sbd_watchdog_modules_blocklist:
- - ipmi_watchdog
- sbd_watchdog: /dev/watchdog1
- sbd_devices:
- - /dev/disk/by-id/000001
- - /dev/disk/by-id/000002
- - /dev/disk/by-id/000003
- node2:
- ha_cluster:
- sbd_watchdog_modules:
- - iTCO_wdt
- sbd_watchdog_modules_blocklist:
- - ipmi_watchdog
- sbd_watchdog: /dev/watchdog1
- sbd_devices:
- - /dev/disk/by-id/000001
- - /dev/disk/by-id/000002
- - /dev/disk/by-id/000003
all:
+ hosts:
+ node1:
+ ha_cluster:
+ sbd_watchdog_modules:
+ - iTCO_wdt
+ sbd_watchdog_modules_blocklist:
+ - ipmi_watchdog
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices:
+ - /dev/disk/by-id/000001
+ - /dev/disk/by-id/000002
+ - /dev/disk/by-id/000003
+ node2:
+ ha_cluster:
+ sbd_watchdog_modules:
+ - iTCO_wdt
+ sbd_watchdog_modules_blocklist:
+ - ipmi_watchdog
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices:
+ - /dev/disk/by-id/000001
+ - /dev/disk/by-id/000002
+ - /dev/disk/by-id/000003
Variables specified in inventory can be omitted when writing the playbook:
-- hosts: node1 node2
- vars:
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
- ha_cluster_sbd_enabled: true
- ha_cluster_sbd_options:
- - name: delay-start
- value: 'no'
- - name: startmode
- value: always
- - name: timeout-action
- value: 'flush,reboot'
- - name: watchdog-timeout
- value: 30
- # Best practice for setting SBD timeouts:
- # watchdog-timeout * 2 = msgwait-timeout (set automatically)
- # msgwait-timeout * 1.2 = stonith-timeout
- ha_cluster_cluster_properties:
- - attrs:
- - name: stonith-timeout
- value: 72
- ha_cluster_resource_primitives:
- - id: fence_sbd
- agent: 'stonith:fence_sbd'
- instance_attrs:
- - attrs:
- # taken from host_vars
- # this only works if all nodes have the same sbd_devices
- - name: devices
- value: "{{ ha_cluster.sbd_devices | join(',') }}"
- - name: pcmk_delay_base
- value: 30
-
- roles:
- - linux-system-roles.ha_cluster
If both the ha_cluster_node_options and ha_cluster variables contain SBD
options, those in ha_cluster_node_options have precedence.
- hosts: node1 node2
- vars:
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
- ha_cluster_cluster_properties:
- - attrs:
- - name: stonith-enabled
- value: 'true'
- - name: no-quorum-policy
- value: stop
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
- vars:
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
- ha_cluster_resource_primitives:
- - id: xvm-fencing
- agent: 'stonith:fence_xvm'
- instance_attrs:
- - attrs:
- - name: pcmk_host_list
- value: node1 node2
- - id: simple-resource
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: resource-with-options
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- instance_attrs:
- - attrs:
- - name: fake
- value: fake-value
- - name: passwd
- value: passwd-value
- meta_attrs:
- - attrs:
- - name: target-role
- value: Started
- - name: is-managed
- value: 'true'
- operations:
- - action: start
- attrs:
- - name: timeout
- value: '30s'
- - action: monitor
- attrs:
- - name: timeout
- value: '5'
- - name: interval
- value: '1min'
- - id: example-1
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-2
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-3
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: simple-clone
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: clone-with-options
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: bundled-resource
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- ha_cluster_resource_groups:
- - id: simple-group
- resource_ids:
- - example-1
- - example-2
- meta_attrs:
- - attrs:
- - name: target-role
- value: Started
- - name: is-managed
- value: 'true'
- - id: cloned-group
- resource_ids:
- - example-3
- ha_cluster_resource_clones:
- - resource_id: simple-clone
- - resource_id: clone-with-options
- promotable: true
- id: custom-clone-id
- meta_attrs:
- - attrs:
- - name: clone-max
- value: '2'
- - name: clone-node-max
- value: '1'
- - resource_id: cloned-group
- promotable: true
- ha_cluster_resource_bundles:
- - id: bundle-with-resource
- resource-id: bundled-resource
- container:
- type: podman
- options:
- - name: image
- value: my:image
- network_options:
- - name: control-port
- value: 3121
- port_map:
- -
- - name: port
- value: 10001
- -
- - name: port
- value: 10002
- - name: internal-port
- value: 10003
- storage_map:
- -
- - name: source-dir
- value: /srv/daemon-data
- - name: target-dir
- value: /var/daemon/data
- -
- - name: source-dir-root
- value: /var/log/pacemaker/bundles
- - name: target-dir
- value: /var/log/daemon
- meta_attrs:
- - attrs:
- - name: target-role
- value: Started
- - name: is-managed
- value: 'true'
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- # Set a different `resource-stickiness` value during and outside work
- # hours. This allows resources to automatically move back to their most
- # preferred hosts, but at a time that (in theory) does not interfere with
- # business activities.
- ha_cluster_resource_defaults:
- meta_attrs:
- - id: core-hours
- rule: date-spec hours=9-16 weekdays=1-5
- score: 2
- attrs:
- - name: resource-stickiness
- value: INFINITY
- - id: after-hours
- score: 1
- attrs:
- - name: resource-stickiness
- value: 0
- # Default the timeout on all 10-second-interval monitor actions on IPaddr2
- # resources to 8 seconds.
- ha_cluster_resource_operation_defaults:
- meta_attrs:
- - rule: resource ::IPaddr2 and op monitor interval=10s
- score: INFINITY
- attrs:
- - name: timeout
- value: 8s
-
- roles:
- - linux-system-roles.ha_cluster
+If both the ha_cluster_node_options and ha_cluster variables contain
+SBD options, those in ha_cluster_node_options have precedence.
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_resource_primitives:
- - id: apc1
- agent: 'stonith:fence_apc_snmp'
- instance_attrs:
- - attrs:
- - name: ip
- value: apc1.example.com
- - name: username
- value: user
- - name: password
- value: secret
- - name: pcmk_host_map
- value: node1:1;node2:2
- - id: apc2
- agent: 'stonith:fence_apc_snmp'
- instance_attrs:
- - attrs:
- - name: ip
- value: apc2.example.com
- - name: username
- value: user
- - name: password
- value: secret
- - name: pcmk_host_map
- value: node1:1;node2:2
- # Nodes have redundant power supplies, apc1 and apc2. Cluster must ensure
- # that when attempting to reboot a node, both power supplies are turned off
- # before either power supply is turned back on.
- ha_cluster_stonith_levels:
- - level: 1
- target: node1
- resource_ids:
- - apc1
- - apc2
- - level: 1
- target: node2
- resource_ids:
- - apc1
- - apc2
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- # In order to use constraints, we need resources the constraints will apply
- # to.
- ha_cluster_resource_primitives:
- - id: xvm-fencing
- agent: 'stonith:fence_xvm'
- instance_attrs:
- - attrs:
- - name: pcmk_host_list
- value: node1 node2
- - id: example-1
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-2
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-3
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-4
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-5
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-6
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- # location constraints
- ha_cluster_constraints_location:
- # resource ID and node name
- - resource:
- id: example-1
- node: node1
- options:
- - name: score
- value: 20
- # resource pattern and node name
- - resource:
- pattern: example-\d+
- node: node1
- options:
- - name: score
- value: 10
- # resource ID and rule
- - resource:
- id: example-2
- rule: '#uname eq node2 and date in_range 2022-01-01 to 2022-02-28'
- # resource pattern and rule
- - resource:
- pattern: example-\d+
- rule: node-type eq weekend and date-spec weekdays=6-7
- # colocation constraints
- ha_cluster_constraints_colocation:
- # simple constraint
- - resource_leader:
- id: example-3
- resource_follower:
- id: example-4
- options:
- - name: score
- value: -5
- # set constraint
- - resource_sets:
- - resource_ids:
- - example-1
- - example-2
- - resource_ids:
- - example-5
- - example-6
- options:
- - name: sequential
- value: "false"
- options:
- - name: score
- value: 20
- # order constraints
- ha_cluster_constraints_order:
- # simple constraint
- - resource_first:
- id: example-1
- resource_then:
- id: example-6
- options:
- - name: symmetrical
- value: "false"
- # set constraint
- - resource_sets:
- - resource_ids:
- - example-1
- - example-2
- options:
- - name: require-all
- value: "false"
- - name: sequential
- value: "false"
- - resource_ids:
- - example-3
- - resource_ids:
- - example-4
- - example-5
- options:
- - name: sequential
- value: "false"
- # ticket constraints
- ha_cluster_constraints_ticket:
- # simple constraint
- - resource:
- id: example-1
- ticket: ticket1
- options:
- - name: loss-policy
- value: stop
- # set constraint
- - resource_sets:
- - resource_ids:
- - example-3
- - example-4
- - example-5
- ticket: ticket2
- options:
- - name: loss-policy
- value: fence
-
- roles:
- - linux-system-roles.ha_cluster
-Before you can add a quorum device to a cluster, you need to set the
-device up. This needs to be done only once for each quorum device. Once
-it has been set up, you can use a quorum device in any number of
-clusters.
-Note that you cannot run a quorum device on a cluster node.
+ ha_cluster_resource_primitives:
+ - id: xvm-fencing
+ agent: 'stonith:fence_xvm'
+ instance_attrs:
+ - attrs:
+ - name: pcmk_host_list
+ value: node1 node2
+ - id: simple-resource
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ - id: resource-with-options
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ instance_attrs:
+ - attrs:
+ - name: fake
+ value: fake-value
+ - name: passwd
+ value: passwd-value
+ meta_attrs:
+ - attrs:
+ - name: target-role
+ value: Started
+ - name: is-managed
+ value: 'true'
+ operations:
+ - action: start
+ attrs:
+ - name: timeout
+ value: '30s'
+ - action: monitor
+ attrs:
+ - name: timeout
+ value: '5'
+ - name: interval
+ value: '1min'
+ - id: example-1
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ - id: example-2
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ - id: example-3
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ - id: simple-clone
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ - id: clone-with-options
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ - id: bundled-resource
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ ha_cluster_resource_groups:
+ - id: simple-group
+ resource_ids:
+ - example-1
+ - example-2
+ meta_attrs:
+ - attrs:
+ - name: target-role
+ value: Started
+ - name: is-managed
+ value: 'true'
+ - id: cloned-group
+ resource_ids:
+ - example-3
+ ha_cluster_resource_clones:
+ - resource_id: simple-clone
+ - resource_id: clone-with-options
+ promotable: true
+ id: custom-clone-id
+ meta_attrs:
+ - attrs:
+ - name: clone-max
+ value: '2'
+ - name: clone-node-max
+ value: '1'
+ - resource_id: cloned-group
+ promotable: true
+ ha_cluster_resource_bundles:
+ - id: bundle-with-resource
+ resource-id: bundled-resource
+ container:
+ type: podman
+ options:
+ - name: image
+ value: my:image
+ network_options:
+ - name: control-port
+ value: 3121
+ port_map:
+ -
+ - name: port
+ value: 10001
+ -
+ - name: port
+ value: 10002
+ - name: internal-port
+ value: 10003
+ storage_map:
+ -
+ - name: source-dir
+ value: /srv/daemon-data
+ - name: target-dir
+ value: /var/daemon/data
+ -
+ - name: source-dir-root
+ value: /var/log/pacemaker/bundles
+ - name: target-dir
+ value: /var/log/daemon
+ meta_attrs:
+ - attrs:
+ - name: target-role
+ value: Started
+ - name: is-managed
+ value: 'true'
+
+ roles:
+ - linux-system-roles.ha_cluster
+- hosts: nodeQ
+- hosts: node1 node2
vars:
- ha_cluster_cluster_present: false
+ ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_qnetd:
- present: true
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_quorum:
- device:
- model: net
- model_options:
- - name: host
- value: nodeQ
- - name: algorithm
- value: lms
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_node_options:
- - node_name: node1
- attributes:
- - attrs:
- - name: attribute1
- value: value1A
- - name: attribute2
- value: value2A
- - node_name: node2
- attributes:
- - attrs:
- - name: attribute1
- value: value1B
- - name: attribute2
- value: value2B
-
- roles:
- - linux-system-roles.ha_cluster
+Before you can add a quorum device to a cluster, you need to set the
+device up. This needs to be done only once for each quorum device. Once
+it has been set up, you can use a quorum device in any number of
+clusters.
+Note that you cannot run a quorum device on a cluster node.
- hosts: node1 node2
+- hosts: nodeQ
vars:
- ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_cluster_present: false
ha_cluster_hacluster_password: password
- # To use an ACL role permission reference, the reference must exist in CIB.
- ha_cluster_resource_primitives:
- - id: not-for-operator
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- # ACLs must be enabled (using the enable-acl cluster property) in order to
- # be effective.
- ha_cluster_cluster_properties:
- - attrs:
- - name: enable-acl
- value: 'true'
- ha_cluster_acls:
- acl_roles:
- - id: operator
- description: HA cluster operator
- permissions:
- - kind: write
- xpath: //crm_config//nvpair[@name='maintenance-mode']
- - kind: deny
- reference: not-for-operator
- - id: administrator
- permissions:
- - kind: write
- xpath: /cib
- acl_users:
- - id: alice
- roles:
- - operator
- - administrator
- - id: bob
- roles:
- - administrator
- acl_groups:
- - id: admins
- roles:
- - administrator
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- # For utilization to have an effect, the `placement-strategy` property
- # must be set and its value must be different from the value `default`.
- ha_cluster_cluster_properties:
- - attrs:
- - name: placement-strategy
- value: utilization
- ha_cluster_node_options:
- - node_name: node1
- utilization:
- - attrs:
- - name: utilization1
- value: 1
- - name: utilization2
- value: 2
- - node_name: node2
- utilization:
- - attrs:
- - name: utilization1
- value: 3
- - name: utilization2
- value: 4
- ha_cluster_resource_primitives:
- - id: resource1
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- utilization:
- - attrs:
- - name: utilization1
- value: 2
- - name: utilization2
- value: 3
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_alerts:
- - id: alert1
- path: /alert1/path
- description: Alert1 description
- instance_attrs:
- - attrs:
- - name: alert_attr1_name
- value: alert_attr1_value
- meta_attrs:
- - attrs:
- - name: alert_meta_attr1_name
- value: alert_meta_attr1_value
- recipients:
- - value: recipient_value
- id: recipient1
- description: Recipient1 description
- instance_attrs:
- - attrs:
- - name: recipient_attr1_name
- value: recipient_attr1_value
- meta_attrs:
- - attrs:
- - name: recipient_meta_attr1_name
- value: recipient_meta_attr1_value
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
- ha_cluster_cluster_present: false
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+ # For utilization to have an effect, the `placement-strategy` property
+ # must be set and its value must be different from the value `default`.
+ ha_cluster_cluster_properties:
+ - attrs:
+ - name: placement-strategy
+ value: utilization
+ ha_cluster_node_options:
+ - node_name: node1
+ utilization:
+ - attrs:
+ - name: utilization1
+ value: 1
+ - name: utilization2
+ value: 2
+ - node_name: node2
+ utilization:
+ - attrs:
+ - name: utilization1
+ value: 3
+ - name: utilization2
+ value: 4
+ ha_cluster_resource_primitives:
+ - id: resource1
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ utilization:
+ - attrs:
+ - name: utilization1
+ value: 2
+ - name: utilization2
+ value: 3
+
+ roles:
+ - linux-system-roles.ha_cluster
- hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+ ha_cluster_alerts:
+ - id: alert1
+ path: /alert1/path
+ description: Alert1 description
+ instance_attrs:
+ - attrs:
+ - name: alert_attr1_name
+ value: alert_attr1_value
+ meta_attrs:
+ - attrs:
+ - name: alert_meta_attr1_name
+ value: alert_meta_attr1_value
+ recipients:
+ - value: recipient_value
+ id: recipient1
+ description: Recipient1 description
+ instance_attrs:
+ - attrs:
+ - name: recipient_attr1_name
+ value: recipient_attr1_value
+ meta_attrs:
+ - attrs:
+ - name: recipient_meta_attr1_name
+ value: recipient_meta_attr1_value
+
+ roles:
+ - linux-system-roles.ha_cluster
- hosts: node1 node2
+ vars:
+ ha_cluster_cluster_present: false
+
+ roles:
+ - linux-system-roles.ha_cluster
See README-ostree.md