
Commit

Merge pull request #898 from IBM/fk-misc
Miscellaneous changes
fketelaars authored Feb 8, 2025
2 parents f44c9a6 + 973b8e5 commit 40a4073
Showing 86 changed files with 543 additions and 402 deletions.
1 change: 1 addition & 0 deletions Dockerfile
@@ -12,6 +12,7 @@ LABEL authors="Arthur Laimbock, \
Markus Wiegleb, \
Frank Ketelaars, \
Jiri Petnik"
LABEL product=cloud-pak-deployer

USER 0

1 change: 1 addition & 0 deletions Dockerfile.ppc64le
@@ -12,6 +12,7 @@ LABEL authors="Arthur Laimbock, \
Markus Wiegleb, \
Frank Ketelaars, \
Jiri Petnik"
LABEL product=cloud-pak-deployer

USER 0

57 changes: 35 additions & 22 deletions automation-generators/aws/openshift/preprocessor.py
@@ -26,10 +26,10 @@
# - subnet-027ca7cc695ce8515
# cloud_native_toolkit: False
# openshift_storage:
# - storage_name: ocs-storage
# storage_type: ocs
# ocs_storage_label: ocs
# ocs_storage_size_gb: 500
# - storage_name: odf-storage
# storage_type: odf
# odf_storage_label: ocs
# odf_storage_size_gb: 500

def preprocessor(attributes=None, fullConfig=None, moduleVariables=None):
g = GeneratorPreProcessor(attributes,fullConfig,moduleVariables)
@@ -119,28 +119,41 @@ def preprocessor(attributes=None, fullConfig=None, moduleVariables=None):
# Check openshift_storage attributes
if len(ge['openshift_storage']) < 1:
g.appendError(msg='At least one openshift_storage element must be specified.')
for os in ge['openshift_storage']:
for i, os in enumerate(ge['openshift_storage']):
if "storage_name" not in os:
g.appendError(msg='storage_name must be specified for all openshift_storage elements')
if "storage_type" not in os:
g.appendError(msg='storage_type must be specified for all openshift_storage elements')
if "storage_type" in os and os['storage_type'] not in ['ocs','aws-elastic']:
g.appendError(msg='storage_type must be ocs or aws-elastic')
if "storage_type" in os and os['storage_type']=='aws-elastic':
nfs_server_names = []
if 'nfs_server' in fc:
nfs_server_names = fc.match('nfs_server[*].name')
if "storage_name" not in os:
g.appendError(msg='storage_name must be specified when storage_type is aws-elastic')
elif os['storage_name'] not in nfs_server_names:
g.appendError(msg="'"+ os['storage_name'] + "' is not an existing nfs_server name (Found nfs_server: ["+ ','.join(nfs_server_names) +"] )")
if "storage_type" in os and os['storage_type']=='ocs':
if "ocs_storage_label" not in os:
g.appendError(msg='ocs_storage_label must be specified when storage_type is ocs')
if "ocs_storage_size_gb" not in os:
g.appendError(msg='ocs_storage_size_gb must be specified when storage_type is ocs')
if "ocs_version" in os and version.parse(str(os['ocs_version'])) < version.parse("4.6"):
g.appendError(msg='ocs_version must be 4.6 or higher. If the OCS version is 4.10, specify ocs_version: "4.10"')
else:
if os['storage_type']=='ocs':
os.update([("storage_type", "odf")])
if os['storage_type'] not in ['odf','aws-elastic']:
g.appendError(msg='storage_type must be odf or aws-elastic')
if os['storage_type']=='aws-elastic':
nfs_server_names = []
if 'nfs_server' in fc:
nfs_server_names = fc.match('nfs_server[*].name')
if "storage_name" not in os:
g.appendError(msg='storage_name must be specified when storage_type is aws-elastic')
elif os['storage_name'] not in nfs_server_names:
g.appendError(msg="'"+ os['storage_name'] + "' is not an existing nfs_server name (Found nfs_server: ["+ ','.join(nfs_server_names) +"] )")
if os['storage_type']=='odf':
if "ocs_storage_label" in os:
os.update([("odf_storage_label", os['ocs_storage_label'])])
if "ocs_storage_size_gb" in os:
os.update([("odf_storage_size_gb", os['ocs_storage_size_gb'])])
if "ocs_version" in os:
os.update([("odf_version", os['ocs_version'])])
if "odf_storage_label" not in os:
g.appendError(msg='odf_storage_label must be specified when storage_type is odf')
if "odf_storage_size_gb" not in os:
g.appendError(msg='odf_storage_size_gb must be specified when storage_type is odf')
if "odf_version" in os and version.parse(str(os['odf_version'])) < version.parse("4.6"):
g.appendError(msg='odf_version must be 4.6 or higher. If the ODF version is 4.10, specify odf_version: "4.10"')

# Ensure the openshift_storage attribute is updated
ge['openshift_storage'][i]=os
g.setExpandedAttributes(ge)

#check variables for aws
if '_aws_access_key' in var and var['_aws_access_key'] == "":
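
As a side note, a minimal standalone sketch of the backward-compatibility pattern the hunk above applies (illustrative only; the real preprocessor works on elements of ge['openshift_storage'] in place and reports problems through g.appendError, and the function name and sample data below are made up for the example):

def alias_ocs_to_odf(storage):
    # Accept the legacy ocs_* spelling and normalize it to the new odf_* attributes.
    if storage.get('storage_type') == 'ocs':
        storage['storage_type'] = 'odf'
    for old_key, new_key in [('ocs_storage_label', 'odf_storage_label'),
                             ('ocs_storage_size_gb', 'odf_storage_size_gb'),
                             ('ocs_version', 'odf_version')]:
        if old_key in storage:
            storage[new_key] = storage[old_key]
    return storage

legacy = {'storage_name': 'ocs-storage', 'storage_type': 'ocs',
          'ocs_storage_label': 'ocs', 'ocs_storage_size_gb': 500}
print(alias_ocs_to_odf(legacy))
# {'storage_name': 'ocs-storage', 'storage_type': 'odf', 'ocs_storage_label': 'ocs',
#  'ocs_storage_size_gb': 500, 'odf_storage_label': 'ocs', 'odf_storage_size_gb': 500}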
50 changes: 33 additions & 17 deletions automation-generators/azure/openshift/preprocessor.py
@@ -17,11 +17,11 @@
# service_cidr: "172.30.0.0/16"
# machine_cidr:
# openshift_storage:
# - storage_name: ocs-storage
# storage_type: ocs
# ocs_storage_label: ocs
# ocs_storage_size_gb: 512
# ocs_dynamic_storage_class: managed-premium
# - storage_name: odf-storage
# storage_type: odf
# odf_storage_label: ocs
# odf_storage_size_gb: 512
# odf_dynamic_storage_class: managed-premium

def preprocessor(attributes=None, fullConfig=None, moduleVariables=None):
g = GeneratorPreProcessor(attributes,fullConfig,moduleVariables)
@@ -113,22 +113,38 @@ def preprocessor(attributes=None, fullConfig=None, moduleVariables=None):
# Check openshift_storage attributes
if len(ge['openshift_storage']) < 1:
g.appendError(msg='At least one openshift_storage element must be specified.')
for os in ge['openshift_storage']:
for i, os in enumerate(ge['openshift_storage']):
if "storage_name" not in os:
g.appendError(msg='storage_name must be specified for all openshift_storage elements')
if "storage_type" not in os:
g.appendError(msg='storage_type must be specified for all openshift_storage elements')
if "storage_type" in os and os['storage_type'] not in ['ocs','nfs']:
g.appendError(msg='storage_type must be ocs or nfs')
if "storage_type" in os and os['storage_type']=='ocs':
if "ocs_storage_label" not in os:
g.appendError(msg='ocs_storage_label must be specified when storage_type is ocs')
if "ocs_storage_size_gb" not in os:
g.appendError(msg='ocs_storage_size_gb must be specified when storage_type is ocs')
if "ocs_dynamic_storage_class" not in os:
g.appendError(msg='ocs_dynamic_storage_class must be specified when storage_type is ocs')
if "ocs_version" in os and version.parse(str(os['ocs_version'])) < version.parse("4.6"):
g.appendError(msg='ocs_version must be 4.6 or higher. If the OCS version is 4.10, specify ocs_version: "4.10"')
else:
if os['storage_type']=='ocs':
os.update([("storage_type", "odf")])
if os['storage_type'] not in ['odf','nfs']:
g.appendError(msg='storage_type must be odf or nfs')

if os['storage_type']=='odf':
if "ocs_storage_label" in os:
os.update([("odf_storage_label", os['ocs_storage_label'])])
if "ocs_storage_size_gb" in os:
os.update([("odf_storage_size_gb", os['ocs_storage_size_gb'])])
if "ocs_dynamic_storage_class" in os:
os.update([("odf_dynamic_storage_class", os['ocs_dynamic_storage_class'])])
if "ocs_version" in os:
os.update([("odf_version", os['ocs_version'])])
if "odf_storage_label" not in os:
g.appendError(msg='odf_storage_label must be specified when storage_type is odf')
if "odf_storage_size_gb" not in os:
g.appendError(msg='odf_storage_size_gb must be specified when storage_type is odf')
if "odf_dynamic_storage_class" not in os:
g.appendError(msg='odf_dynamic_storage_class must be specified when storage_type is odf')
if "odf_version" in os and version.parse(str(os['odf_version'])) < version.parse("4.6"):
g.appendError(msg='odf_version must be 4.6 or higher. If the ODF version is 4.10, specify odf_version: "4.10"')

# Ensure the openshift_storage attribute is updated
ge['openshift_storage'][i]=os
g.setExpandedAttributes(ge)

# Check azure configuration
if len(g.getErrors()) == 0:
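
The error text in both preprocessors insists on odf_version: "4.10" (quoted) because an unquoted 4.10 is read by YAML as the float 4.1, which then fails the 4.6 minimum-version comparison. A small sketch of that effect, assuming version.parse comes from the packaging library (an assumption; the str(...) wrapping in the check above points at exactly this pitfall):

from packaging import version

MINIMUM_ODF = version.parse("4.6")

print(version.parse(str(4.10)) >= MINIMUM_ODF)    # False: the number 4.10 is really 4.1
print(version.parse(str("4.10")) >= MINIMUM_ODF)  # True: the quoted string keeps its meaning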
23 changes: 15 additions & 8 deletions automation-generators/existing-ocp/openshift/preprocessor.py
@@ -93,19 +93,26 @@ def preprocessor(attributes=None, fullConfig=None, moduleVariables=None):
# Validate openshift_storage attributes
if len(ge['openshift_storage']) < 1:
g.appendError(msg='At least one openshift_storage element must be specified.')
for os in ge['openshift_storage']:
for i, os in enumerate(ge['openshift_storage']):
if "storage_name" not in os:
g.appendError(msg='storage_name must be specified for all openshift_storage elements')
if "storage_type" not in os:
g.appendError(msg='storage_type must be specified for all openshift_storage elements')
if "storage_type" in os and os['storage_type'] not in ['nfs','ocs','aws-elastic','pwx','ibm-storage-fdf','custom','auto']:
g.appendError(msg='storage_type must be nfs, ocs, aws-elastic, ibm-storage-fdf, custom, or auto')
if "storage_type" in os and os['storage_type'] == 'custom':
if "ocp_storage_class_file" not in os:
g.appendError(msg='ocp_storage_class_file must be specified when storage_type is custom')
if "ocp_storage_class_block" not in os:
g.appendError(msg='ocp_storage_class_block must be specified when storage_type is custom')
else:
if os['storage_type']=='ocs':
os.update([("storage_type", "odf")])
if os['storage_type'] not in ['nfs','odf','odf-ext','aws-elastic','pwx','ibm-storage-fdf','custom','auto']:
g.appendError(msg='storage_type must be nfs, odf, odf-ext, aws-elastic, ibm-storage-fdf, custom, or auto')
if os['storage_type'] == 'custom':
if "ocp_storage_class_file" not in os:
g.appendError(msg='ocp_storage_class_file must be specified when storage_type is custom')
if "ocp_storage_class_block" not in os:
g.appendError(msg='ocp_storage_class_block must be specified when storage_type is custom')

# Ensure the openshift_storage attribute is updated
ge['openshift_storage'][i]=os
g.setExpandedAttributes(ge)

# Return result containing updated attributes and errors
result = {
'attributes_updated': g.getExpandedAttributes(),
15 changes: 1 addition & 14 deletions automation-generators/generic/cp4d/preprocessor.py
@@ -8,24 +8,11 @@
# openshift:
# - name: sample
# ocp_version: 4.6
# compute_flavour: bx2.16x64
# compute_nodes: 3
# resource_group_name: ibm
# infrastructure:
# type: vpc
# vpc_name: "{{ env_id }}"
# subnets:
# - "{{ env_id }}-subnet-zone-1"
# - "{{ env_id }}-subnet-zone-2"
# - "{{ env_id }}-subnet-zone-3"
#...
# openshift_storage:
# - storage_name: nfs-storage
# storage_type: nfs
# nfs_server_name: "{{ env_id }}-nfs"
# - storage_name: ocs-storage
# storage_type: ocs
# ocs_storage_label: ocs
# ocs_storage_size_gb: 500

# Validating:
# ---
10 changes: 5 additions & 5 deletions automation-generators/generic/cp4i/preprocessor.py
@@ -19,10 +19,10 @@
# - "{{ env_id }}-subnet-zone-2"
# - "{{ env_id }}-subnet-zone-3"
# openshift_storage:
# - storage_name: ocs-storage
# storage_type: ocs
# ocs_storage_label: ocs
# ocs_storage_size_gb: 500
# - storage_name: odf-storage
# storage_type: odf
# odf_storage_label: ocs
# odf_storage_size_gb: 500

# Validating:
# ---
@@ -32,7 +32,7 @@

# - project: cp4i
# openshift_cluster_name: "{{ env_id }}"
# openshift_storage_name: nfs-rook-ceph
# openshift_storage_name: odf-storage
# cp4i_version: 2021.4.1
# use_case_files: True
# accept_licenses: False
92 changes: 53 additions & 39 deletions automation-generators/ibm-cloud/openshift/preprocessor.py
@@ -26,10 +26,10 @@
# - storage_name: nfs-storage
# storage_type: nfs
# nfs_server_name: sample-nfs
# - storage_name: ocs-storage
# storage_type: ocs
# ocs_storage_label: ocs
# ocs_storage_size_gb: 500
# - storage_name: odf-storage
# storage_type: odf
# odf_storage_label: ocs
# odf_storage_size_gb: 500

def preprocessor(attributes=None, fullConfig=None, moduleVariables=None):
g = GeneratorPreProcessor(attributes,fullConfig,moduleVariables)
@@ -127,45 +127,59 @@ def preprocessor(attributes=None, fullConfig=None, moduleVariables=None):
# Check openshift_storage attributes
if len(ge['openshift_storage']) < 1:
g.appendError(msg='At least one openshift_storage element must be specified.')
for os in ge['openshift_storage']:
for i, os in enumerate(ge['openshift_storage']):
if "storage_name" not in os:
g.appendError(msg='storage_name must be specified for all openshift_storage elements')
if "storage_type" not in os:
g.appendError(msg='storage_type must be specified for all openshift_storage elements')
if "storage_type" in os and os['storage_type'] not in ['nfs','ocs','pwx']:
g.appendError(msg='storage_type must be nfs, ocs or pwx')
if "storage_type" in os and os['storage_type']=='nfs':
nfs_server_names = []
if 'nfs_server' in fc:
nfs_server_names = fc.match('nfs_server[*].name')
if "nfs_server_name" not in os:
g.appendError(msg='nfs_server_name must be specified when storage_type is nfs')
elif os['nfs_server_name'] not in nfs_server_names:
g.appendError(msg="'"+ os['nfs_server_name'] + "' is not an existing nfs_server name (Found nfs_server: ["+ ','.join(nfs_server_names) +"] )")
if "storage_type" in os and os['storage_type']=='ocs':
if "ocs_storage_label" not in os:
g.appendError(msg='ocs_storage_label must be specified when storage_type is ocs')
if "ocs_storage_size_gb" not in os:
g.appendError(msg='ocs_storage_size_gb must be specified when storage_type is ocs')
if len(ge['infrastructure']['subnets']) != 3:
g.appendError(msg='Storage type OCS was specified but there are not 3 subnets for the cluster. You must have 3 subnets for the OpenShift cluster to implement OCS.')
if "ocs_version" in os and version.parse(str(os['ocs_version'])) < version.parse("4.6"):
g.appendError(msg='ocs_version must be 4.6 or higher. If the OCS version is 4.10, specify ocs_version: "4.10"')

if "storage_type" in os and os['storage_type']=='pwx':
if "pwx_etcd_location" not in os:
g.appendError(msg='pwx_etcd_location must be specified when storage_type is pwx')
if "pwx_storage_size_gb" not in os:
g.appendError(msg='pwx_storage_size_gb must be specified when storage_type is pwx')
if "pwx_storage_iops" not in os:
g.appendError(msg='pwx_storage_iops must be specified when storage_type is pwx')
if "pwx_storage_profile" not in os:
g.appendError(msg='pwx_storage_profile must be specified when storage_type is pwx')
if "portworx_version" not in os:
g.appendError(msg='portworx_version must be specified when storage_type is pwx')
if "stork_version" not in os:
g.appendError(msg='stork_version must be specified when storage_type is pwx')
if len(ge['infrastructure']['subnets']) != 3:
g.appendError(msg='Storage type PWX was specified but there are not 3 subnets for the cluster. You must have 3 subnets for the OpenShift cluster to implement PWX.')
else:
if os['storage_type']=='ocs':
os.update([("storage_type", "odf")])
if os['storage_type'] not in ['nfs','odf','pwx']:
g.appendError(msg='storage_type must be nfs, odf or pwx')
if os['storage_type']=='nfs':
nfs_server_names = []
if 'nfs_server' in fc:
nfs_server_names = fc.match('nfs_server[*].name')
if "nfs_server_name" not in os:
g.appendError(msg='nfs_server_name must be specified when storage_type is nfs')
elif os['nfs_server_name'] not in nfs_server_names:
g.appendError(msg="'"+ os['nfs_server_name'] + "' is not an existing nfs_server name (Found nfs_server: ["+ ','.join(nfs_server_names) +"] )")

if os['storage_type']=='odf':
if "ocs_storage_label" in os:
os.update([("odf_storage_label", os['ocs_storage_label'])])
if "ocs_storage_size_gb" in os:
os.update([("odf_storage_size_gb", os['ocs_storage_size_gb'])])
if "ocs_version" in os:
os.update([("odf_version", os['ocs_version'])])
if "odf_storage_label" not in os:
g.appendError(msg='odf_storage_label must be specified when storage_type is odf')
if "odf_storage_size_gb" not in os:
g.appendError(msg='odf_storage_size_gb must be specified when storage_type is odf')
if "odf_version" in os and version.parse(str(os['odf_version'])) < version.parse("4.6"):
g.appendError(msg='odf_version must be 4.6 or higher. If the ODF version is 4.10, specify odf_version: "4.10"')

if os['storage_type']=='pwx':
if "pwx_etcd_location" not in os:
g.appendError(msg='pwx_etcd_location must be specified when storage_type is pwx')
if "pwx_storage_size_gb" not in os:
g.appendError(msg='pwx_storage_size_gb must be specified when storage_type is pwx')
if "pwx_storage_iops" not in os:
g.appendError(msg='pwx_storage_iops must be specified when storage_type is pwx')
if "pwx_storage_profile" not in os:
g.appendError(msg='pwx_storage_profile must be specified when storage_type is pwx')
if "portworx_version" not in os:
g.appendError(msg='portworx_version must be specified when storage_type is pwx')
if "stork_version" not in os:
g.appendError(msg='stork_version must be specified when storage_type is pwx')
if len(ge['infrastructure']['subnets']) != 3:
g.appendError(msg='Storage type PWX was specified but there are not 3 subnets for the cluster. You must have 3 subnets for the OpenShift cluster to implement PWX.')

# Ensure the openshift_storage attribute is updated
ge['openshift_storage'][i]=os
g.setExpandedAttributes(ge)

result = {
'attributes_updated': g.getExpandedAttributes(),
'errors': g.getErrors()
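
For the nfs branch above, a rough self-contained illustration of the nfs_server_name cross-check; the real code obtains the known names with fc.match('nfs_server[*].name') from the full configuration, which is replaced here by a plain dict so the example runs on its own:

def check_nfs_reference(storage, full_config, errors):
    # Collect the names of all defined nfs_server objects and verify the reference points at one of them.
    names = [srv['name'] for srv in full_config.get('nfs_server', [])]
    if 'nfs_server_name' not in storage:
        errors.append('nfs_server_name must be specified when storage_type is nfs')
    elif storage['nfs_server_name'] not in names:
        errors.append("'" + storage['nfs_server_name'] + "' is not an existing nfs_server name "
                      "(Found nfs_server: [" + ','.join(names) + "] )")

errors = []
config = {'nfs_server': [{'name': 'sample-nfs'}]}
check_nfs_reference({'storage_type': 'nfs', 'nfs_server_name': 'other-nfs'}, config, errors)
print(errors)  # the dangling reference is reported together with the known server names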