# This workflow is a reusable one called by other workflows
name: (template) Elemental E2E CLI tests
on:
workflow_call:
# Variables to set when calling this reusable workflow
inputs:
backup_restore_version:
type: string
ca_type:
default: selfsigned
type: string
cert-manager_version:
type: string
cluster_name:
required: true
type: string
cluster_number:
type: string
cluster_type:
type: string
cypress_tags:
default: main
type: string
destroy_runner:
default: true
type: boolean
iso_boot:
default: false
type: boolean
k8s_version_to_provision:
required: true
type: string
node_number:
default: 5
type: string
operator_repo:
type: string
default: oci://registry.opensuse.org/isv/rancher/elemental/dev/charts/rancher
operator_upgrade:
type: string
      os_to_test:
        type: string
        default: dev
      proxy:
        type: string
rancher_version:
default: stable/latest/none
type: string
rancher_upgrade:
type: string
reset:
default: false
type: boolean
runner_template:
default: elemental-e2e-ci-runner-spot-x86-64-template-n2-standard-16-v5
type: string
      sequential:
        default: false
        type: boolean
test_type:
default: single_cli
type: string
upgrade_image:
type: string
upgrade_os_channel:
type: string
upgrade_type:
type: string
      upstream_cluster_version:
        default: v1.26.10+k3s2
        type: string
      # Label of the runner hosting the job (used in 'runs-on' below)
      uuid:
        required: true
        type: string
zone:
default: us-central1-a
type: string
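# Example caller (illustrative sketch only; the job name and input values below
# are assumptions, not taken from this repository):
#
# jobs:
#   cli:
#     uses: ./.github/workflows/master_cli.yaml
#     with:
#       cluster_name: cluster-k3s
#       k8s_version_to_provision: v1.26.10+k3s2
#       uuid: my-runner-label  # label of the runner that will host the job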
jobs:
cli:
runs-on: ${{ inputs.uuid }}
outputs:
      # For this to work, an 'id:' is mandatory in each step!
steps_status: ${{ join(steps.*.conclusion, ' ') }}
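      # Example (hypothetical caller usage of this output):
      #   if: ${{ contains(needs.cli.outputs.steps_status, 'failure') }}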
env:
ARCH: amd64
CERT_MANAGER_VERSION: ${{ inputs.cert-manager_version }}
CLUSTER_NAME: ${{ inputs.cluster_name }}
CLUSTER_NS: fleet-default
CLUSTER_TYPE: ${{ inputs.cluster_type }}
# K3S / RKE2 flags to use for installation
INSTALL_K3S_SKIP_ENABLE: true
INSTALL_K3S_VERSION: ${{ inputs.upstream_cluster_version }}
INSTALL_RKE2_VERSION: ${{ inputs.upstream_cluster_version }}
      K3S_KUBECONFIG_MODE: "0644"
# Distribution to use to host Rancher Manager (K3s or RKE2)
K8S_UPSTREAM_VERSION: ${{ inputs.upstream_cluster_version }}
# For K8s cluster to provision with Rancher Manager
K8S_VERSION_TO_PROVISION: ${{ inputs.k8s_version_to_provision }}
# For Rancher Manager
RANCHER_VERSION: ${{ inputs.rancher_version }}
TEST_TYPE: ${{ inputs.test_type }}
TIMEOUT_SCALE: 3
steps:
- name: Checkout
id: checkout
uses: actions/checkout@v4
- name: Install Go
id: install_go
uses: actions/setup-go@v5
with:
cache-dependency-path: tests/go.sum
go-version-file: tests/go.mod
- name: Define needed system variables
id: define_sys_vars
run: |
# Add missing PATH, removed in recent distributions for security reasons...
echo "/usr/local/bin" >> ${GITHUB_PATH}
- name: Install Rancher+Elemental components
id: install_rancher_elemental
env:
CA_TYPE: ${{ inputs.ca_type }}
OPERATOR_REPO: ${{ inputs.operator_repo }}
PROXY: ${{ inputs.proxy }}
PUBLIC_DNS: ${{ needs.create-runner.outputs.public_dns }}
PUBLIC_DOMAIN: bc.googleusercontent.com
run: cd tests && make e2e-install-rancher
      - name: Workaround for DynamicSchemas (if needed)
        id: dynamicschemas_workaround
        run: |
# Check if DynamicSchemas for MachineInventorySelectorTemplate exists
if ! kubectl get dynamicschema machineinventoryselectortemplate >/dev/null 2>&1; then
            # If not, we have to add it to avoid weird issues!
echo "WORKAROUND: DynamicSchemas for MachineInventorySelectorTemplate is missing!"
kubectl apply -f tests/assets/add_missing_dynamicschemas.yaml
fi
- name: Install backup-restore components (K3s only for now)
id: install_backup_restore
if: ${{ inputs.test_type == 'single_cli' && contains(inputs.upstream_cluster_version, 'k3s') }}
run: cd tests && make e2e-install-backup-restore
      - name: Extract component versions/information
id: component
run: |
# Extract rancher-backup-operator version
BACKUP_RESTORE_VERSION=$(kubectl get pod \
--namespace cattle-resources-system \
-l app.kubernetes.io/name=rancher-backup \
-o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
# Extract CertManager version
CERT_MANAGER_VERSION=$(kubectl get pod \
--namespace cert-manager \
-l app=cert-manager \
-o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
# Extract elemental-operator version
OPERATOR_VERSION=$(kubectl get pod \
--namespace cattle-elemental-system \
-l app=elemental-operator \
-o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
# Extract Rancher Manager version
RM_VERSION=$(kubectl get pod \
--namespace cattle-system \
-l app=rancher \
-o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
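          # Note: each value above is a full container image reference
          # (registry/repository:tag) reported by the pod, not a bare version number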
# Export values
echo "backup_restore_version=${BACKUP_RESTORE_VERSION}" >> ${GITHUB_OUTPUT}
echo "cert_manager_version=${CERT_MANAGER_VERSION}" >> ${GITHUB_OUTPUT}
echo "operator_version=${OPERATOR_VERSION}" >> ${GITHUB_OUTPUT}
echo "rm_version=${RM_VERSION}" >> ${GITHUB_OUTPUT}
- name: Configure Rancher & Libvirt
id: configure_rancher
if: ${{ inputs.test_type == 'single_cli' }}
run: cd tests && make e2e-configure-rancher
- name: Create ISO image for master pool
id: create_iso_master
if: ${{ inputs.test_type == 'single_cli' }}
env:
EMULATE_TPM: true
OS_TO_TEST: ${{ inputs.os_to_test }}
POOL: master
run: |
          # Only use ISO boot if the upstream cluster is RKE2
          # due to issues with PXE/DHCP traffic
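          # (the ${{ }} expression below expands to the literal string 'true' or
          # 'false' before the script runs, which the shell executes as a builtin)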
if ${{ contains(inputs.upstream_cluster_version, 'rke') }}; then
export ISO_BOOT=true
fi
cd tests && make e2e-iso-image
- name: Extract iPXE artifacts from ISO
id: extract_ipxe_artifacts
if: ${{ inputs.test_type == 'single_cli' && inputs.iso_boot == false }}
run: cd tests && make extract_kernel_init_squash && make ipxe
      - name: Bootstrap nodes 1, 2 and 3 in pool "master" (use emulated TPM if possible)
id: bootstrap_master_nodes
if: ${{ inputs.test_type == 'single_cli' }}
env:
EMULATE_TPM: true
POOL: master
VM_START: 1
VM_END: 3
run: |
          # Only use ISO boot if the upstream cluster is RKE2
          # due to issues with PXE/DHCP traffic
          # For RKE2, set RAM to 10GB and vCPU to 6, a bit more than the recommended values
if ${{ contains(inputs.upstream_cluster_version, 'rke') }}; then
export ISO_BOOT=true
export VM_MEM=10240
export VM_CPU=6
fi
# Execute bootstrapping test
if ${{ inputs.sequential == true }}; then
            # Force node bootstrapping to run sequentially instead of in parallel
cd tests
for ((i = VM_START ; i <= VM_END ; i++)); do
VM_INDEX=${i} make e2e-bootstrap-node
done
else
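            # Parallel path: bootstrap all the nodes with a single make invocation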
cd tests && VM_INDEX=${VM_START} VM_NUMBERS=${VM_END} make e2e-bootstrap-node
fi
      - name: Deploy multiple clusters (with 3 nodes per cluster)
id: deploy_multi_clusters
        if: ${{ inputs.test_type == 'multi_cli' }}
env:
CLUSTER_NUMBER: ${{ inputs.cluster_number }}
ISO_BOOT: ${{ inputs.iso_boot }}
run: |
          # For RKE2, set RAM to 10GB and vCPU to 6, a bit more than the recommended values
if ${{ contains(inputs.upstream_cluster_version, 'rke') }}; then
export VM_MEM=10240
export VM_CPU=6
fi
cd tests && make e2e-multi-cluster
- name: Install a simple application
id: install_simple_app
if: ${{ inputs.test_type == 'single_cli' && contains(inputs.upstream_cluster_version, 'k3s') }}
run: cd tests && make e2e-install-app && make e2e-check-app
- name: Reset a node in the cluster
id: reset_node
if: ${{ inputs.test_type == 'single_cli' && inputs.reset == true }}
run: cd tests && make e2e-reset
- name: Check app after reset
id: check_app
if: ${{ inputs.test_type == 'single_cli' && inputs.reset == true && contains(inputs.upstream_cluster_version, 'k3s') }}
run: cd tests && make e2e-check-app
- name: Upgrade Elemental Operator
id: operator_upgrade
if: ${{ inputs.test_type == 'single_cli' && inputs.operator_upgrade != '' }}
env:
OPERATOR_UPGRADE: ${{ inputs.operator_upgrade }}
run: |
cd tests && make e2e-upgrade-operator
if ${{ contains(inputs.upstream_cluster_version, 'k3s') }}; then
make e2e-check-app
fi
# Extract elemental-operator version
OPERATOR_VERSION=$(kubectl get pod \
--namespace cattle-elemental-system \
-l app=elemental-operator \
-o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
# Export values
echo "operator_upgrade=${OPERATOR_UPGRADE}" >> ${GITHUB_OUTPUT}
echo "operator_version=${OPERATOR_VERSION}" >> ${GITHUB_OUTPUT}
- name: Upgrade Rancher Manager
id: rancher_upgrade
if: ${{ inputs.test_type == 'single_cli' && inputs.rancher_upgrade != '' }}
env:
CA_TYPE: ${{ inputs.ca_type }}
PROXY: ${{ inputs.proxy }}
PUBLIC_DNS: ${{ needs.create-runner.outputs.public_dns }}
PUBLIC_DOMAIN: bc.googleusercontent.com
RANCHER_UPGRADE: ${{ inputs.rancher_upgrade }}
run: |
cd tests && make e2e-upgrade-rancher-manager
if ${{ contains(inputs.upstream_cluster_version, 'k3s') }}; then
make e2e-check-app
fi
# Extract Rancher Manager version
RM_VERSION=$(kubectl get pod \
--namespace cattle-system \
-l app=rancher \
-o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
# Export values
echo "rm_version=${RM_VERSION}" >> ${GITHUB_OUTPUT}
- name: Upgrade node 1 to specified OS version with osImage
id: upgrade_node_1
if: ${{ inputs.test_type == 'single_cli' && inputs.upgrade_image != '' }}
env:
UPGRADE_IMAGE: ${{ inputs.upgrade_image }}
UPGRADE_TYPE: osImage
VM_INDEX: 1
run: |
cd tests && make e2e-upgrade-node
if ${{ contains(inputs.upstream_cluster_version, 'k3s') }}; then
make e2e-check-app
fi
- name: Upgrade other nodes to specified OS version with managedOSVersionName
id: upgrade_other_nodes
if: ${{ inputs.test_type == 'single_cli' && inputs.upgrade_os_channel != '' }}
env:
UPGRADE_OS_CHANNEL: ${{ inputs.upgrade_os_channel }}
UPGRADE_TYPE: managedOSVersionName
VM_INDEX: 2
VM_NUMBERS: 3
run: |
cd tests && make e2e-upgrade-node
if ${{ contains(inputs.upstream_cluster_version, 'k3s') }}; then
make e2e-check-app
fi
- name: Test Backup/Restore Elemental resources with Rancher Manager
id: test_backup_restore
if: ${{ inputs.test_type == 'single_cli' && contains(inputs.upstream_cluster_version, 'k3s') }}
env:
BACKUP_RESTORE_VERSION: ${{ inputs.backup_restore_version }}
run: |
cd tests && make e2e-backup-restore
if ${{ contains(inputs.upstream_cluster_version, 'k3s') }}; then
make e2e-check-app
fi
- name: Extract ISO version
id: iso_version
if: ${{ always() }}
run: |
# Extract OS version from ISO
ISO=$(file -Ls *.iso 2>/dev/null | awk -F':' '/boot sector/ { print $1 }')
if [[ -n "${ISO}" ]]; then
INITRD_FILE=$(isoinfo -i ${ISO} -R -find -type f -name initrd -print 2>/dev/null)
isoinfo -i ${ISO} -R -x ${INITRD_FILE} 2>/dev/null \
| xz -dc \
| cpio -i --to-stdout usr/lib/initrd-release > os-release
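            # os-release is expected to contain a line like IMAGE_TAG=<tag>;
            # the eval below turns it into a shell variable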
eval $(grep IMAGE_TAG os-release 2>/dev/null)
fi
# Export value (even if empty!)
echo "image_tag=${IMAGE_TAG}" >> ${GITHUB_OUTPUT}
- name: Remove old built ISO image
id: clean_master_iso
        # Only one ISO at a time is allowed; the new one will be created afterwards if needed
if: ${{ inputs.test_type == 'single_cli' }}
run: rm -f *.iso
      - name: Create ISO image for worker pool
        id: create_iso_worker
        if: ${{ inputs.test_type == 'single_cli' }}
env:
ISO_BOOT: true
OS_TO_TEST: ${{ inputs.os_to_test }}
POOL: worker
run: cd tests && make e2e-iso-image
- name: Bootstrap additional nodes in pool "worker" (total of ${{ inputs.node_number }})
id: bootstrap_worker_nodes
if: ${{ inputs.test_type == 'single_cli' && inputs.node_number > 3 }}
env:
ISO_BOOT: true
POOL: worker
VM_START: 4
VM_END: ${{ inputs.node_number }}
run: |
          # For RKE2, set RAM to 10GB and vCPU to 6, a bit more than the recommended values
if ${{ contains(inputs.upstream_cluster_version, 'rke') }}; then
export VM_MEM=10240
export VM_CPU=6
fi
if ${{ inputs.sequential == true }}; then
            # Force node bootstrapping to run sequentially instead of in parallel
cd tests
for ((i = VM_START ; i <= VM_END ; i++)); do
VM_INDEX=${i} make e2e-bootstrap-node
done
else
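            # Parallel path: bootstrap all the nodes with a single make invocation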
cd tests && VM_INDEX=${VM_START} VM_NUMBERS=${VM_END} make e2e-bootstrap-node
fi
# Check the installed application
if ${{ contains(inputs.upstream_cluster_version, 'k3s') }}; then
make e2e-check-app
fi
- name: Uninstall Elemental Operator
id: uninstall_elemental_operator
env:
OPERATOR_REPO: ${{ inputs.operator_repo }}
# Don't test Operator uninstall if we want to keep the runner for debugging purposes
if: ${{ inputs.destroy_runner == true && inputs.test_type == 'single_cli' }}
run: cd tests && make e2e-uninstall-operator
      # This step must be called in each workflow that wants a summary!
- name: Get logs and add summary
id: logs_summary
uses: ./.github/actions/logs-and-summary
with:
k8s_version_to_provision: ${{ inputs.k8s_version_to_provision }}