-
Notifications
You must be signed in to change notification settings - Fork 5
/
Copy pathtink-helpers.source.sh
186 lines (170 loc) · 6.24 KB
/
tink-helpers.source.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
# load the tinkerbell sandbox compose environment into the current shell.
# NOTE(review): the .env contents are not visible here; presumably it defines
# the TINKERBELL_* settings used by the compose stack -- confirm against the
# sandbox repository.
source ~/tinkerbell-sandbox/deploy/compose/.env
# path of the webroot served by the sandbox; images installed by the
# install-vagrant-box-* helpers below are copied under this directory.
TINKERBELL_STATE_WEBROOT_PATH="$HOME/tinkerbell-sandbox/deploy/compose/state/webroot"
# run the tink CLI inside the tink-cli compose container, forwarding all
# arguments and stdin.
# NB this will unfortunately write the following noise to stderr:
#      Flag based client configuration failed with err: fetch cert: Get "http://127.0.0.1:42114/cert"
#    there is no universal workaround for it; we have to wait for an
#    upstream fix.
#    see https://github.com/tinkerbell/tink/issues/524
function tink {
  docker exec -i compose-tink-cli-1 tink "$@"
}
# recreate the given template and workflow.
# e.g. provision-workflow hello-world bios
# $1: template name (required).
# $2: hardware hostname (optional).
# remaining arguments are forwarded to the template's provision-workflow.sh.
function provision-workflow {
  local template_name="$1"
  # NB guard the required argument: the original unconditionally ran `shift`,
  #    which errors (and ran the provision scripts with an empty path) when
  #    called with no arguments.
  if [ -z "$template_name" ]; then
    echo "usage: provision-workflow <template-name> [hardware-hostname] [args...]" >&2
    return 1
  fi
  shift
  # the hostname is optional, so tolerate a failing shift.
  local hardware_hostname="$1"; shift || true
  bash "/vagrant/templates/$template_name/provision.sh" \
    && bash "/vagrant/templates/$template_name/provision-workflow.sh" "$hardware_hostname" "$@"
}
# delete all templates that have the given name.
# $1: template name.
# NB there may be more than one template with the same name, so this iterates
#    over every matching id.
function delete-template {
  local template_name="$1"
  # NB IFS= read -r: the original bare `read` would mangle backslashes and
  #    trim whitespace from the ids.
  tink template get --format json \
    | jq -r --arg name "$template_name" '.data[] | select(.name==$name) | .id' \
    | while IFS= read -r template_id; do
        tink template delete "$template_id"
      done
}
# TODO: possibly change get-hardware-workflows, get-hardware-mac and
# provision-workflow.sh depending on the outcome of
# https://github.com/tinkerbell/tink/issues/550
# print the non-deleted workflows associated with the given hardware hostname.
# $1: hardware hostname.
# outputs pipe-separated rows (psql -A -t: unaligned, tuples only) with the
# columns: workflow_id|hardware_id|hostname|mac.
# the join matches workflows to hardware by the mac stored in the workflow's
# devices->device_1 field.
# NB $1 is interpolated directly into the SQL below -- only call this with
#    trusted hostnames, otherwise this is a SQL injection vector.
function get-hardware-workflows {
# the here-document becomes the stdin of docker exec -i, which psql reads.
docker exec -i compose-db-1 psql -U tinkerbell -A -t <<EOF
select
w.workflow_id, h.hardware_id, h.hostname, h.mac
from
(
select
id as workflow_id,
devices->>'device_1' as mac
from
workflow
where
deleted_at is null
) as w
inner join
(
select
id as hardware_id,
jsonb_array_elements(data->'network'->'interfaces')->'dhcp'->>'mac' as mac,
(data->>'metadata')::jsonb->'instance'->>'hostname' as hostname
from
hardware
) as h
on
w.mac=h.mac
where
h.hostname='$1'
EOF
}
# print the mac address(es) of the given hardware hostname.
# $1: hardware hostname.
# NB the hostname is interpolated directly into the SQL below -- only call
#    this with trusted values, otherwise this is a SQL injection vector.
function get-hardware-mac {
  local hardware_hostname="$1"
  # the here-document feeds psql (via docker exec -i stdin); awk keeps only
  # the mac column (field 2 of the pipe-separated psql -A -t output).
  # NB the original declared hardware_hostname but then interpolated $1
  #    directly; use the local for consistency (same value).
  (docker exec -i compose-db-1 psql -U tinkerbell -A -t | awk -F '|' '{print $2}') <<EOF
select
  id as hardware_id,
  jsonb_array_elements(data->'network'->'interfaces')->'dhcp'->>'mac' as mac,
  (data->>'metadata')::json->'instance'->>'hostname' as hostname
from
  hardware
where
  (data->>'metadata')::json->'instance'->>'hostname'='$hardware_hostname'
EOF
}
# delete the workflows associated with the hardware.
# $1: hardware hostname.
# NB we do not need to really delete the existing workflows. they are only
# applied once. but deleting them makes things easier to follow.
# NB workflows are not really deleted from the database, they are only
# marked as deleted.
function delete-hardware-workflows {
  local hardware_hostname="$1"
  # NB IFS= read -r: the original bare `read` would mangle backslashes and
  #    trim whitespace from the ids.
  get-hardware-workflows "$hardware_hostname" \
    | awk -F '|' '{print $1}' \
    | while IFS= read -r workflow_id; do
        tink workflow delete "$workflow_id"
      done
}
# continuously display the state and events of the workflows associated with
# the given hardware hostname (runs `watch` until interrupted).
# $1: hardware hostname.
function watch-hardware-workflows {
  local hardware_hostname="$1"
  # NB declare local separately from the assignment, and declare it at all:
  #    the original leaked workflow_ids into the caller's shell (this file is
  #    sourced), and `local v=$(cmd)` would mask the command's exit status.
  local workflow_ids
  workflow_ids="$(get-hardware-workflows "$hardware_hostname" | awk -F '|' '{print $1}')"
  [ -z "$workflow_ids" ] && echo "the $hardware_hostname hardware does not have any workflow" && return
  # the workflow ids are baked into the watched script; \$workflow_id is
  # expanded by the shell that watch spawns, not by this one.
  watch "
    echo \"$workflow_ids\" | while read -r workflow_id; do
      docker exec -i compose-tink-cli-1 tink workflow state \$workflow_id
      docker exec -i compose-tink-cli-1 tink workflow events \$workflow_id
    done
  "
}
# convert a vagrant box disk image to a pigz-compressed raw image and install
# it into the tinkerbell webroot so it can be served to the hardware.
# $1: vagrant box name (as stored under /vagrant-boxes).
# $2: destination image name (installed as images/$2.raw.gz).
# NB this file is sourced: early termination must use `return`, never `exit`
#    (the original `exit 0` terminated the caller's shell).
function install-vagrant-box-raw-image {
  local VAGRANT_BOX_IMAGE_PATH="/vagrant-boxes/$1/0/libvirt/box.img"
  local IMAGE_NAME="$2"
  local IMAGE_PATH="$TINKERBELL_STATE_WEBROOT_PATH/images/$IMAGE_NAME.raw.gz"
  #local IMAGE_PATH="$TINKERBELL_STATE_WEBROOT_PATH/images/$IMAGE_NAME.raw.zs" # TODO see https://github.com/tinkerbell/hub/issues/65
  if [ ! -f "$VAGRANT_BOX_IMAGE_PATH" ]; then
    # NB the original referenced the undefined $RAW_IMAGE_NAME here; diagnostics
    #    go to stderr.
    echo "WARNING: $VAGRANT_BOX_IMAGE_PATH does not exist. skipping creating the $IMAGE_NAME image." >&2
    return 0
  fi
  # convert the vagrant box to a compressed raw image.
  # NB -nt: only rebuild when the box image is newer than the installed image.
  if [ ! -f "$IMAGE_PATH" ] || [ "$VAGRANT_BOX_IMAGE_PATH" -nt "$IMAGE_PATH" ]; then
    local IMAGE_RAW_CACHE_PATH="/vagrant/tmp/$IMAGE_NAME.raw"
    local IMAGE_CACHE_PATH="$IMAGE_RAW_CACHE_PATH.gz"
    #local IMAGE_CACHE_PATH="$IMAGE_RAW_CACHE_PATH.zs"
    qemu-img convert -W -O raw "$VAGRANT_BOX_IMAGE_PATH" "$IMAGE_RAW_CACHE_PATH"
    # compress into a temporary file and rename, so a partially written
    # archive is never mistaken for a complete one.
    pigz --stdout "$IMAGE_RAW_CACHE_PATH" >"$IMAGE_CACHE_PATH.tmp"
    #zstd -T0 -o "$IMAGE_CACHE_PATH.tmp" "$IMAGE_RAW_CACHE_PATH"
    mv "$IMAGE_CACHE_PATH.tmp" "$IMAGE_CACHE_PATH"
    rm "$IMAGE_RAW_CACHE_PATH"
    install -d "$(dirname "$IMAGE_PATH")"
    cp "$IMAGE_CACHE_PATH" "$IMAGE_PATH"
    du -h "$IMAGE_PATH"
  fi
}
# convert a vagrant box disk image to a clonezilla image and copy it into the
# tinkerbell webroot.
# $1: vagrant box name (as stored under /vagrant-boxes).
# $2: destination clonezilla image name.
# NB this file is sourced: early termination must use `return`, never `exit`
#    (the original `exit 0` terminated the caller's shell).
# NOTE(review): requires /dev/nbd0 (nbd kernel module) and the clonezilla
#    ocs-sr tool -- confirm these are provisioned on the host.
function install-vagrant-box-clonezilla-image {
  local VAGRANT_BOX_IMAGE_PATH="/vagrant-boxes/$1/0/libvirt/box.img"
  local CLONEZILLA_IMAGE_NAME="$2"
  local CLONEZILLA_IMAGE_PATH="/vagrant/tmp/$CLONEZILLA_IMAGE_NAME"
  if [ ! -f "$VAGRANT_BOX_IMAGE_PATH" ]; then
    # diagnostics go to stderr.
    echo "WARNING: $VAGRANT_BOX_IMAGE_PATH does not exist. skipping creating the $CLONEZILLA_IMAGE_NAME image." >&2
    return 0
  fi
  # convert the vagrant box to a clonezilla image.
  # NB the SHA1SUMS file is used as the completion/freshness marker: rebuild
  #    when it is missing or older than the box image.
  if [ ! -f "$CLONEZILLA_IMAGE_PATH/SHA1SUMS" ] || [ "$VAGRANT_BOX_IMAGE_PATH" -nt "$CLONEZILLA_IMAGE_PATH/SHA1SUMS" ]; then
    qemu-img info "$VAGRANT_BOX_IMAGE_PATH"
    # expose the box image as a block device; ocs-sr saves disks, not files.
    qemu-nbd --read-only --connect /dev/nbd0 "$VAGRANT_BOX_IMAGE_PATH"
    parted --script /dev/nbd0 print
    rm -rf "$CLONEZILLA_IMAGE_PATH"
    ocs-sr \
      --batch \
      --nogui \
      --ocsroot /vagrant/tmp \
      --use-partclone \
      --clone-hidden-data \
      --pzstd-compress \
      --skip-check-restorable \
      --gen-sha1sum \
      savedisk \
      "$CLONEZILLA_IMAGE_NAME" \
      nbd0
    qemu-nbd --disconnect /dev/nbd0
    du -h "$CLONEZILLA_IMAGE_PATH"
  fi
  # you can restore the image with:
  #   qemu-img create -f qcow2 $CLONEZILLA_IMAGE_PATH-test.qcow2 60G
  #   qemu-img info $CLONEZILLA_IMAGE_PATH-test.qcow2
  #   qemu-nbd --connect /dev/nbd1 $CLONEZILLA_IMAGE_PATH-test.qcow2
  #   ocs-sr \
  #     --batch \
  #     --nogui \
  #     --ocsroot /vagrant/tmp \
  #     --skip-check-restorable-r \
  #     --check-sha1sum \
  #     restoredisk \
  #     $CLONEZILLA_IMAGE_NAME \
  #     nbd1
  #   parted --script /dev/nbd1 print
  #   qemu-nbd --disconnect /dev/nbd1
  # copy the clonezilla image to the tinkerbell webroot.
  install -d "$TINKERBELL_STATE_WEBROOT_PATH/images"
  rsync \
    --archive \
    --no-owner \
    --no-group \
    --chmod Du=rwx,Dg=rx,Do=rx,Fu=rw,Fg=r,Fo=r \
    --delete \
    "$CLONEZILLA_IMAGE_PATH" \
    "$TINKERBELL_STATE_WEBROOT_PATH/images"
}