PR - Custom remote transport port for gobgp #369

Merged · 4 commits · Jul 31, 2023

Changes from all commits
5 changes: 4 additions & 1 deletion api/models/b_g_p_neigh.go

(Generated file; diff not rendered by default.)
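The generated model diff is collapsed above, but the handler and swagger changes below imply the new field. A plausible sketch of what the regenerated go-swagger model gains (illustrative, reconstructed from the swagger definition, not the rendered diff):

// BGPNeigh is the go-swagger-generated model for a BGP neighbor
// (sketch only; the real generated file also carries validation code).
type BGPNeigh struct {
	// BGP Neighbor IP address
	IPAddress string `json:"ipAddress,omitempty"`

	// Remote AS number
	RemoteAs int64 `json:"remoteAs,omitempty"`

	// Remote Connect Port (default 179)
	RemotePort int64 `json:"remotePort,omitempty"`
}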

12 changes: 10 additions & 2 deletions api/restapi/embedded_spec.go

(Generated file; diff not rendered by default.)

3 changes: 3 additions & 0 deletions api/restapi/handler/gobgp.go
@@ -33,6 +33,9 @@ func ConfigPostBGPNeigh(params operations.PostConfigBgpNeighParams) middleware.Responder
 	// Remote AS
 	bgpNeighMod.RemoteAS = uint32(params.Attr.RemoteAs)
 
+	// Remote Port
+	bgpNeighMod.RemotePort = uint16(params.Attr.RemotePort)
+
 	tk.LogIt(tk.LogDebug, "[API] GoBGP neighAdd : %v\n", bgpNeighMod)
 	_, err := ApiHooks.NetGoBGPNeighAdd(&bgpNeighMod)
 	if err != nil {
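The handler above only copies the port into loxilb's internal neighbor model. For context, a minimal sketch of how a custom remote port ultimately reaches gobgp, assuming the gobgp v3 server API (the helper name is hypothetical, field names vary by gobgp version, and this is not loxilb's actual plumbing):

package main

import (
	"context"

	api "github.com/osrg/gobgp/v3/api"
	"github.com/osrg/gobgp/v3/pkg/server"
)

// addNeigh is a hypothetical helper: it registers a BGP neighbor whose
// TCP transport connects to a non-default port instead of 179.
func addNeigh(s *server.BgpServer, addr string, asn uint32, port uint16) error {
	peer := &api.Peer{
		Conf: &api.PeerConf{
			NeighborAddress: addr,
			PeerAsn:         asn, // older gobgp releases name this PeerAs
		},
		// A RemotePort of 0 means "use the default", i.e. 179.
		Transport: &api.Transport{RemotePort: uint32(port)},
	}
	return s.AddPeer(context.Background(), &api.AddPeerRequest{Peer: peer})
}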
5 changes: 4 additions & 1 deletion api/swagger.yml
@@ -3090,10 +3090,13 @@ definitions:
     properties:
       ipAddress:
         type: string
-        description: BGP Nieghbor IP address
+        description: BGP Neighbor IP address
       remoteAs:
         type: integer
         description: Remote AS number
+      remotePort:
+        type: integer
+        description: Remote Connect Port (default 179)
 
   BGPGlobalConfig:
     type: object
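With the schema extended, a neighbor with a non-default peering port can be created through the REST API. An illustrative request (the /netlox/v1 path prefix is assumed from loxilb's API conventions; addresses and numbers are invented):

POST /netlox/v1/config/bgp/neigh
{
  "ipAddress": "192.168.90.9",
  "remoteAs": 64512,
  "remotePort": 5179
}

Omitting remotePort keeps the standard BGP port 179, per the description above.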
81 changes: 81 additions & 0 deletions cicd/k8s-calico-incluster/Vagrantfile
@@ -0,0 +1,81 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

require "yaml"
settings = YAML.load_file "yaml/settings.yaml"

workers = settings["nodes"]["workers"]["count"]

bname = ("sysnet4admin/Ubuntu-k8s")
bversion = "0.7.1"

Vagrant.configure("2") do |config|

  if Vagrant.has_plugin?("vagrant-vbguest")
    config.vbguest.auto_update = false
  end

  config.vm.box = "#{bname}"
  config.vm.box_version = "#{bversion}"

  config.vm.define "host" do |host|
    host.vm.hostname = 'host1'
    #loxilb.vm.network "forwarded_port", guest: 55002, host: 5502, protocol: "tcp"
    host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0"
    host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0"
    host.vm.provision :shell, :path => "node_scripts/host.sh"
    host.vm.provider :virtualbox do |vbox|
      vbox.customize ["modifyvm", :id, "--memory", 2048]
      vbox.customize ["modifyvm", :id, "--cpus", 1]
    end
  end

  #config.vm.box = settings["software"]["cluster"]["box"]
  config.vm.define "master" do |master|
    master.vm.hostname = 'master'
    master.vm.network :private_network, ip: settings["network"]["control_ip"], :netmask => "255.255.255.0"
    master.vm.provision "shell",
      env: {
        "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
        "ENVIRONMENT" => settings["environment"],
        "KUBERNETES_VERSION" => settings["software"]["kubernetes"],
        "OS" => settings["software"]["os"]
      },
      path: "node_scripts/common.sh"
    master.vm.provision "shell",
      env: {
        "CALICO_VERSION" => settings["software"]["calico"],
        "CONTROL_IP" => settings["network"]["control_ip"],
        "POD_CIDR" => settings["network"]["pod_cidr"],
        "SERVICE_CIDR" => settings["network"]["service_cidr"]
      },
      path: "node_scripts/master.sh"

    master.vm.provider :virtualbox do |vbox|
      vbox.customize ["modifyvm", :id, "--memory", 2048]
      vbox.customize ["modifyvm", :id, "--cpus", 2]
    end
  end

  (1..workers).each do |node_number|
    config.vm.define "worker#{node_number}" do |worker|
      worker.vm.hostname = "worker#{node_number}"
      ip = node_number + 100
      worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
      worker.vm.provision "shell",
        env: {
          "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
          "ENVIRONMENT" => settings["environment"],
          "KUBERNETES_VERSION" => settings["software"]["kubernetes"],
          "OS" => settings["software"]["os"]
        },
        path: "node_scripts/common.sh"
      worker.vm.provision "shell", path: "node_scripts/worker.sh"

      worker.vm.provider :virtualbox do |vbox|
        vbox.customize ["modifyvm", :id, "--memory", 2048]
        vbox.customize ["modifyvm", :id, "--cpus", 2]
      end
    end
  end
end
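The Vagrantfile pulls its whole topology from yaml/settings.yaml, which is not part of this diff. A sketch of the keys it dereferences (values are illustrative, except control_ip, which matches the kubeconfig and join.sh below):

network:
  control_ip: 192.168.80.10
  dns_servers:
    - 8.8.8.8
  pod_cidr: 172.16.1.0/16
  service_cidr: 172.17.1.0/18
nodes:
  workers:
    count: 2
software:
  os: xUbuntu_22.04
  kubernetes: 1.27.1-00
  calico: 3.26.0
environment: ""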
36 changes: 36 additions & 0 deletions cicd/k8s-calico-incluster/config.sh
@@ -0,0 +1,36 @@
#!/bin/bash
# Destroy any stale VirtualBox VMs left over from previous runs
VMs=$(vagrant global-status | grep -i virtualbox)
while IFS= read -a VMs; do
    read -a vm <<< "$VMs"
    cd ${vm[4]} 2>&1>/dev/null
    echo "Destroying ${vm[1]}"
    vagrant destroy -f ${vm[1]}
    cd - 2>&1>/dev/null
done <<< "$VMs"

vagrant up

# Poll (up to 60 tries, 10s apart) until every pod in the cluster is Running
for((i=1; i<=60; i++))
do
    fin=1
    pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE")

    while IFS= read -a pods; do
        read -a pod <<< "$pods"
        if [[ ${pod[3]} != *"Running"* ]]; then
            echo "${pod[1]} is not UP yet"
            fin=0
        fi
    done <<< "$pods"
    if [ $fin == 1 ];
    then
        break;
    fi
    echo "Will try after 10s"
    sleep 10
done

#Create fullnat Service
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_fullnat.yml' 2> /dev/null
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_fullnat.yml' 2> /dev/null
vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_fullnat.yml' 2> /dev/null
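The three fullnat manifests applied above are not shown in this diff. For reference, a loxilb fullnat service typically looks like the following sketch (the annotation and loadBalancerClass names follow kube-loxilb's documented conventions; the service name, selector, and ports are invented):

apiVersion: v1
kind: Service
metadata:
  name: tcp-lb-fullnat
  annotations:
    loxilb.io/liveness: "yes"
    loxilb.io/lbmode: "fullnat"
spec:
  loadBalancerClass: loxilb.io/loxilb
  selector:
    app: tcp-fullnat-test
  ports:
    - port: 56002
      targetPort: 80
      protocol: TCP
  type: LoadBalancer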
19 changes: 19 additions & 0 deletions cicd/k8s-calico-incluster/configs/config
@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJek1EY3pNREV6TURVek1sb1hEVE16TURjeU56RXpNRFV6TWxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTVNyClltV0M3Q0lSYlZFZy92M0FZUk5rRTZnUk5CQ2k3MThCMitHbUllVkQ4c2d5aXoxdWprdDZnbDcwQXhIRDkwSlUKcnVzSnFTc2ZvdDZ3YWJodU5MR3pMdy9ZK0xwZlRNMG5pRmorM3NlVlZiTExQOWxlRUx0Y2R5MnNIWDRQSU5KNApHcmNWM0lETjYrNGZOUWZkT1pjcGtIMjVkMmFKa01sM1YrdTFUbExTK0VSckRhQnNpOTJESXFkb0wxdlhwbm8xCjh6TnpYV2J3M1EyQ1dldWlOaW11eHNIWDM0MlpzRnNJY2FwYWhqa0MxVFZCbkNVOVowSXJSR2pVaW4rbkwvRVcKQUp2SHhCVEVMWkFmd1VkcG10ODBIcGFGVDNZMlcxYW1VWmR2b2w1V0RUaE83T3R4eGpUeTVrSXAwVlhIL1Q2WApRalRLb0RIUERsUWVNc01aQ1BjQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFWFMrWUs1ampsOWNSc3hPQW9qNktMWTRkVGpNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQ2h1MFhnZ1d1MkdqZStId3BTQwpSaE5kbUVwWnVPSjcyVlhrUGlsTUxJeHZ6Rys1NG1PbWZPOFNjRTJqaitTVG90VXk2N1hvMklMU1loNHpydFRXCitPcnFYMmJqaC9rZTRhOU5MSTJORjlmc1Z6ZS9YVElRQ0Uwbnlkb21lOC91b0s2b29DRGc1bm5FM1Y0NXFiRE0KdVJ4VGU5dUkvV1J1ZHVuSW95MXNPTHh2My9yZE1DeVZvRkljdm9ZazlES2NBU1F5Z09CRE1uaEc4RHBrZE44Ngo5eW01SDdYMVBvNkZVVCt0TCtKOHlmRFRhc0VXRDhRODRuVmRVckE3TGdtNnZYbmFGeTNCQ3dJUXZGRjNhbTlPCnZ3ZzJ5bzdPZ1RmdU9YUWpjVHZNWmpmNUp4OGlKQXc4WkN1aGkxVlpjQitpWnNDb2I1cUxHdENnbWxNMlNpTmMKaTVnPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://192.168.80.10:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJWVF5Tkszb3lBa2N3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TXpBM016QXhNekExTXpKYUZ3MHlOREEzTWpreE16QTFNelZhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXJ2QXR6OHhxd1RBYUxjWk4KRmI4R1A3VlVranlESFRRRy95R3Q5WXVleUNIeUE5RG9pRFF6dkRnSStwSlFqMmx3QXhUVjR5N1k2U1VZM1BiTgpKd01Kd2F3VG1HZUowVmpuWThpbFF4RHAxdk5sM0k0bGc0VVFDanZUb0k0Y2doejM3Wk1yMVB3MmRVeHBwUGkxCjVHSjA0bTVVbUJPZWJrc1dOOWRpWk5FYmYxUWRHaENwZHFyRHAwMWRqNER2MFZFbEhsdDBzT0FmYkdvS2EreDEKTHlwckVvamJWQkE2NGVRRVFRWGJCcXlGZHpweTdPbWJCSG1vVnhVRXBFQ0dFb2JzRVV5eFFoRysxRmxnd01ZYQpzTkRtQnRDcW42SzVoMUMzV20wYzRmdElEM2pwYmUybzhLbVQrekdLYWJCYmJUY1kxZWJrRjBHWHcwcXY2WWNjCmIybEVtd0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JSRjB2bUN1WTQ1ZlhFYk1UZ0tJK2lpMk9IVQo0ekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBYVk3WllSVTJtT3JvcExGVGRuUjc5Q2ovQlI1UW4rTHU2cVErCkQ5ZVJ2WmxGQjB1TFl6OFNicWpwR1lCTmVvWU55bXlsd1NKZnl0V3p5WVpDQXRhQmNZZUNqbld2elhVQi82YVgKeTduaGJFVWRUUkVRRXZEc2M4eUxOVkx1OThEcjd1OUVIUWxZWm9NM2pIZFF6dFlQNW00M1JHVWxJTW1jN05NZgpsSk1tK1RvTmZkUHo0VlpqNmJjQ3VYbmtGdnZPc0VsUXNMViswZHVHQkpDM2JFZGNmR01najh6Qm1ML3QvWXIzCitMYWNpeFpQeVVCRjdKVzBNOUp0dFpzQ2hXbWZraHBHYm5qRElncXNnK1lzRldvempBMkMxcS9hUyttdUd2YjkKZ2JkVTZvOXA5alZmR0tEbFVDa2JYbDVId01YS09PZ0RQV3pVWFp0UEdTUVJpcjE0Ync9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcnZBdHo4eHF3VEFhTGNaTkZiOEdQN1ZVa2p5REhUUUcveUd0OVl1ZXlDSHlBOURvCmlEUXp2RGdJK3BKUWoybHdBeFRWNHk3WTZTVVkzUGJOSndNSndhd1RtR2VKMFZqblk4aWxReERwMXZObDNJNGwKZzRVUUNqdlRvSTRjZ2h6MzdaTXIxUHcyZFV4cHBQaTE1R0owNG01VW1CT2Via3NXTjlkaVpORWJmMVFkR2hDcApkcXJEcDAxZGo0RHYwVkVsSGx0MHNPQWZiR29LYSt4MUx5cHJFb2piVkJBNjRlUUVRUVhiQnF5RmR6cHk3T21iCkJIbW9WeFVFcEVDR0VvYnNFVXl4UWhHKzFGbGd3TVlhc05EbUJ0Q3FuNks1aDFDM1dtMGM0ZnRJRDNqcGJlMm8KOEttVCt6R0thYkJiYlRjWTFlYmtGMEdYdzBxdjZZY2NiMmxFbXdJREFRQUJBb0lCQUN1M3hoc2FJTXVxczhBZwp3SDdnd0RVSG9kengxbXBqNkNPMlRQMENLV29tWVk3bWxGWUZoYkJSNkp5R0dDL2V6NmxWZWFaT3ZOSjIvT0dyCm85Vk9BeEF0YXJBNW44MTdoRWdCaXB0YURMWTFHWTJtMEdVdnliUmxBeHdxcDZFMGtCa0ZJSDBYa3B4NXZpVUcKS3A2cXBEODZCMVlDQVNQYkMvQmttU2hNd2F4dDlNMkYzeVZNRExnN2RpYXlZZUx1MHhtNXd4VXVwUmVkU1hYdgpPcHppWE5tdGZGR01QUkRVWXdNUGoycUNzNlZUdHErQlhoOUVWQVU3OGlkOU50bU5KQ2M5Zk1MLzUzekg3OVlhCnJjb2VXZFRMNlNRYVB6YUlSWEx6Mm90VG5nVHJ2RnlNY2lrTWdVVVZ5M3ZndlpySUFRd3J4elQ5TEJXYWhVRkwKMFVRd0gzRUNnWUVBNUNXTC9jRGxaTGxGYUp5ZTFMNnhZK1ZOT2lnQStBVHVQWDdtczdjV2t0Slk4YjErQ3IzcwpUYTRmTmlpYUM1Zk9RT0RkcmhNdS9GUzBCcHVjRk44OVVCL2xaZSsrbStJY0tpVDRZM0lHTmYyKzBDT3Z0ZGFmCkkrZ2lIaW5JTnV2T3Fkek83TW5WUEc0Q2NubUJHU3NybnovTnI1TFJnREF1SWh6NEhhUGxTdFVDZ1lFQXhFdXEKSkl4c3RvMGVKWDdORnNUMW9TUFl5a3FTWnh4clB4TFdpdHFCSzQvV0NTMW1HWTJUemRkS3hGaTlnVWdBaitmNApWSmg0aXlrTXdKVWtJOUQ0YllPR2JqdW1XTXlMKzRZWm5zbFBIS2FwcVBkQkJiM0UzVlJTK1hyOHJxaEhxVEhpCms2ME9RN1Qya0Z6SWlySy9teWlMb2J1YnYxKzlVVytoL2xOekthOENnWUJhalh5Tzd5MGRXVnZ2TlpybEhmc1MKaDBTcnZJMEY1QThiWVc3NERjZHI1d2xlaWJPcFY5Q2UxR21XK1c2TEEybmQzbUtlWVFiWktGVjcrZTl0YVYzUQptNWhWYVY3aVNGQ2RlYWNNOFlqOWpRVmJYNDZ5UWNsUVd5YVBpazNwWHBiY1hNUFV3QmRlc050UHpHSXROekZOCk4rblBzaHB0SXJKczM4cXJHUTQ5TVFLQmdRQ0hYVTVsaWRqbVFvYUpnTm5aVzlXdlc5TUNIVTY4Z0dLTXltYmMKdGpYaFhuMVJNdGQzdzZRcmpNM29mUEdpRjQ4YnJmSVlGRlQ4VWtDVEJjWTRWTUVjZEZqZDU1Q2RKK0ZZZ0c5bQppcGhkdjZpNzlsWUdxWWo2d0UyLzhVb1MvOFQ3TG9WN0pSbnpJdlh0TTY2dnh2aE8vVFRkUVV6ME9nZUtBeHVKCkVPOFh6UUtCZ0FnOUpTTFBrRDV6NzdtcWN3Tk9pa1VBa1RUek52ZWxsRW9yK2RVWHRyVUlKdmtKT2xBbmZJekMKMlpRM3p4YzRpTVd1a3hHc2Z2VzFIZngxOUdBNzBVWXZkNFVab09mYjVRYWtBaGh2WUh6VEdlNnZ1VXBoZS9KTQo5QXdwQ3YzcEg5TW1VWk5wbzlhcWhQTGNnUzd5Uy9Xc1pVbFlpUzNrRUtYOUhOcUtiMHVsCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
1 change: 1 addition & 0 deletions cicd/k8s-calico-incluster/configs/join.sh
@@ -0,0 +1 @@
kubeadm join 192.168.80.10:6443 --token wxki6c.cifh2d82k592rpwf --discovery-token-ca-cert-hash sha256:f581308b2a8fb3647d7e1297d2dac741529bb84c711d3ae9193ab4574fcb3aae
83 changes: 83 additions & 0 deletions cicd/k8s-calico-incluster/node_scripts/common.sh
@@ -0,0 +1,83 @@
#!/bin/bash
#
# Common setup for all servers (Control Plane and Nodes)

set -euxo pipefail

# Variable Declaration

# DNS Setting
if [ ! -d /etc/systemd/resolved.conf.d ]; then
    sudo mkdir /etc/systemd/resolved.conf.d/
fi
cat <<EOF | sudo tee /etc/systemd/resolved.conf.d/dns_servers.conf
[Resolve]
DNS=${DNS_SERVERS}
EOF

sudo systemctl restart systemd-resolved

# disable swap
sudo swapoff -a

# keeps the swap off during reboot
(crontab -l 2>/dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true
sudo apt-get update -y
# Install CRI-O Runtime

VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')"

# Create the .conf file to load the modules at bootup
cat <<EOF | sudo tee /etc/modules-load.d/crio.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# Set up required sysctl params, these persist across reboots.
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

sudo sysctl --system

cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /
EOF
cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list
deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /
EOF

curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add -
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add -

sudo apt-get update
sudo apt-get install cri-o cri-o-runc -y

cat >> /etc/default/crio << EOF
${ENVIRONMENT}
EOF
sudo systemctl daemon-reload
sudo systemctl enable crio --now

echo "CRI runtime installed successfully"

sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg

echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update -y
sudo apt-get install -y kubelet="$KUBERNETES_VERSION" kubectl="$KUBERNETES_VERSION" kubeadm="$KUBERNETES_VERSION"
sudo apt-get update -y
sudo apt-get install -y jq

local_ip="$(ip --json a s | jq -r '.[] | if .ifname == "eth1" then .addr_info[] | if .family == "inet" then .local else empty end else empty end')"
cat > /etc/default/kubelet << EOF
KUBELET_EXTRA_ARGS=--node-ip=$local_ip
${ENVIRONMENT}
EOF
5 changes: 5 additions & 0 deletions cicd/k8s-calico-incluster/node_scripts/host.sh
@@ -0,0 +1,5 @@
sudo su
echo "123.123.123.1 k8s-svc" >> /etc/hosts
ifconfig eth2 mtu 1450
ip route add 123.123.123.0/24 via 192.168.90.10
echo "Host is up"
13 changes: 13 additions & 0 deletions cicd/k8s-calico-incluster/node_scripts/loxilb.sh
@@ -0,0 +1,13 @@
export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/')

apt-get update
apt-get install -y software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update
apt-get install -y docker-ce
docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest
echo alias loxicmd=\"sudo docker exec -it loxilb loxicmd\" >> ~/.bashrc
echo alias loxilb=\"sudo docker exec -it loxilb \" >> ~/.bashrc

echo $LOXILB_IP > /vagrant/loxilb-ip
57 changes: 57 additions & 0 deletions cicd/k8s-calico-incluster/node_scripts/master.sh
@@ -0,0 +1,57 @@
#!/bin/bash
#
# Setup for Control Plane (Master) servers

set -euxo pipefail

NODENAME=$(hostname -s)

sudo kubeadm config images pull

echo "Preflight Check Passed: Downloaded All Required Images"

sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap

mkdir -p "$HOME"/.kube
sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config
sudo chown "$(id -u)":"$(id -g)" "$HOME"/.kube/config

# Save Configs to shared /Vagrant location

# For Vagrant re-runs, check if there are existing configs at this location and delete them before saving the new configuration.

config_path="/vagrant/configs"

if [ -d $config_path ]; then
    rm -f $config_path/*
else
    mkdir -p $config_path
fi

cp -i /etc/kubernetes/admin.conf $config_path/config
touch $config_path/join.sh
chmod +x $config_path/join.sh

kubeadm token create --print-join-command > $config_path/join.sh

# Install Calico Network Plugin

curl https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/calico.yaml -O

kubectl apply -f calico.yaml

sudo -i -u vagrant bash << EOF
whoami
mkdir -p /home/vagrant/.kube
sudo cp -i $config_path/config /home/vagrant/.kube/
sudo chown 1000:1000 /home/vagrant/.kube/config
EOF

# Install Metrics Server

kubectl apply -f https://raw.githubusercontent.com/techiescamp/kubeadm-scripts/main/manifests/metrics-server.yaml

# Install loxilb
kubectl apply -f /vagrant/yaml/loxilb.yml
kubectl apply -f /vagrant/yaml/loxilb-peer.yml
kubectl apply -f /vagrant/yaml/kube-loxilb.yml
18 changes: 18 additions & 0 deletions cicd/k8s-calico-incluster/node_scripts/worker.sh
@@ -0,0 +1,18 @@
#!/bin/bash
#
# Setup for Node servers

set -euxo pipefail

config_path="/vagrant/configs"

/bin/bash $config_path/join.sh -v

sudo -i -u vagrant bash << EOF
whoami
mkdir -p /home/vagrant/.kube
sudo cp -i $config_path/config /home/vagrant/.kube/
sudo chown 1000:1000 /home/vagrant/.kube/config
NODENAME=$(hostname -s)
kubectl label node $(hostname -s) node-role.kubernetes.io/worker=worker
EOF