diff --git a/api/models/b_g_p_neigh.go b/api/models/b_g_p_neigh.go
index 75c53cb69..2671eab50 100644
--- a/api/models/b_g_p_neigh.go
+++ b/api/models/b_g_p_neigh.go
@@ -17,11 +17,14 @@ import (
 // swagger:model BGPNeigh
 type BGPNeigh struct {
 
-	// BGP Nieghbor IP address
+	// BGP Neighbor IP address
 	IPAddress string `json:"ipAddress,omitempty"`
 
 	// Remote AS number
 	RemoteAs int64 `json:"remoteAs,omitempty"`
+
+	// Remote Connect Port (default 179)
+	RemotePort int64 `json:"remotePort,omitempty"`
 }
 
 // Validate validates this b g p neigh
diff --git a/api/restapi/embedded_spec.go b/api/restapi/embedded_spec.go
index 210244bcf..625e3730c 100644
--- a/api/restapi/embedded_spec.go
+++ b/api/restapi/embedded_spec.go
@@ -3254,12 +3254,16 @@ func init() {
         "type": "object",
         "properties": {
           "ipAddress": {
-            "description": "BGP Nieghbor IP address",
+            "description": "BGP Neighbor IP address",
             "type": "string"
           },
           "remoteAs": {
             "description": "Remote AS number",
             "type": "integer"
+          },
+          "remotePort": {
+            "description": "Remote Connect Port (default 179)",
+            "type": "integer"
           }
         }
       },
@@ -7621,12 +7625,16 @@ func init() {
         "type": "object",
         "properties": {
           "ipAddress": {
-            "description": "BGP Nieghbor IP address",
+            "description": "BGP Neighbor IP address",
             "type": "string"
           },
           "remoteAs": {
             "description": "Remote AS number",
             "type": "integer"
+          },
+          "remotePort": {
+            "description": "Remote Connect Port (default 179)",
+            "type": "integer"
           }
         }
       },
diff --git a/api/restapi/handler/gobgp.go b/api/restapi/handler/gobgp.go
index 1829e762f..b1b4d2040 100644
--- a/api/restapi/handler/gobgp.go
+++ b/api/restapi/handler/gobgp.go
@@ -33,6 +33,9 @@ func ConfigPostBGPNeigh(params operations.PostConfigBgpNeighParams) middleware.R
 	// Remote AS
 	bgpNeighMod.RemoteAS = uint32(params.Attr.RemoteAs)
 
+	// Remote Port
+	bgpNeighMod.RemotePort = uint16(params.Attr.RemotePort)
+
 	tk.LogIt(tk.LogDebug, "[API] GoBGP neighAdd : %v\n", bgpNeighMod)
 	_, err := ApiHooks.NetGoBGPNeighAdd(&bgpNeighMod)
 	if err != nil {
diff --git a/api/swagger.yml b/api/swagger.yml
index c774f2e9a..54ed9e0e6 100644
--- a/api/swagger.yml
+++ b/api/swagger.yml
@@ -3090,10 +3090,13 @@ definitions:
     properties:
       ipAddress:
         type: string
-        description: BGP Nieghbor IP address
+        description: BGP Neighbor IP address
       remoteAs:
         type: integer
        description: Remote AS number
+      remotePort:
+        type: integer
+        description: Remote Connect Port (default 179)
 
   BGPGlobalConfig:
     type: object
diff --git a/cicd/k8s-calico-incluster/Vagrantfile b/cicd/k8s-calico-incluster/Vagrantfile
new file mode 100644
index 000000000..e18a50ccc
--- /dev/null
+++ b/cicd/k8s-calico-incluster/Vagrantfile
@@ -0,0 +1,81 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+require "yaml"
+settings = YAML.load_file "yaml/settings.yaml"
+
+workers = settings["nodes"]["workers"]["count"]
+
+bname = ("sysnet4admin/Ubuntu-k8s")
+bversion = "0.7.1"
+
+Vagrant.configure("2") do |config|
+
+  if Vagrant.has_plugin?("vagrant-vbguest")
+    config.vbguest.auto_update = false
+  end
+
+  config.vm.box = "#{bname}"
+  config.vm.box_version = "#{bversion}"
+
+  config.vm.define "host" do |host|
+    host.vm.hostname = 'host1'
+    #loxilb.vm.network "forwarded_port", guest: 55002, host: 5502, protocol: "tcp"
+    host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0"
+    host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0"
+    host.vm.provision :shell, :path => "node_scripts/host.sh"
+    host.vm.provider :virtualbox do |vbox|
+        vbox.customize ["modifyvm", :id, "--memory", 2048]
+        vbox.customize ["modifyvm", :id, "--cpus", 1]
+    end
+  end
+
+  #config.vm.box = settings["software"]["cluster"]["box"]
+  config.vm.define "master" do |master|
+    master.vm.hostname = 'master'
+    master.vm.network :private_network, ip: settings["network"]["control_ip"], :netmask => "255.255.255.0"
+    master.vm.provision "shell",
+      env: {
+        "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
+        "ENVIRONMENT" => settings["environment"],
+        "KUBERNETES_VERSION" => settings["software"]["kubernetes"],
+        "OS" => settings["software"]["os"]
+      },
+      path: "node_scripts/common.sh"
+    master.vm.provision "shell",
+      env: {
+        "CALICO_VERSION" => settings["software"]["calico"],
+        "CONTROL_IP" => settings["network"]["control_ip"],
+        "POD_CIDR" => settings["network"]["pod_cidr"],
+        "SERVICE_CIDR" => settings["network"]["service_cidr"]
+      },
+      path: "node_scripts/master.sh"
+
+    master.vm.provider :virtualbox do |vbox|
+      vbox.customize ["modifyvm", :id, "--memory", 2048]
+      vbox.customize ["modifyvm", :id, "--cpus", 2]
+    end
+  end
+
+  (1..workers).each do |node_number|
+    config.vm.define "worker#{node_number}" do |worker|
+      worker.vm.hostname = "worker#{node_number}"
+      ip = node_number + 100
+      worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
+      worker.vm.provision "shell",
+        env: {
+          "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
+          "ENVIRONMENT" => settings["environment"],
+          "KUBERNETES_VERSION" => settings["software"]["kubernetes"],
+          "OS" => settings["software"]["os"]
+        },
+        path: "node_scripts/common.sh"
+      worker.vm.provision "shell", path: "node_scripts/worker.sh"
+
+      worker.vm.provider :virtualbox do |vbox|
+        vbox.customize ["modifyvm", :id, "--memory", 2048]
+        vbox.customize ["modifyvm", :id, "--cpus", 2]
+      end
+    end
+  end
+end
diff --git a/cicd/k8s-calico-incluster/config.sh b/cicd/k8s-calico-incluster/config.sh
new file mode 100755
index 000000000..68ebe3d3a
--- /dev/null
+++ b/cicd/k8s-calico-incluster/config.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+VMs=$(vagrant global-status | grep -i virtualbox)
+while IFS= read -a VMs; do
+    read -a vm <<< "$VMs"
+    cd ${vm[4]} 2>&1>/dev/null
+    echo "Destroying ${vm[1]}"
+    vagrant destroy -f ${vm[1]}
+    cd - 2>&1>/dev/null
+done <<< "$VMs"
+
+vagrant up
+
+for((i=1; i<=60; i++))
+do
+    fin=1
+    pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE")
+
+    while IFS= read -a pods; do
+        read -a pod <<< "$pods"
+        if [[ ${pod[3]} != *"Running"* ]]; then
+            echo "${pod[1]} is not UP yet"
+            fin=0
+        fi
+    done <<< "$pods"
+    if [ $fin == 1 ];
+    then
+        break;
+    fi
+    echo "Will try after 10s"
+    sleep 10
+done
+
+#Create fullnat Service
+vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_fullnat.yml' 2> /dev/null
+vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_fullnat.yml' 2> /dev/null
+vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/sctp_fullnat.yml' 2> /dev/null
diff --git a/cicd/k8s-calico-incluster/configs/config b/cicd/k8s-calico-incluster/configs/config
new file mode 100644
index 000000000..fc66b8aed
--- /dev/null
+++ b/cicd/k8s-calico-incluster/configs/config
@@ -0,0 +1,19 @@
+apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJek1EY3pNREV6TURVek1sb1hEVE16TURjeU56RXpNRFV6TWxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTVNyClltV0M3Q0lSYlZFZy92M0FZUk5rRTZnUk5CQ2k3MThCMitHbUllVkQ4c2d5aXoxdWprdDZnbDcwQXhIRDkwSlUKcnVzSnFTc2ZvdDZ3YWJodU5MR3pMdy9ZK0xwZlRNMG5pRmorM3NlVlZiTExQOWxlRUx0Y2R5MnNIWDRQSU5KNApHcmNWM0lETjYrNGZOUWZkT1pjcGtIMjVkMmFKa01sM1YrdTFUbExTK0VSckRhQnNpOTJESXFkb0wxdlhwbm8xCjh6TnpYV2J3M1EyQ1dldWlOaW11eHNIWDM0MlpzRnNJY2FwYWhqa0MxVFZCbkNVOVowSXJSR2pVaW4rbkwvRVcKQUp2SHhCVEVMWkFmd1VkcG10ODBIcGFGVDNZMlcxYW1VWmR2b2w1V0RUaE83T3R4eGpUeTVrSXAwVlhIL1Q2WApRalRLb0RIUERsUWVNc01aQ1BjQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFWFMrWUs1ampsOWNSc3hPQW9qNktMWTRkVGpNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQ2h1MFhnZ1d1MkdqZStId3BTQwpSaE5kbUVwWnVPSjcyVlhrUGlsTUxJeHZ6Rys1NG1PbWZPOFNjRTJqaitTVG90VXk2N1hvMklMU1loNHpydFRXCitPcnFYMmJqaC9rZTRhOU5MSTJORjlmc1Z6ZS9YVElRQ0Uwbnlkb21lOC91b0s2b29DRGc1bm5FM1Y0NXFiRE0KdVJ4VGU5dUkvV1J1ZHVuSW95MXNPTHh2My9yZE1DeVZvRkljdm9ZazlES2NBU1F5Z09CRE1uaEc4RHBrZE44Ngo5eW01SDdYMVBvNkZVVCt0TCtKOHlmRFRhc0VXRDhRODRuVmRVckE3TGdtNnZYbmFGeTNCQ3dJUXZGRjNhbTlPCnZ3ZzJ5bzdPZ1RmdU9YUWpjVHZNWmpmNUp4OGlKQXc4WkN1aGkxVlpjQitpWnNDb2I1cUxHdENnbWxNMlNpTmMKaTVnPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+    server: https://192.168.80.10:6443
+  name: kubernetes
+contexts:
+- context:
+    cluster: kubernetes
+    user: kubernetes-admin
+  name: kubernetes-admin@kubernetes
+current-context: kubernetes-admin@kubernetes
+kind: Config
+preferences: {}
+users:
+- name: kubernetes-admin
+  user:
+    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJWVF5Tkszb3lBa2N3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TXpBM016QXhNekExTXpKYUZ3MHlOREEzTWpreE16QTFNelZhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXJ2QXR6OHhxd1RBYUxjWk4KRmI4R1A3VlVranlESFRRRy95R3Q5WXVleUNIeUE5RG9pRFF6dkRnSStwSlFqMmx3QXhUVjR5N1k2U1VZM1BiTgpKd01Kd2F3VG1HZUowVmpuWThpbFF4RHAxdk5sM0k0bGc0VVFDanZUb0k0Y2doejM3Wk1yMVB3MmRVeHBwUGkxCjVHSjA0bTVVbUJPZWJrc1dOOWRpWk5FYmYxUWRHaENwZHFyRHAwMWRqNER2MFZFbEhsdDBzT0FmYkdvS2EreDEKTHlwckVvamJWQkE2NGVRRVFRWGJCcXlGZHpweTdPbWJCSG1vVnhVRXBFQ0dFb2JzRVV5eFFoRysxRmxnd01ZYQpzTkRtQnRDcW42SzVoMUMzV20wYzRmdElEM2pwYmUybzhLbVQrekdLYWJCYmJUY1kxZWJrRjBHWHcwcXY2WWNjCmIybEVtd0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JSRjB2bUN1WTQ1ZlhFYk1UZ0tJK2lpMk9IVQo0ekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBYVk3WllSVTJtT3JvcExGVGRuUjc5Q2ovQlI1UW4rTHU2cVErCkQ5ZVJ2WmxGQjB1TFl6OFNicWpwR1lCTmVvWU55bXlsd1NKZnl0V3p5WVpDQXRhQmNZZUNqbld2elhVQi82YVgKeTduaGJFVWRUUkVRRXZEc2M4eUxOVkx1OThEcjd1OUVIUWxZWm9NM2pIZFF6dFlQNW00M1JHVWxJTW1jN05NZgpsSk1tK1RvTmZkUHo0VlpqNmJjQ3VYbmtGdnZPc0VsUXNMViswZHVHQkpDM2JFZGNmR01najh6Qm1ML3QvWXIzCitMYWNpeFpQeVVCRjdKVzBNOUp0dFpzQ2hXbWZraHBHYm5qRElncXNnK1lzRldvempBMkMxcS9hUyttdUd2YjkKZ2JkVTZvOXA5alZmR0tEbFVDa2JYbDVId01YS09PZ0RQV3pVWFp0UEdTUVJpcjE0Ync9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcnZBdHo4eHF3VEFhTGNaTkZiOEdQN1ZVa2p5REhUUUcveUd0OVl1ZXlDSHlBOURvCmlEUXp2RGdJK3BKUWoybHdBeFRWNHk3WTZTVVkzUGJOSndNSndhd1RtR2VKMFZqblk4aWxReERwMXZObDNJNGwKZzRVUUNqdlRvSTRjZ2h6MzdaTXIxUHcyZFV4cHBQaTE1R0owNG01VW1CT2Via3NXTjlkaVpORWJmMVFkR2hDcApkcXJEcDAxZGo0RHYwVkVsSGx0MHNPQWZiR29LYSt4MUx5cHJFb2piVkJBNjRlUUVRUVhiQnF5RmR6cHk3T21iCkJIbW9WeFVFcEVDR0VvYnNFVXl4UWhHKzFGbGd3TVlhc05EbUJ0Q3FuNks1aDFDM1dtMGM0ZnRJRDNqcGJlMm8KOEttVCt6R0thYkJiYlRjWTFlYmtGMEdYdzBxdjZZY2NiMmxFbXdJREFRQUJBb0lCQUN1M3hoc2FJTXVxczhBZwp3SDdnd0RVSG9kengxbXBqNkNPMlRQMENLV29tWVk3bWxGWUZoYkJSNkp5R0dDL2V6NmxWZWFaT3ZOSjIvT0dyCm85Vk9BeEF0YXJBNW44MTdoRWdCaXB0YURMWTFHWTJtMEdVdnliUmxBeHdxcDZFMGtCa0ZJSDBYa3B4NXZpVUcKS3A2cXBEODZCMVlDQVNQYkMvQmttU2hNd2F4dDlNMkYzeVZNRExnN2RpYXlZZUx1MHhtNXd4VXVwUmVkU1hYdgpPcHppWE5tdGZGR01QUkRVWXdNUGoycUNzNlZUdHErQlhoOUVWQVU3OGlkOU50bU5KQ2M5Zk1MLzUzekg3OVlhCnJjb2VXZFRMNlNRYVB6YUlSWEx6Mm90VG5nVHJ2RnlNY2lrTWdVVVZ5M3ZndlpySUFRd3J4elQ5TEJXYWhVRkwKMFVRd0gzRUNnWUVBNUNXTC9jRGxaTGxGYUp5ZTFMNnhZK1ZOT2lnQStBVHVQWDdtczdjV2t0Slk4YjErQ3IzcwpUYTRmTmlpYUM1Zk9RT0RkcmhNdS9GUzBCcHVjRk44OVVCL2xaZSsrbStJY0tpVDRZM0lHTmYyKzBDT3Z0ZGFmCkkrZ2lIaW5JTnV2T3Fkek83TW5WUEc0Q2NubUJHU3NybnovTnI1TFJnREF1SWh6NEhhUGxTdFVDZ1lFQXhFdXEKSkl4c3RvMGVKWDdORnNUMW9TUFl5a3FTWnh4clB4TFdpdHFCSzQvV0NTMW1HWTJUemRkS3hGaTlnVWdBaitmNApWSmg0aXlrTXdKVWtJOUQ0YllPR2JqdW1XTXlMKzRZWm5zbFBIS2FwcVBkQkJiM0UzVlJTK1hyOHJxaEhxVEhpCms2ME9RN1Qya0Z6SWlySy9teWlMb2J1YnYxKzlVVytoL2xOekthOENnWUJhalh5Tzd5MGRXVnZ2TlpybEhmc1MKaDBTcnZJMEY1QThiWVc3NERjZHI1d2xlaWJPcFY5Q2UxR21XK1c2TEEybmQzbUtlWVFiWktGVjcrZTl0YVYzUQptNWhWYVY3aVNGQ2RlYWNNOFlqOWpRVmJYNDZ5UWNsUVd5YVBpazNwWHBiY1hNUFV3QmRlc050UHpHSXROekZOCk4rblBzaHB0SXJKczM4cXJHUTQ5TVFLQmdRQ0hYVTVsaWRqbVFvYUpnTm5aVzlXdlc5TUNIVTY4Z0dLTXltYmMKdGpYaFhuMVJNdGQzdzZRcmpNM29mUEdpRjQ4YnJmSVlGRlQ4VWtDVEJjWTRWTUVjZEZqZDU1Q2RKK0ZZZ0c5bQppcGhkdjZpNzlsWUdxWWo2d0UyLzhVb1MvOFQ3TG9WN0pSbnpJdlh0TTY2dnh2aE8vVFRkUVV6ME9nZUtBeHVKCkVPOFh6UUtCZ0FnOUpTTFBrRDV6NzdtcWN3Tk9pa1VBa1RUek52ZWxsRW9yK2RVWHRyVUlKdmtKT2xBbmZJekMKMlpRM3p4YzRpTVd1a3hHc2Z2VzFIZngxOUdBNzBVWXZkNFVab09mYjVRYWtBaGh2WUh6VEdlNnZ1VXBoZS9KTQo5QXdwQ3YzcEg5TW1VWk5wbzlhcWhQTGNnUzd5Uy9Xc1pVbFlpUzNrRUtYOUhOcUtiMHVsCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
diff --git a/cicd/k8s-calico-incluster/configs/join.sh b/cicd/k8s-calico-incluster/configs/join.sh
new file mode 100755
index 000000000..17a251f0e
--- /dev/null
+++ b/cicd/k8s-calico-incluster/configs/join.sh
@@ -0,0 +1 @@
+kubeadm join 192.168.80.10:6443 --token wxki6c.cifh2d82k592rpwf --discovery-token-ca-cert-hash sha256:f581308b2a8fb3647d7e1297d2dac741529bb84c711d3ae9193ab4574fcb3aae
diff --git a/cicd/k8s-calico-incluster/node_scripts/common.sh b/cicd/k8s-calico-incluster/node_scripts/common.sh
new file mode 100644
index 000000000..cf1e66f1a
--- /dev/null
+++ b/cicd/k8s-calico-incluster/node_scripts/common.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+#
+# Common setup for all servers (Control Plane and Nodes)
+
+set -euxo pipefail
+
+# Variable Declaration
+
+# DNS Setting
+if [ ! -d /etc/systemd/resolved.conf.d ]; then
+    sudo mkdir /etc/systemd/resolved.conf.d/
+fi
+cat <<EOF | sudo tee /etc/systemd/resolved.conf.d/dns_servers.conf
+[Resolve]
+DNS=${DNS_SERVERS}
+EOF
+
+sudo systemctl restart systemd-resolved
+
+# disable swap
+sudo swapoff -a
+
+# keeps the swap off during reboot
+(crontab -l 2>/dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true
+sudo apt-get update -y
+
+# Install CRI-O Runtime
+
+VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')"
+
+# Create the .conf file to load the modules at bootup
+cat <<EOF | sudo tee /etc/modules-load.d/crio.conf
+overlay
+br_netfilter
+EOF
+
+sudo modprobe overlay
+sudo modprobe br_netfilter
+
+# Set up required sysctl params, these persist across reboots.
+cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
+net.bridge.bridge-nf-call-iptables  = 1
+net.ipv4.ip_forward                 = 1
+net.bridge.bridge-nf-call-ip6tables = 1
+EOF
+
+sudo sysctl --system
+
+cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
+deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /
+EOF
+cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list
+deb http://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/ /
+EOF
+
+curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add -
+curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add -
+
+sudo apt-get update
+sudo apt-get install -y cri-o cri-o-runc
+
+cat >> /etc/default/crio << EOF
+${ENVIRONMENT}
+EOF
+sudo systemctl daemon-reload
+sudo systemctl enable crio --now
+
+echo "CRI runtime installed successfully"
+
+sudo apt-get update
+sudo apt-get install -y apt-transport-https ca-certificates curl
+curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg
+
+echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
+sudo apt-get update -y
+sudo apt-get install -y kubelet="$KUBERNETES_VERSION" kubectl="$KUBERNETES_VERSION" kubeadm="$KUBERNETES_VERSION"
+sudo apt-get update -y
+sudo apt-get install -y jq
+
+local_ip="$(ip --json a s | jq -r '.[] | if .ifname == "eth1" then .addr_info[] | if .family == "inet" then .local else empty end else empty end')"
+cat > /etc/default/kubelet << EOF
+KUBELET_EXTRA_ARGS=--node-ip=$local_ip
+${ENVIRONMENT}
+EOF
diff --git a/cicd/k8s-calico-incluster/node_scripts/host.sh b/cicd/k8s-calico-incluster/node_scripts/host.sh
new file mode 100755
index 000000000..adfc5c77d
--- /dev/null
+++ b/cicd/k8s-calico-incluster/node_scripts/host.sh
@@ -0,0 +1,5 @@
+sudo su
+echo "123.123.123.1 k8s-svc" >> /etc/hosts
+ifconfig eth2 mtu 1450
+ip route add 123.123.123.0/24 via 192.168.90.10
+echo "Host is up"
diff --git a/cicd/k8s-calico-incluster/node_scripts/loxilb.sh b/cicd/k8s-calico-incluster/node_scripts/loxilb.sh
new file mode 100644
index 000000000..74e66ae9b
--- /dev/null
+++ b/cicd/k8s-calico-incluster/node_scripts/loxilb.sh
@@ -0,0 +1,13 @@
+export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+
+apt-get update
+apt-get install -y software-properties-common
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+apt-get update
+apt-get install -y docker-ce
+docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest
+echo alias loxicmd=\"sudo docker exec -it loxilb loxicmd\" >> ~/.bashrc
+echo alias loxilb=\"sudo docker exec -it loxilb \" >> ~/.bashrc
+
+echo $LOXILB_IP > /vagrant/loxilb-ip
diff --git a/cicd/k8s-calico-incluster/node_scripts/master.sh b/cicd/k8s-calico-incluster/node_scripts/master.sh
new file mode 100644
index 000000000..1e7521d73
--- /dev/null
+++ b/cicd/k8s-calico-incluster/node_scripts/master.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+#
+# Setup for Control Plane (Master) servers
+
+set -euxo pipefail
+
+NODENAME=$(hostname -s)
+
+sudo kubeadm config images pull
+
+echo "Preflight Check Passed: Downloaded All Required Images"
+
+sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap
+
+mkdir -p "$HOME"/.kube
+sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config
+sudo chown "$(id -u)":"$(id -g)" "$HOME"/.kube/config
+
+# Save Configs to shared /Vagrant location
+
+# For Vagrant re-runs, check if there is existing configs in the location and delete it for saving new configuration.
+
+config_path="/vagrant/configs"
+
+if [ -d $config_path ]; then
+  rm -f $config_path/*
+else
+  mkdir -p $config_path
+fi
+
+cp -i /etc/kubernetes/admin.conf $config_path/config
+touch $config_path/join.sh
+chmod +x $config_path/join.sh
+
+kubeadm token create --print-join-command > $config_path/join.sh
+
+# Install Calico Network Plugin
+
+curl https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/calico.yaml -O
+
+kubectl apply -f calico.yaml
+
+sudo -i -u vagrant bash << EOF
+whoami
+mkdir -p /home/vagrant/.kube
+sudo cp -i $config_path/config /home/vagrant/.kube/
+sudo chown 1000:1000 /home/vagrant/.kube/config
+EOF
+
+# Install Metrics Server
+
+kubectl apply -f https://raw.githubusercontent.com/techiescamp/kubeadm-scripts/main/manifests/metrics-server.yaml
+
+# Install loxilb
+kubectl apply -f /vagrant/yaml/loxilb.yml
+kubectl apply -f /vagrant/yaml/loxilb-peer.yml
+kubectl apply -f /vagrant/yaml/kube-loxilb.yml
diff --git a/cicd/k8s-calico-incluster/node_scripts/worker.sh b/cicd/k8s-calico-incluster/node_scripts/worker.sh
new file mode 100644
index 000000000..a5754170b
--- /dev/null
+++ b/cicd/k8s-calico-incluster/node_scripts/worker.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Setup for Node servers
+
+set -euxo pipefail
+
+config_path="/vagrant/configs"
+
+/bin/bash $config_path/join.sh -v
+
+sudo -i -u vagrant bash << EOF
+whoami
+mkdir -p /home/vagrant/.kube
+sudo cp -i $config_path/config /home/vagrant/.kube/
+sudo chown 1000:1000 /home/vagrant/.kube/config
+NODENAME=$(hostname -s)
+kubectl label node $(hostname -s) node-role.kubernetes.io/worker=worker
+EOF
diff --git a/cicd/k8s-calico-incluster/rmconfig.sh b/cicd/k8s-calico-incluster/rmconfig.sh
new file mode 100755
index 000000000..1eb0df750
--- /dev/null
+++ b/cicd/k8s-calico-incluster/rmconfig.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+vagrant destroy -f worker2
+vagrant destroy -f worker1
+vagrant destroy -f master
+vagrant destroy -f host
diff --git a/cicd/k8s-calico-incluster/validation.sh b/cicd/k8s-calico-incluster/validation.sh
new file mode 100755
index 000000000..05debe23d
--- /dev/null
+++ b/cicd/k8s-calico-incluster/validation.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+source ../common.sh
+echo k8s-calico
+
+if [ "$1" ]; then
+  KUBECONFIG="$1"
+fi
+
+# Set space as the delimiter
+IFS=' '
+
+for((i=0; i<120; i++))
+do
+  extLB=$(vagrant ssh master -c 'kubectl get svc' 2> /dev/null | grep "tcp-lb-default")
+  read -a strarr <<< "$extLB"
+  len=${#strarr[*]}
+  if [[ $((len)) -lt 6 ]]; then
+    echo "Can't find tcp-lb service"
+    sleep 1
+    continue
+  fi
+  if [[ ${strarr[3]} != *"none"* ]]; then
+    extIP=${strarr[3]}
+    break
+  fi
+  echo "No external LB allocated"
+  sleep 1
+done
+
+## Any routing updates ??
+sleep 30
+
+echo Service IP : $extIP
+echo -e "\nEnd Points List"
+echo "******************************************************************************"
+vagrant ssh master -c 'kubectl get endpoints -A' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nSVC List"
+echo "******************************************************************************"
+vagrant ssh master -c 'kubectl get svc' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nPod List"
+echo "******************************************************************************"
+vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nLB List"
+echo "******************************************************************************"
+vagrant ssh loxilb -c 'sudo docker exec -it loxilb loxicmd get lb -o wide' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nEP List"
+echo "******************************************************************************"
+vagrant ssh loxilb -c 'sudo docker exec -it loxilb loxicmd get ep -o wide' 2> /dev/null
+echo "******************************************************************************"
+
+echo -e "\nTEST RESULTS"
+echo "******************************************************************************"
+mode=( "default" "onearm" "fullnat" )
+tcp_port=( 55002 56002 57002 )
+udp_port=( 55003 56003 57003 )
+sctp_port=( 55004 56004 57004 )
+code=0
+for ((i=0;i<=2;i++)); do
+out=$(curl -s --connect-timeout 10 http://$extIP:${tcp_port[i]})
+if [[ ${out} == *"Welcome to nginx"* ]]; then
+  echo -e "K8s-calico TCP\t(${mode[i]})\t[OK]"
+else
+  echo -e "K8s-calico TCP\t(${mode[i]})\t[FAILED]"
+  ## Dump some debug info
+  echo "llb1 lb-info"
+  vagrant ssh loxilb -c 'sudo docker exec -it llb1 loxicmd get lb -o wide' 2> /dev/null
+  echo "llb1 route-info"
+  vagrant ssh loxilb -c 'sudo docker exec -it llb1 ip route' 2> /dev/null
+  code=1
+fi
+
+out=$(timeout 5 ../common/udp_client $extIP ${udp_port[i]})
+if [[ ${out} == *"Client"* ]]; then
+  echo -e "K8s-calico UDP\t(${mode[i]})\t[OK]"
+else
+  echo -e "K8s-calico UDP\t(${mode[i]})\t[FAILED]"
+  ## Dump some debug info
+  echo "llb1 lb-info"
+  vagrant ssh loxilb -c 'sudo docker exec -it llb1 loxicmd get lb -o wide' 2> /dev/null
+  echo "llb1 route-info"
+  vagrant ssh loxilb -c 'sudo docker exec -it llb1 ip route' 2> /dev/null
+  code=1
+fi
+
+out=$(timeout 5 ../common/sctp_client 192.168.90.1 34951 $extIP ${sctp_port[i]})
+if [[ ${out} == *"server1"* ]]; then
+  echo -e "K8s-calico SCTP\t(${mode[i]})\t[OK]"
+else
+  echo -e "K8s-calico SCTP\t(${mode[i]})\t[FAILED]"
+  ## Dump some debug info
+  echo "llb1 lb-info"
+  vagrant ssh loxilb -c 'sudo docker exec -it llb1 loxicmd get lb -o wide' 2> /dev/null
+  echo "llb1 route-info"
+  vagrant ssh loxilb -c 'sudo docker exec -it llb1 ip route' 2> /dev/null
+  code=1
+fi
+done
+exit $code
diff --git a/cicd/k8s-calico-incluster/yaml/kube-loxilb.yml b/cicd/k8s-calico-incluster/yaml/kube-loxilb.yml
new file mode 100644
index 000000000..96267551e
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/kube-loxilb.yml
@@ -0,0 +1,133 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-loxilb
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: kube-loxilb
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+      - watch
+      - list
+      - patch
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+    verbs:
+      - get
+      - watch
+      - list
+      - patch
+  - apiGroups:
+      - ""
+    resources:
+      - endpoints
+      - services
+      - services/status
+    verbs:
+      - get
+      - watch
+      - list
+      - patch
+      - update
+  - apiGroups:
+      - discovery.k8s.io
+    resources:
+      - endpointslices
+    verbs:
+      - get
+      - watch
+      - list
+  - apiGroups:
+      - authentication.k8s.io
+    resources:
+      - tokenreviews
+    verbs:
+      - create
+  - apiGroups:
+      - authorization.k8s.io
+    resources:
+      - subjectaccessreviews
+    verbs:
+      - create
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: kube-loxilb
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kube-loxilb
+subjects:
+  - kind: ServiceAccount
+    name: kube-loxilb
+    namespace: kube-system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kube-loxilb
+  namespace: kube-system
+  labels:
+    app: loxilb
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: loxilb
+  template:
+    metadata:
+      labels:
+        app: loxilb
+    spec:
+      hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
+      tolerations:
+        - effect: NoSchedule
+          operator: Exists
+        # Mark the pod as a critical add-on for rescheduling.
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - effect: NoExecute
+          operator: Exists
+      priorityClassName: system-node-critical
+      serviceAccountName: kube-loxilb
+      terminationGracePeriodSeconds: 0
+      containers:
+      - name: kube-loxilb
+        image: ghcr.io/loxilb-io/kube-loxilb:latest
+        imagePullPolicy: Always
+        command:
+        - /bin/kube-loxilb
+        args:
+        #- --loxiURL=http://192.168.80.10:11111
+        - --externalCIDR=123.123.123.1/24
+        - --setBGP=64512
+        - --listenBGPPort=1791
+        - --setRoles
+        #- --monitor
+        #- --setBGP
+        #- --setLBMode=1
+        #- --config=/opt/loxilb/agent/kube-loxilb.conf
+        resources:
+          requests:
+            cpu: "100m"
+            memory: "50Mi"
+          limits:
+            cpu: "100m"
+            memory: "50Mi"
+        securityContext:
+          privileged: true
+          capabilities:
+            add: ["NET_ADMIN", "NET_RAW"]
diff --git a/cicd/k8s-calico-incluster/yaml/loxilb-peer.yml b/cicd/k8s-calico-incluster/yaml/loxilb-peer.yml
new file mode 100644
index 000000000..5b35cd2e5
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/loxilb-peer.yml
@@ -0,0 +1,64 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: loxilb-peer
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      app: loxilb-peer-app
+  template:
+    metadata:
+      name: loxilb-peer
+      labels:
+        app: loxilb-peer-app
+    spec:
+      hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: "node-role.kubernetes.io/master"
+                operator: DoesNotExist
+              - key: "node-role.kubernetes.io/control-plane"
+                operator: DoesNotExist
+      containers:
+      - name: loxilb-peer-app
+        image: "ghcr.io/loxilb-io/loxilb:latest"
+        command: [ "/root/loxilb-io/loxilb/loxilb", "--peer" ]
+        ports:
+        - containerPort: 11111
+        - containerPort: 1791
+        - containerPort: 50051
+        securityContext:
+          privileged: true
+          capabilities:
+            add:
+              - SYS_ADMIN
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: loxilb-peer-service
+  namespace: kube-system
+spec:
+  clusterIP: None
+  selector:
+    app: loxilb-peer-app
+  ports:
+  - name: loxilb-peer-app
+    port: 11111
+    targetPort: 11111
+    protocol: TCP
+  - name: loxilb-peer-bgp
+    port: 1791
+    targetPort: 1791
+    protocol: TCP
+  - name: loxilb-peer-gobgp
+    port: 50051
+    targetPort: 50051
+    protocol: TCP
+
+
diff --git a/cicd/k8s-calico-incluster/yaml/loxilb.yml b/cicd/k8s-calico-incluster/yaml/loxilb.yml
new file mode 100644
index 000000000..2c325c433
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/loxilb.yml
@@ -0,0 +1,63 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: loxilb-lb
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      app: loxilb-app
+  template:
+    metadata:
+      name: loxilb-lb
+      labels:
+        app: loxilb-app
+    spec:
+      hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
+      tolerations:
+      - key: "node-role.kubernetes.io/control-plane"
+        operator: Exists
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: "node-role.kubernetes.io/control-plane"
+                operator: Exists
+      containers:
+      - name: loxilb-app
+        image: "ghcr.io/loxilb-io/loxilb:latest"
+        command: [ "/root/loxilb-io/loxilb/loxilb", "--bgp", "--egr-hooks", "--blacklist=cali.|tunl.|vxlan[.]calico|veth." ]
+        ports:
+        - containerPort: 11111
+        - containerPort: 1791
+        - containerPort: 50051
+        securityContext:
+          privileged: true
+          capabilities:
+            add:
+              - SYS_ADMIN
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: loxilb-lb-service
+  namespace: kube-system
+spec:
+  clusterIP: None
+  selector:
+    app: loxilb-app
+  ports:
+  - name: loxilb-app
+    port: 11111
+    targetPort: 11111
+    protocol: TCP
+  - name: loxilb-app-bgp
+    port: 1791
+    targetPort: 1791
+    protocol: TCP
+  - name: loxilb-app-gobgp
+    port: 50051
+    targetPort: 50051
+    protocol: TCP
diff --git a/cicd/k8s-calico-incluster/yaml/sctp.yml b/cicd/k8s-calico-incluster/yaml/sctp.yml
new file mode 100644
index 000000000..c9a7d4afd
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/sctp.yml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: sctp-lb-default
+  annotations:
+    loxilb.io/liveness: "yes"
+    loxilb.io/lbmode: "default"
+spec:
+  loadBalancerClass: loxilb.io/loxilb
+  externalTrafficPolicy: Local
+  selector:
+    what: sctp-default-test
+  ports:
+    - port: 55004
+      protocol: SCTP
+      targetPort: 9999
+  type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sctp-default-test
+  labels:
+    what: sctp-default-test
+spec:
+  containers:
+  - name: sctp-default-test
+    image: ghcr.io/loxilb-io/alpine-socat:latest
+    command: [ "sh", "-c"]
+    args:
+      - while true; do
+        socat -v -T2 sctp-l:9999,reuseaddr,fork system:"echo 'server1'; cat";
+        sleep 20;
+        done;
+    ports:
+      - containerPort: 9999
+    env:
+      - name: MY_POD_IP
+        valueFrom:
+          fieldRef:
+            fieldPath: status.podIP
diff --git a/cicd/k8s-calico-incluster/yaml/sctp_fullnat.yml b/cicd/k8s-calico-incluster/yaml/sctp_fullnat.yml
new file mode 100644
index 000000000..b6eae03d5
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/sctp_fullnat.yml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: sctp-lb-fullnat
+  annotations:
+    loxilb.io/liveness: "yes"
+    loxilb.io/lbmode: "fullnat"
+spec:
+  loadBalancerClass: loxilb.io/loxilb
+  externalTrafficPolicy: Local
+  selector:
+    what: sctp-fullnat-test
+  ports:
+    - port: 57004
+      protocol: SCTP
+      targetPort: 9999
+  type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sctp-fullnat-test
+  labels:
+    what: sctp-fullnat-test
+spec:
+  containers:
+  - name: sctp-fullnat-test
+    image: ghcr.io/loxilb-io/alpine-socat:latest
+    command: [ "sh", "-c"]
+    args:
+      - while true; do
+        socat -v -T2 sctp-l:9999,reuseaddr,fork system:"echo 'server1'; cat";
+        sleep 20;
+        done;
+    ports:
+      - containerPort: 9999
+    env:
+      - name: MY_POD_IP
+        valueFrom:
+          fieldRef:
+            fieldPath: status.podIP
diff --git a/cicd/k8s-calico-incluster/yaml/sctp_onearm.yml b/cicd/k8s-calico-incluster/yaml/sctp_onearm.yml
new file mode 100644
index 000000000..b4b736962
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/sctp_onearm.yml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: sctp-lb-onearm
+  annotations:
+    loxilb.io/liveness: "yes"
+    loxilb.io/lbmode: "onearm"
+spec:
+  loadBalancerClass: loxilb.io/loxilb
+  externalTrafficPolicy: Local
+  selector:
+    what: sctp-onearm-test
+  ports:
+    - port: 56004
+      protocol: SCTP
+      targetPort: 9999
+  type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sctp-onearm-test
+  labels:
+    what: sctp-onearm-test
+spec:
+  containers:
+  - name: sctp-onearm-test
+    image: ghcr.io/loxilb-io/alpine-socat:latest
+    command: [ "sh", "-c"]
+    args:
+      - while true; do
+        socat -v -T2 sctp-l:9999,reuseaddr,fork system:"echo 'server1'; cat";
+        sleep 20;
+        done;
+    ports:
+      - containerPort: 9999
+    env:
+      - name: MY_POD_IP
+        valueFrom:
+          fieldRef:
+            fieldPath: status.podIP
diff --git a/cicd/k8s-calico-incluster/yaml/settings.yaml b/cicd/k8s-calico-incluster/yaml/settings.yaml
new file mode 100644
index 000000000..ffa7fe692
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/settings.yaml
@@ -0,0 +1,44 @@
+---
+# cluster_name is used to group the nodes in a folder within VirtualBox:
+cluster_name: Kubernetes Cluster
+# Uncomment to set environment variables for services such as crio and kubelet.
+# For example, configure the cluster to pull images via a proxy.
+# environment: |
+#   HTTP_PROXY=http://my-proxy:8000
+#   HTTPS_PROXY=http://my-proxy:8000
+#   NO_PROXY=127.0.0.1,localhost,master-node,node01,node02,node03
+# All IPs/CIDRs should be private and allowed in /etc/vbox/networks.conf.
+network:
+  iloxilb_ip: 192.168.80.9
+  oloxilb_ip: 192.168.90.9
+  # Worker IPs are simply incremented from the control IP.
+  control_ip: 192.168.80.10
+  dns_servers:
+    - 8.8.8.8
+    - 1.1.1.1
+  pod_cidr: 172.16.1.0/16
+  service_cidr: 172.17.1.0/18
+nodes:
+  control:
+    cpu: 2
+    memory: 4096
+  workers:
+    count: 2
+    cpu: 1
+    memory: 2048
+# Mount additional shared folders from the host into each virtual machine.
+# Note that the project directory is automatically mounted at /vagrant.
+# shared_folders:
+#   - host_path: ../images
+#     vm_path: /vagrant/images
+software:
+  loxilb:
+    box:
+      name: sysnet4admin/Ubuntu-k8s
+      version: 0.7.1
+  cluster:
+    box: bento/ubuntu-22.04
+  calico: 3.26.0
+  # To skip the dashboard installation, set its version to an empty value or comment it out:
+  kubernetes: 1.27.1-00
+  os: xUbuntu_22.04
diff --git a/cicd/k8s-calico-incluster/yaml/tcp.yml b/cicd/k8s-calico-incluster/yaml/tcp.yml
new file mode 100644
index 000000000..8c8983403
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/tcp.yml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: tcp-lb-default
+  annotations:
+    loxilb.io/liveness: "yes"
+    loxilb.io/lbmode: "default"
+spec:
+  externalTrafficPolicy: Local
+  loadBalancerClass: loxilb.io/loxilb
+  selector:
+    what: tcp-default-test
+  ports:
+    - port: 55002
+      targetPort: 80
+  type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: tcp-default-test
+  labels:
+    what: tcp-default-test
+spec:
+  containers:
+  - name: tcp-default-test
+    image: ghcr.io/loxilb-io/nginx:stable
+    ports:
+      - containerPort: 80
diff --git a/cicd/k8s-calico-incluster/yaml/tcp_fullnat.yml b/cicd/k8s-calico-incluster/yaml/tcp_fullnat.yml
new file mode 100644
index 000000000..3303ac35e
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/tcp_fullnat.yml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: tcp-lb-fullnat
+  annotations:
+    loxilb.io/liveness: "yes"
+    loxilb.io/lbmode: "fullnat"
+spec:
+  externalTrafficPolicy: Local
+  loadBalancerClass: loxilb.io/loxilb
+  selector:
+    what: tcp-fullnat-test
+  ports:
+    - port: 57002
+      targetPort: 80
+  type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: tcp-fullnat-test
+  labels:
+    what: tcp-fullnat-test
+spec:
+  containers:
+  - name: tcp-fullnat-test
+    image: ghcr.io/loxilb-io/nginx:stable
+    ports:
+      - containerPort: 80
diff --git a/cicd/k8s-calico-incluster/yaml/tcp_onearm.yml b/cicd/k8s-calico-incluster/yaml/tcp_onearm.yml
new file mode 100644
index 000000000..b3d345483
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/tcp_onearm.yml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: tcp-lb-onearm
+  annotations:
+    loxilb.io/liveness: "yes"
+    loxilb.io/lbmode: "onearm"
+spec:
+  externalTrafficPolicy: Local
+  loadBalancerClass: loxilb.io/loxilb
+  selector:
+    what: tcp-onearm-test
+  ports:
+    - port: 56002
+      targetPort: 80
+  type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: tcp-onearm-test
+  labels:
+    what: tcp-onearm-test
+spec:
+  containers:
+  - name: tcp-onearm-test
+    image: ghcr.io/loxilb-io/nginx:stable
+    ports:
+      - containerPort: 80
diff --git a/cicd/k8s-calico-incluster/yaml/udp.yml b/cicd/k8s-calico-incluster/yaml/udp.yml
new file mode 100644
index 000000000..ac6ef997d
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/udp.yml
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: udp-lb-default
+  annotations:
+    loxilb.io/liveness: "yes"
+    loxilb.io/lbmode: "default"
+spec:
+  loadBalancerClass: loxilb.io/loxilb
+  externalTrafficPolicy: Local
+  selector:
+    what: udp-default-test
+  ports:
+    - port: 55003
+      protocol: UDP
+      targetPort: 33333
+  type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: udp-default-test
+  labels:
+    what: udp-default-test
+spec:
+  containers:
+  - name: udp-default-test
+    image: ghcr.io/loxilb-io/udp-echo:latest
+    ports:
+      - containerPort: 33333
diff --git a/cicd/k8s-calico-incluster/yaml/udp_fullnat.yml b/cicd/k8s-calico-incluster/yaml/udp_fullnat.yml
new file mode 100644
index 000000000..67b729019
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/udp_fullnat.yml
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: udp-lb-fullnat
+  annotations:
+    loxilb.io/liveness: "yes"
+    loxilb.io/lbmode: "fullnat"
+spec:
+  loadBalancerClass: loxilb.io/loxilb
+  externalTrafficPolicy: Local
+  selector:
+    what: udp-fullnat-test
+  ports:
+    - port: 57003
+      protocol: UDP
+      targetPort: 33333
+  type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: udp-fullnat-test
+  labels:
+    what: udp-fullnat-test
+spec:
+  containers:
+  - name: udp-fullnat-test
+    image: ghcr.io/loxilb-io/udp-echo:latest
+    ports:
+      - containerPort: 33333
diff --git a/cicd/k8s-calico-incluster/yaml/udp_onearm.yml b/cicd/k8s-calico-incluster/yaml/udp_onearm.yml
new file mode 100644
index 000000000..833187e73
--- /dev/null
+++ b/cicd/k8s-calico-incluster/yaml/udp_onearm.yml
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: udp-lb-onearm
+  annotations:
+    loxilb.io/liveness: "yes"
+    loxilb.io/lbmode: "onearm"
+spec:
+  loadBalancerClass: loxilb.io/loxilb
+  externalTrafficPolicy: Local
+  selector:
+    what: udp-onearm-test
+  ports:
+    - port: 56003
+      protocol: UDP
+      targetPort: 33333
+  type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: udp-onearm-test
+  labels:
+    what: udp-onearm-test
+spec:
+  containers:
+  - name: udp-onearm-test
+    image: ghcr.io/loxilb-io/udp-echo:latest
+    ports:
+      - containerPort: 33333
diff --git a/common/common.go b/common/common.go
index d6e028a00..2d5b812ad 100644
--- a/common/common.go
+++ b/common/common.go
@@ -596,8 +596,9 @@ type GoBGPGlobalConfig struct {
 
 // GoBGPNeighMod - Info related to goBGP neigh
 type GoBGPNeighMod struct {
-	Addr     net.IP `json:"neighIP"`
-	RemoteAS uint32 `json:"remoteAS"`
+	Addr       net.IP `json:"neighIP"`
+	RemoteAS   uint32 `json:"remoteAS"`
+	RemotePort uint16 `json:"remotePort"`
 }
 
 // Equal - check if two session tunnel entries are equal
diff --git a/loxinet/apiclient.go b/loxinet/apiclient.go
index 2c3b8c277..921fedae1 100644
--- a/loxinet/apiclient.go
+++ b/loxinet/apiclient.go
@@ -590,7 +590,7 @@ func (na *NetAPIStruct) NetParamGet(param *cmn.ParamMod) (int, error) {
 // NetGoBGPNeighAdd - Add bgp neigh to gobgp
 func (na *NetAPIStruct) NetGoBGPNeighAdd(param *cmn.GoBGPNeighMod) (int, error) {
 	if mh.bgp != nil {
-		return mh.bgp.BGPNeighMod(true, param.Addr, param.RemoteAS)
+		return mh.bgp.BGPNeighMod(true, param.Addr, param.RemoteAS, uint32(param.RemotePort))
 	}
 	tk.LogIt(tk.LogDebug, "loxilb BGP mode is disabled \n")
 	return 0, errors.New("loxilb BGP mode is disabled")
@@ -600,7 +600,7 @@ func (na *NetAPIStruct) NetGoBGPNeighAdd(param *cmn.GoBGPNeighMod) (int, error)
 // NetGoBGPNeighDel - Del bgp neigh from gobgp
 func (na *NetAPIStruct) NetGoBGPNeighDel(param *cmn.GoBGPNeighMod) (int, error) {
 	if mh.bgp != nil {
-		return mh.bgp.BGPNeighMod(false, param.Addr, param.RemoteAS)
+		return mh.bgp.BGPNeighMod(false, param.Addr, param.RemoteAS, uint32(param.RemotePort))
 	}
 	tk.LogIt(tk.LogDebug, "loxilb BGP mode is disabled \n")
 	return 0, errors.New("loxilb BGP mode is disabled")
diff --git a/loxinet/gobgpclient.go b/loxinet/gobgpclient.go
index 71863533f..4003184e2 100644
--- a/loxinet/gobgpclient.go
+++ b/loxinet/gobgpclient.go
@@ -739,7 +739,7 @@ func (gbh *GoBgpH) UpdateCIState(instance string, state int, vip net.IP) {
 }
 
 // BGPNeighMod - Routine to add BGP neigh to goBGP server
-func (gbh *GoBgpH) BGPNeighMod(add bool, neigh net.IP, ras uint32) (int, error) {
+func (gbh *GoBgpH) BGPNeighMod(add bool, neigh net.IP, ras uint32, rPort uint32) (int, error) {
 	var peer *api.Peer
 	var err error
 
@@ -748,10 +748,17 @@ func (gbh *GoBgpH) BGPNeighMod(add bool, neigh net.IP, ras uint32) (int, error)
 		State:          &api.PeerState{},
 		RouteServer:    &api.RouteServer{},
 		RouteReflector: &api.RouteReflector{},
+		Transport:      &api.Transport{},
 	}
 	peer.Conf.NeighborAddress = neigh.String()
 	peer.State.NeighborAddress = neigh.String()
 	peer.Conf.PeerAsn = ras
+	if rPort != 0 {
+		peer.Transport.RemotePort = rPort
+	} else {
+		peer.Transport.RemotePort = 179
+	}
+
 	if add {
 		_, err = gbh.client.AddPeer(context.Background(), &api.AddPeerRequest{
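
Usage note: with the remotePort plumbing above, a BGP neighbor listening on a non-standard port can be configured end-to-end through the REST API; when the field is omitted or zero, BGPNeighMod falls back to the standard BGP port 179. Below is a minimal Go sketch of such a request. The JSON field names mirror the BGPNeigh swagger model in this patch, and port 1791 matches the --listenBGPPort passed to kube-loxilb above; the endpoint URL (loxilb's API on port 11111 with base path /netlox/v1/config/bgp/neigh) and the sample peer address/AS are illustrative assumptions, not taken from this patch.

    package main

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    	"net/http"
    )

    // bgpNeigh mirrors the BGPNeigh swagger model introduced in this patch.
    type bgpNeigh struct {
    	IPAddress  string `json:"ipAddress,omitempty"`
    	RemoteAs   int64  `json:"remoteAs,omitempty"`
    	RemotePort int64  `json:"remotePort,omitempty"` // omitted/0 => gobgp side defaults to 179
    }

    func main() {
    	// Peer that speaks BGP on 1791 (e.g. a loxilb-peer instance) instead of 179.
    	// Address and AS are hypothetical sample values.
    	neigh := bgpNeigh{IPAddress: "192.168.80.101", RemoteAs: 64512, RemotePort: 1791}

    	body, err := json.Marshal(neigh)
    	if err != nil {
    		panic(err)
    	}

    	// Assumed URL; adjust host and base path for the actual deployment.
    	resp, err := http.Post("http://127.0.0.1:11111/netlox/v1/config/bgp/neigh",
    		"application/json", bytes.NewReader(body))
    	if err != nil {
    		fmt.Println("request failed:", err)
    		return
    	}
    	defer resp.Body.Close()
    	fmt.Println("status:", resp.Status)
    }

The same default applies on removal, since NetGoBGPNeighDel forwards RemotePort through the identical code path.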