[release-1.31] Rework loadbalancer server selection logic #11457

Merged
Changes from 1 commit
Add hidden flag/var for supervisor/apiserver listen config
Add flags for the supervisor and apiserver ports and bind address so that we can add an e2e test covering the supervisor and apiserver on separate ports, as used by rke2.

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
(cherry picked from commit e143e0f)
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
brandond committed Dec 10, 2024

commit d1aa856519370527eb7483ac5d37a398a11cdf2a
23 changes: 22 additions & 1 deletion pkg/cli/cmds/server.go
@@ -188,14 +188,35 @@ var ServerFlags = []cli.Flag{
Value: 6443,
Destination: &ServerConfig.HTTPSPort,
},
&cli.IntFlag{
Name: "supervisor-port",
EnvVar: version.ProgramUpper + "_SUPERVISOR_PORT",
Usage: "(experimental) Supervisor listen port override",
Hidden: true,
Destination: &ServerConfig.SupervisorPort,
},
&cli.IntFlag{
Name: "apiserver-port",
EnvVar: version.ProgramUpper + "_APISERVER_PORT",
Usage: "(experimental) apiserver internal listen port override",
Hidden: true,
Destination: &ServerConfig.APIServerPort,
},
&cli.StringFlag{
Name: "apiserver-bind-address",
EnvVar: version.ProgramUpper + "_APISERVER_BIND_ADDRESS",
Usage: "(experimental) apiserver internal bind address override",
Hidden: true,
Destination: &ServerConfig.APIServerBindAddress,
},
&cli.StringFlag{
Name: "advertise-address",
Usage: "(listener) IPv4/IPv6 address that apiserver uses to advertise to members of the cluster (default: node-external-ip/node-ip)",
Destination: &ServerConfig.AdvertiseIP,
},
&cli.IntFlag{
Name: "advertise-port",
Usage: "(listener) Port that apiserver uses to advertise to members of the cluster (default: listen-port)",
Usage: "(listener) Port that apiserver uses to advertise to members of the cluster (default: https-listen-port)",
Destination: &ServerConfig.AdvertisePort,
},
&cli.StringSliceFlag{
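
The three new flags write into the shared ServerConfig. The struct fields they bind to are not part of this hunk; the sketch below is an assumption of what they look like, with names taken directly from the Destination bindings above:

	// Assumed shape of the struct behind ServerConfig (not shown in this diff).
	type Server struct {
		// ...existing fields such as HTTPSPort, AdvertiseIP, AdvertisePort...
		SupervisorPort       int    // set by --supervisor-port
		APIServerPort        int    // set by --apiserver-port
		APIServerBindAddress string // set by --apiserver-bind-address
	}

With supervisor-port set to something other than https-listen-port, agents join through the supervisor port rather than the apiserver port; the e2e test added below rewrites its agents' server address to port 9345 accordingly.
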
84 changes: 84 additions & 0 deletions tests/e2e/startup/startup_test.go
@@ -71,6 +71,12 @@ func KillK3sCluster(nodes []string) error {
if _, err := e2e.RunCmdOnNode("k3s-killall.sh", node); err != nil {
return err
}
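// Flush and rotate the systemd journal, then vacuum the rotated archives
// (--vacuum-size=1) so that logs from earlier test runs are discarded.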
if _, err := e2e.RunCmdOnNode("journalctl --flush --sync --rotate --vacuum-size=1", node); err != nil {
return err
}
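// Remove any config.yaml.d drop-ins created by individual test cases.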
if _, err := e2e.RunCmdOnNode("rm -rf /etc/rancher/k3s/config.yaml.d", node); err != nil {
return err
}
if strings.Contains(node, "server") {
if _, err := e2e.RunCmdOnNode("rm -rf /var/lib/rancher/k3s/server/db", node); err != nil {
return err
@@ -93,6 +99,83 @@ var _ = BeforeSuite(func() {
})

var _ = Describe("Various Startup Configurations", Ordered, func() {
Context("Verify dedicated supervisor port", func() {
It("Starts K3s with no issues", func() {
for _, node := range agentNodeNames {
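// Copy the agent's existing "server:" line into a drop-in config,
// rewriting the port from 6443 to the dedicated supervisor port 9345.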
cmd := "mkdir -p /etc/rancher/k3s/config.yaml.d; grep -F server: /etc/rancher/k3s/config.yaml | sed s/6443/9345/ > /tmp/99-server.yaml; sudo mv /tmp/99-server.yaml /etc/rancher/k3s/config.yaml.d/"
res, err := e2e.RunCmdOnNode(cmd, node)
By("checking command results: " + res)
Expect(err).NotTo(HaveOccurred())
}
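// Server config: serve the supervisor on 9345 and keep the apiserver on
// 6443 bound to all addresses, mirroring the split-port layout used by rke2.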
supervisorPortYAML := "supervisor-port: 9345\napiserver-port: 6443\napiserver-bind-address: 0.0.0.0\ndisable: traefik\nnode-taint: node-role.kubernetes.io/control-plane:NoExecute"
err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), supervisorPortYAML, "")
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))

fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})

It("Checks node and pod status", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "360s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)

fmt.Printf("\nFetching pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "360s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
})

It("Returns pod metrics", func() {
cmd := "kubectl top pod -A"
Eventually(func() error {
_, err := e2e.RunCommand(cmd)
return err
}, "600s", "5s").Should(Succeed())
})

It("Returns node metrics", func() {
cmd := "kubectl top node"
_, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
})

It("Runs an interactive command a pod", func() {
cmd := "kubectl run busybox --rm -it --restart=Never --image=rancher/mirrored-library-busybox:1.36.1 -- uname -a"
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})

It("Collects logs from a pod", func() {
cmd := "kubectl logs -n kube-system -l k8s-app=metrics-server -c metrics-server"
_, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
})
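
// Hypothetical extra check, not part of this commit: confirm that the
// supervisor (9345) and the apiserver (6443) each have their own TCP
// listener on the first server; assumes ss is available on the node image.
It("Listens on separate supervisor and apiserver ports", func() {
	for _, port := range []string{"9345", "6443"} {
		cmd := "ss -tln | grep -w " + port
		Eventually(func() error {
			_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
			return err
		}, "120s", "5s").Should(Succeed())
	}
})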

It("Kills the cluster", func() {
err := KillK3sCluster(append(serverNodeNames, agentNodeNames...))
Expect(err).NotTo(HaveOccurred())
})
})
Context("Verify CRI-Dockerd :", func() {
It("Starts K3s with no issues", func() {
dockerYAML := "docker: true"
@@ -311,6 +394,7 @@ var _ = AfterEach(func() {

var _ = AfterSuite(func() {
if failed {
AddReportEntry("config", e2e.GetConfig(append(serverNodeNames, agentNodeNames...)))
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...)))
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
13 changes: 13 additions & 0 deletions tests/e2e/testutils.go
@@ -377,6 +377,19 @@ func TailJournalLogs(lines int, nodes []string) string {
return logs.String()
}

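// GetConfig returns the contents of the files under /etc/rancher/k3s/ on each
// of the given nodes, for inclusion in failure reports.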
func GetConfig(nodes []string) string {
config := &strings.Builder{}
for _, node := range nodes {
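// Stream /etc/rancher/k3s/ through tar (-P keeps absolute paths) and extract
// it right back: -v lists each file name and -O writes the file contents to
// stdout instead of to disk.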
cmd := "tar -Pc /etc/rancher/k3s/ | tar -vxPO"
if c, err := RunCmdOnNode(cmd, node); err != nil {
fmt.Fprintf(config, "** failed to get config for node %s ***\n%v\n", node, err)
} else {
fmt.Fprintf(config, "** config for node %s ***\n%s\n", node, c)
}
}
return config.String()
}

// GetVagrantLog returns the logs of the vagrant commands that initialize the nodes and provision K3s on each node.
// It also attempts to fetch the systemctl logs of K3s on nodes where the k3s.service failed.
func GetVagrantLog(cErr error) string {