test kubernetes e2e lb tests #9

Merged: 10 commits, May 8, 2023
Changes from all commits
6 changes: 1 addition & 5 deletions .github/workflows/e2e.yml
@@ -3,14 +3,10 @@ name: e2e
on:
push:
pull_request:
branches: [ master ]
workflow_dispatch:

env:
GO_VERSION: "1.20.1"
K8S_VERSION: "v1.26.0"
KIND_VERSION: "v0.17.0"
KIND_CLUSTER_NAME: "kind-cloud"
GO_VERSION: "1.20.4"

jobs:
e2e:
190 changes: 190 additions & 0 deletions .github/workflows/k8s.yml
@@ -0,0 +1,190 @@
name: k8s

on:
push:
pull_request:
workflow_dispatch:

env:
GO_VERSION: "1.20.4"
K8S_VERSION: "v1.27.1"
KIND_VERSION: "v0.18.0"
KIND_CLUSTER_NAME: "kind-cloud"

jobs:
k8s:
name: k8s
runs-on: ubuntu-latest
timeout-minutes: 100
strategy:
fail-fast: false
matrix:
# TODO add "dual", waiting on KEP https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/3705-cloud-node-ips
ipFamily: ["ipv4", "ipv6"]
env:
JOB_NAME: "cloud-provider-kind-e2e-${{ matrix.ipFamily }}"
IP_FAMILY: ${{ matrix.ipFamily }}
steps:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: ${{ env.GO_VERSION }}
id: go

- name: Check out code
uses: actions/checkout@v2

- name: Enable ipv4 and ipv6 forwarding
run: |
sudo sysctl -w net.ipv6.conf.all.forwarding=1
sudo sysctl -w net.ipv4.ip_forward=1

- name: Set up environment (download dependencies)
run: |
TMP_DIR=$(mktemp -d)
# Test binaries
curl -L https://dl.k8s.io/${{ env.K8S_VERSION }}/kubernetes-test-linux-amd64.tar.gz -o ${TMP_DIR}/kubernetes-test-linux-amd64.tar.gz
tar xvzf ${TMP_DIR}/kubernetes-test-linux-amd64.tar.gz \
--directory ${TMP_DIR} \
--strip-components=3 kubernetes/test/bin/ginkgo kubernetes/test/bin/e2e.test
# kubectl
curl -L https://dl.k8s.io/${{ env.K8S_VERSION }}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl
# kind
curl -Lo ${TMP_DIR}/kind https://kind.sigs.k8s.io/dl/${{ env.KIND_VERSION }}/kind-linux-amd64
# Install
sudo cp ${TMP_DIR}/ginkgo /usr/local/bin/ginkgo
sudo cp ${TMP_DIR}/e2e.test /usr/local/bin/e2e.test
sudo cp ${TMP_DIR}/kubectl /usr/local/bin/kubectl
sudo cp ${TMP_DIR}/kind /usr/local/bin/kind
sudo chmod +x /usr/local/bin/*
# Create folder to store artifacts
mkdir -p _artifacts

- name: Run cloud-provider-kind
run: |
make
nohup bin/cloud-provider-kind > ./_artifacts/ccm-kind.log 2>&1 &

- name: Create multi node cluster
run: |
# create cluster
cat <<EOF | /usr/local/bin/kind create cluster \
--name ${{ env.KIND_CLUSTER_NAME}} \
--image kindest/node:${{ env.K8S_VERSION }} \
-v7 --wait 1m --retain --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
ipFamily: ${IP_FAMILY}
nodes:
- role: control-plane
- role: worker
- role: worker
kubeadmConfigPatches:
- |
kind: ClusterConfiguration
apiServer:
extraArgs:
cloud-provider: "external"
v: "5"
controllerManager:
extraArgs:
cloud-provider: "external"
v: "5"
---
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
cloud-provider: "external"
v: "5"
---
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
cloud-provider: "external"
v: "5"
EOF
/usr/local/bin/kind get kubeconfig --name ${{ env.KIND_CLUSTER_NAME}} > _artifacts/kubeconfig.conf

- name: Workaround CoreDNS for IPv6 airgapped
if: ${{ matrix.ipFamily == 'ipv6' }}
run: |
# Patch CoreDNS to work in GitHub CI
# 1. GitHub CI doesn't offer IPv6 connectivity, so CoreDNS should be configured
# to work in an offline environment:
# https://github.com/coredns/coredns/issues/2494#issuecomment-457215452
# 2. GitHub CI adds the following domains to the resolv.conf search field:
# .net.
# CoreDNS should handle those domains and answer with NXDOMAIN instead of SERVFAIL,
# otherwise pods stop trying to resolve the domain.
# Get the current config
original_coredns=$(/usr/local/bin/kubectl get -oyaml -n=kube-system configmap/coredns)
echo "Original CoreDNS config:"
echo "${original_coredns}"
# Patch it
fixed_coredns=$(
printf '%s' "${original_coredns}" | sed \
-e 's/^.*kubernetes cluster\.local/& net/' \
-e '/^.*upstream$/d' \
-e '/^.*fallthrough.*$/d' \
-e '/^.*forward . \/etc\/resolv.conf$/d' \
-e '/^.*loop$/d' \
)
echo "Patched CoreDNS config:"
echo "${fixed_coredns}"
printf '%s' "${fixed_coredns}" | /usr/local/bin/kubectl apply -f -

- name: Get Cluster status
run: |
/usr/local/bin/kubectl get nodes -o yaml
/usr/local/bin/kubectl get pods -A -o wide
# wait until the network is ready
/usr/local/bin/kubectl wait --for=condition=ready pods --namespace=kube-system -l k8s-app=kube-dns --timeout=3m
/usr/local/bin/kubectl get nodes -o wide
/usr/local/bin/kubectl get pods -A

- name: Run tests
run: |
export KUBERNETES_CONFORMANCE_TEST='y'
export E2E_REPORT_DIR=${PWD}/_artifacts

# Run the tests with the aws provider (which creates a null e2e provider) so the loadbalancer tests are not skipped
# "should be able to create LoadBalancer Service without NodePort and change it": the LB implementation uses haproxy and NodePorts and cannot forward directly to the Pods
# "should be able to change the type and ports of a TCP service": the test expects a connection refused, but haproxy seems to return EOF
# "loadbalancer source ranges": fails on IPv6 only
/usr/local/bin/ginkgo --nodes=25 \
--focus="sig-network" \
--skip="Feature|Federation|PerformanceDNS|DualStack|Disruptive|Serial|KubeProxy|GCE|Netpol|NetworkPolicy|256.search.list.characters|LoadBalancer.Service.without.NodePort|type.and.ports.of.a.TCP.service|loadbalancer.source.ranges" \
/usr/local/bin/e2e.test \
-- \
--kubeconfig=${PWD}/_artifacts/kubeconfig.conf \
--provider=aws \
--dump-logs-on-failure=false \
--report-dir=${E2E_REPORT_DIR} \
--disable-log-dump=true

- name: Upload Junit Reports
if: always()
uses: actions/upload-artifact@v2
with:
name: kind-junit-${{ env.JOB_NAME }}-${{ github.run_id }}
path: './_artifacts/*.xml'

- name: Export logs
if: always()
run: |
/usr/local/bin/kind export logs --name ${{ env.KIND_CLUSTER_NAME}} --loglevel=debug ./_artifacts/logs
cp ./_artifacts/ccm-kind.log ./_artifacts/logs

- name: Upload logs
if: always()
uses: actions/upload-artifact@v2
with:
name: kind-logs-${{ env.JOB_NAME }}-${{ github.run_id }}
path: ./_artifacts/logs

- name: Publish Test Report
uses: mikepenz/action-junit-report@v2
if: always()
with:
report_paths: './_artifacts/*.xml'
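For context on what the LoadBalancer e2e coverage added by this workflow exercises, the following is a minimal client-go sketch (not part of the PR) that polls a Service of type LoadBalancer until cloud-provider-kind publishes an ingress address. The kubeconfig path matches the one the workflow writes to _artifacts/kubeconfig.conf; the service name and namespace are placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig written by the workflow; adjust the path when running locally.
	config, err := clientcmd.BuildConfigFromFlags("", "_artifacts/kubeconfig.conf")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// Poll an existing Service of type LoadBalancer (placeholder name) until the
	// cloud provider sets an ingress entry in its status.
	for i := 0; i < 30; i++ {
		svc, err := client.CoreV1().Services("default").Get(context.TODO(), "my-lb-service", metav1.GetOptions{})
		if err == nil && len(svc.Status.LoadBalancer.Ingress) > 0 {
			fmt.Println("LoadBalancer ingress:", svc.Status.LoadBalancer.Ingress[0].IP)
			return
		}
		time.Sleep(5 * time.Second)
	}
	fmt.Println("timed out waiting for LoadBalancer ingress")
}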
1 change: 1 addition & 0 deletions go.mod
@@ -63,6 +63,7 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/component-helpers v0.26.1 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
2 changes: 2 additions & 0 deletions go.sum
@@ -595,6 +595,8 @@ k8s.io/cloud-provider v0.26.1 h1:qEZmsGWGptOtVSpeMdTsapHX2BEqIk7rc5MA4caBqE0=
k8s.io/cloud-provider v0.26.1/go.mod h1:6PheIxRySYuRBBxtTUADya8S2rbr18xKi+fhGbLkduc=
k8s.io/component-base v0.26.1 h1:4ahudpeQXHZL5kko+iDHqLj/FSGAEUnSVO0EBbgDd+4=
k8s.io/component-base v0.26.1/go.mod h1:VHrLR0b58oC035w6YQiBSbtsf0ThuSwXP+p5dD/kAWU=
k8s.io/component-helpers v0.26.1 h1:Y5h1OYUJTGyHZlSAsc7mcfNsWF08S/MlrQyF/vn93mU=
k8s.io/component-helpers v0.26.1/go.mod h1:jxNTnHb1axLe93MyVuvKj9T/+f4nxBVrj/xf01/UNFk=
k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M=
k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
34 changes: 28 additions & 6 deletions pkg/controller/controller.go
@@ -12,6 +12,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
cloudprovider "k8s.io/cloud-provider"
nodecontroller "k8s.io/cloud-provider/controllers/node"
servicecontroller "k8s.io/cloud-provider/controllers/service"
controllersmetrics "k8s.io/component-base/metrics/prometheus/controllers"
"k8s.io/klog/v2"
@@ -30,6 +31,7 @@ type Controller struct {
type ccm struct {
factory informers.SharedInformerFactory
serviceController *servicecontroller.Controller
nodeController *nodecontroller.CloudNodeController
cancelFn context.CancelFunc
}

@@ -97,12 +99,12 @@ func (c *Controller) Run(ctx context.Context) {

klog.V(2).Infof("Creating new cloud provider for cluster %s", cluster)
cloud := provider.New(cluster, c.kind)
ccm, err := startServiceController(ctx, cluster, kubeClient, cloud)
ccm, err := startCloudControllerManager(ctx, cluster, kubeClient, cloud)
if err != nil {
klog.Errorf("Failed to start service controller for cluster %s: %v", cluster, err)
klog.Errorf("Failed to start cloud controller for cluster %s: %v", cluster, err)
continue
}
klog.Infof("Starting service controller for cluster %s", cluster)
klog.Infof("Starting cloud controller for cluster %s", cluster)
c.clusters[cluster] = ccm
}
// remove expired ones
@@ -119,9 +121,9 @@ }
}
}

// TODO: implement leader election to not have 2 providers creating load balancers
// TODO: implement leader election to not have problems with multiple providers
// ref: https://github.com/kubernetes/kubernetes/blob/d97ea0f705847f90740cac3bc3dd8f6a4026d0b5/cmd/kube-scheduler/app/server.go#L211
func startServiceController(ctx context.Context, clusterName string, kubeClient kubernetes.Interface, cloud cloudprovider.Interface) (*ccm, error) {
func startCloudControllerManager(ctx context.Context, clusterName string, kubeClient kubernetes.Interface, cloud cloudprovider.Interface) (*ccm, error) {
client := kubeClient.Discovery().RESTClient()
// wait for health
err := wait.PollImmediateWithContext(ctx, 1*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
@@ -139,6 +141,8 @@ func startCloudControllerManager(ctx context.Context, clusterName string, kubeClient
}

sharedInformers := informers.NewSharedInformerFactory(kubeClient, 60*time.Second)

ccmMetrics := controllersmetrics.NewControllerManagerMetrics(clusterName)
// Start the service controller
serviceController, err := servicecontroller.New(
cloud,
@@ -155,10 +159,28 @@ }
}

ctx, cancel := context.WithCancel(ctx)
go serviceController.Run(ctx, 5, ccmMetrics)

// Start the node controller
nodeController, err := nodecontroller.NewCloudNodeController(
sharedInformers.Core().V1().Nodes(),
kubeClient,
cloud,
30*time.Second,
)
if err != nil {
// This error shouldn't fail. It lives like this as a legacy.
klog.Errorf("Failed to start node controller: %v", err)
cancel()
return nil, err
}
go nodeController.Run(ctx.Done(), ccmMetrics)

sharedInformers.Start(ctx.Done())
go serviceController.Run(ctx, 5, controllersmetrics.NewControllerManagerMetrics(clusterName))

return &ccm{
factory: sharedInformers,
serviceController: serviceController,
nodeController: nodeController,
cancelFn: cancel}, nil
}
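The TODO in startCloudControllerManager above notes that leader election is still missing, so two provider instances could act on the same cluster. A minimal sketch of what that could look like with client-go's standard leaderelection helpers follows; the lock name, namespace, and timings are assumptions, not part of this PR.

package main

import (
	"context"
	"os"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

// runWithLeaderElection runs fn only while this process holds a Lease-based
// lock for the given cluster, so only one provider instance manages it.
func runWithLeaderElection(ctx context.Context, kubeconfig, clusterName string, fn func(ctx context.Context)) error {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}
	hostname, _ := os.Hostname()
	// Lease lock in kube-system; the name is an assumed convention.
	lock, err := resourcelock.New(resourcelock.LeasesResourceLock,
		"kube-system", "cloud-provider-kind-"+clusterName,
		client.CoreV1(), client.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: hostname})
	if err != nil {
		return err
	}
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			// Start the service and node controllers only while leading.
			OnStartedLeading: fn,
			OnStoppedLeading: func() { klog.Infof("lost leadership for cluster %s", clusterName) },
		},
	})
	return nil
}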
49 changes: 49 additions & 0 deletions vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go

Some generated files are not rendered by default.

4 changes: 4 additions & 0 deletions vendor/k8s.io/client-go/util/retry/OWNERS

Some generated files are not rendered by default.
