From c1ff02e60b5f2bd689a9f69e2e9faf762ef138ea Mon Sep 17 00:00:00 2001 From: clyi Date: Thu, 9 Jan 2025 14:12:48 +0800 Subject: [PATCH 1/8] add lb:option prefer_local_backend Signed-off-by: clyi --- dist/images/Dockerfile.base | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/dist/images/Dockerfile.base b/dist/images/Dockerfile.base index 94d6a4cceb1..1e63ee41b0f 100644 --- a/dist/images/Dockerfile.base +++ b/dist/images/Dockerfile.base @@ -65,7 +65,9 @@ RUN cd /usr/src/ && git clone -b branch-24.03 --depth=1 https://github.com/ovn-o # support dedicated BFD LRP curl -s https://github.com/kubeovn/ovn/commit/40345aa35d03c93cde877ccfa8111346291ebc7c.patch | git apply && \ # skip node local dns ip conntrack when set acl - curl -s https://github.com/kubeovn/ovn/commit/e7d3ba53cdcbc524bb29c54ddb07b83cc4258ed7.patch | git apply + curl -s https://github.com/kubeovn/ovn/commit/e7d3ba53cdcbc524bb29c54ddb07b83cc4258ed7.patch | git apply && \ + # select local backend first + curl -s https://github.com/kubeovn/ovn/commit/faa762818447a4ac470ec28c69bbcabf80091d3a.patch | git apply RUN apt install -y build-essential fakeroot \ autoconf automake bzip2 debhelper-compat dh-exec dh-python dh-sequence-python3 dh-sequence-sphinxdoc \ From f0796f845bc8058dfa9d1a477141ae5aedaa7131 Mon Sep 17 00:00:00 2001 From: clyi Date: Fri, 24 Jan 2025 10:10:18 +0800 Subject: [PATCH 2/8] support metallb underlay Signed-off-by: clyi --- .github/workflows/build-x86-image.yaml | 138 ++++++ Makefile | 8 + .../kube-ovn/templates/controller-deploy.yaml | 1 + charts/kube-ovn/templates/kube-ovn-crd.yaml | 2 + charts/kube-ovn/templates/ovn-CR.yaml | 14 +- charts/kube-ovn/values.yaml | 1 + dist/images/install.sh | 11 + e2e.mk | 9 + go.mod | 2 + go.sum | 53 +++ mocks/pkg/ovs/interface.go | 28 ++ pkg/apis/kubeovn/v1/subnet.go | 11 +- pkg/controller/config.go | 3 + pkg/controller/endpoint.go | 48 +- pkg/controller/init.go | 6 + pkg/controller/service.go | 54 ++- pkg/daemon/controller.go | 65 ++- pkg/daemon/controller_linux.go | 235 +++++++++- pkg/daemon/controller_windows.go | 4 + pkg/ovs/interface.go | 1 + pkg/ovs/ovn-nb-load_balancer.go | 37 ++ pkg/ovs/ovs-ofctl.go | 144 ++++++ pkg/util/const.go | 9 + test/e2e/framework/framework.go | 15 +- test/e2e/framework/metallb.go | 103 +++++ test/e2e/metallb/e2e_test.go | 418 ++++++++++++++++++ 26 files changed, 1394 insertions(+), 26 deletions(-) create mode 100644 pkg/ovs/ovs-ofctl.go create mode 100644 test/e2e/framework/metallb.go create mode 100644 test/e2e/metallb/e2e_test.go diff --git a/.github/workflows/build-x86-image.yaml b/.github/workflows/build-x86-image.yaml index 199622d1928..9286901b6dc 100644 --- a/.github/workflows/build-x86-image.yaml +++ b/.github/workflows/build-x86-image.yaml @@ -3144,6 +3144,143 @@ jobs: name: kube-ovn-connectivity-e2e-${{ matrix.mode }}-ko-log path: kube-ovn-connectivity-e2e-${{ matrix.mode }}-ko-log.tar.gz + + kube-ovn-metallb-e2e: + name: OVN METALLB E2E + needs: + - build-kube-ovn + - build-e2e-binaries + runs-on: ubuntu-24.04 + timeout-minutes: 15 + steps: + - uses: jlumbroso/free-disk-space@v1.3.1 + with: + android: true + dotnet: true + haskell: true + docker-images: false + large-packages: false + tool-cache: false + swap-storage: false + + - uses: actions/checkout@v4 + + - name: Create the default branch directory + if: (github.base_ref || github.ref_name) != github.event.repository.default_branch + run: mkdir -p test/e2e/source + + - name: Check out the default branch + if: (github.base_ref || github.ref_name) != 
github.event.repository.default_branch + uses: actions/checkout@v4 + with: + ref: ${{ github.event.repository.default_branch }} + fetch-depth: 1 + path: test/e2e/source + + - name: Export E2E directory + run: | + if [ '${{ github.base_ref || github.ref_name }}' = '${{ github.event.repository.default_branch }}' ]; then + echo "E2E_DIR=." >> "$GITHUB_ENV" + else + echo "E2E_DIR=test/e2e/source" >> "$GITHUB_ENV" + fi + + - uses: actions/setup-go@v5 + id: setup-go + with: + go-version-file: ${{ env.E2E_DIR }}/go.mod + check-latest: true + cache: false + + - name: Export Go full version + run: echo "GO_VERSION=${{ steps.setup-go.outputs.go-version }}" >> "$GITHUB_ENV" + + - name: Go cache + uses: actions/cache/restore@v4 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-e2e-go-${{ env.GO_VERSION }}-x86-${{ hashFiles(format('{0}/**/go.sum', env.E2E_DIR)) }} + restore-keys: ${{ runner.os }}-e2e-go-${{ env.GO_VERSION }}-x86- + + - name: Install kind + uses: helm/kind-action@v1.12.0 + with: + version: ${{ env.KIND_VERSION }} + install_only: true + + - name: Install ginkgo + working-directory: ${{ env.E2E_DIR }} + run: go install -v -mod=mod github.com/onsi/ginkgo/v2/ginkgo + + - name: Download kube-ovn image + uses: actions/download-artifact@v4 + with: + name: kube-ovn + + - name: Load images + run: docker load -i kube-ovn.tar + + - name: Create kind cluster + run: | + pipx install jinjanator + make kind-init + + - name: Install Kube-OVN + id: install + run: make kind-install-metallb-pool-from-underlay + + - name: Run Ovn Metallb and Kube-OVN Combined E2E + id: kube-ovn-metallb-e2e + working-directory: ${{ env.E2E_DIR }} + env: + E2E_BRANCH: ${{ github.base_ref || github.ref_name }} + run: make kube-ovn-metallb-e2e + + - name: Collect k8s events + if: failure() && (steps.kube-ovn-metallb-e2e.conclusion == 'failure') + run: | + kubectl get events -A -o yaml > kube-ovn-metallb-e2e-events.yaml + tar zcf kube-ovn-metallb-e2e-events.tar.gz kube-ovn-metallb-e2e-events.yaml + + - name: Upload k8s events + uses: actions/upload-artifact@v4 + if: failure() && (steps.kube-ovn-metallb-e2e.conclusion == 'failure') + with: + name: kube-ovn-metallb-e2e-events + path: kube-ovn-metallb-e2e-events.tar.gz + + - name: Collect apiserver audit logs + if: failure() && (steps.kube-ovn-metallb-e2e.conclusion == 'failure') + run: | + docker cp kube-ovn-control-plane:/var/log/kubernetes/kube-apiserver-audit.log .
+ tar zcf kube-ovn-metallb-e2e-audit-log.tar.gz kube-apiserver-audit.log + + - name: Upload apiserver audit logs + uses: actions/upload-artifact@v4 + if: failure() && (steps.kube-ovn-metallb-e2e.conclusion == 'failure') + with: + name: kube-ovn-metallb-e2e-audit-log + path: kube-ovn-metallb-e2e-audit-log.tar.gz + + - name: kubectl ko log + if: failure() && (steps.kube-ovn-metallb-e2e.conclusion == 'failure') + run: | + make kubectl-ko-log + mv kubectl-ko-log.tar.gz kube-ovn-metallb-e2e-ko-log.tar.gz + + - name: upload kubectl ko log + uses: actions/upload-artifact@v4 + if: failure() && (steps.kube-ovn-metallb-e2e.conclusion == 'failure') + with: + name: kube-ovn-metallb-e2e-ko-log + path: kube-ovn-metallb-e2e-ko-log.tar.gz + + - name: Check kube ovn pod restarts + if: ${{ success() || (failure() && (steps.install.conclusion == 'failure' || steps.kube-ovn-metallb-e2e.conclusion == 'failure')) }} + run: make check-kube-ovn-pod-restarts + push: name: Push Images needs: @@ -3153,6 +3290,7 @@ jobs: - kube-ovn-conformance-e2e - kube-ovn-ic-conformance-e2e - kube-ovn-ipsec-e2e + - kube-ovn-metallb-e2e - multus-conformance-e2e - vpc-egress-gateway-e2e - ovn-vpc-nat-gw-conformance-e2e diff --git a/Makefile b/Makefile index 13252fb69d5..7b02d66bb85 100644 --- a/Makefile +++ b/Makefile @@ -803,6 +803,9 @@ kind-install-metallb: --set speaker.frr.image.tag=$(FRR_VERSION) $(call kubectl_wait_exist_and_ready,metallb-system,deployment,metallb-controller) $(call kubectl_wait_exist_and_ready,metallb-system,daemonset,metallb-speaker) + +.PHONY: kind-configure-metallb +kind-configure-metallb: @metallb_pool=$(shell echo $(KIND_IPV4_SUBNET) | sed 's/.[^.]\+$$/.201/')-$(shell echo $(KIND_IPV4_SUBNET) | sed 's/.[^.]\+$$/.250/') \ jinjanate yamls/metallb-cr.yaml.j2 -o metallb-cr.yaml kubectl apply -f metallb-cr.yaml @@ -979,6 +982,11 @@ kind-install-anp: kind-load-image kubectl apply -f "$(BANP_CR_YAML)" @$(MAKE) ENABLE_ANP=true kind-install +.PHONY: kind-install-metallb-pool-from-underlay +kind-install-metallb-pool-from-underlay: kind-load-image + @$(MAKE) ENABLE_OVN_LB_PREFER_LOCAL=true LS_CT_SKIP_DST_LPORT_IPS=false kind-install + @$(MAKE) kind-install-metallb + .PHONY: kind-reload kind-reload: kind-reload-ovs kubectl delete pod -n kube-system -l app=kube-ovn-controller diff --git a/charts/kube-ovn/templates/controller-deploy.yaml b/charts/kube-ovn/templates/controller-deploy.yaml index 095f67eeb8d..3675dffca3d 100644 --- a/charts/kube-ovn/templates/controller-deploy.yaml +++ b/charts/kube-ovn/templates/controller-deploy.yaml @@ -140,6 +140,7 @@ spec: - --ovsdb-con-timeout={{- .Values.func.OVSDB_CON_TIMEOUT }} - --ovsdb-inactivity-timeout={{- .Values.func.OVSDB_INACTIVITY_TIMEOUT }} - --enable-live-migration-optimize={{- .Values.func.ENABLE_LIVE_MIGRATION_OPTIMIZE }} + - --enable-ovn-lb-prefer-local={{- .Values.func.ENABLE_OVN_LB_PREFER_LOCAL }} - --image={{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }} securityContext: runAsUser: {{ include "kubeovn.runAsUser" . 
}} diff --git a/charts/kube-ovn/templates/kube-ovn-crd.yaml b/charts/kube-ovn/templates/kube-ovn-crd.yaml index cab46f09965..6c176e6bf5a 100644 --- a/charts/kube-ovn/templates/kube-ovn-crd.yaml +++ b/charts/kube-ovn/templates/kube-ovn-crd.yaml @@ -2564,6 +2564,8 @@ spec: type: boolean enableMulticastSnoop: type: boolean + enableExternalLBAddress: + type: boolean routeTable: type: string namespaceSelectors: diff --git a/charts/kube-ovn/templates/ovn-CR.yaml b/charts/kube-ovn/templates/ovn-CR.yaml index a93d3305aab..ddf6dfd887d 100644 --- a/charts/kube-ovn/templates/ovn-CR.yaml +++ b/charts/kube-ovn/templates/ovn-CR.yaml @@ -295,21 +295,13 @@ rules: - list - patch - watch - - apiGroups: - - "kubeovn.io" - resources: - - ips - verbs: - - get - - update - apiGroups: - "" resources: - - events + - services verbs: - - create - - patch - - update + - list + - watch - apiGroups: - "" resources: diff --git a/charts/kube-ovn/values.yaml b/charts/kube-ovn/values.yaml index b98bc90a850..2df60cc301c 100644 --- a/charts/kube-ovn/values.yaml +++ b/charts/kube-ovn/values.yaml @@ -78,6 +78,7 @@ func: OVSDB_CON_TIMEOUT: 3 OVSDB_INACTIVITY_TIMEOUT: 10 ENABLE_LIVE_MIGRATION_OPTIMIZE: true + ENABLE_OVN_LB_PREFER_LOCAL: false ipv4: POD_CIDR: "10.16.0.0/16" diff --git a/dist/images/install.sh b/dist/images/install.sh index 2db55c7810b..5c5b771d130 100755 --- a/dist/images/install.sh +++ b/dist/images/install.sh @@ -46,6 +46,7 @@ SET_VXLAN_TX_OFF=${SET_VXLAN_TX_OFF:-false} OVSDB_CON_TIMEOUT=${OVSDB_CON_TIMEOUT:-3} OVSDB_INACTIVITY_TIMEOUT=${OVSDB_INACTIVITY_TIMEOUT:-10} ENABLE_LIVE_MIGRATION_OPTIMIZE=${ENABLE_LIVE_MIGRATION_OPTIMIZE:-true} +ENABLE_OVN_LB_PREFER_LOCAL=${ENABLE_OVN_LB_PREFER_LOCAL:-false} PROBE_HTTP_SCHEME="HTTP" if [ "$SECURE_SERVING" = "true" ]; then @@ -2815,6 +2816,8 @@ spec: type: boolean enableMulticastSnoop: type: boolean + enableExternalLBAddress: + type: boolean routeTable: type: string namespaceSelectors: @@ -3692,6 +3695,13 @@ rules: - create - patch - update + - apiGroups: + - "" + resources: + - services + verbs: + - list + - watch - apiGroups: - "" resources: @@ -4734,6 +4744,7 @@ spec: - --ovsdb-con-timeout=$OVSDB_CON_TIMEOUT - --ovsdb-inactivity-timeout=$OVSDB_INACTIVITY_TIMEOUT - --enable-live-migration-optimize=$ENABLE_LIVE_MIGRATION_OPTIMIZE + - --enable-ovn-lb-prefer-local=$ENABLE_OVN_LB_PREFER_LOCAL - --image=$REGISTRY/kube-ovn:$VERSION securityContext: runAsUser: ${RUN_AS_USER} diff --git a/e2e.mk b/e2e.mk index b94734cebaa..55b6452637a 100644 --- a/e2e.mk +++ b/e2e.mk @@ -251,3 +251,12 @@ kube-ovn-connectivity-e2e: E2E_NETWORK_MODE=$(E2E_NETWORK_MODE) \ ginkgo $(GINKGO_OUTPUT_OPT) --procs 2 --randomize-all -v \ --focus=CNI:Kube-OVN ./test/e2e/connectivity -- $(TEST_BIN_ARGS) + +.PHONY: kube-ovn-metallb-e2e +kube-ovn-metallb-e2e: + ginkgo build $(E2E_BUILD_FLAGS) ./test/e2e/metallb + E2E_BRANCH=$(E2E_BRANCH) \ + E2E_IP_FAMILY=$(E2E_IP_FAMILY) \ + E2E_NETWORK_MODE=$(E2E_NETWORK_MODE) \ + ginkgo $(GINKGO_OUTPUT_OPT) $(GINKGO_PARALLEL_OPT) --randomize-all -v \ + --focus=CNI:Kube-OVN ./test/e2e/metallb/metallb.test -- $(TEST_BIN_ARGS) diff --git a/go.mod b/go.mod index fcd6070d8a5..933f42a0bb9 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/containerd/containerd v1.7.22 github.com/containernetworking/cni v1.2.3 github.com/containernetworking/plugins v1.6.0 + github.com/digitalocean/go-openvswitch v0.0.0-20240130171624-c0f7d42efe24 github.com/docker/docker v27.5.1+incompatible github.com/emicklei/go-restful/v3 v3.12.1 github.com/evanphx/json-patch/v5 v5.9.11 
@@ -42,6 +43,7 @@ require ( github.com/stretchr/testify v1.10.0 github.com/vishvananda/netlink v1.3.1-0.20240905180732-b1ce50cfa9be go.uber.org/mock v0.5.0 + go.universe.tf/metallb v0.14.9 golang.org/x/mod v0.23.0 golang.org/x/sys v0.30.0 golang.org/x/time v0.10.0 diff --git a/go.sum b/go.sum index b6b101542e1..69f9a42b6c0 100644 --- a/go.sum +++ b/go.sum @@ -65,6 +65,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= +github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08 h1:ox2F0PSMlrAAiAdknSRMDrAr8mfxPCfSZolH+/qQnyQ= @@ -109,6 +110,8 @@ github.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMS github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/digitalocean/go-openvswitch v0.0.0-20240130171624-c0f7d42efe24 h1:TmvrZSP2gKAIYHq02BofeGXG3rPIIkNQjie5gN1pQ9M= +github.com/digitalocean/go-openvswitch v0.0.0-20240130171624-c0f7d42efe24/go.mod h1:OAtI/pEmN/EvxlkixiYp2nMQQEtEqzHcpWeE2AW2Bb8= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= @@ -151,6 +154,7 @@ github.com/fatih/set v0.2.1 h1:nn2CaJyknWE/6txyUDGwysr3G5QC6xWB/PtVjPBbeaA= github.com/fatih/set v0.2.1/go.mod h1:+RKtMCH+favT2+3YecHGxcc0b4KyVWA1QWWJUs4E0CI= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -259,7 +263,9 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp 
v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= @@ -330,9 +336,18 @@ github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= +github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw= +github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= +github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= +github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= +github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -416,9 +431,22 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE= github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118/go.mod h1:ZFUnHIVchZ9lJoWoEGUg8Q3M4U8aNNWA3CVSUTkW4og= +github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= +github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8= +github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.3.0/go.mod 
h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= +github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= +github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY= github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4= +github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= github.com/mdlayher/socket v0.2.1/go.mod h1:QLlNPkFR88mRUNQIzRBMfXxwKal8H7u1h3bL1CV+f0E= github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos= github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ= @@ -732,6 +760,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.universe.tf/metallb v0.14.9 h1:FYLII4Rpnju8pLY79H0BIoJVQI4mmEzeO9BJ0X44HX8= +go.universe.tf/metallb v0.14.9/go.mod h1:qUh1zVwYAfp3JLxhZrDH20j55QvYwCkI37QU4gUG3ns= gocv.io/x/gocv v0.39.0 h1:vWHupDE22LebZW6id2mVeT767j1YS8WqGt+ZiV7XJXE= gocv.io/x/gocv v0.39.0/go.mod h1:zYdWMj29WAEznM3Y8NsU3A0TRq/wR/cy75jeUypThqU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -792,14 +822,22 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= 
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -853,26 +891,41 @@ golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/mocks/pkg/ovs/interface.go b/mocks/pkg/ovs/interface.go index 4b27e0d6653..e585db40fa5 100644 --- a/mocks/pkg/ovs/interface.go +++ b/mocks/pkg/ovs/interface.go @@ -1511,6 +1511,20 @@ func (mr *MockLoadBalancerMockRecorder) SetLoadBalancerAffinityTimeout(lbName, t return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLoadBalancerAffinityTimeout", reflect.TypeOf((*MockLoadBalancer)(nil).SetLoadBalancerAffinityTimeout), lbName, timeout) } +// SetLoadBalancerPreferLocalBackend mocks base method. +func (m *MockLoadBalancer) SetLoadBalancerPreferLocalBackend(lbName string, preferLocalBackend bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetLoadBalancerPreferLocalBackend", lbName, preferLocalBackend) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetLoadBalancerPreferLocalBackend indicates an expected call of SetLoadBalancerPreferLocalBackend. +func (mr *MockLoadBalancerMockRecorder) SetLoadBalancerPreferLocalBackend(lbName, preferLocalBackend any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLoadBalancerPreferLocalBackend", reflect.TypeOf((*MockLoadBalancer)(nil).SetLoadBalancerPreferLocalBackend), lbName, preferLocalBackend) +} + // MockLoadBalancerHealthCheck is a mock of LoadBalancerHealthCheck interface. type MockLoadBalancerHealthCheck struct { ctrl *gomock.Controller @@ -4704,6 +4718,20 @@ func (mr *MockNbClientMockRecorder) SetLoadBalancerAffinityTimeout(lbName, timeo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLoadBalancerAffinityTimeout", reflect.TypeOf((*MockNbClient)(nil).SetLoadBalancerAffinityTimeout), lbName, timeout) } +// SetLoadBalancerPreferLocalBackend mocks base method. 
+func (m *MockNbClient) SetLoadBalancerPreferLocalBackend(lbName string, preferLocalBackend bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetLoadBalancerPreferLocalBackend", lbName, preferLocalBackend) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetLoadBalancerPreferLocalBackend indicates an expected call of SetLoadBalancerPreferLocalBackend. +func (mr *MockNbClientMockRecorder) SetLoadBalancerPreferLocalBackend(lbName, preferLocalBackend any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLoadBalancerPreferLocalBackend", reflect.TypeOf((*MockNbClient)(nil).SetLoadBalancerPreferLocalBackend), lbName, preferLocalBackend) +} + // SetLogicalRouterPortHAChassisGroup mocks base method. func (m *MockNbClient) SetLogicalRouterPortHAChassisGroup(lrpName, haChassisGroupName string) error { m.ctrl.T.Helper() diff --git a/pkg/apis/kubeovn/v1/subnet.go b/pkg/apis/kubeovn/v1/subnet.go index 15ab98bdfaf..b6852f21cc2 100644 --- a/pkg/apis/kubeovn/v1/subnet.go +++ b/pkg/apis/kubeovn/v1/subnet.go @@ -80,11 +80,12 @@ type SubnetSpec struct { NatOutgoingPolicyRules []NatOutgoingPolicyRule `json:"natOutgoingPolicyRules,omitempty"` - U2OInterconnectionIP string `json:"u2oInterconnectionIP,omitempty"` - U2OInterconnection bool `json:"u2oInterconnection,omitempty"` - EnableLb *bool `json:"enableLb,omitempty"` - EnableEcmp bool `json:"enableEcmp,omitempty"` - EnableMulticastSnoop bool `json:"enableMulticastSnoop,omitempty"` + U2OInterconnectionIP string `json:"u2oInterconnectionIP,omitempty"` + U2OInterconnection bool `json:"u2oInterconnection,omitempty"` + EnableLb *bool `json:"enableLb,omitempty"` + EnableEcmp bool `json:"enableEcmp,omitempty"` + EnableMulticastSnoop bool `json:"enableMulticastSnoop,omitempty"` + EnableExternalLBAddress bool `json:"enableExternalLBAddress,omitempty"` RouteTable string `json:"routeTable,omitempty"` NamespaceSelectors []metav1.LabelSelector `json:"namespaceSelectors,omitempty"` diff --git a/pkg/controller/config.go b/pkg/controller/config.go index 5e375531d41..a9319a0bc93 100644 --- a/pkg/controller/config.go +++ b/pkg/controller/config.go @@ -93,6 +93,7 @@ type Configuration struct { EnableEcmp bool EnableKeepVMIP bool EnableLbSvc bool + EnableOVNLBPreferLocal bool EnableMetrics bool EnableANP bool EnableOVNIPSec bool @@ -175,6 +176,7 @@ func ParseFlags() (*Configuration, error) { argEnableEcmp = pflag.Bool("enable-ecmp", false, "Enable ecmp route for centralized subnet") argKeepVMIP = pflag.Bool("keep-vm-ip", true, "Whether to keep ip for kubevirt pod when pod is rebuild") argEnableLbSvc = pflag.Bool("enable-lb-svc", false, "Whether to support loadbalancer service") + argEnableOVNLBPreferLocal = pflag.Bool("enable-ovn-lb-prefer-local", false, "Whether to support ovn loadbalancer prefer local") argEnableMetrics = pflag.Bool("enable-metrics", true, "Whether to support metrics query") argEnableANP = pflag.Bool("enable-anp", false, "Enable support for admin network policy and baseline admin network policy") argEnableOVNIPSec = pflag.Bool("enable-ovn-ipsec", false, "Whether to enable ovn ipsec") @@ -271,6 +273,7 @@ func ParseFlags() (*Configuration, error) { GCInterval: *argGCInterval, InspectInterval: *argInspectInterval, EnableLbSvc: *argEnableLbSvc, + EnableOVNLBPreferLocal: *argEnableOVNLBPreferLocal, EnableMetrics: *argEnableMetrics, EnableOVNIPSec: *argEnableOVNIPSec, EnableLiveMigrationOptimize: *argEnableLiveMigrationOptimize, diff --git a/pkg/controller/endpoint.go b/pkg/controller/endpoint.go index 
f9bebd8ac82..1159f0bafc1 100644 --- a/pkg/controller/endpoint.go +++ b/pkg/controller/endpoint.go @@ -75,6 +75,7 @@ func (c *Controller) handleUpdateEndpoint(key string) error { vip, vpcName, subnetName string ok bool ignoreHealthCheck = true + isPreferLocalBackend = false ) if vip, ok = svc.Annotations[util.SwitchLBRuleVipsAnnotation]; ok { @@ -93,6 +94,21 @@ func (c *Controller) handleUpdateEndpoint(key string) error { return nil } + if c.config.EnableLb && c.config.EnableOVNLBPreferLocal { + if svc.Spec.Type == v1.ServiceTypeLoadBalancer && svc.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeLocal { + if len(svc.Status.LoadBalancer.Ingress) > 0 { + for _, ingress := range svc.Status.LoadBalancer.Ingress { + if ingress.IP != "" { + lbVips = append(lbVips, ingress.IP) + } + } + } + isPreferLocalBackend = true + } else if svc.Spec.Type == v1.ServiceTypeClusterIP && svc.Spec.InternalTrafficPolicy != nil && *svc.Spec.InternalTrafficPolicy == v1.ServiceInternalTrafficPolicyLocal { + isPreferLocalBackend = true + } + } + if pods, err = c.podsLister.Pods(namespace).List(labels.Set(svc.Spec.Selector).AsSelector()); err != nil { klog.Errorf("failed to get pods for service %s in namespace %s: %v", name, namespace, err) return err @@ -168,8 +184,12 @@ func (c *Controller) handleUpdateEndpoint(key string) error { } } - ipPortMapping, backends = getIPPortMappingBackend(ep, pods, port, lbVip, checkIP, ignoreHealthCheck) - + if isPreferLocalBackend { + // when preferring local backends, only the LSP-to-IP entries in ip_port_mappings are needed, so use the masquerade check IP + checkIP = util.MasqueradeCheckIP + } + isGenIPPortMapping := !ignoreHealthCheck || isPreferLocalBackend + ipPortMapping, backends = getIPPortMappingBackend(ep, pods, port, lbVip, checkIP, isGenIPPortMapping) // for performance reason delete lb with no backends if len(backends) != 0 { vip = util.JoinHostPort(lbVip, port.Port) @@ -178,6 +198,14 @@ func (c *Controller) handleUpdateEndpoint(key string) error { klog.Errorf("failed to add vip %s with backends %s to LB %s: %v", lbVip, backends, lb, err) return err } + + if isPreferLocalBackend && len(ipPortMapping) != 0 { + if err = c.OVNNbClient.LoadBalancerUpdateIPPortMapping(lb, vip, ipPortMapping); err != nil { + klog.Errorf("failed to update ip port mapping %v for vip %s to LB %s: %v", ipPortMapping, vip, lb, err) + return err + } + } + if !ignoreHealthCheck && len(ipPortMapping) != 0 { klog.Infof("add health check ip port mapping %v to LB %s", ipPortMapping, lb) if err = c.OVNNbClient.LoadBalancerAddHealthCheck(lb, vip, ignoreHealthCheck, ipPortMapping, externals); err != nil { @@ -186,6 +214,7 @@ func (c *Controller) handleUpdateEndpoint(key string) error { } } } else { + vip = util.JoinHostPort(lbVip, port.Port) klog.V(3).Infof("delete vip endpoint %s from LB %s", vip, lb) if err = c.OVNNbClient.LoadBalancerDeleteVip(lb, vip, ignoreHealthCheck); err != nil { klog.Errorf("failed to delete vip endpoint %s from LB %s: %v", vip, lb, err) @@ -197,6 +226,17 @@ func (c *Controller) handleUpdateEndpoint(key string) error { klog.Errorf("failed to delete vip %s from LB %s: %v", vip, oldLb, err) return err } + + if c.config.EnableOVNLBPreferLocal { + if err := c.OVNNbClient.LoadBalancerDeleteIPPortMapping(lb, vip); err != nil { + klog.Errorf("failed to delete ip port mapping for vip %s from LB %s: %v", vip, lb, err) + return err + } + if err := c.OVNNbClient.LoadBalancerDeleteIPPortMapping(oldLb, vip); err != nil { + klog.Errorf("failed to delete ip port mapping for vip %s from LB %s: %v", vip, oldLb, err) + return err + } + }
} } } @@ -321,7 +361,7 @@ func (c *Controller) getHealthCheckVip(subnetName, lbVip string) (string, error) return checkIP, nil } -func getIPPortMappingBackend(endpoints *v1.Endpoints, pods []*v1.Pod, servicePort v1.ServicePort, serviceIP, checkVip string, ignoreHealthCheck bool) (map[string]string, []string) { +func getIPPortMappingBackend(endpoints *v1.Endpoints, pods []*v1.Pod, servicePort v1.ServicePort, serviceIP, checkVip string, isGenIPPortMapping bool) (map[string]string, []string) { var ( ipPortMapping = map[string]string{} backends = []string{} @@ -341,7 +381,7 @@ func getIPPortMappingBackend(endpoints *v1.Endpoints, pods []*v1.Pod, servicePor } for _, address := range subset.Addresses { - if !ignoreHealthCheck && address.TargetRef.Name != "" { + if isGenIPPortMapping && address.TargetRef.Name != "" { ipName := fmt.Sprintf("%s.%s", address.TargetRef.Name, endpoints.Namespace) ipPortMapping[address.IP] = fmt.Sprintf(util.HealthCheckNamedVipTemplate, ipName, checkVip) } diff --git a/pkg/controller/init.go b/pkg/controller/init.go index 50170d97c0f..121549acaf2 100644 --- a/pkg/controller/init.go +++ b/pkg/controller/init.go @@ -251,6 +251,12 @@ func (c *Controller) initLB(name, protocol string, sessionAffinity bool) error { } } + err = c.OVNNbClient.SetLoadBalancerPreferLocalBackend(name, c.config.EnableOVNLBPreferLocal) + if err != nil { + klog.Errorf("failed to set prefer local backend for load balancer %s: %v", name, err) + return err + } + return nil } diff --git a/pkg/controller/service.go b/pkg/controller/service.go index e672d3dfafe..5ed35eea071 100644 --- a/pkg/controller/service.go +++ b/pkg/controller/service.go @@ -63,7 +63,7 @@ func (c *Controller) enqueueDeleteService(obj interface{}) { klog.Infof("enqueue delete service %s/%s", svc.Namespace, svc.Name) vip, ok := svc.Annotations[util.SwitchLBRuleVipsAnnotation] - if ok || svc.Spec.ClusterIP != v1.ClusterIPNone && svc.Spec.ClusterIP != "" { + if ok || svc.Spec.ClusterIP != v1.ClusterIPNone && svc.Spec.ClusterIP != "" || svc.Annotations[util.ServiceExternalIPFromSubnetAnnotation] != "" { if c.config.EnableNP { netpols, err := c.svcMatchNetworkPolicies(svc) if err != nil { @@ -81,6 +81,12 @@ func (c *Controller) enqueueDeleteService(obj interface{}) { ips = strings.Split(vip, ",") } + if svc.Annotations[util.ServiceExternalIPFromSubnetAnnotation] != "" { + for _, ingress := range svc.Status.LoadBalancer.Ingress { + ips = append(ips, ingress.IP) + } + } + for _, port := range svc.Spec.Ports { vpcSvc := &vpcService{ Protocol: port.Protocol, @@ -177,6 +183,13 @@ func (c *Controller) handleDeleteService(service *vpcService) error { klog.Errorf("failed to delete vip %s from LB %s: %v", vip, lb, err) return err } + + if c.config.EnableOVNLBPreferLocal { + if err = c.OVNNbClient.LoadBalancerDeleteIPPortMapping(lb, vip); err != nil { + klog.Errorf("failed to delete ip port mapping for vip %s from LB %s: %v", vip, lb, err) + return err + } + } } } @@ -325,6 +338,10 @@ func (c *Controller) handleUpdateService(svcObject *updateSvcObject) error { return err } + if err := c.checkServiceLBIPBelongToSubnet(svc); err != nil { + return err + } + if needUpdateEndpointQueue { c.addOrUpdateEndpointQueue.Add(key) } @@ -492,6 +509,11 @@ func getVipIps(svc *v1.Service) []string { ips = strings.Split(vip, ",") } else { ips = util.ServiceClusterIPs(*svc) + if svc.Annotations[util.ServiceExternalIPFromSubnetAnnotation] != "" { + for _, ingress := range svc.Status.LoadBalancer.Ingress { + ips = append(ips, ingress.IP) + } + } } return ips } @@ 
-512,3 +534,33 @@ func diffSvcPorts(oldPorts, newPorts []v1.ServicePort) (toDel []v1.ServicePort) return toDel } + +func (c *Controller) checkServiceLBIPBelongToSubnet(svc *v1.Service) error { + subnets, err := c.subnetsLister.List(labels.Everything()) + if err != nil { + klog.Errorf("failed to list subnets: %v", err) + return err + } + + isServiceExternalIPFromSubnet := false + for _, subnet := range subnets { + for _, ingress := range svc.Status.LoadBalancer.Ingress { + if util.CIDRContainIP(subnet.Spec.CIDRBlock, ingress.IP) { + svc.Annotations[util.ServiceExternalIPFromSubnetAnnotation] = subnet.Name + isServiceExternalIPFromSubnet = true + break + } + } + } + + if !isServiceExternalIPFromSubnet { + delete(svc.Annotations, util.ServiceExternalIPFromSubnetAnnotation) + } + klog.Infof("Service %s/%s external IP belongs to subnet: %v", svc.Namespace, svc.Name, isServiceExternalIPFromSubnet) + if _, err = c.config.KubeClient.CoreV1().Services(svc.Namespace).Update(context.TODO(), svc, metav1.UpdateOptions{}); err != nil { + klog.Errorf("failed to update service %s/%s: %v", svc.Namespace, svc.Name, err) + return err + } + + return nil +} diff --git a/pkg/daemon/controller.go b/pkg/daemon/controller.go index e16042a0385..4f254d9db29 100644 --- a/pkg/daemon/controller.go +++ b/pkg/daemon/controller.go @@ -58,6 +58,10 @@ type Controller struct { nodesLister listerv1.NodeLister nodesSynced cache.InformerSynced + servicesLister listerv1.ServiceLister + servicesSynced cache.InformerSynced + serviceQueue workqueue.TypedRateLimitingInterface[*serviceEvent] + recorder record.EventRecorder protocol string @@ -82,13 +86,13 @@ func NewController(config *Configuration, stopCh <-chan struct{}, podInformerFac eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: config.KubeClient.CoreV1().Events(v1.NamespaceAll)}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: config.NodeName}) - providerNetworkInformer := kubeovnInformerFactory.Kubeovn().V1().ProviderNetworks() vlanInformer := kubeovnInformerFactory.Kubeovn().V1().Vlans() subnetInformer := kubeovnInformerFactory.Kubeovn().V1().Subnets() ovnEipInformer := kubeovnInformerFactory.Kubeovn().V1().OvnEips() podInformer := podInformerFactory.Core().V1().Pods() nodeInformer := nodeInformerFactory.Core().V1().Nodes() + servicesInformer := nodeInformerFactory.Core().V1().Services() controller := &Controller{ config: config, @@ -115,6 +119,10 @@ func NewController(config *Configuration, stopCh <-chan struct{}, podInformerFac nodesLister: nodeInformer.Lister(), nodesSynced: nodeInformer.Informer().HasSynced, + servicesLister: servicesInformer.Lister(), + servicesSynced: servicesInformer.Informer().HasSynced, + serviceQueue: newTypedRateLimitingQueue[*serviceEvent]("Service", nil), + recorder: recorder, k8sExec: k8sexec.New(), } @@ -135,7 +143,7 @@ func NewController(config *Configuration, stopCh <-chan struct{}, podInformerFac if !cache.WaitForCacheSync(stopCh, controller.providerNetworksSynced, controller.vlansSynced, controller.subnetsSynced, - controller.podsSynced, controller.nodesSynced) { + controller.podsSynced, controller.nodesSynced, controller.servicesSynced) { util.LogFatalAndExit(nil, "failed to wait for caches to sync") } @@ -158,6 +166,14 @@ func NewController(config *Configuration, stopCh <-chan struct{}, podInformerFac }); err != nil { return nil, err } + if _, err = servicesInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: 
controller.enqueueAddService, + DeleteFunc: controller.enqueueDeleteService, + UpdateFunc: controller.enqueueUpdateService, + }); err != nil { + util.LogFatalAndExit(err, "failed to add service event handler") + } + if _, err = podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: controller.enqueuePod, }); err != nil { @@ -420,6 +436,10 @@ type subnetEvent struct { oldObj, newObj interface{} } +type serviceEvent struct { + oldObj, newObj interface{} +} + func (c *Controller) enqueueAddSubnet(obj interface{}) { c.subnetQueue.Add(&subnetEvent{newObj: obj}) } @@ -437,6 +457,23 @@ func (c *Controller) runSubnetWorker() { } } +func (c *Controller) enqueueAddService(obj interface{}) { + c.serviceQueue.Add(&serviceEvent{newObj: obj}) +} + +func (c *Controller) enqueueUpdateService(oldObj, newObj interface{}) { + c.serviceQueue.Add(&serviceEvent{oldObj: oldObj, newObj: newObj}) +} + +func (c *Controller) enqueueDeleteService(obj interface{}) { + c.serviceQueue.Add(&serviceEvent{oldObj: obj}) +} + +func (c *Controller) runAddOrUpdateServiceWorker() { + for c.processNextServiceWorkItem() { + } +} + func (c *Controller) processNextSubnetWorkItem() bool { obj, shutdown := c.subnetQueue.Get() if shutdown { @@ -459,6 +496,28 @@ func (c *Controller) processNextSubnetWorkItem() bool { return true } +func (c *Controller) processNextServiceWorkItem() bool { + obj, shutdown := c.serviceQueue.Get() + if shutdown { + return false + } + + err := func(obj *serviceEvent) error { + defer c.serviceQueue.Done(obj) + if err := c.reconcileServices(obj); err != nil { + c.serviceQueue.AddRateLimited(obj) + return fmt.Errorf("error syncing %v: %w, requeuing", obj, err) + } + c.serviceQueue.Forget(obj) + return nil + }(obj) + if err != nil { + utilruntime.HandleError(err) + return true + } + return true +} + func (c *Controller) enqueuePod(oldObj, newObj interface{}) { oldPod := oldObj.(*v1.Pod) newPod := newObj.(*v1.Pod) @@ -555,6 +614,7 @@ func (c *Controller) Run(stopCh <-chan struct{}) { defer c.addOrUpdateProviderNetworkQueue.ShutDown() defer c.deleteProviderNetworkQueue.ShutDown() defer c.subnetQueue.ShutDown() + defer c.serviceQueue.ShutDown() defer c.podQueue.ShutDown() go wait.Until(ovs.CleanLostInterface, time.Minute, stopCh) @@ -570,6 +630,7 @@ func (c *Controller) Run(stopCh <-chan struct{}) { go wait.Until(c.loopOvnExt0Check, 5*time.Second, stopCh) go wait.Until(c.loopTunnelCheck, 5*time.Second, stopCh) go wait.Until(c.runAddOrUpdateProviderNetworkWorker, time.Second, stopCh) + go wait.Until(c.runAddOrUpdateServiceWorker, time.Second, stopCh) go wait.Until(c.runDeleteProviderNetworkWorker, time.Second, stopCh) go wait.Until(c.runSubnetWorker, time.Second, stopCh) go wait.Until(c.runPodWorker, time.Second, stopCh) diff --git a/pkg/daemon/controller_linux.go b/pkg/daemon/controller_linux.go index 3576d7ace0a..04122457350 100644 --- a/pkg/daemon/controller_linux.go +++ b/pkg/daemon/controller_linux.go @@ -12,6 +12,7 @@ import ( "strings" "syscall" + ovsutil "github.com/digitalocean/go-openvswitch/ovs" nadutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils" "github.com/kubeovn/felix/ipsets" "github.com/kubeovn/go-iptables/iptables" @@ -45,7 +46,17 @@ type ControllerRuntime struct { ipsets map[string]*ipsets.IPSets gwCounters map[string]*util.GwIPtableCounters - nmSyncer *networkManagerSyncer + nmSyncer *networkManagerSyncer + ovsClient *ovsutil.Client +} + +type LbServiceRules struct { + IP string + Port uint16 + Protocol string + BridgeName
string + DstMac string + UnderlayNic string } func evalCommandSymlinks(cmd string) (string, error) { @@ -89,6 +100,7 @@ func (c *Controller) initRuntime() error { c.gwCounters = make(map[string]*util.GwIPtableCounters) c.k8siptables = make(map[string]k8siptables.Interface) c.k8sipsets = k8sipset.New(c.k8sExec) + c.ovsClient = ovsutil.New() if c.protocol == kubeovnv1.ProtocolIPv4 || c.protocol == kubeovnv1.ProtocolDual { ipt, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) @@ -143,6 +155,57 @@ func (c *Controller) initRuntime() error { return nil } +func (c *Controller) handleEnableExternalLBAddressChange(oldSubnet, newSubnet *kubeovnv1.Subnet) error { + var subnetName string + var action string + + switch { + case oldSubnet != nil && newSubnet != nil: + subnetName = oldSubnet.Name + if oldSubnet.Spec.EnableExternalLBAddress != newSubnet.Spec.EnableExternalLBAddress { + klog.Infof("EnableExternalLBAddress changed for subnet %s", newSubnet.Name) + if newSubnet.Spec.EnableExternalLBAddress { + action = "add" + } else { + action = "remove" + } + } + case oldSubnet != nil: + subnetName = oldSubnet.Name + if oldSubnet.Spec.EnableExternalLBAddress { + klog.Infof("EnableExternalLBAddress removed for subnet %s", oldSubnet.Name) + action = "remove" + } + case newSubnet != nil: + subnetName = newSubnet.Name + if newSubnet.Spec.EnableExternalLBAddress { + klog.Infof("EnableExternalLBAddress added for subnet %s", newSubnet.Name) + action = "add" + } + } + + if action != "" { + services, err := c.servicesLister.List(labels.Everything()) + if err != nil { + klog.Errorf("failed to list services: %v", err) + return err + } + + for _, svc := range services { + if svc.Annotations[util.ServiceExternalIPFromSubnetAnnotation] == subnetName { + klog.Infof("Service %s/%s has external LB address pool annotation from subnet %s, action: %s", svc.Namespace, svc.Name, subnetName, action) + switch action { + case "add": + c.serviceQueue.Add(&serviceEvent{newObj: svc}) + case "remove": + c.serviceQueue.Add(&serviceEvent{oldObj: svc}) + } + } + } + } + return nil +} + func (c *Controller) reconcileRouters(event *subnetEvent) error { subnets, err := c.subnetsLister.List(labels.Everything()) if err != nil { @@ -166,6 +229,10 @@ func (c *Controller) reconcileRouters(event *subnetEvent) error { } } + if err = c.handleEnableExternalLBAddressChange(oldSubnet, newSubnet); err != nil { + klog.Errorf("failed to handle enable external lb address change: %v", err) + return err + } // handle policy routing rulesToAdd, rulesToDel, routesToAdd, routesToDel, err := c.diffPolicyRouting(oldSubnet, newSubnet) if err != nil { @@ -285,6 +352,172 @@ func (c *Controller) reconcileRouters(event *subnetEvent) error { return nil } +func genLBServiceRules(service *v1.Service, bridgeName, underlayNic string) []LbServiceRules { + var lbServiceRules []LbServiceRules + for _, ingress := range service.Status.LoadBalancer.Ingress { + for _, port := range service.Spec.Ports { + lbServiceRules = append(lbServiceRules, LbServiceRules{ + IP: ingress.IP, + Port: uint16(port.Port), // #nosec G115 + Protocol: string(port.Protocol), + DstMac: util.MasqueradeExternalLBAccessMac, + UnderlayNic: underlayNic, + BridgeName: bridgeName, + }) + } + } + return lbServiceRules +} + +func (c *Controller) diffExternalLBServiceRules(oldService, newService *v1.Service, isSubnetExternalLBEnabled bool) (lbServiceRulesToAdd, lbServiceRulesToDel []LbServiceRules, err error) { + var oldlbServiceRules, newlbServiceRules []LbServiceRules + + if oldService != nil && 
oldService.Annotations[util.ServiceExternalIPFromSubnetAnnotation] != "" { + oldBridgeName, underlayNic, err := c.getExtInfoBySubnet(oldService.Annotations[util.ServiceExternalIPFromSubnetAnnotation]) + if err != nil { + klog.Errorf("failed to get provider network by subnet %s: %v", oldService.Annotations[util.ServiceExternalIPFromSubnetAnnotation], err) + return nil, nil, err + } + + oldlbServiceRules = genLBServiceRules(oldService, oldBridgeName, underlayNic) + } + + if isSubnetExternalLBEnabled && newService != nil && newService.Annotations[util.ServiceExternalIPFromSubnetAnnotation] != "" { + newBridgeName, underlayNic, err := c.getExtInfoBySubnet(newService.Annotations[util.ServiceExternalIPFromSubnetAnnotation]) + if err != nil { + klog.Errorf("failed to get provider network by subnet %s: %v", newService.Annotations[util.ServiceExternalIPFromSubnetAnnotation], err) + return nil, nil, err + } + newlbServiceRules = genLBServiceRules(newService, newBridgeName, underlayNic) + } + + for _, oldRule := range oldlbServiceRules { + found := false + for _, newRule := range newlbServiceRules { + if oldRule == newRule { + found = true + break + } + } + if !found { + lbServiceRulesToDel = append(lbServiceRulesToDel, oldRule) + } + } + + for _, newRule := range newlbServiceRules { + found := false + for _, oldRule := range oldlbServiceRules { + if newRule == oldRule { + found = true + break + } + } + if !found { + lbServiceRulesToAdd = append(lbServiceRulesToAdd, newRule) + } + } + + return lbServiceRulesToAdd, lbServiceRulesToDel, nil +} + +func (c *Controller) getExtInfoBySubnet(subnetName string) (string, string, error) { + subnet, err := c.subnetsLister.Get(subnetName) + if err != nil { + klog.Errorf("failed to get subnet %s: %v", subnetName, err) + return "", "", err + } + + vlanName := subnet.Spec.Vlan + if vlanName == "" { + return "", "", errors.New("vlan not specified in subnet") + } + + vlan, err := c.vlansLister.Get(vlanName) + if err != nil { + klog.Errorf("failed to get vlan %s: %v", vlanName, err) + return "", "", err + } + + providerNetworkName := vlan.Spec.Provider + if providerNetworkName == "" { + return "", "", errors.New("provider network not specified in vlan") + } + + pn, err := c.providerNetworksLister.Get(providerNetworkName) + if err != nil { + klog.Errorf("failed to get provider network %s: %v", providerNetworkName, err) + return "", "", err + } + + underlayNic := pn.Spec.DefaultInterface + for _, item := range pn.Spec.CustomInterfaces { + if slices.Contains(item.Nodes, c.config.NodeName) { + underlayNic = item.Interface + break + } + } + klog.Infof("Provider network: %s, Underlay NIC: %s", providerNetworkName, underlayNic) + return util.ExternalBridgeName(providerNetworkName), underlayNic, nil +} + +func (c *Controller) reconcileServices(event *serviceEvent) error { + if event == nil { + return nil + } + var ok bool + var oldService, newService *v1.Service + if event.oldObj != nil { + if oldService, ok = event.oldObj.(*v1.Service); !ok { + klog.Errorf("expected old service in serviceEvent but got %#v", event.oldObj) + return nil + } + } + + if event.newObj != nil { + if newService, ok = event.newObj.(*v1.Service); !ok { + klog.Errorf("expected new service in serviceEvent but got %#v", event.newObj) + return nil + } + } + + // check is the lb service IP related subnet's EnableExternalLBAddress + isSubnetExternalLBEnabled := false + if newService != nil && newService.Annotations[util.ServiceExternalIPFromSubnetAnnotation] != "" { + subnet, err := 
c.subnetsLister.Get(newService.Annotations[util.ServiceExternalIPFromSubnetAnnotation]) + if err != nil { + klog.Errorf("failed to get subnet %s: %v", newService.Annotations[util.ServiceExternalIPFromSubnetAnnotation], err) + return err + } + isSubnetExternalLBEnabled = subnet.Spec.EnableExternalLBAddress + } + + lbServiceRulesToAdd, lbServiceRulesToDel, err := c.diffExternalLBServiceRules(oldService, newService, isSubnetExternalLBEnabled) + if err != nil { + klog.Errorf("failed to diff external LB service rules: %v", err) + return err + } + + if len(lbServiceRulesToAdd) > 0 { + for _, rule := range lbServiceRulesToAdd { + klog.Infof("Adding LB service rule: %+v", rule) + if err := ovs.AddOrUpdateUnderlaySubnetSvcLocalOpenFlow(c.ovsClient, rule.BridgeName, rule.IP, rule.Protocol, rule.DstMac, rule.UnderlayNic, rule.Port); err != nil { + klog.Errorf("failed to add or update underlay subnet svc local openflow: %v", err) + } + } + } + + if len(lbServiceRulesToDel) > 0 { + for _, rule := range lbServiceRulesToDel { + klog.Infof("Deleting LB service rule: %+v", rule) + if err := ovs.DeleteUnderlaySubnetSvcLocalOpenFlow(c.ovsClient, rule.BridgeName, rule.IP, rule.Protocol, rule.UnderlayNic, rule.Port); err != nil { + klog.Errorf("failed to delete underlay subnet svc local openflow: %v", err) + } + } + } + + return nil +} + func getNicExistRoutes(nic netlink.Link, gateway string) ([]netlink.Route, error) { var routes, existRoutes []netlink.Route var err error diff --git a/pkg/daemon/controller_windows.go b/pkg/daemon/controller_windows.go index 4aaebbb7f9d..a100538761e 100644 --- a/pkg/daemon/controller_windows.go +++ b/pkg/daemon/controller_windows.go @@ -25,6 +25,10 @@ func (c *Controller) initRuntime() error { return nil } +func (c *Controller) reconcileServices(_ *serviceEvent) error { + return nil +} + func (c *Controller) reconcileRouters(_ *subnetEvent) error { klog.Info("reconcile routes") node, err := c.nodesLister.Get(c.config.NodeName) diff --git a/pkg/ovs/interface.go b/pkg/ovs/interface.go index ece421d73b4..99c5b89640f 100644 --- a/pkg/ovs/interface.go +++ b/pkg/ovs/interface.go @@ -123,6 +123,7 @@ type LoadBalancer interface { LoadBalancerAddHealthCheck(lbName, vip string, ignoreHealthCheck bool, ipPortMapping, externals map[string]string) error LoadBalancerDeleteHealthCheck(lbName, uuid string) error SetLoadBalancerAffinityTimeout(lbName string, timeout int) error + SetLoadBalancerPreferLocalBackend(lbName string, preferLocalBackend bool) error DeleteLoadBalancers(filter func(lb *ovnnb.LoadBalancer) bool) error GetLoadBalancer(lbName string, ignoreNotFound bool) (*ovnnb.LoadBalancer, error) ListLoadBalancers(filter func(lb *ovnnb.LoadBalancer) bool) ([]ovnnb.LoadBalancer, error) diff --git a/pkg/ovs/ovn-nb-load_balancer.go b/pkg/ovs/ovn-nb-load_balancer.go index 50ae4d16a26..ea21e4708ba 100644 --- a/pkg/ovs/ovn-nb-load_balancer.go +++ b/pkg/ovs/ovn-nb-load_balancer.go @@ -228,6 +228,43 @@ func (c *OVNNbClient) SetLoadBalancerAffinityTimeout(lbName string, timeout int) return nil } +// SetLoadBalancerPreferLocalBackend sets the LB option prefer_local_backend, which controls whether local backends are preferred +func (c *OVNNbClient) SetLoadBalancerPreferLocalBackend(lbName string, preferLocalBackend bool) error { + var ( + options map[string]string + lb *ovnnb.LoadBalancer + value string + err error + ) + + if lb, err = c.GetLoadBalancer(lbName, false); err != nil { + klog.Errorf("failed to get lb: %v", err) + return err + } + + if preferLocalBackend { + value = "true" + } else { + value = "false" + } + if len(lb.Options) != 0 &&
lb.Options["prefer_local_backend"] == value { + return nil + } + + options = make(map[string]string, len(lb.Options)+1) + for k, v := range lb.Options { + options[k] = v + } + options["prefer_local_backend"] = value + + lb.Options = options + if err = c.UpdateLoadBalancer(lb, &lb.Options); err != nil { + klog.Error(err) + return fmt.Errorf("failed to set prefer local backend of lb %s to %s: %w", lbName, value, err) + } + return nil +} + // DeleteLoadBalancers delete several loadbalancer once func (c *OVNNbClient) DeleteLoadBalancers(filter func(lb *ovnnb.LoadBalancer) bool) error { var ( diff --git a/pkg/ovs/ovs-ofctl.go b/pkg/ovs/ovs-ofctl.go new file mode 100644 index 00000000000..b9c670171d4 --- /dev/null +++ b/pkg/ovs/ovs-ofctl.go @@ -0,0 +1,144 @@ +package ovs + +import ( + "fmt" + "net" + + ovs "github.com/digitalocean/go-openvswitch/ovs" + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" + + kubeovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/util" +) + +func AddOrUpdateUnderlaySubnetSvcLocalOpenFlow(client *ovs.Client, bridgeName, lbServiceIP, protocol, dstMAC, underlayNic string, lbServicePort uint16) error { + isIPv6 := util.CheckProtocol(lbServiceIP) == kubeovnv1.ProtocolIPv6 + var inPortID, outPortID int + var lrpMacAddr net.HardwareAddr + var err error + var cookie uint64 + + portInfo, err := client.OpenFlow.DumpPort(bridgeName, underlayNic) + if err != nil { + klog.Errorf("failed to dump bridge %s port %s: %v", bridgeName, underlayNic, err) + return err + } + inPortID = portInfo.PortID + klog.V(3).Infof("underlayNic %s's portID is %d", underlayNic, inPortID) + + portInfo, err = client.OpenFlow.DumpPort(bridgeName, "patch-localnet.") + if err != nil { + klog.Errorf("failed to dump bridge %s port %s: %v", bridgeName, "patch-localnet.", err) + return err + } + outPortID = portInfo.PortID + + lrpMacAddr, err = net.ParseMAC(dstMAC) + if err != nil { + klog.Errorf("failed to parse MAC address %s: %v", dstMAC, err) + return err + } + + cookie = util.UnderlaySvcLocalOpenFlowCookieV4 + if isIPv6 { + cookie = util.UnderlaySvcLocalOpenFlowCookieV6 + } + + var protocolType ovs.Protocol + switch protocol { + case string(v1.ProtocolTCP): + protocolType = ovs.ProtocolTCPv4 + if isIPv6 { + protocolType = ovs.ProtocolTCPv6 + } + case string(v1.ProtocolUDP): + protocolType = ovs.ProtocolUDPv4 + if isIPv6 { + protocolType = ovs.ProtocolUDPv6 + } + default: + return fmt.Errorf("unsupported protocol %s", protocol) + } + + flow := &ovs.Flow{ + Priority: util.UnderlaySvcLocalOpenFlowPriority, + Protocol: protocolType, + InPort: inPortID, + Actions: []ovs.Action{ovs.ModDataLinkDestination(lrpMacAddr), ovs.Output(outPortID)}, + Matches: []ovs.Match{ + ovs.NetworkDestination(lbServiceIP), + ovs.TransportDestinationMaskedPort(lbServicePort, 0xffff), + }, + Cookie: cookie, + } + + klog.Infof("add bridge %s svc local policy openflow rule", bridgeName) + err = client.OpenFlow.AddFlow(bridgeName, flow) + if err != nil { + return err + } + + return nil +} + +func DeleteUnderlaySubnetSvcLocalOpenFlow(client *ovs.Client, bridgeName, lbServiceIP, protocol, underlayNic string, lbServicePort uint16) error { + isIPv6 := util.CheckProtocol(lbServiceIP) == kubeovnv1.ProtocolIPv6 + var inPortID int + var cookie uint64 + + cookie = util.UnderlaySvcLocalOpenFlowCookieV4 + if isIPv6 { + cookie = util.UnderlaySvcLocalOpenFlowCookieV6 + } + + var protocolType ovs.Protocol + switch protocol { + case string(v1.ProtocolTCP): + protocolType = ovs.ProtocolTCPv4 + if isIPv6 { + protocolType 
= ovs.ProtocolTCPv6 + } + case string(v1.ProtocolUDP): + protocolType = ovs.ProtocolUDPv4 + if isIPv6 { + protocolType = ovs.ProtocolUDPv6 + } + default: + return fmt.Errorf("unsupported protocol %s", protocol) + } + + portInfo, err := client.OpenFlow.DumpPort(bridgeName, underlayNic) + if err != nil { + klog.Errorf("failed to dump bridge %s port %s: %v", bridgeName, underlayNic, err) + return err + } + inPortID = portInfo.PortID + klog.V(3).Infof("underlayNic %s's portID is %d", underlayNic, inPortID) + + match := &ovs.MatchFlow{ + Protocol: protocolType, + InPort: inPortID, + Matches: []ovs.Match{ + ovs.NetworkDestination(lbServiceIP), + ovs.TransportDestinationMaskedPort(lbServicePort, 0xffff), + }, + Cookie: cookie, + } + + oldflows, err := client.OpenFlow.DumpFlowsWithFlowArgs(bridgeName, match) + if err != nil { + klog.Errorf("failed to dump flows: %v", err) + return err + } + + if len(oldflows) > 0 { + klog.Infof("remove bridge %s old svc local policy openflow rule", bridgeName) + err = client.OpenFlow.DelFlows(bridgeName, match) + if err != nil { + klog.Errorf("failed to remove old svc local policy openflow rule: %v", err) + return err + } + } + return nil +} diff --git a/pkg/util/const.go b/pkg/util/const.go index d9aa017fd82..71e472df825 100644 --- a/pkg/util/const.go +++ b/pkg/util/const.go @@ -117,6 +117,8 @@ const ( VpcLastName = "ovn.kubernetes.io/last_vpc_name" VpcLastPolicies = "ovn.kubernetes.io/last_policies" + ServiceExternalIPFromSubnetAnnotation = "ovn.kubernetes.io/service_external_ip_from_subnet" + ProtocolTCP = "tcp" ProtocolUDP = "udp" ProtocolSCTP = "sctp" @@ -322,4 +324,11 @@ const ( DefaultOVSCACertKeyPath = "/var/lib/openvswitch/pki/switchca/private/cakey.pem" SignerName = "kubeovn.io/signer" + + UnderlaySvcLocalOpenFlowPriority = 10000 + UnderlaySvcLocalOpenFlowCookieV4 = 0x1000 + UnderlaySvcLocalOpenFlowCookieV6 = 0x1001 + + MasqueradeExternalLBAccessMac = "00:00:00:01:00:01" + MasqueradeCheckIP = "0.0.0.0" ) diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index b2923a0853a..38236d1b1ff 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -8,6 +8,7 @@ import ( "time" nad "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned" + "github.com/onsi/ginkgo/v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubernetes/test/e2e/framework" @@ -15,8 +16,6 @@ import ( admissionapi "k8s.io/pod-security-admission/api" "kubevirt.io/client-go/kubecli" - "github.com/onsi/ginkgo/v2" - kubeovncs "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned" "github.com/kubeovn/kube-ovn/pkg/util" ) @@ -50,6 +49,7 @@ type Framework struct { *framework.Framework KubeOVNClientSet kubeovncs.Interface KubeVirtClientSet kubecli.KubevirtClient + MetallbClientSet *MetallbClientSet AttachNetClient nad.Interface // master/release-1.10/... 
ClusterVersion string @@ -211,6 +211,17 @@ func (f *Framework) BeforeEach() { ExpectNoError(err) } + if f.MetallbClientSet == nil { + ginkgo.By("Creating a MetalLB client") + config, err := framework.LoadConfig() + ExpectNoError(err) + + config.QPS = f.Options.ClientQPS + config.Burst = f.Options.ClientBurst + f.MetallbClientSet, err = NewMetallbClientSet(config) + ExpectNoError(err) + } + if f.KubeOVNImage == "" && f.ClientSet != nil { framework.Logf("Getting Kube-OVN image") f.KubeOVNImage = GetKubeOvnImage(f.ClientSet) diff --git a/test/e2e/framework/metallb.go b/test/e2e/framework/metallb.go new file mode 100644 index 00000000000..526b1e93222 --- /dev/null +++ b/test/e2e/framework/metallb.go @@ -0,0 +1,103 @@ +package framework + +import ( + "context" + + metallbv1beta1 "go.universe.tf/metallb/api/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" +) + +type MetallbClientSet struct { + client *rest.RESTClient +} + +func NewMetallbClientSet(config *rest.Config) (*MetallbClientSet, error) { + if err := metallbv1beta1.AddToScheme(scheme.Scheme); err != nil { + return nil, err + } + config.ContentConfig.GroupVersion = &metallbv1beta1.GroupVersion + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + client, err := rest.RESTClientFor(config) + if err != nil { + return nil, err + } + + return &MetallbClientSet{client: client}, nil +} + +func (c *MetallbClientSet) CreateIPAddressPool(pool *metallbv1beta1.IPAddressPool) (*metallbv1beta1.IPAddressPool, error) { + result := &metallbv1beta1.IPAddressPool{} + err := c.client.Post(). + Namespace("metallb-system"). + Resource("ipaddresspools"). + Body(pool). + Do(context.TODO()). + Into(result) + return result, err +} + +func (c *MetallbClientSet) CreateL2Advertisement(advertisement *metallbv1beta1.L2Advertisement) (*metallbv1beta1.L2Advertisement, error) { + result := &metallbv1beta1.L2Advertisement{} + err := c.client.Post(). + Namespace("metallb-system"). + Resource("l2advertisements"). + Body(advertisement). + Do(context.TODO()). + Into(result) + return result, err +} + +func (c *MetallbClientSet) MakeL2Advertisement(name string, ipAddressPools []string) *metallbv1beta1.L2Advertisement { + return &metallbv1beta1.L2Advertisement{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: metallbv1beta1.L2AdvertisementSpec{ + IPAddressPools: ipAddressPools, + }, + } +} + +func (c *MetallbClientSet) MakeIPAddressPool(name string, addresses []string, autoAssign bool) *metallbv1beta1.IPAddressPool { + return &metallbv1beta1.IPAddressPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: metallbv1beta1.IPAddressPoolSpec{ + Addresses: addresses, + AutoAssign: &autoAssign, + }, + } +} + +func (c *MetallbClientSet) DeleteIPAddressPool(name string) error { + return c.client.Delete(). + Namespace("metallb-system"). + Resource("ipaddresspools"). + Name(name). + Do(context.TODO()). + Error() +} + +func (c *MetallbClientSet) DeleteL2Advertisement(name string) error { + return c.client.Delete(). + Namespace("metallb-system"). + Resource("l2advertisements"). + Name(name). + Do(context.TODO()). + Error() +} + +func (c *MetallbClientSet) ListServiceL2Statuses() (*metallbv1beta1.ServiceL2StatusList, error) { + result := &metallbv1beta1.ServiceL2StatusList{} + err := c.client.Get(). + Namespace("metallb-system"). + Resource("servicel2statuses"). + Do(context.TODO()). 
+ Into(result) + return result, err +} diff --git a/test/e2e/metallb/e2e_test.go b/test/e2e/metallb/e2e_test.go new file mode 100644 index 00000000000..7dc1f9ae68d --- /dev/null +++ b/test/e2e/metallb/e2e_test.go @@ -0,0 +1,418 @@ +package kubevirt + +import ( + "context" + "flag" + "fmt" + "net" + "strconv" + "strings" + "testing" + "time" + + dockernetwork "github.com/docker/docker/api/types/network" + "github.com/onsi/ginkgo/v2" + metallbv1beta1 "go.universe.tf/metallb/api/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + "k8s.io/kubernetes/test/e2e" + k8sframework "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/config" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" + + apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/ipam" + "github.com/kubeovn/kube-ovn/pkg/util" + "github.com/kubeovn/kube-ovn/test/e2e/framework" + "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" + "github.com/kubeovn/kube-ovn/test/e2e/framework/iproute" + "github.com/kubeovn/kube-ovn/test/e2e/framework/kind" +) + +const ( + dockerNetworkName = "kube-ovn-vlan" + curlListenPort = 80 +) + +func init() { + klog.SetOutput(ginkgo.GinkgoWriter) + + // Register flags. + config.CopyFlags(config.Flags, flag.CommandLine) + k8sframework.RegisterCommonFlags(flag.CommandLine) + k8sframework.RegisterClusterFlags(flag.CommandLine) +} + +func TestE2E(t *testing.T) { + k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) + e2e.RunE2ETests(t) +} + +func makeProviderNetwork(providerNetworkName string, exchangeLinkName bool, linkMap map[string]*iproute.Link) *apiv1.ProviderNetwork { + var defaultInterface string + customInterfaces := make(map[string][]string, 0) + for node, link := range linkMap { + if !strings.ContainsRune(node, '-') { + continue + } + + if defaultInterface == "" { + defaultInterface = link.IfName + } else if link.IfName != defaultInterface { + customInterfaces[link.IfName] = append(customInterfaces[link.IfName], node) + } + } + + return framework.MakeProviderNetwork(providerNetworkName, exchangeLinkName, defaultInterface, customInterfaces, nil) +} + +var _ = framework.Describe("[group:metallb]", func() { + f := framework.NewDefaultFramework("metallb") + + var cs clientset.Interface + var nodeNames []string + var clusterName, providerNetworkName, vlanName, subnetName, deployName, containerName, serviceName, containerID, metallbIPPoolName string + var linkMap map[string]*iproute.Link + var routeMap map[string][]iproute.Route + var subnetClient *framework.SubnetClient + var vlanClient *framework.VlanClient + var serviceClient *framework.ServiceClient + var providerNetworkClient *framework.ProviderNetworkClient + var dockerNetwork *dockernetwork.Inspect + var deployClient *framework.DeploymentClient + var clientip string + + ginkgo.BeforeEach(func() { + f.SkipVersionPriorTo(1, 14, "This feature was introduced in v1.14.") + cs = f.ClientSet + deployClient = f.DeploymentClient() + serviceClient = f.ServiceClient() + subnetName = "subnet-" + framework.RandomSuffix() + vlanName = "vlan-" + framework.RandomSuffix() + providerNetworkName = "pn-" + framework.RandomSuffix() + subnetClient = f.SubnetClient() + vlanClient = f.VlanClient() + providerNetworkClient = f.ProviderNetworkClient() + containerName = "client-" + framework.RandomSuffix() + deployName = "deploy-" + framework.RandomSuffix() + 
metallbIPPoolName = "metallb-ip-pool-" + framework.RandomSuffix() + serviceName = "service-" + framework.RandomSuffix() + + if clusterName == "" { + ginkgo.By("Getting k8s nodes") + k8sNodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) + framework.ExpectNoError(err) + + cluster, ok := kind.IsKindProvided(k8sNodes.Items[0].Spec.ProviderID) + if !ok { + ginkgo.Skip("underlay spec only runs on kind clusters") + } + clusterName = cluster + } + + if dockerNetwork == nil { + ginkgo.By("Ensuring docker network " + dockerNetworkName + " exists") + network, err := docker.NetworkCreate(dockerNetworkName, true, true) + framework.ExpectNoError(err, "creating docker network "+dockerNetworkName) + dockerNetwork = network + } + + ginkgo.By("Getting kind nodes") + nodes, err := kind.ListNodes(clusterName, "") + framework.ExpectNoError(err, "getting nodes in kind cluster") + framework.ExpectNotEmpty(nodes) + + ginkgo.By("Connecting nodes to the docker network") + err = kind.NetworkConnect(dockerNetwork.ID, nodes) + framework.ExpectNoError(err, "connecting nodes to network "+dockerNetworkName) + + ginkgo.By("Getting nodes") + nodes, err = kind.ListNodes(clusterName, "") + framework.ExpectNoError(err, "getting nodes in cluster") + + ginkgo.By("Getting node links that belong to the docker network") + linkMap = make(map[string]*iproute.Link, len(nodes)) + routeMap = make(map[string][]iproute.Route, len(nodes)) + nodeNames = make([]string, 0, len(nodes)) + for _, node := range nodes { + links, err := node.ListLinks() + framework.ExpectNoError(err, "failed to list links on node %s: %v", node.Name(), err) + routes, err := node.ListRoutes(true) + framework.ExpectNoError(err, "failed to list routes on node %s: %v", node.Name(), err) + for _, link := range links { + if link.Address == node.NetworkSettings.Networks[dockerNetworkName].MacAddress { + linkMap[node.ID] = &link + break + } + } + framework.ExpectHaveKey(linkMap, node.ID) + link := linkMap[node.ID] + for _, route := range routes { + if route.Dev == link.IfName { + r := iproute.Route{ + Dst: route.Dst, + Gateway: route.Gateway, + Dev: route.Dev, + Flags: route.Flags, + } + routeMap[node.ID] = append(routeMap[node.ID], r) + } + } + framework.ExpectHaveKey(routeMap, node.ID) + linkMap[node.Name()] = linkMap[node.ID] + routeMap[node.Name()] = routeMap[node.ID] + nodeNames = append(nodeNames, node.Name()) + } + + ginkgo.By("Creating a new kind node as Client and connecting it to the docker network") + cmd := []string{"sh", "-c", "sleep 600"} + containerInfo, err := docker.ContainerCreate(containerName, f.KubeOVNImage, dockerNetworkName, cmd) + framework.ExpectNoError(err) + containerID = containerInfo.ID + ContainerInspect, err := docker.ContainerInspect(containerID) + framework.ExpectNoError(err) + clientip = ContainerInspect.NetworkSettings.Networks[dockerNetworkName].IPAddress + }) + ginkgo.AfterEach(func() { + ginkgo.By("Deleting the IPAddressPool for metallb") + f.MetallbClientSet.DeleteIPAddressPool(metallbIPPoolName) // nolint:errcheck + + ginkgo.By("Deleting the l2 advertisement for metallb") + f.MetallbClientSet.DeleteL2Advertisement(metallbIPPoolName) // nolint:errcheck + + ginkgo.By("Deleting the deployment " + deployName) + deployClient.DeleteSync(deployName) + + ginkgo.By("Deleting subnet " + subnetName) + subnetClient.DeleteSync(subnetName) + + ginkgo.By("Deleting vlan " + vlanName) + vlanClient.Delete(vlanName) + + ginkgo.By("Deleting provider network " + providerNetworkName) + 
providerNetworkClient.DeleteSync(providerNetworkName) + + ginkgo.By("Getting nodes") + nodes, err := kind.ListNodes(clusterName, "") + framework.ExpectNoError(err, "getting nodes in cluster") + + ginkgo.By("Waiting for ovs bridge to disappear") + deadline := time.Now().Add(time.Minute) + for _, node := range nodes { + err = node.WaitLinkToDisappear(util.ExternalBridgeName(providerNetworkName), 2*time.Second, deadline) + framework.ExpectNoError(err, "timed out waiting for ovs bridge to disappear in node %s", node.Name()) + } + + if dockerNetwork != nil { + ginkgo.By("Disconnecting nodes from the docker network") + err = kind.NetworkDisconnect(dockerNetwork.ID, nodes) + framework.ExpectNoError(err, "disconnecting nodes from network "+dockerNetworkName) + } + + if containerID != "" { + ginkgo.By("Deleting the client container") + err = docker.ContainerRemove(containerID) + framework.ExpectNoError(err, "removing container "+containerID) + } + }) + + framework.ConformanceIt("should support metallb and underlay combine", func() { + underlayCidr := make([]string, 0, 2) + gateway := make([]string, 0, 2) + var metallbVIPv4s, metallbVIPv6s []string + var metallbVIPv4Str, metallbVIPv6Str string + var err error + + ginkgo.By("Creating provider network " + providerNetworkName) + pn := makeProviderNetwork(providerNetworkName, false, linkMap) + _ = providerNetworkClient.CreateSync(pn) + + ginkgo.By("Getting docker network " + dockerNetworkName) + network, err := docker.NetworkInspect(dockerNetworkName) + framework.ExpectNoError(err, "getting docker network "+dockerNetworkName) + + ginkgo.By("Creating vlan " + vlanName) + vlan := framework.MakeVlan(vlanName, providerNetworkName, 0) + _ = vlanClient.Create(vlan) + + ginkgo.By("Creating underlay subnet " + subnetName) + var cidrV4, cidrV6, gatewayV4, gatewayV6 string + for _, config := range dockerNetwork.IPAM.Config { + switch util.CheckProtocol(config.Subnet) { + case apiv1.ProtocolIPv4: + if f.HasIPv4() { + cidrV4 = config.Subnet + gatewayV4 = config.Gateway + } + case apiv1.ProtocolIPv6: + if f.HasIPv6() { + cidrV6 = config.Subnet + gatewayV6 = config.Gateway + } + } + } + + if f.HasIPv4() { + underlayCidr = append(underlayCidr, cidrV4) + gateway = append(gateway, gatewayV4) + for index := 0; index < 5; index++ { + startIP := strings.Split(cidrV4, "/")[0] + ip, _ := ipam.NewIP(startIP) + metallbVIPv4s = append(metallbVIPv4s, ip.Add(100+int64(index)).String()) + } + metallbVIPv4Str = fmt.Sprintf("%s-%s", metallbVIPv4s[0], metallbVIPv4s[len(metallbVIPv4s)-1]) + } + if f.HasIPv6() { + underlayCidr = append(underlayCidr, cidrV6) + gateway = append(gateway, gatewayV6) + for index := 0; index < 5; index++ { + startIP := strings.Split(cidrV6, "/")[0] + ip, _ := ipam.NewIP(startIP) + metallbVIPv6s = append(metallbVIPv6s, ip.Add(100+int64(index)).String()) + } + metallbVIPv6Str = fmt.Sprintf("%s-%s", metallbVIPv6s[0], metallbVIPv6s[len(metallbVIPv6s)-1]) + } + + excludeIPs := make([]string, 0, len(network.Containers)*2) + for _, container := range network.Containers { + if container.IPv4Address != "" && f.HasIPv4() { + excludeIPs = append(excludeIPs, strings.Split(container.IPv4Address, "/")[0]) + if len(metallbVIPv4s) > 0 { + excludeIPs = append(excludeIPs, metallbVIPv4s...) + } + } + if container.IPv6Address != "" && f.HasIPv6() { + excludeIPs = append(excludeIPs, strings.Split(container.IPv6Address, "/")[0]) + if len(metallbVIPv6s) > 0 { + excludeIPs = append(excludeIPs, metallbVIPv6s...) 
+ } + } + } + + ginkgo.By("Creating an IPAddressPool for metallb with address " + metallbVIPv4Str + " and " + metallbVIPv6Str) + ipAddressPool := &metallbv1beta1.IPAddressPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: metallbIPPoolName, + }, + Spec: metallbv1beta1.IPAddressPoolSpec{ + Addresses: []string{}, + }, + } + if metallbVIPv4Str != "" { + ipAddressPool.Spec.Addresses = append(ipAddressPool.Spec.Addresses, metallbVIPv4Str) + } + if metallbVIPv6Str != "" { + ipAddressPool.Spec.Addresses = append(ipAddressPool.Spec.Addresses, metallbVIPv6Str) + } + _, err = f.MetallbClientSet.CreateIPAddressPool(ipAddressPool) + framework.ExpectNoError(err) + + ginkgo.By("Creating an L2Advertisement for metallb") + l2Advertisement := &metallbv1beta1.L2Advertisement{ + ObjectMeta: metav1.ObjectMeta{ + Name: metallbIPPoolName, + }, + Spec: metallbv1beta1.L2AdvertisementSpec{}, + } + _, err = f.MetallbClientSet.CreateL2Advertisement(l2Advertisement) + framework.ExpectNoError(err) + + ginkgo.By("Creating underlay subnet " + subnetName) + subnet := framework.MakeSubnet(subnetName, vlanName, strings.Join(underlayCidr, ","), strings.Join(gateway, ","), "", "", excludeIPs, nil, []string{}) + subnet.Spec.EnableExternalLBAddress = true + _ = subnetClient.CreateSync(subnet) + + ginkgo.By("Create deploy in underlay subnet") + annoations := map[string]string{ + util.LogicalSwitchAnnotation: subnetName, + } + podLabels := map[string]string{"app": "nginx"} + + args := []string{"netexec", "--http-port", strconv.Itoa(curlListenPort)} + deploy := framework.MakeDeployment(deployName, 3, podLabels, annoations, "nginx", framework.AgnhostImage, "") + deploy.Spec.Template.Spec.Containers[0].Args = args + _ = deployClient.CreateSync(deploy) + + ginkgo.By("Creating a service for the deployment") + ports := []corev1.ServicePort{ + { + Name: "http", + Port: 80, + TargetPort: intstr.FromInt(80), + Protocol: corev1.ProtocolTCP, + }, + } + service := framework.MakeService(serviceName, corev1.ServiceTypeLoadBalancer, nil, podLabels, ports, "") + service.Spec.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeLocal + _ = serviceClient.CreateSync(service, func(s *corev1.Service) (bool, error) { + return len(s.Status.LoadBalancer.Ingress) != 0, nil + }, "lb service ip is not empty") + + ginkgo.By("Checking the service is reachable") + service = f.ServiceClient().Get(serviceName) + lbsvcIP := service.Status.LoadBalancer.Ingress[0].IP + + checkReachable(f, containerID, clientip, lbsvcIP, "80", clusterName, true) + }) +}) + +func checkReachable(f *framework.Framework, containerID, sourceIP, targetIP, targetPort, clusterName string, expectReachable bool) { + ginkgo.GinkgoHelper() + ginkgo.By("checking curl reachable") + cmd := strings.Fields(fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/clientip", net.JoinHostPort(targetIP, targetPort))) + output, _, err := docker.Exec(containerID, nil, cmd...) + if expectReachable { + framework.ExpectNoError(err) + client, _, err := net.SplitHostPort(strings.TrimSpace(string(output))) + framework.ExpectNoError(err) + // check packet has not SNAT + framework.ExpectEqual(sourceIP, client) + } else { + framework.ExpectError(err) + } + + ginkgo.By("checking vip node is same as backend pod's host") + cmd = strings.Fields(fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/hostname", net.JoinHostPort(targetIP, targetPort))) + output, _, err = docker.Exec(containerID, nil, cmd...) 
+ framework.ExpectNoError(err) + backendPodName := strings.TrimSpace(string(output)) + framework.Logf("Packet reached backend: %s", backendPodName) + + cmd = strings.Fields(fmt.Sprintf("ip neigh show %s", targetIP)) + output, _, err = docker.Exec(containerID, nil, cmd...) + framework.ExpectNoError(err) + framework.Logf("ip neigh: %s", string(output)) + lines := strings.Split(string(output), "\n") + var vipMac string + for _, line := range lines { + fields := strings.Fields(line) + if len(fields) >= 4 && fields[0] == targetIP { + vipMac = fields[4] + framework.Logf("VIP MAC address: %s", vipMac) + break + } + } + + var vipNode string + nodes, err := kind.ListNodes(clusterName, "") + framework.ExpectNoError(err, "getting nodes in kind cluster") + for _, node := range nodes { + for _, networkSettings := range node.NetworkSettings.Networks { + if networkSettings.MacAddress == vipMac { + vipNode = node.Name() + break + } + } + } + + framework.ExpectNotEqual(vipNode, "", "Failed to find the node with MAC address: %s", vipMac) + framework.Logf("Node with MAC address %s is %s", vipMac, vipNode) + + ginkgo.By("Checking the backend pod's host is same as the metallb vip's node") + backendPod := f.PodClient().GetPod(backendPodName) + backendPodNode := backendPod.Spec.NodeName + framework.ExpectEqual(backendPodNode, vipNode) +} From 203c98c4226bbd48a4b06009ac223604893eefa4 Mon Sep 17 00:00:00 2001 From: clyi Date: Wed, 5 Feb 2025 11:30:26 +0800 Subject: [PATCH 3/8] fix Signed-off-by: clyi --- .github/workflows/build-x86-image.yaml | 48 +++++++++++++++----------- charts/kube-ovn/templates/ovn-CR.yaml | 15 ++++++-- dist/images/Dockerfile.base | 2 +- dist/images/install.sh | 8 +---- e2e.mk | 5 +-- test/e2e/metallb/e2e_test.go | 6 ++-- 6 files changed, 48 insertions(+), 36 deletions(-) diff --git a/.github/workflows/build-x86-image.yaml b/.github/workflows/build-x86-image.yaml index 9286901b6dc..74fda48454f 100644 --- a/.github/workflows/build-x86-image.yaml +++ b/.github/workflows/build-x86-image.yaml @@ -3145,7 +3145,7 @@ jobs: path: kube-ovn-connectivity-e2e-${{ matrix.mode }}-ko-log.tar.gz - kube-ovn-metallb-e2e: + kube-ovn-underlay-metallb-e2e: name: OVN METALLB E2E needs: - build-kube-ovn @@ -3222,6 +3222,14 @@ jobs: - name: Load images run: docker load -i kube-ovn.tar + - name: Set environment variables + run: | + if [ $(($RANDOM%2)) -ne 0 ]; then + # run as root and use valgrind to debug memory leak + echo "VERSION=$(cat VERSION)-debug" >> "$GITHUB_ENV" + echo "DEBUG_WRAPPER=valgrind" >> "$GITHUB_ENV" + fi + - name: Create kind cluster run: | pipx install jinjanator @@ -3232,53 +3240,53 @@ jobs: run: make kind-install-metallb-pool-from-underlay - name: Run Ovn Metallb and Kube-OVN Combine E2E - id: kube-ovn-metallb-e2e + id: kube-ovn-underlay-metallb-e2e working-directory: ${{ env.E2E_DIR }} env: E2E_BRANCH: ${{ github.base_ref || github.ref_name }} - run: make kube-ovn-metallb-e2e + run: make kube-ovn-underlay-metallb-e2e - name: Collect k8s events if: failure() && ( steps.ovn-metallb-e2e.conclusion == 'failure') run: | - kubectl get events -A -o yaml > kube-ovn-metallb-e2e-events.yaml - tar zcf kube-ovn-metallb-e2e-events.tar.gz kube-ovn-metallb-e2e-events.yaml + kubectl get events -A -o yaml > kube-ovn-underlay-metallb-e2e-events.yaml + tar zcf kube-ovn-underlay-metallb-e2e-events.tar.gz kube-ovn-underlay-metallb-e2e-events.yaml - name: Upload k8s events uses: actions/upload-artifact@v4 - if: failure() && (steps.kube-ovn-metallb-e2e.conclusion == 'failure') + if: failure() && 
(steps.kube-ovn-underlay-metallb-e2e.conclusion == 'failure') with: - name: kube-ovn-metallb-e2e-events - path: kube-ovn-metallb-e2e-events.tar.gz + name: kube-ovn-underlay-metallb-e2e-events + path: kube-ovn-underlay-metallb-e2e-events.tar.gz - name: Collect apiserver audit logs - if: failure() && (steps.kube-ovn-metallb-e2e.conclusion == 'failure') + if: failure() && (steps.kube-ovn-underlay-metallb-e2e.conclusion == 'failure') run: | docker cp kube-ovn-control-plane:/var/log/kubernetes/kube-apiserver-audit.log . - tar zcf kube-ovn-metallb-e2e-audit-log.tar.gz kube-apiserver-audit.log + tar zcf kube-ovn-underlay-metallb-e2e-audit-log.tar.gz kube-apiserver-audit.log - name: Upload apiserver audit logs uses: actions/upload-artifact@v4 - if: failure() && (steps.kube-ovn-metallb-e2e.conclusion == 'failure') + if: failure() && (steps.kube-ovn-underlay-metallb-e2e.conclusion == 'failure') with: - name: kube-ovn-metallb-e2e-audit-log - path: kube-ovn-metallb-e2e-audit-log.tar.gz + name: kube-ovn-underlay-metallb-e2e-audit-log + path: kube-ovn-underlay-metallb-e2e-audit-log.tar.gz - name: kubectl ko log - if: failure() && (steps.kube-ovn-metallb-e2e.conclusion == 'failure') + if: failure() && (steps.kube-ovn-underlay-metallb-e2e.conclusion == 'failure') run: | make kubectl-ko-log - mv kubectl-ko-log.tar.gz kube-ovn-metallb-e2e-ko-log.tar.gz + mv kubectl-ko-log.tar.gz kube-ovn-underlay-metallb-e2e-ko-log.tar.gz - name: upload kubectl ko log uses: actions/upload-artifact@v4 - if: failure() && (steps.kube-ovn-metallb-e2e.conclusion == 'failure') + if: failure() && (steps.kube-ovn-underlay-metallb-e2e.conclusion == 'failure') with: - name: kube-ovn-metallb-e2e-ko-log - path: kube-ovn-metallb-e2e-ko-log.tar.gz + name: kube-ovn-underlay-metallb-e2e-ko-log + path: kube-ovn-underlay-metallb-e2e-ko-log.tar.gz - name: Check kube ovn pod restarts - if: ${{ success() || (failure() && (steps.install.conclusion == 'failure' || steps.kube-ovn-metallb-e2e.conclusion == 'failure')) }} + if: ${{ success() || (failure() && (steps.install.conclusion == 'failure' || steps.kube-ovn-underlay-metallb-e2e.conclusion == 'failure')) }} run: make check-kube-ovn-pod-restarts push: @@ -3290,7 +3298,7 @@ jobs: - kube-ovn-conformance-e2e - kube-ovn-ic-conformance-e2e - kube-ovn-ipsec-e2e - - kube-ovn-metallb-e2e + - kube-ovn-underlay-metallb-e2e - multus-conformance-e2e - vpc-egress-gateway-e2e - ovn-vpc-nat-gw-conformance-e2e diff --git a/charts/kube-ovn/templates/ovn-CR.yaml b/charts/kube-ovn/templates/ovn-CR.yaml index ddf6dfd887d..9edef4b32e2 100644 --- a/charts/kube-ovn/templates/ovn-CR.yaml +++ b/charts/kube-ovn/templates/ovn-CR.yaml @@ -290,18 +290,27 @@ rules: - nodes - nodes/status - pods + - services verbs: - get - list - patch - watch + - apiGroups: + - "kubeovn.io" + resources: + - ips + verbs: + - get + - update - apiGroups: - "" resources: - - services + - events verbs: - - list - - watch + - create + - patch + - update - apiGroups: - "" resources: diff --git a/dist/images/Dockerfile.base b/dist/images/Dockerfile.base index 1e63ee41b0f..0c17cff5b23 100644 --- a/dist/images/Dockerfile.base +++ b/dist/images/Dockerfile.base @@ -67,7 +67,7 @@ RUN cd /usr/src/ && git clone -b branch-24.03 --depth=1 https://github.com/ovn-o # skip node local dns ip conntrack when set acl curl -s https://github.com/kubeovn/ovn/commit/e7d3ba53cdcbc524bb29c54ddb07b83cc4258ed7.patch | git apply && \ # select local backend first - curl -s https://github.com/kubeovn/ovn/commit/faa762818447a4ac470ec28c69bbcabf80091d3a.patch | git apply + 
curl -s https://github.com/kubeovn/ovn/commit/1fed88af496939d6e526b38897bc10d56557c5c4.patch | git apply RUN apt install -y build-essential fakeroot \ autoconf automake bzip2 debhelper-compat dh-exec dh-python dh-sequence-python3 dh-sequence-sphinxdoc \ diff --git a/dist/images/install.sh b/dist/images/install.sh index 5c5b771d130..01f1b4a870c 100755 --- a/dist/images/install.sh +++ b/dist/images/install.sh @@ -3675,6 +3675,7 @@ rules: - nodes - nodes/status - pods + - services verbs: - get - list @@ -3695,13 +3696,6 @@ rules: - create - patch - update - - apiGroups: - - "" - resources: - - services - verbs: - - list - - watch - apiGroups: - "" resources: diff --git a/e2e.mk b/e2e.mk index 55b6452637a..2a8ebb71103 100644 --- a/e2e.mk +++ b/e2e.mk @@ -82,6 +82,7 @@ e2e-build: ginkgo build $(E2E_BUILD_FLAGS) ./test/e2e/kubevirt ginkgo build $(E2E_BUILD_FLAGS) ./test/e2e/webhook ginkgo build $(E2E_BUILD_FLAGS) ./test/e2e/connectivity + ginkgo build $(E2E_BUILD_FLAGS) ./test/e2e/metallb .PHONY: k8s-conformance-e2e k8s-conformance-e2e: @@ -252,8 +253,8 @@ kube-ovn-connectivity-e2e: ginkgo $(GINKGO_OUTPUT_OPT) --procs 2 --randomize-all -v \ --focus=CNI:Kube-OVN ./test/e2e/connectivity -- $(TEST_BIN_ARGS) -.PHONY: kube-ovn-metallb-e2e -kube-ovn-metallb-e2e: +.PHONY: kube-ovn-underlay-metallb-e2e +kube-ovn-underlay-metallb-e2e: ginkgo build $(E2E_BUILD_FLAGS) ./test/e2e/metallb E2E_BRANCH=$(E2E_BRANCH) \ E2E_IP_FAMILY=$(E2E_IP_FAMILY) \ diff --git a/test/e2e/metallb/e2e_test.go b/test/e2e/metallb/e2e_test.go index 7dc1f9ae68d..3506f200340 100644 --- a/test/e2e/metallb/e2e_test.go +++ b/test/e2e/metallb/e2e_test.go @@ -326,13 +326,13 @@ var _ = framework.Describe("[group:metallb]", func() { _ = subnetClient.CreateSync(subnet) ginkgo.By("Create deploy in underlay subnet") - annoations := map[string]string{ + annotations := map[string]string{ util.LogicalSwitchAnnotation: subnetName, } podLabels := map[string]string{"app": "nginx"} args := []string{"netexec", "--http-port", strconv.Itoa(curlListenPort)} - deploy := framework.MakeDeployment(deployName, 3, podLabels, annoations, "nginx", framework.AgnhostImage, "") + deploy := framework.MakeDeployment(deployName, 3, podLabels, annotations, "nginx", framework.AgnhostImage, "") deploy.Spec.Template.Spec.Containers[0].Args = args _ = deployClient.CreateSync(deploy) @@ -408,7 +408,7 @@ func checkReachable(f *framework.Framework, containerID, sourceIP, targetIP, tar } } - framework.ExpectNotEqual(vipNode, "", "Failed to find the node with MAC address: %s", vipMac) + framework.ExpectNotEmpty(vipNode, "Failed to find the node with MAC address: %s", vipMac) framework.Logf("Node with MAC address %s is %s", vipMac, vipNode) ginkgo.By("Checking the backend pod's host is same as the metallb vip's node") From 300f25db922bf00a63c1a707f095ed4769a0001b Mon Sep 17 00:00:00 2001 From: clyi Date: Wed, 5 Feb 2025 15:40:00 +0800 Subject: [PATCH 4/8] add log Signed-off-by: clyi --- pkg/controller/service.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/controller/service.go b/pkg/controller/service.go index 5ed35eea071..c14c2bc8773 100644 --- a/pkg/controller/service.go +++ b/pkg/controller/service.go @@ -339,6 +339,7 @@ func (c *Controller) handleUpdateService(svcObject *updateSvcObject) error { } if err := c.checkServiceLBIPBelongToSubnet(svc); err != nil { + klog.Error(err) return err } From d091293a8cab2e86716c78af17074752c5a59f52 Mon Sep 17 00:00:00 2001 From: clyi Date: Mon, 10 Feb 2025 22:05:41 +0800 Subject: [PATCH 5/8] fix Signed-off-by: clyi --- 
dist/images/Dockerfile.base | 2 +- pkg/controller/endpoint.go | 2 +- test/e2e/metallb/e2e_test.go | 15 +++++++++++++-- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/dist/images/Dockerfile.base b/dist/images/Dockerfile.base index 0c17cff5b23..eba3bffe2f2 100644 --- a/dist/images/Dockerfile.base +++ b/dist/images/Dockerfile.base @@ -67,7 +67,7 @@ RUN cd /usr/src/ && git clone -b branch-24.03 --depth=1 https://github.com/ovn-o # skip node local dns ip conntrack when set acl curl -s https://github.com/kubeovn/ovn/commit/e7d3ba53cdcbc524bb29c54ddb07b83cc4258ed7.patch | git apply && \ # select local backend first - curl -s https://github.com/kubeovn/ovn/commit/1fed88af496939d6e526b38897bc10d56557c5c4.patch | git apply + curl -s https://github.com/kubeovn/ovn/commit/78a66777c0634eb98645f5de32262b36df3d235d.patch | git apply RUN apt install -y build-essential fakeroot \ autoconf automake bzip2 debhelper-compat dh-exec dh-python dh-sequence-python3 dh-sequence-sphinxdoc \ diff --git a/pkg/controller/endpoint.go b/pkg/controller/endpoint.go index 1159f0bafc1..fcc8b10f4b2 100644 --- a/pkg/controller/endpoint.go +++ b/pkg/controller/endpoint.go @@ -382,7 +382,7 @@ func getIPPortMappingBackend(endpoints *v1.Endpoints, pods []*v1.Pod, servicePor for _, address := range subset.Addresses { if isGenIPPortMapping && address.TargetRef.Name != "" { - ipName := fmt.Sprintf("%s.%s", address.TargetRef.Name, endpoints.Namespace) + ipName := fmt.Sprintf("%s.%s", address.TargetRef.Name, address.TargetRef.Namespace) ipPortMapping[address.IP] = fmt.Sprintf(util.HealthCheckNamedVipTemplate, ipName, checkVip) } if address.TargetRef == nil || address.TargetRef.Kind != "Pod" { diff --git a/test/e2e/metallb/e2e_test.go b/test/e2e/metallb/e2e_test.go index 3506f200340..36bf31ab6c9 100644 --- a/test/e2e/metallb/e2e_test.go +++ b/test/e2e/metallb/e2e_test.go @@ -355,11 +355,11 @@ var _ = framework.Describe("[group:metallb]", func() { service = f.ServiceClient().Get(serviceName) lbsvcIP := service.Status.LoadBalancer.Ingress[0].IP - checkReachable(f, containerID, clientip, lbsvcIP, "80", clusterName, true) + checkReachable(f, containerID, clientip, lbsvcIP, "80", clusterName, serviceName, true) }) }) -func checkReachable(f *framework.Framework, containerID, sourceIP, targetIP, targetPort, clusterName string, expectReachable bool) { +func checkReachable(f *framework.Framework, containerID, sourceIP, targetIP, targetPort, clusterName, serviceName string, expectReachable bool) { ginkgo.GinkgoHelper() ginkgo.By("checking curl reachable") cmd := strings.Fields(fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/clientip", net.JoinHostPort(targetIP, targetPort))) @@ -415,4 +415,15 @@ func checkReachable(f *framework.Framework, containerID, sourceIP, targetIP, tar backendPod := f.PodClient().GetPod(backendPodName) backendPodNode := backendPod.Spec.NodeName framework.ExpectEqual(backendPodNode, vipNode) + + l2status, err := f.MetallbClientSet.ListServiceL2Statuses() + framework.ExpectNoError(err) + + for _, l2ss := range l2status.Items { + if l2ss.Status.ServiceName == serviceName { + framework.Logf("service %s VIP node: %s", serviceName, vipNode) + framework.ExpectEqual(l2ss.Status.Node, vipNode) + break + } + } } From 4250649c86531571167a036d4e047f615e1a0e29 Mon Sep 17 00:00:00 2001 From: clyi Date: Tue, 11 Feb 2025 10:37:47 +0800 Subject: [PATCH 6/8] fix mem leak Signed-off-by: clyi --- dist/images/Dockerfile.base | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dist/images/Dockerfile.base 
b/dist/images/Dockerfile.base index eba3bffe2f2..f7f4bd5eae3 100644 --- a/dist/images/Dockerfile.base +++ b/dist/images/Dockerfile.base @@ -67,7 +67,7 @@ RUN cd /usr/src/ && git clone -b branch-24.03 --depth=1 https://github.com/ovn-o # skip node local dns ip conntrack when set acl curl -s https://github.com/kubeovn/ovn/commit/e7d3ba53cdcbc524bb29c54ddb07b83cc4258ed7.patch | git apply && \ # select local backend first - curl -s https://github.com/kubeovn/ovn/commit/78a66777c0634eb98645f5de32262b36df3d235d.patch | git apply + curl -s https://github.com/kubeovn/ovn/commit/a9e009136a42cf6d985f97e2bf1ec41df6b5ca29.patch | git apply RUN apt install -y build-essential fakeroot \ autoconf automake bzip2 debhelper-compat dh-exec dh-python dh-sequence-python3 dh-sequence-sphinxdoc \ From 2dae46c3d2e75431f87e4356bee2a0ee88c54643 Mon Sep 17 00:00:00 2001 From: clyi Date: Tue, 11 Feb 2025 11:34:31 +0800 Subject: [PATCH 7/8] fix e2e Signed-off-by: clyi --- test/e2e/metallb/e2e_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/e2e/metallb/e2e_test.go b/test/e2e/metallb/e2e_test.go index 36bf31ab6c9..6de940cebb8 100644 --- a/test/e2e/metallb/e2e_test.go +++ b/test/e2e/metallb/e2e_test.go @@ -315,7 +315,9 @@ var _ = framework.Describe("[group:metallb]", func() { ObjectMeta: metav1.ObjectMeta{ Name: metallbIPPoolName, }, - Spec: metallbv1beta1.L2AdvertisementSpec{}, + Spec: metallbv1beta1.L2AdvertisementSpec{ + IPAddressPools: []string{metallbIPPoolName}, + }, } _, err = f.MetallbClientSet.CreateL2Advertisement(l2Advertisement) framework.ExpectNoError(err) From eb3848327fa53dbdc64971acdee8ab7a67bef216 Mon Sep 17 00:00:00 2001 From: clyi Date: Tue, 11 Feb 2025 15:46:42 +0800 Subject: [PATCH 8/8] l2servicestatus is sometimes error, it should be metallb's bug not check l2servicestatus Signed-off-by: clyi --- test/e2e/metallb/e2e_test.go | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/test/e2e/metallb/e2e_test.go b/test/e2e/metallb/e2e_test.go index 6de940cebb8..5672e0dd76f 100644 --- a/test/e2e/metallb/e2e_test.go +++ b/test/e2e/metallb/e2e_test.go @@ -357,11 +357,11 @@ var _ = framework.Describe("[group:metallb]", func() { service = f.ServiceClient().Get(serviceName) lbsvcIP := service.Status.LoadBalancer.Ingress[0].IP - checkReachable(f, containerID, clientip, lbsvcIP, "80", clusterName, serviceName, true) + checkReachable(f, containerID, clientip, lbsvcIP, "80", clusterName, true) }) }) -func checkReachable(f *framework.Framework, containerID, sourceIP, targetIP, targetPort, clusterName, serviceName string, expectReachable bool) { +func checkReachable(f *framework.Framework, containerID, sourceIP, targetIP, targetPort, clusterName string, expectReachable bool) { ginkgo.GinkgoHelper() ginkgo.By("checking curl reachable") cmd := strings.Fields(fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/clientip", net.JoinHostPort(targetIP, targetPort))) @@ -417,15 +417,4 @@ func checkReachable(f *framework.Framework, containerID, sourceIP, targetIP, tar backendPod := f.PodClient().GetPod(backendPodName) backendPodNode := backendPod.Spec.NodeName framework.ExpectEqual(backendPodNode, vipNode) - - l2status, err := f.MetallbClientSet.ListServiceL2Statuses() - framework.ExpectNoError(err) - - for _, l2ss := range l2status.Items { - if l2ss.Status.ServiceName == serviceName { - framework.Logf("service %s VIP node: %s", serviceName, vipNode) - framework.ExpectEqual(l2ss.Status.Node, vipNode) - break - } - } }
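
The behaviour introduced by this series can be spot-checked on a node after install. The commands below are a minimal sketch, not taken from the patches themselves: the load balancer name cluster-tcp-loadbalancer and the bridge name br-provider are assumed examples, while the prefer_local_backend option key and the 0x1000/0x1001 flow cookies come from the code added above (pkg/ovs/ovn-nb-load_balancer.go and pkg/util/const.go).

    # look up the cluster LB and confirm the prefer_local_backend option is set
    # (LB name is an assumed example; adjust to your VPC's TCP/UDP load balancer)
    lb_uuid=$(ovn-nbctl --bare --columns=_uuid find Load_Balancer name=cluster-tcp-loadbalancer)
    ovn-nbctl --bare --columns=options list Load_Balancer "$lb_uuid"

    # set the option by hand when experimenting outside of kube-ovn
    ovn-nbctl set Load_Balancer "$lb_uuid" options:prefer_local_backend=true

    # dump the per-node service-local flows installed on the external bridge
    # (bridge name assumed; cookie 0x1000 is the IPv4 value, 0x1001 the IPv6 one)
    ovs-ofctl dump-flows br-provider 'cookie=0x1000/-1'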