diff --git a/.github/workflows/build-x86-image.yaml b/.github/workflows/build-x86-image.yaml index c19611405a7..766637b70f2 100644 --- a/.github/workflows/build-x86-image.yaml +++ b/.github/workflows/build-x86-image.yaml @@ -534,8 +534,8 @@ jobs: run: make kube-ovn-ic-conformance-e2e chart-installation-test: - needs: build-kube-ovn name: Chart Installation Test + needs: build-kube-ovn runs-on: ubuntu-22.04 timeout-minutes: 30 steps: @@ -573,45 +573,9 @@ jobs: - name: Cleanup run: sh dist/images/cleanup.sh - ha-installation-test: - needs: build-kube-ovn - name: HA Installation Test - runs-on: ubuntu-22.04 - timeout-minutes: 30 - steps: - - uses: actions/checkout@v3 - - - name: Install kind - run: | - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - - name: Download image - uses: actions/download-artifact@v3 - with: - name: kube-ovn - - - name: Load image - run: docker load --input kube-ovn.tar - - - name: Create kind cluster - run: | - sudo pip3 install j2cli - sudo pip3 install "j2cli[yaml]" - sudo PATH=~/.local/bin:$PATH make kind-init-ha - sudo cp -r /root/.kube/ ~/.kube/ - sudo chown -R $(id -un). 
~/.kube/ - - - name: Install Kube-OVN - run: sudo ENABLE_SSL=true make kind-install - - - name: Cleanup - run: sh dist/images/cleanup.sh - underlay-logical-gateway-installation-test: - needs: build-kube-ovn name: Underlay Logical Gateway Installation Test + needs: build-kube-ovn runs-on: ubuntu-22.04 timeout-minutes: 30 steps: @@ -646,8 +610,8 @@ jobs: run: sh dist/images/cleanup.sh no-ovn-lb-test: - needs: build-kube-ovn name: Disable OVN LB Test + needs: build-kube-ovn runs-on: ubuntu-22.04 timeout-minutes: 30 steps: @@ -684,8 +648,8 @@ jobs: run: sh dist/images/cleanup.sh no-np-test: - needs: build-kube-ovn name: Disable Network Policy Test + needs: build-kube-ovn runs-on: ubuntu-22.04 timeout-minutes: 30 steps: @@ -722,10 +686,10 @@ jobs: run: sh dist/images/cleanup.sh lb-svc-e2e: + name: LB Service E2E needs: - build-kube-ovn - build-vpc-nat-gateway - name: LB Service E2E runs-on: ubuntu-22.04 timeout-minutes: 30 steps: @@ -811,8 +775,8 @@ jobs: run: make kube-ovn-lb-svc-conformance-e2e installation-compatibility-test: - needs: build-kube-ovn name: Installation Compatibility Test + needs: build-kube-ovn runs-on: ubuntu-22.04 timeout-minutes: 10 steps: @@ -847,8 +811,8 @@ jobs: run: sh dist/images/cleanup.sh cilium-chaining-e2e: - needs: build-kube-ovn name: Cilium Chaining E2E + needs: build-kube-ovn runs-on: ubuntu-22.04 timeout-minutes: 30 steps: @@ -934,7 +898,106 @@ jobs: - name: Cleanup run: sh dist/images/cleanup.sh + kube-ovn-security-e2e: + name: Kube-OVN Security E2E + needs: build-kube-ovn + runs-on: ubuntu-22.04 + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + ssl: + - "true" + - "false" + bind-local: + - "true" + - "false" + ip-family: + - ipv4 + - ipv6 + - dual + steps: + - uses: actions/checkout@v3 + + - name: Create the default branch directory + run: mkdir -p test/e2e/source + + - name: Check out the default branch + uses: actions/checkout@v3 + with: + ref: ${{ github.event.repository.default_branch }} + fetch-depth: 1 + path: 
test/e2e/source + + - name: Export E2E directory + run: | + if [ '${{ github.base_ref || github.ref_name }}' = '${{ github.event.repository.default_branch }}' ]; then + echo "E2E_DIR=." >> "$GITHUB_ENV" + else + echo "E2E_DIR=test/e2e/source" >> "$GITHUB_ENV" + fi + + - uses: actions/setup-go@v3 + with: + go-version: '${{ env.GO_VERSION }}' + check-latest: true + id: go + + - name: Export Go full version + run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + + - name: Go cache + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('${{ env.E2E_DIR }}/**/go.sum') }} + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- + + - name: Build e2e binaries + working-directory: ${{ env.E2E_DIR }} + run: make e2e-compile + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Download image + uses: actions/download-artifact@v3 + with: + name: kube-ovn + + - name: Load image + run: docker load --input kube-ovn.tar + + - name: Create kind cluster + run: | + sudo pip3 install j2cli + sudo pip3 install "j2cli[yaml]" + sudo PATH=~/.local/bin:$PATH make kind-init-ha-${{ matrix.ip-family }} + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). 
~/.kube/ + + - name: Install Kube-OVN + run: | + sudo ENABLE_SSL=${{ matrix.ssl }} ENABLE_BIND_LOCAL_IP=${{ matrix.bind-local }} \ + make kind-install-${{ matrix.ip-family }} + + - name: Run E2E + working-directory: ${{ env.E2E_DIR }} + env: + E2E_BRANCH: ${{ github.base_ref || github.ref_name }} + E2E_IP_FAMILY: ${{ matrix.ip-family }} + run: make kube-ovn-security-e2e + + - name: Cleanup + run: sh dist/images/cleanup.sh + push: + name: Push Images needs: - build-centos-compile - k8s-conformance-e2e @@ -943,14 +1006,13 @@ jobs: - kube-ovn-conformance-e2e - kube-ovn-ic-conformance-e2e - lb-svc-e2e - - ha-installation-test - underlay-logical-gateway-installation-test - chart-installation-test - installation-compatibility-test - no-ovn-lb-test - no-np-test - cilium-chaining-e2e - name: push + - kube-ovn-security-e2e runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/scheduled-e2e.yaml b/.github/workflows/scheduled-e2e.yaml index 3a72e3d9abd..a9ebdad20e8 100644 --- a/.github/workflows/scheduled-e2e.yaml +++ b/.github/workflows/scheduled-e2e.yaml @@ -394,10 +394,6 @@ jobs: fail-fast: false matrix: branch: - - master - - release-1.11 - - release-1.10 - - release-1.9 - release-1.8 steps: - uses: actions/checkout@v3 @@ -799,6 +795,91 @@ jobs: working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} run: sh dist/images/cleanup.sh + kube-ovn-security-e2e: + name: Kube-OVN Security E2E + runs-on: ubuntu-22.04 + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + branch: + - master + - release-1.11 + - release-1.10 + - release-1.9 + ssl: + - "true" + - "false" + bind-local: + - "true" + - "false" + ip-family: + - ipv4 + - ipv6 + - dual + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 + with: + go-version: '${{ env.GO_VERSION }}' + check-latest: true + id: go + + - name: Export Go full version + run: echo "GO_FULL_VER=$(go version | awk '{print $3}')" >> "$GITHUB_ENV" + + - name: Go cache + uses: 
actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-e2e-${{ env.GO_FULL_VER }}-x86- + + - name: Create branch directory + run: mkdir -p test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Check out branch + uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + fetch-depth: 1 + path: test/e2e/kube-ovn/branches/${{ matrix.branch }} + + - name: Install kind + run: | + curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv kind /usr/local/bin + + - name: Create kind cluster + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + sudo pip3 install j2cli + sudo pip3 install "j2cli[yaml]" + sudo PATH=~/.local/bin:$PATH make kind-init-ha-${{ matrix.ip-family }} + sudo cp -r /root/.kube/ ~/.kube/ + sudo chown -R $(id -un). ~/.kube/ + + - name: Install Kube-OVN + working-directory: test/e2e/kube-ovn/branches/${{ matrix.branch }} + run: | + version=$(grep -E '^VERSION="v([0-9]+\.){2}[0-9]+"$' dist/images/install.sh | head -n1 | awk -F= '{print $2}' | tr -d '"') + docker pull kubeovn/kube-ovn:$version + sudo VERSION=$version ENABLE_SSL=${{ matrix.ssl }} \ + ENABLE_BIND_LOCAL_IP=${{ matrix.bind-local }} \ + make kind-install-${{ matrix.ip-family }} + + - name: Run E2E + env: + E2E_BRANCH: ${{ matrix.branch }} + E2E_IP_FAMILY: ${{ matrix.ip-family }} + run: make kube-ovn-security-e2e + + - name: Cleanup + run: sh dist/images/cleanup.sh + helm-direct-upgrade-e2e: name: Helm Upgrade E2E runs-on: ubuntu-22.04 diff --git a/Makefile b/Makefile index af652178388..7fae622fd0e 100644 --- a/Makefile +++ b/Makefile @@ -245,9 +245,20 @@ kind-init-iptables: @kube_proxy_mode=iptables $(MAKE) kind-init .PHONY: kind-init-ha -kind-init-ha: +kind-init-ha: kind-init-ha-ipv4 + +.PHONY: kind-init-ha-ipv4 +kind-init-ha-ipv4: @ha=true $(MAKE) 
kind-init +.PHONY: kind-init-ha-ipv6 +kind-init-ha-ipv6: + @ip_family=ipv6 $(MAKE) kind-init-ha + +.PHONY: kind-init-ha-dual +kind-init-ha-dual: + @ip_family=dual $(MAKE) kind-init-ha + .PHONY: kind-init-single kind-init-single: @single=true $(MAKE) kind-init diff --git a/Makefile.e2e b/Makefile.e2e index 758ad3814f1..10d419fb1db 100644 --- a/Makefile.e2e +++ b/Makefile.e2e @@ -30,6 +30,7 @@ e2e-compile: go test ./test/e2e/kube-ovn -c -o test/e2e/kube-ovn/e2e.test go test ./test/e2e/ovn-ic -c -o test/e2e/ovn-ic/e2e.test go test ./test/e2e/lb-svc -c -o test/e2e/lb-svc/e2e.test + go test ./test/e2e/security -c -o test/e2e/security/e2e.test .PHONY: k8s-conformance-e2e k8s-conformance-e2e: @@ -82,3 +83,11 @@ kube-ovn-lb-svc-conformance-e2e: E2E_IP_FAMILY=$(E2E_IP_FAMILY) \ E2E_NETWORK_MODE=$(E2E_NETWORK_MODE) \ ./test/e2e/lb-svc/e2e.test --ginkgo.focus=CNI:Kube-OVN + +.PHONY: kube-ovn-security-e2e +kube-ovn-security-e2e: + go test ./test/e2e/security -c -o test/e2e/security/e2e.test + E2E_BRANCH=$(E2E_BRANCH) \ + E2E_IP_FAMILY=$(E2E_IP_FAMILY) \ + E2E_NETWORK_MODE=$(E2E_NETWORK_MODE) \ + ./test/e2e/security/e2e.test --ginkgo.focus=CNI:Kube-OVN diff --git a/dist/images/Dockerfile.base b/dist/images/Dockerfile.base index a172c861202..18e1b1e5031 100644 --- a/dist/images/Dockerfile.base +++ b/dist/images/Dockerfile.base @@ -40,6 +40,8 @@ RUN dpkg -i /usr/src/python3-openvswitch*.deb /usr/src/libopenvswitch*.deb RUN cd /usr/src/ && git clone -b branch-22.03 --depth=1 https://github.com/ovn-org/ovn.git && \ cd ovn && \ + # fix ssl listen address + curl -s https://github.com/kubeovn/ovn/commit/62d4969877712c26fe425698d898b440f91b44bf.patch | git apply && \ # ovn-controller: Add a generic way to check if the daemon started recently. curl -s https://github.com/kubeovn/ovn/commit/367d7ab6239089ce99dda20818c8833c69ffd77f.patch | git apply && \ # patch.c: Avoid patch interface deletion & recreation during restart. 
diff --git a/dist/images/install.sh b/dist/images/install.sh index 6bf66af08b0..696da640ffe 100755 --- a/dist/images/install.sh +++ b/dist/images/install.sh @@ -2030,6 +2030,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_IPS + valueFrom: + fieldRef: + fieldPath: status.podIPs + - name: ENABLE_BIND_LOCAL_IP + value: "$ENABLE_BIND_LOCAL_IP" resources: requests: cpu: 300m @@ -2524,6 +2530,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_IPS + valueFrom: + fieldRef: + fieldPath: status.podIPs + - name: ENABLE_BIND_LOCAL_IP + value: "$ENABLE_BIND_LOCAL_IP" resources: requests: cpu: 300m diff --git a/dist/images/ovn-is-leader.sh b/dist/images/ovn-is-leader.sh index 6bcf1cdfc79..43ea52a8abd 100755 --- a/dist/images/ovn-is-leader.sh +++ b/dist/images/ovn-is-leader.sh @@ -8,12 +8,14 @@ ovn-ctl status_northd ovn-ctl status_ovnnb ovn-ctl status_ovnsb +BIND_LOCAL_ADDR=[${POD_IP:-127.0.0.1}] + # For data consistency, only store leader address in endpoint # Store ovn-nb leader to svc kube-system/ovn-nb if [[ "$ENABLE_SSL" == "false" ]]; then - nb_leader=$(ovsdb-client query tcp:127.0.0.1:6641 "[\"_Server\",{\"table\":\"Database\",\"where\":[[\"name\",\"==\", \"OVN_Northbound\"]],\"columns\": [\"leader\"],\"op\":\"select\"}]") + nb_leader=$(ovsdb-client query tcp:$BIND_LOCAL_ADDR:6641 "[\"_Server\",{\"table\":\"Database\",\"where\":[[\"name\",\"==\", \"OVN_Northbound\"]],\"columns\": [\"leader\"],\"op\":\"select\"}]") else - nb_leader=$(ovsdb-client -p /var/run/tls/key -c /var/run/tls/cert -C /var/run/tls/cacert query ssl:127.0.0.1:6641 "[\"_Server\",{\"table\":\"Database\",\"where\":[[\"name\",\"==\", \"OVN_Northbound\"]],\"columns\": [\"leader\"],\"op\":\"select\"}]") + nb_leader=$(ovsdb-client -p /var/run/tls/key -c /var/run/tls/cert -C /var/run/tls/cacert query ssl:$BIND_LOCAL_ADDR:6641 "[\"_Server\",{\"table\":\"Database\",\"where\":[[\"name\",\"==\", \"OVN_Northbound\"]],\"columns\": 
[\"leader\"],\"op\":\"select\"}]") fi if [[ $nb_leader =~ "true" ]] @@ -34,9 +36,9 @@ fi # Store ovn-sb leader to svc kube-system/ovn-sb if [[ "$ENABLE_SSL" == "false" ]]; then - sb_leader=$(ovsdb-client query tcp:127.0.0.1:6642 "[\"_Server\",{\"table\":\"Database\",\"where\":[[\"name\",\"==\", \"OVN_Southbound\"]],\"columns\": [\"leader\"],\"op\":\"select\"}]") + sb_leader=$(ovsdb-client query tcp:$BIND_LOCAL_ADDR:6642 "[\"_Server\",{\"table\":\"Database\",\"where\":[[\"name\",\"==\", \"OVN_Southbound\"]],\"columns\": [\"leader\"],\"op\":\"select\"}]") else - sb_leader=$(ovsdb-client -p /var/run/tls/key -c /var/run/tls/cert -C /var/run/tls/cacert query ssl:127.0.0.1:6642 "[\"_Server\",{\"table\":\"Database\",\"where\":[[\"name\",\"==\", \"OVN_Southbound\"]],\"columns\": [\"leader\"],\"op\":\"select\"}]") + sb_leader=$(ovsdb-client -p /var/run/tls/key -c /var/run/tls/cert -C /var/run/tls/cacert query ssl:$BIND_LOCAL_ADDR:6642 "[\"_Server\",{\"table\":\"Database\",\"where\":[[\"name\",\"==\", \"OVN_Southbound\"]],\"columns\": [\"leader\"],\"op\":\"select\"}]") fi if [[ $sb_leader =~ "true" ]] @@ -51,9 +53,9 @@ then if [ "$northd_leader" == "" ]; then # no available northd leader try to release the lock if [[ "$ENABLE_SSL" == "false" ]]; then - ovsdb-client -v -t 1 steal tcp:127.0.0.1:6642 ovn_northd + ovsdb-client -v -t 1 steal tcp:$BIND_LOCAL_ADDR:6642 ovn_northd else - ovsdb-client -v -t 1 -p /var/run/tls/key -c /var/run/tls/cert -C /var/run/tls/cacert steal ssl:127.0.0.1:6642 ovn_northd + ovsdb-client -v -t 1 -p /var/run/tls/key -c /var/run/tls/cert -C /var/run/tls/cacert steal ssl:$BIND_LOCAL_ADDR:6642 ovn_northd fi fi fi diff --git a/dist/images/start-db.sh b/dist/images/start-db.sh index 77291db7309..302e4d9c829 100755 --- a/dist/images/start-db.sh +++ b/dist/images/start-db.sh @@ -25,6 +25,14 @@ DB_NB_PORT=${DB_NB_PORT:-6641} DB_SB_ADDR=${DB_SB_ADDR:-::} DB_SB_PORT=${DB_SB_PORT:-6642} ENABLE_SSL=${ENABLE_SSL:-false} 
+ENABLE_BIND_LOCAL_IP=${ENABLE_BIND_LOCAL_IP:-false} +BIND_LOCAL_ADDR=[::] +if [[ $ENABLE_BIND_LOCAL_IP == "true" ]]; then + POD_IPS_LIST=(${POD_IPS//,/ }) + if [[ ${#POD_IPS_LIST[@]} == 1 ]]; then + BIND_LOCAL_ADDR="[${POD_IP}]" + fi +fi . /usr/share/openvswitch/scripts/ovs-lib || exit 1 @@ -177,8 +185,10 @@ if [[ "$ENABLE_SSL" == "false" ]]; then --db-sb-create-insecure-remote=yes \ --db-nb-cluster-local-addr="[${POD_IP}]" \ --db-sb-cluster-local-addr="[${POD_IP}]" \ - --db-nb-addr=[::] \ - --db-sb-addr=[::] \ + --db-nb-addr=$BIND_LOCAL_ADDR \ + --db-sb-addr=$BIND_LOCAL_ADDR \ + --db-nb-use-remote-in-db=no \ + --db-sb-use-remote-in-db=no \ --ovn-northd-nb-db="$(gen_conn_str 6641)" \ --ovn-northd-sb-db="$(gen_conn_str 6642)" \ start_northd @@ -222,8 +232,10 @@ if [[ "$ENABLE_SSL" == "false" ]]; then --db-sb-cluster-local-addr="[${POD_IP}]" \ --db-nb-cluster-remote-addr="[${nb_leader_ip}]" \ --db-sb-cluster-remote-addr="[${sb_leader_ip}]" \ - --db-nb-addr=[::] \ - --db-sb-addr=[::] \ + --db-nb-addr=$BIND_LOCAL_ADDR \ + --db-sb-addr=$BIND_LOCAL_ADDR \ + --db-nb-use-remote-in-db=no \ + --db-sb-use-remote-in-db=no \ --ovn-northd-nb-db="$(gen_conn_str 6641)" \ --ovn-northd-sb-db="$(gen_conn_str 6642)" \ start_northd @@ -277,16 +289,18 @@ else --ovn-northd-ssl-ca-cert=/var/run/tls/cacert \ --db-nb-cluster-local-addr="[${POD_IP}]" \ --db-sb-cluster-local-addr="[${POD_IP}]" \ - --db-nb-addr=[::] \ - --db-sb-addr=[::] \ + --db-nb-addr=$BIND_LOCAL_ADDR \ + --db-sb-addr=$BIND_LOCAL_ADDR \ + --db-nb-use-remote-in-db=no \ + --db-sb-use-remote-in-db=no \ --ovn-northd-nb-db="$(gen_conn_str 6641)" \ --ovn-northd-sb-db="$(gen_conn_str 6642)" \ start_northd - ovn-nbctl --no-leader-only -p /var/run/tls/key -c /var/run/tls/cert -C /var/run/tls/cacert set-connection pssl:"${DB_NB_PORT}":[::] + ovn-nbctl --no-leader-only -p /var/run/tls/key -c /var/run/tls/cert -C /var/run/tls/cacert set-connection pssl:"${DB_NB_PORT}":["${DB_NB_ADDR}"] ovn-nbctl --no-leader-only -p /var/run/tls/key -c 
/var/run/tls/cert -C /var/run/tls/cacert set Connection . inactivity_probe=180000 ovn-nbctl --no-leader-only -p /var/run/tls/key -c /var/run/tls/cert -C /var/run/tls/cacert set NB_Global . options:use_logical_dp_groups=true - ovn-sbctl --no-leader-only -p /var/run/tls/key -c /var/run/tls/cert -C /var/run/tls/cacert set-connection pssl:"${DB_SB_PORT}":[::] + ovn-sbctl --no-leader-only -p /var/run/tls/key -c /var/run/tls/cert -C /var/run/tls/cacert set-connection pssl:"${DB_SB_PORT}":["${DB_SB_ADDR}"] ovn-sbctl --no-leader-only -p /var/run/tls/key -c /var/run/tls/cert -C /var/run/tls/cacert set Connection . inactivity_probe=180000 else # get leader if cluster exists @@ -328,8 +342,10 @@ else --db-sb-cluster-local-addr="[${POD_IP}]" \ --db-nb-cluster-remote-addr="[${nb_leader_ip}]" \ --db-sb-cluster-remote-addr="[${sb_leader_ip}]" \ - --db-nb-addr=[::] \ - --db-sb-addr=[::] \ + --db-nb-addr=$BIND_LOCAL_ADDR \ + --db-sb-addr=$BIND_LOCAL_ADDR \ + --db-nb-use-remote-in-db=no \ + --db-sb-use-remote-in-db=no \ --ovn-northd-nb-db="$(gen_conn_str 6641)" \ --ovn-northd-sb-db="$(gen_conn_str 6642)" \ start_northd diff --git a/kubeovn-helm/templates/central-deploy.yaml b/kubeovn-helm/templates/central-deploy.yaml index e37539f7b47..f209ed3966f 100644 --- a/kubeovn-helm/templates/central-deploy.yaml +++ b/kubeovn-helm/templates/central-deploy.yaml @@ -65,6 +65,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_IPS + valueFrom: + fieldRef: + fieldPath: status.podIPs + - name: ENABLE_BIND_LOCAL_IP + value: "{{- .Values.func.ENABLE_BIND_LOCAL_IP }}" resources: requests: cpu: 300m diff --git a/pkg/ovn_leader_checker/ovn.go b/pkg/ovn_leader_checker/ovn.go index dd93e5bccdb..b3026b51480 100755 --- a/pkg/ovn_leader_checker/ovn.go +++ b/pkg/ovn_leader_checker/ovn.go @@ -5,9 +5,11 @@ import ( "flag" "fmt" "io" + "net" "os" "os/exec" "reflect" + "strconv" "strings" "syscall" "time" @@ -136,79 +138,35 @@ func checkOvnIsAlive() bool { return true } -func 
checkNbIsLeader() bool { - var command []string - if os.Getenv(EnvSSL) == "false" { - command = []string{ - "query", - "tcp:127.0.0.1:6641", - `["_Server",{"table":"Database","where":[["name","==","OVN_Northbound"]],"columns":["leader"],"op":"select"}]`, - } - } else { - command = []string{ - "-p", - "/var/run/tls/key", - "-c", - "/var/run/tls/cert", - "-C", - "/var/run/tls/cacert", - "query", - "ssl:127.0.0.1:6641", - `["_Server",{"table":"Database","where":[["name","==","OVN_Northbound"]],"columns":["leader"],"op":"select"}]`, - } - } +func isDBLeader(dbName string, port int) bool { + addr := net.JoinHostPort(os.Getenv("POD_IP"), strconv.Itoa(port)) + query := fmt.Sprintf(`["_Server",{"table":"Database","where":[["name","==","%s"]],"columns":["leader"],"op":"select"}]`, dbName) - output, err := exec.Command("ovsdb-client", command...).CombinedOutput() - if err != nil { - klog.Errorf("CheckNbIsLeader execute err %v error msg %v", err, string(output)) - return false - } - - if len(output) == 0 { - klog.Errorf("CheckNbIsLeader no output") - return false - } - - klog.V(5).Infof("CheckNbIsLeader: output %s", string(output)) - result := strings.TrimSpace(string(output)) - return strings.Contains(result, "true") -} - -func checkSbIsLeader() bool { - var command []string + var cmd []string if os.Getenv(EnvSSL) == "false" { - command = []string{ - "query", - "tcp:127.0.0.1:6642", - `["_Server",{"table":"Database","where":[["name","==","OVN_Southbound"]],"columns":["leader"],"op":"select"}]`, - } + cmd = []string{"query", fmt.Sprintf("tcp:%s", addr), query} } else { - command = []string{ - "-p", - "/var/run/tls/key", - "-c", - "/var/run/tls/cert", - "-C", - "/var/run/tls/cacert", - "query", - "ssl:127.0.0.1:6642", - `["_Server",{"table":"Database","where":[["name","==","OVN_Southbound"]],"columns":["leader"],"op":"select"}]`, + cmd = []string{ + "-p", "/var/run/tls/key", + "-c", "/var/run/tls/cert", + "-C", "/var/run/tls/cacert", + "query", fmt.Sprintf("ssl:%s", addr), 
query, } } - output, err := exec.Command("ovsdb-client", command...).CombinedOutput() + output, err := exec.Command("ovsdb-client", cmd...).CombinedOutput() if err != nil { - klog.Errorf("CheckSbIsLeader execute err %v error msg %v", err, string(output)) + klog.Errorf("failed to execute cmd %q: err=%v, msg=%v", strings.Join(cmd, " "), err, string(output)) return false } - if len(output) == 0 { - klog.Errorf("CheckSbIsLeader no output") + result := strings.TrimSpace(string(output)) + if len(result) == 0 { + klog.Errorf("cmd %q no output", strings.Join(cmd, " ")) return false } - klog.V(5).Infof("CheckSbIsLeader: output %s", string(output)) - result := strings.TrimSpace(string(output)) + klog.V(5).Infof("cmd %q output: %s", strings.Join(cmd, " "), string(output)) return strings.Contains(result, "true") } @@ -382,7 +340,9 @@ func doOvnLeaderCheck(cfg *Configuration, podName string, podNamespace string) { for k, v := range cachedPod.Labels { labels[k] = v } - nbLeader, sbLeader, northdLeader := checkNbIsLeader(), checkSbIsLeader(), checkNorthdActive() + nbLeader := isDBLeader("OVN_Northbound", 6641) + sbLeader := isDBLeader("OVN_Southbound", 6642) + northdLeader := checkNorthdActive() updatePodLabels(labels, "ovn-nb-leader", nbLeader) updatePodLabels(labels, "ovn-sb-leader", sbLeader) updatePodLabels(labels, "ovn-northd-leader", northdLeader) diff --git a/test/e2e/security/e2e_test.go b/test/e2e/security/e2e_test.go new file mode 100644 index 00000000000..c3222f8e120 --- /dev/null +++ b/test/e2e/security/e2e_test.go @@ -0,0 +1,147 @@ +package security + +import ( + "bytes" + "context" + "flag" + "fmt" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + "k8s.io/kubernetes/test/e2e" + k8sframework "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/config" + 
"k8s.io/kubernetes/test/e2e/framework/deployment" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" + + "github.com/onsi/ginkgo/v2" + + "github.com/kubeovn/kube-ovn/test/e2e/framework" +) + +func init() { + klog.SetOutput(ginkgo.GinkgoWriter) + + // Register flags. + config.CopyFlags(config.Flags, flag.CommandLine) + k8sframework.RegisterCommonFlags(flag.CommandLine) + k8sframework.RegisterClusterFlags(flag.CommandLine) + + // Parse all the flags + flag.Parse() + if k8sframework.TestContext.KubeConfig == "" { + k8sframework.TestContext.KubeConfig = filepath.Join(os.Getenv("HOME"), ".kube", "config") + } + k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) +} + +func TestE2E(t *testing.T) { + e2e.RunE2ETests(t) +} + +func checkDeployment(cs clientset.Interface, name, process string, ports ...string) { + ginkgo.By("Getting deployment " + name) + deploy, err := cs.AppsV1().Deployments(framework.KubeOvnNamespace).Get(context.TODO(), name, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get deployment") + err = deployment.WaitForDeploymentComplete(cs, deploy) + framework.ExpectNoError(err, "deployment failed to complete") + + ginkgo.By("Getting pods") + pods, err := deployment.GetPodsForDeployment(cs, deploy) + framework.ExpectNoError(err, "failed to get pods") + framework.ExpectNotEmpty(pods.Items) + + checkPods(pods.Items, process, ports...) 
+} + +func checkPods(pods []corev1.Pod, process string, ports ...string) { + ginkgo.By("Parsing environment variable") + var listenPodIP bool + if len(pods[0].Status.PodIPs) == 1 { + var envValue string + for _, env := range pods[0].Spec.Containers[0].Env { + if env.Name == "ENABLE_BIND_LOCAL_IP" { + envValue = env.Value + break + } + } + if envValue == "" { + envValue = "false" + } + var err error + listenPodIP, err = strconv.ParseBool(envValue) + framework.ExpectNoError(err) + } + + ginkgo.By("Validating " + process + " listen addresses") + cmd := fmt.Sprintf(`ss -Hntpl | grep -wE pid=$(pidof %s | sed "s/ /|pid=/g") | awk '{print $4}'`, process) + if len(ports) != 0 { + cmd += fmt.Sprintf(`| grep -E ':%s$'`, strings.Join(ports, `$|:`)) + } + for _, pod := range pods { + stdout, _, err := framework.KubectlExec(pod.Namespace, pod.Name, cmd) + framework.ExpectNoError(err) + + listenAddresses := strings.Split(string(bytes.TrimSpace(stdout)), "\n") + podIPPrefix := strings.TrimSuffix(net.JoinHostPort(pod.Status.PodIP, "999"), "999") + for _, addr := range listenAddresses { + if listenPodIP { + framework.ExpectTrue(strings.HasPrefix(addr, podIPPrefix)) + } else { + framework.ExpectTrue(strings.HasPrefix(addr, "*:")) + } + } + } +} + +var _ = framework.Describe("[group:security]", func() { + f := framework.NewDefaultFramework("security") + f.SkipNamespaceCreation = true + + var cs clientset.Interface + ginkgo.BeforeEach(func() { + f.SkipVersionPriorTo(1, 9, "Support for listening on Pod IP was introduced in v1.9") + cs = f.ClientSet + }) + + framework.ConformanceIt("ovn db should listen on specified addresses for client connections", func() { + checkDeployment(cs, "ovn-central", "ovsdb-server", "6641", "6642") + }) + + framework.ConformanceIt("kube-ovn-controller should listen on specified addresses", func() { + checkDeployment(cs, "kube-ovn-controller", "kube-ovn-controller") + }) + + framework.ConformanceIt("kube-ovn-monitor should listen on specified addresses", 
func() { + checkDeployment(cs, "kube-ovn-monitor", "kube-ovn-monitor") + }) + + framework.ConformanceIt("kube-ovn-cni should listen on specified addresses", func() { + ginkgo.By("Getting nodes") + nodeList, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + framework.ExpectNotEmpty(nodeList.Items) + + ginkgo.By("Getting daemonset kube-ovn-cni") + ds, err := cs.AppsV1().DaemonSets(framework.KubeOvnNamespace).Get(context.TODO(), "kube-ovn-cni", metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get daemonset") + + ginkgo.By("Getting kube-ovn-cni pods") + pods := make([]corev1.Pod, 0, len(nodeList.Items)) + for _, node := range nodeList.Items { + pod, err := framework.GetPodOnNodeForDaemonSet(cs, ds, node.Name) + framework.ExpectNoError(err, "failed to get kube-ovn-cni pod running on node %s", node.Name) + pods = append(pods, *pod) + } + + checkPods(pods, "kube-ovn-daemon") + }) +}) diff --git a/yamls/ovn-dpdk.yaml b/yamls/ovn-dpdk.yaml index e5ef2623b3e..dbf23eee055 100644 --- a/yamls/ovn-dpdk.yaml +++ b/yamls/ovn-dpdk.yaml @@ -225,6 +225,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_IPS + valueFrom: + fieldRef: + fieldPath: status.podIPs resources: requests: cpu: 500m diff --git a/yamls/ovn-ha.yaml b/yamls/ovn-ha.yaml index 72cc373e186..854f54557e5 100644 --- a/yamls/ovn-ha.yaml +++ b/yamls/ovn-ha.yaml @@ -244,6 +244,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_IPS + valueFrom: + fieldRef: + fieldPath: status.podIPs resources: requests: cpu: 500m diff --git a/yamls/ovn.yaml b/yamls/ovn.yaml index 6919e8754bd..fff3cb9119d 100644 --- a/yamls/ovn.yaml +++ b/yamls/ovn.yaml @@ -254,6 +254,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_IPS + valueFrom: + fieldRef: + fieldPath: status.podIPs resources: requests: cpu: 500m