diff --git a/.github/workflows/performance-testing.yaml b/.github/workflows/performance-testing.yaml new file mode 100644 index 000000000..2fdaf6241 --- /dev/null +++ b/.github/workflows/performance-testing.yaml @@ -0,0 +1,70 @@ +--- +name: performance-testing +on: + push: + branches: + - 'release/**' +jobs: + ### INTERDOMAIN CLUSTER + interdomain-kind: + runs-on: ubuntu-latest + env: + KUBERNETES_VERSION: ${{ secrets.NSM_KUBERNETES_VERSION }} + steps: + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.9.0 + with: + access_token: ${{ github.token }} + - uses: actions/setup-go@v1 + with: + go-version: 1.16 + github-token: ${{ github.token }} + - name: Set go env + run: | + echo GOPATH=$GITHUB_WORKSPACE >> $GITHUB_ENV + echo GO111MODULE=on >> $GITHUB_ENV + echo $GITHUB_WORKSPACE/bin >> $GITHUB_PATH + - uses: actions/checkout@v2 + with: + path: ${{ github.workspace }}/src/github.com/${{ github.repository }} + - name: Get kind + run: go get sigs.k8s.io/kind@v0.11.1 + - name: Create kind clusters + run: | + if [[ "$KUBERNETES_VERSION" == "" ]]; then + KUBERNETES_VERSION="v1.22.1" + fi + for (( i = 1; i <= 2; i++ )); do + kind create cluster --name "kind-${i}" --config cluster-config-interdomain.yaml --image="kindest/node:$KUBERNETES_VERSION" + configPath=${{ github.workspace }}/src/github.com/${{ github.repository }}/config${i} + kind get kubeconfig --name "kind-${i}" > ${configPath} + echo KUBECONFIG${i}=${configPath} >> $GITHUB_ENV + echo CLUSTER${i}_CIDR="172.18.${i}.128/25" >> $GITHUB_ENV + done + working-directory: ${{ github.workspace }}/src/github.com/${{ github.repository }} + - name: Performance tests + run: | + performance_testing/scripts/full_ci_run.sh "$NSM_VERSION" "$ARTIFACTS_DIR" "$QPS_LIST" "$DURATION" "$CONNECTIONS" "$ITERATIONS" + env: + NSM_VERSION: ${{ github.ref_name }} + ARTIFACTS_DIR: perf-test-results + QPS_LIST: 100 1000 1000000 + DURATION: 60s + CONNECTIONS: 1 + ITERATIONS: 3 + working-directory: ${{ github.workspace 
}}/src/github.com/${{ github.repository }} + - name: Print results + run: | + performance_testing/scripts/print_all_summaries.sh "$ARTIFACTS_DIR" + env: + ARTIFACTS_DIR: perf-test-results + working-directory: ${{ github.workspace }}/src/github.com/${{ github.repository }} + - name: Cleanup resources + if: ${{ success() || failure() || cancelled() }} + run: kind delete clusters $(kind get clusters) + - name: Upload artifacts + if: ${{ success() || failure() || cancelled() }} + uses: actions/upload-artifact@v2 + with: + name: Performance tests results and logs + path: ${{ github.workspace }}/src/github.com/${{ github.repository }}/perf-test-results diff --git a/performance_testing/README.md b/performance_testing/README.md new file mode 100644 index 000000000..77149079e --- /dev/null +++ b/performance_testing/README.md @@ -0,0 +1,65 @@ + +# Performance testing + +This folder contains deployment yaml files and scripts +that deploy, run and clear applications for performance testing. + +# Parameters + +Parameters to be considered are: + +1. `qps_list`: requested load of the system +2. `duration`: duration of a single test +3. `connections`: the amount of simultaneous connections from test client to test server +4. `iterations`: how many times to run each test + +To inspect results you can install Fortio and run `fortio server`. +In the web ui you will be able to see graphs for different runs and compare them. +
Alternatively you can simply open .json files and inspect them for QPS and different latency percentiles. + +# Running the tests manually locally + +Make sure that you have a load balancer in your cluster. 
+For Kind and bare metal clusters you can use metallb installation script: +```bash +./performance_testing/scripts/setup_metallb.sh +``` + +Prepare DNS and Spire: +```bash +./performance_testing/scripts/nsm_setup_dns.sh && +./performance_testing/scripts/nsm_setup_spire.sh +``` + +Test interdomain vl3: +```bash +./performance_testing/scripts/run_test_suite.sh \ + vl3 \ + ./performance_testing/results/raw/ \ + 3 \ + "http://nginx.my-vl3-network:80" \ + "./performance_testing/use-cases/vl3/deploy.sh" \ + "./performance_testing/use-cases/vl3/clear.sh" \ + "v1.8.0" \ + "./performance_testing/nsm" +``` + +Test interdomain wireguard: +```bash +./performance_testing/scripts/run_test_suite.sh \ + k2wireguard2k \ + ./performance_testing/results/raw/ \ + 3 \ + "http://172.16.1.2:80" \ + "./performance_testing/use-cases/k2wireguard2k/deploy.sh" \ + "./performance_testing/use-cases/k2wireguard2k/clear.sh" \ + "v1.8.0" \ + "./performance_testing/nsm" +``` + +Clear cluster if needed: +```bash +./performance_testing/scripts/nsm_clear_spire.sh +./performance_testing/scripts/nsm_clear_dns.sh +``` diff --git a/performance_testing/known-results.md b/performance_testing/known-results.md new file mode 100644 index 000000000..8456c3723 --- /dev/null +++ b/performance_testing/known-results.md @@ -0,0 +1,57 @@ + +# Known results + +This file contains info about results we already have. + +There are several different QPS targets. Each target has its own result expectations. + +# NSM v1.8.0, vl3 + +vl3 tests in v1.8.0 seem to be CPU throttled by github, which affects max latency. + +1. Target QPS == 100 + Actual QPS: 100 + Min latency: 0.3-0.35 ms + Max latency: 100-250 ms + Avg latency: 4-5 ms + p50 latency: 1.5-2.5 ms + p99 latency: 90-150 ms +2. Target QPS == 1000 + Actual QPS: 350-450 + Min latency: 0.25-0.3 ms + Max latency: 100-300 ms + Avg latency: 2-2.5 ms + p50 latency: 0.7-1.3 ms + p99 latency: 40-80 ms +3. 
Target QPS == 1000000 + Actual QPS: 350-450 + Min latency: 0.25-0.3 ms + Max latency: 100-300 + Avg latency: 2-2.5 ms + p50 latency: 0.7-1.3 ms + p99 atency: 40-80 ms + +# NSM v1.8.0, wireguard + +1. Target QPS == 100 + Actual QPS: 100 + Min latency: 0.3 ms + Max latency: 20-50 ms + Avg latency: 1-2 ms + p50 latency: 0.6 ms + p99 atency: 15-35 ms +2. Target QPS == 1000 + Actual QPS: 1000 + Min latency: 0.2 ms + Max latency: 30-50 ms + Avg latency: 0.8 ms + p50 latency: 0.4-0.5 ms + p99 atency: 12-15 ms +3. Target QPS == 1000000 + Actual QPS: 1200-1400 + Min latency: 0.2 ms + Max latency: 40-50 ms + Avg latency: 0.7 ms + p50 latency: 0.4 ms + p99 atency: 12-15 ms + diff --git a/performance_testing/nsm/c1/.gitignore b/performance_testing/nsm/c1/.gitignore new file mode 100644 index 000000000..1cbe99618 --- /dev/null +++ b/performance_testing/nsm/c1/.gitignore @@ -0,0 +1 @@ +/kustomization.yaml diff --git a/performance_testing/nsm/c1/forwarder-patch.yaml b/performance_testing/nsm/c1/forwarder-patch.yaml new file mode 100644 index 000000000..c7566d76c --- /dev/null +++ b/performance_testing/nsm/c1/forwarder-patch.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: forwarder-vpp + labels: + app: forwarder-vpp +spec: + template: + spec: + containers: + - name: forwarder-vpp + resources: + limits: + cpu: null diff --git a/performance_testing/nsm/c2/.gitignore b/performance_testing/nsm/c2/.gitignore new file mode 100644 index 000000000..1cbe99618 --- /dev/null +++ b/performance_testing/nsm/c2/.gitignore @@ -0,0 +1 @@ +/kustomization.yaml diff --git a/performance_testing/nsm/c2/forwarder-patch.yaml b/performance_testing/nsm/c2/forwarder-patch.yaml new file mode 100644 index 000000000..c7566d76c --- /dev/null +++ b/performance_testing/nsm/c2/forwarder-patch.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: forwarder-vpp + labels: + app: forwarder-vpp +spec: + template: + spec: + containers: + - name: 
forwarder-vpp + resources: + limits: + cpu: null diff --git a/performance_testing/nsm/nsm_clear_nsm.sh b/performance_testing/nsm/nsm_clear_nsm.sh new file mode 100755 index 000000000..2f5991b90 --- /dev/null +++ b/performance_testing/nsm/nsm_clear_nsm.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +WH=$(kubectl "--kubeconfig=$KUBECONFIG1" get pods -l app=admission-webhook-k8s -n nsm-system --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}') +kubectl "--kubeconfig=$KUBECONFIG1" delete mutatingwebhookconfiguration "${WH}" +kubectl "--kubeconfig=$KUBECONFIG1" delete ns nsm-system + +WH=$(kubectl "--kubeconfig=$KUBECONFIG2" get pods -l app=admission-webhook-k8s -n nsm-system --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}') +kubectl "--kubeconfig=$KUBECONFIG2" delete mutatingwebhookconfiguration "${WH}" +kubectl "--kubeconfig=$KUBECONFIG2" delete ns nsm-system + +true diff --git a/performance_testing/nsm/nsm_setup_nsm.sh b/performance_testing/nsm/nsm_setup_nsm.sh new file mode 100755 index 000000000..39d4d772c --- /dev/null +++ b/performance_testing/nsm/nsm_setup_nsm.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +parent_path=$( cd "$(dirname "$0")" ; pwd -P ) || exit + +if [ -z "$1" ]; then echo 1st arg 'nsm_version' is missing; exit 1; fi + +nsm_version=$1 + +echo nsm_version is "$nsm_version" + +######################### + +cat < "$parent_path/c1/kustomization.yaml" +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +bases: +- https://github.com/networkservicemesh/deployments-k8s/examples/interdomain/nsm/cluster1?ref=$nsm_version + +patchesStrategicMerge: +- forwarder-patch.yaml +EOF + +cat < "$parent_path/c2/kustomization.yaml" +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +bases: +- https://github.com/networkservicemesh/deployments-k8s/examples/interdomain/nsm/cluster2?ref=$nsm_version + +patchesStrategicMerge: +- forwarder-patch.yaml +EOF + +kubectl "--kubeconfig=$KUBECONFIG1" apply -k "$parent_path/c1" || (sleep 
10 && kubectl "--kubeconfig=$KUBECONFIG1" apply -k "$parent_path/c1") || exit +kubectl "--kubeconfig=$KUBECONFIG2" apply -k "$parent_path/c2" || (sleep 10 && kubectl "--kubeconfig=$KUBECONFIG2" apply -k "$parent_path/c2") || exit + +sleep 5 + +kubectl "--kubeconfig=$KUBECONFIG1" wait --for=condition=ready --timeout=1m pod -n nsm-system --all || exit +kubectl "--kubeconfig=$KUBECONFIG2" wait --for=condition=ready --timeout=1m pod -n nsm-system --all || exit diff --git a/performance_testing/scripts/fortio-config-template.json b/performance_testing/scripts/fortio-config-template.json new file mode 100644 index 000000000..352bce601 --- /dev/null +++ b/performance_testing/scripts/fortio-config-template.json @@ -0,0 +1,9 @@ +{ + "url": "", + "qps": "", + "r": "", + "c": "", + "t": "", + "headers": [], + "save": "on" +} diff --git a/performance_testing/scripts/full_ci_run.sh b/performance_testing/scripts/full_ci_run.sh new file mode 100755 index 000000000..5a2ec6483 --- /dev/null +++ b/performance_testing/scripts/full_ci_run.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +echo running "$0" + +parent_path=$( cd "$(dirname "$0")" ; pwd -P ) || exit + +if [ -z "$1" ]; then echo 1st arg 'nsm_version' is missing; exit 1; fi +if [ -z "$2" ]; then echo 2nd arg 'result_folder' is missing; exit 1; fi + +nsm_version=$1 +result_folder=$2 +qps_list=${3:-1000000} +duration=${4:-60s} +connections=${5:-1} +iterations=${6:-3} + +echo nsm_version: "$nsm_version" +echo result_folder: "$result_folder" +echo qps_list: "$qps_list" +echo duration: "$duration" +echo connections: "$connections" +echo iterations: "$iterations" + +"$parent_path/setup_metallb.sh" || exit + +"$parent_path/nsm_setup_dns.sh" || exit +"$parent_path/nsm_setup_spire.sh" || exit + +"$parent_path/run_test_suite.sh" \ + vl3 \ + "$result_folder" \ + "$iterations" \ + "http://nginx.my-vl3-network:80" \ + "$parent_path/../use-cases/vl3/deploy.sh" \ + "$parent_path/../use-cases/vl3/clear.sh" \ + "$nsm_version" \ + "$parent_path/../nsm" \ + 
"$qps_list" \ + "$duration" \ + "$connections" \ + || exit + +"$parent_path/run_test_suite.sh" \ + k2wireguard2k \ + "$result_folder" \ + "$iterations" \ + "http://172.16.1.2:80" \ + "$parent_path/../use-cases/k2wireguard2k/deploy.sh" \ + "$parent_path/../use-cases/k2wireguard2k/clear.sh" \ + "$nsm_version" \ + "$parent_path/../nsm" \ + "$qps_list" \ + "$duration" \ + "$connections" \ + || exit + +"$parent_path/nsm_clear_spire.sh" +"$parent_path/nsm_clear_dns.sh" + +true diff --git a/performance_testing/scripts/nsm_clear_dns.sh b/performance_testing/scripts/nsm_clear_dns.sh new file mode 100755 index 000000000..57842b18d --- /dev/null +++ b/performance_testing/scripts/nsm_clear_dns.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +kubectl "--kubeconfig=$KUBECONFIG1" delete service -n kube-system exposed-kube-dns +kubectl "--kubeconfig=$KUBECONFIG2" delete service -n kube-system exposed-kube-dns + +true diff --git a/performance_testing/scripts/nsm_clear_spire.sh b/performance_testing/scripts/nsm_clear_spire.sh new file mode 100755 index 000000000..54d474fd6 --- /dev/null +++ b/performance_testing/scripts/nsm_clear_spire.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +kubectl "--kubeconfig=$KUBECONFIG1" delete crd spiffeids.spiffeid.spiffe.io +kubectl "--kubeconfig=$KUBECONFIG1" delete ns spire + +kubectl "--kubeconfig=$KUBECONFIG2" delete crd spiffeids.spiffeid.spiffe.io +kubectl "--kubeconfig=$KUBECONFIG2" delete ns spire + +true diff --git a/performance_testing/scripts/nsm_setup_dns.sh b/performance_testing/scripts/nsm_setup_dns.sh new file mode 100755 index 000000000..31350bdd2 --- /dev/null +++ b/performance_testing/scripts/nsm_setup_dns.sh @@ -0,0 +1,146 @@ +#!/bin/bash + +kubectl "--kubeconfig=$KUBECONFIG1" expose service kube-dns -n kube-system --port=53 --target-port=53 --protocol=TCP --name=exposed-kube-dns --type=LoadBalancer +kubectl "--kubeconfig=$KUBECONFIG2" expose service kube-dns -n kube-system --port=53 --target-port=53 --protocol=TCP --name=exposed-kube-dns 
--type=LoadBalancer + +kubectl "--kubeconfig=$KUBECONFIG1" get services exposed-kube-dns -n kube-system -o go-template='{{index (index (index (index .status "loadBalancer") "ingress") 0) "ip"}}' || sleep 10 +kubectl "--kubeconfig=$KUBECONFIG1" get services exposed-kube-dns -n kube-system -o go-template='{{index (index (index (index .status "loadBalancer") "ingress") 0) "ip"}}' || exit +echo + +kubectl "--kubeconfig=$KUBECONFIG2" get services exposed-kube-dns -n kube-system -o go-template='{{index (index (index (index .status "loadBalancer") "ingress") 0) "ip"}}' || sleep 10 +kubectl "--kubeconfig=$KUBECONFIG2" get services exposed-kube-dns -n kube-system -o go-template='{{index (index (index (index .status "loadBalancer") "ingress") 0) "ip"}}' || exit +echo + +ip1=$(kubectl "--kubeconfig=$KUBECONFIG1" get services exposed-kube-dns -n kube-system -o go-template='{{index (index (index (index .status "loadBalancer") "ingress") 0) "ip"}}') || exit +if [[ $ip1 == *"no value"* ]]; then + hostname1=$(kubectl "--kubeconfig=$KUBECONFIG1" get services exposed-kube-dns -n kube-system -o go-template='{{index (index (index (index .status "loadBalancer") "ingress") 0) "hostname"}}') || exit + echo hostname1 is "$hostname1" + ip1=$(dig +short "$hostname1" | head -1) || exit +fi +# if IPv6 +if [[ $ip1 =~ ":" ]]; then ip1="[$ip1]"; fi + +echo Selected externalIP: "$ip1" for cluster1 + +if [[ -z "$ip1" ]]; then echo ip1 is empty; exit 1; fi + +ip2=$(kubectl "--kubeconfig=$KUBECONFIG2" get services exposed-kube-dns -n kube-system -o go-template='{{index (index (index (index .status "loadBalancer") "ingress") 0) "ip"}}') || exit +if [[ $ip2 == *"no value"* ]]; then + hostname2=$(kubectl "--kubeconfig=$KUBECONFIG2" get services exposed-kube-dns -n kube-system -o go-template='{{index (index (index (index .status "loadBalancer") "ingress") 0) "hostname"}}') || exit + echo hostname2 is "$hostname2" + ip2=$(dig +short "$hostname2" | head -1) || exit +fi +# if IPv6 +if [[ $ip2 =~ ":" ]]; 
then ip2="[$ip2]"; fi + +echo Selected externalIP: "$ip2" for cluster2 + +if [[ -z "$ip2" ]]; then echo ip2 is empty; exit 1; fi + +cat > configmap.yaml < custom-configmap.yaml < configmap.yaml < custom-configmap.yaml <^$url^g" \ + -e "s//$qps/g" \ + -e "s//$resolution/g" \ + -e "s//$connections/g" \ + -e "s//$duration/g" \ + "$parent_path/fortio-config-template.json" +} + +function captureState() { + result_folder=$1 + k1 get pod -A -o wide > "$result_folder/pods-k1.log" + k1 get svc -A -o wide > "$result_folder/svc-k1.log" + k2 get pod -A -o wide > "$result_folder/pods-k2.log" + k2 get svc -A -o wide > "$result_folder/svc-k2.log" +} + +function runTest() { + iterations=${1:-3} + url=$2 + qps=$3 + connections=$4 + duration=$5 + deploy_script=$6 + clear_script=$7 + nsm_version=$8 + + config=$(makeConfig "$url" "$qps" 0.00005 "$connections" "$duration") || exit + config_name="q$qps-c$connections-d$duration" + + warmup_results=$result_folder/warmup + mkdir -p "$warmup_results" + + deploy_logs=$result_folder/deploy + mkdir -p "$deploy_logs" + + final_results=$result_folder/results + mkdir -p "$final_results" + + echo "config name: $config_name" + + echo "measure for $iterations iterations" + for i in $(seq -w 1 1 "$iterations") + do + echo "round $i" + test_full_name=$test_name-$config_name-$i + echo deploying nsm... + "$nsm_deploy_folder/nsm_setup_nsm.sh" > "$deploy_logs/$test_full_name-deploy-nsm.log" "$nsm_version" 2>&1 || exit + echo deploying apps... + "$deploy_script" > "$deploy_logs/$test_full_name-deploy-apps.log" "$nsm_version" 2>&1 || exit + echo doing warmup run... + curl -s -d "$config" "localhost:8080/fortio/rest/run" > "$warmup_results/$test_full_name-warmup.json" + echo doing main run... + curl -s -d "$config" "localhost:8080/fortio/rest/run" > "$final_results/$test_full_name.json" + result_code=$? 
+ echo saving pod layout + k1 get pod -A -o wide > "$deploy_logs/$test_full_name-k1-pods.log" + k2 get pod -A -o wide > "$deploy_logs/$test_full_name-k2-pods.log" + echo clearing apps... + "$clear_script" > "$deploy_logs/$test_full_name-clear-apps.log" 2>&1 + echo clearing nsm... + "$nsm_deploy_folder/nsm_clear_nsm.sh" > "$deploy_logs/$test_full_name-clear-nsm.log" 2>&1 + (exit "$result_code") || exit + done +} + +runTest "$test_iterations" "$test_url" "$test_qps" "$test_connections" "$test_duration" "$deploy_script" "$clear_script" "$nsm_version" diff --git a/performance_testing/scripts/run_test_suite.sh b/performance_testing/scripts/run_test_suite.sh new file mode 100755 index 000000000..3cd02a895 --- /dev/null +++ b/performance_testing/scripts/run_test_suite.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +function k1() { kubectl --kubeconfig "$KUBECONFIG1" "$@" ; } +function k2() { kubectl --kubeconfig "$KUBECONFIG2" "$@" ; } + +parent_path=$( cd "$(dirname "$0")" ; pwd -P ) || exit + +if [ -z "$1" ]; then echo 1st arg 'name' is missing; exit 1; fi +if [ -z "$2" ]; then echo 2nd arg 'result_folder' is missing; exit 1; fi +if [ -z "$3" ]; then echo 3rd arg 'test_iterations' is missing; exit 1; fi +if [ -z "$4" ]; then echo 4th arg 'test_url' is missing; exit 1; fi +if [ -z "$5" ]; then echo 5th arg 'deploy_script' is missing; exit 1; fi +if [ -z "$6" ]; then echo 6th arg 'clear_script' is missing; exit 1; fi +if [ -z "$7" ]; then echo 7th arg 'nsm_version' is missing; exit 1; fi +if [ -z "$8" ]; then echo 8th arg 'nsm_deploy_folder' is missing; exit 1; fi +if [ -z "$9" ]; then echo 9th arg 'qps_list' is missing; exit 1; fi +if [ -z "${10}" ]; then echo 10th arg 'duration' is missing; exit 1; fi +if [ -z "${11}" ]; then echo 11th arg 'connections' is missing; exit 1; fi + +test_name=test-$(TZ=UTC date +%F-T%H-%M-%S)-$1 +result_folder=$2/$test_name +test_iterations=$3 +test_url=$4 +deploy_script=$5 +clear_script=$6 +nsm_version=$7 +nsm_deploy_folder=$8 +qps_list=$9 
+duration=${10} +connections=${11} + +echo "test_name: $test_name" +echo "result_folder: $result_folder" +echo "test_iterations: $test_iterations" +echo "test_url: $test_url" +echo "deploy_script: $deploy_script" +echo "clear_script: $clear_script" +echo "nsm_version: $nsm_version" +echo "nsm_deploy_folder: $nsm_deploy_folder" +echo "qps_list: $qps_list" +echo "duration: $duration" +echo "connections: $connections" + +mkdir -p "$result_folder" || exit + +echo running tests for "$test_url" +# for current_qps in $qps3 +for current_qps in $qps_list +do + echo "testing qps $current_qps" + "$parent_path/run_test_single.sh" \ + "$test_name" \ + "$result_folder" \ + "$test_iterations" \ + "$test_url" \ + "$current_qps" \ + "$connections" \ + "$duration" \ + "$deploy_script" \ + "$clear_script" \ + "$nsm_version" \ + "$nsm_deploy_folder" \ + || exit +done diff --git a/performance_testing/scripts/setup_metallb.sh b/performance_testing/scripts/setup_metallb.sh new file mode 100755 index 000000000..b1134f348 --- /dev/null +++ b/performance_testing/scripts/setup_metallb.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +echo CLUSTER1_CIDR is "'$CLUSTER1_CIDR'" +echo CLUSTER2_CIDR is "'$CLUSTER2_CIDR'" + +if [[ -n $CLUSTER1_CIDR ]]; then + kubectl "--kubeconfig=$KUBECONFIG1" apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml + kubectl "--kubeconfig=$KUBECONFIG1" apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/metallb.yaml + cat > metallb-config.yaml < metallb-config.yaml < "$parent_path/cluster1/kustomization.yaml" +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: perf-test-wg + +bases: +- https://github.com/networkservicemesh/deployments-k8s/apps/nse-kernel?ref=$nsm_version + +patchesStrategicMerge: +- patch-nse.yaml +EOF + +# Deploy nginx +k1 create ns perf-test-wg +k1 apply -k "$parent_path/cluster1" || exit + +# we need to wait a bit to make sure that pods are created, so that wait 
commands don't fail immediately +sleep 1 + +# Deploy fortio +k2 create ns perf-test-wg +k2 apply -n perf-test-wg -f "$parent_path/cluster2/fortio.yaml" || exit + +# we need to wait a bit to make sure that pods are created, so that wait commands don't fail immediately +sleep 5 +k1 -n perf-test-wg wait --for=condition=ready --timeout=1m pod -l app=nse-kernel || exit +k2 -n perf-test-wg wait --for=condition=ready --timeout=5m pod -l app=fortio || exit + +# open access to the test-load service on local machine +k2 -n perf-test-wg port-forward svc/fortio-service 8080:8080 & +# it can take some time for the background job to start listening to local port +sleep 5 diff --git a/performance_testing/use-cases/vl3/apps/fortio.yaml b/performance_testing/use-cases/vl3/apps/fortio.yaml new file mode 100644 index 000000000..cfa0a6483 --- /dev/null +++ b/performance_testing/use-cases/vl3/apps/fortio.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: fortio-service + labels: + app: fortio +spec: + ports: + - port: 8080 + name: http + selector: + app: fortio +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fortio-deployment +spec: + selector: + matchLabels: + app: fortio + replicas: 1 + template: + metadata: + annotations: + networkservicemesh.io: kernel://my-vl3-network@my.cluster1/nsm-1?dnsName=fortio + labels: + app: fortio + spec: + containers: + - name: fortio + image: fortio/fortio:1.40.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 + name: http-fortio + - containerPort: 8079 + name: grpc-ping diff --git a/performance_testing/use-cases/vl3/apps/nginx.yaml b/performance_testing/use-cases/vl3/apps/nginx.yaml new file mode 100644 index 000000000..62889c300 --- /dev/null +++ b/performance_testing/use-cases/vl3/apps/nginx.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-service + labels: + app: nginx +spec: + ports: + - port: 80 + protocol: TCP + selector: + app: nginx +--- +apiVersion: 
apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 1 + template: + metadata: + annotations: + networkservicemesh.io: kernel://my-vl3-network/nsm-1?dnsName=nginx + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.23.3 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 diff --git a/performance_testing/use-cases/vl3/clear.sh b/performance_testing/use-cases/vl3/clear.sh new file mode 100755 index 000000000..2389e77ac --- /dev/null +++ b/performance_testing/use-cases/vl3/clear.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +parent_path=$( cd "$(dirname "$0")" ; pwd -P ) || exit + +function k1() { kubectl --kubeconfig "$KUBECONFIG1" "$@" ; } +function k2() { kubectl --kubeconfig "$KUBECONFIG2" "$@" ; } + +echo running "$0" + +pkill -f "port-forward svc/fortio-service 8080:8080" + +# delete without waiting, to delete in parallel +k1 delete -k "$parent_path/vl3-dns" --wait=false +k1 delete ns perf-test-vl3 --wait=false +k2 delete ns perf-test-vl3 --wait=false + +# wait for everything to be deleted +k1 delete -k "$parent_path/vl3-dns" +k1 delete ns perf-test-vl3 +k2 delete ns perf-test-vl3 + +# previous command may have failed if the setup have failed and not all resources have been deployed +true diff --git a/performance_testing/use-cases/vl3/deploy.sh b/performance_testing/use-cases/vl3/deploy.sh new file mode 100755 index 000000000..333111ef5 --- /dev/null +++ b/performance_testing/use-cases/vl3/deploy.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +parent_path=$( cd "$(dirname "$0")" ; pwd -P ) || exit + +function k1() { kubectl --kubeconfig "$KUBECONFIG1" "$@" ; } +function k2() { kubectl --kubeconfig "$KUBECONFIG2" "$@" ; } + +echo running "$0" + +if [ -z "$1" ]; then echo 1st arg 'nsm_version' is missing; exit 1; fi + +nsm_version=$1 + +echo nsm_version is "$nsm_version" + +######################### + +# Specify vl3 NSE version +cat < "$parent_path/vl3-dns/kustomization.yaml" +--- 
+apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: ns-dns-vl3 + +bases: +- https://github.com/networkservicemesh/deployments-k8s/apps/nse-vl3-vpp?ref=$nsm_version +- https://github.com/networkservicemesh/deployments-k8s/apps/vl3-ipam?ref=$nsm_version + +resources: +- namespace.yaml +- vl3-netsvc.yaml + +patchesStrategicMerge: +- nse-patch.yaml +EOF + +# Start vl3 NSE +k1 apply -k "$parent_path/vl3-dns" || exit + +# we need to wait a bit to make sure that pods are created, so that wait commands don't fail immediately +sleep 1 +k1 -n ns-dns-vl3 wait --for=condition=ready --timeout=5m pod -l app=vl3-ipam || exit +k1 -n ns-dns-vl3 wait --for=condition=ready --timeout=5m pod -l app=nse-vl3-vpp || exit + +# Deploy test apps: +k1 create ns perf-test-vl3 +k1 apply -n perf-test-vl3 -f "$parent_path/apps/nginx.yaml" || exit + +k2 create ns perf-test-vl3 +k2 apply -n perf-test-vl3 -f "$parent_path/apps/fortio.yaml" || exit + +# we need to wait a bit to make sure that pods are created, so that wait commands don't fail immediately +sleep 5 +k1 -n perf-test-vl3 wait --for=condition=ready --timeout=5m pod -l app=nginx || exit +k2 -n perf-test-vl3 wait --for=condition=ready --timeout=5m pod -l app=fortio || exit + +# open access to the test-load service on local machine +k2 -n perf-test-vl3 port-forward svc/fortio-service 8080:8080 & +# it can take some time for the background job to start listening to local port +sleep 5 diff --git a/performance_testing/use-cases/vl3/vl3-dns/.gitignore b/performance_testing/use-cases/vl3/vl3-dns/.gitignore new file mode 100644 index 000000000..1cbe99618 --- /dev/null +++ b/performance_testing/use-cases/vl3/vl3-dns/.gitignore @@ -0,0 +1 @@ +/kustomization.yaml diff --git a/performance_testing/use-cases/vl3/vl3-dns/namespace.yaml b/performance_testing/use-cases/vl3/vl3-dns/namespace.yaml new file mode 100644 index 000000000..a2188fc1a --- /dev/null +++ b/performance_testing/use-cases/vl3/vl3-dns/namespace.yaml @@ -0,0 
+1,5 @@ +--- +kind: Namespace +apiVersion: v1 +metadata: + name: ns-dns-vl3 diff --git a/performance_testing/use-cases/vl3/vl3-dns/nse-patch.yaml b/performance_testing/use-cases/vl3/vl3-dns/nse-patch.yaml new file mode 100644 index 000000000..2659075c6 --- /dev/null +++ b/performance_testing/use-cases/vl3/vl3-dns/nse-patch.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nse-vl3-vpp + labels: + app: nse-vl3-vpp +spec: + replicas: 1 + template: + spec: + containers: + - name: nse + env: + - name: NSM_SERVICE_NAMES + value: "my-vl3-network" + - name: NSM_REGISTER_SERVICE + value: "false" + - name: NSM_DNS_TEMPLATES + value: "{{ index .Labels \"dnsName\" }}.{{ .NetworkService }}." + resources: + limits: + cpu: null diff --git a/performance_testing/use-cases/vl3/vl3-dns/vl3-netsvc.yaml b/performance_testing/use-cases/vl3/vl3-dns/vl3-netsvc.yaml new file mode 100644 index 000000000..881890cc8 --- /dev/null +++ b/performance_testing/use-cases/vl3/vl3-dns/vl3-netsvc.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: networkservicemesh.io/v1 +kind: NetworkService +metadata: + name: my-vl3-network + namespace: nsm-system +spec: + payload: IP