diff --git a/.azure-pipelines/get_dut_version.py b/.azure-pipelines/get_dut_version.py old mode 100644 new mode 100755 index ecc1046568..e4773b6705 --- a/.azure-pipelines/get_dut_version.py +++ b/.azure-pipelines/get_dut_version.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import argparse import logging import os @@ -13,7 +15,7 @@ if ansible_path not in sys.path: sys.path.append(ansible_path) -from devutil.devices import init_localhost, init_testbed_sonichosts # noqa E402 +from devutil.devices.factory import init_localhost, init_testbed_sonichosts # noqa E402 logger = logging.getLogger(__name__) diff --git a/.azure-pipelines/pr_test_scripts.yaml b/.azure-pipelines/pr_test_scripts.yaml index d4b7a54cb7..da41d94ca6 100644 --- a/.azure-pipelines/pr_test_scripts.yaml +++ b/.azure-pipelines/pr_test_scripts.yaml @@ -38,7 +38,7 @@ t0: - override_config_table/test_override_config_table.py - pc/test_po_cleanup.py - pc/test_po_update.py -# - platform_tests/test_advanced_reboot.py::test_warm_reboot + - platform_tests/test_advanced_reboot.py::test_warm_reboot - platform_tests/test_cpu_memory_usage.py - process_monitoring/test_critical_process_monitoring.py - radv/test_radv_ipv6_ra.py @@ -63,6 +63,7 @@ t0: - tacacs/test_ro_user.py - tacacs/test_rw_user.py - telemetry/test_telemetry.py + - telemetry/test_events.py - test_features.py - test_interfaces.py - test_procdockerstatsd.py diff --git a/.azure-pipelines/pytest-collect-only.yml b/.azure-pipelines/pytest-collect-only.yml index 93057efdb9..6d1d2bca9e 100644 --- a/.azure-pipelines/pytest-collect-only.yml +++ b/.azure-pipelines/pytest-collect-only.yml @@ -35,7 +35,7 @@ steps: set -x sudo docker exec -t -w /var/src/sonic-mgmt/tests sonic-mgmt-collect \ - pytest --inventory ../ansible/veos_vtb --host-pattern all \ + python3 -m pytest --inventory ../ansible/veos_vtb --host-pattern all \ --testbed_file vtestbed.yaml --testbed vms-kvm-t0 \ --ignore saitests --ignore ptftests --ignore acstests \ --ignore scripts --ignore k8s --ignore sai_qualify --ignore common \ diff --git a/.azure-pipelines/run-test-elastictest-template.yml b/.azure-pipelines/run-test-elastictest-template.yml new file mode 100644 index 0000000000..550d211949 --- /dev/null +++ b/.azure-pipelines/run-test-elastictest-template.yml @@ -0,0 +1,266 @@ +parameters: + - name: TOPOLOGY + type: string + + - name: POLL_INTERVAL + type: number + default: 10 + + - name: POLL_TIMEOUT + type: number + default: 36000 + + - name: MIN_WORKER + type: string + default: 1 + + - name: MAX_WORKER + type: string + default: 1 + + - name: NUM_ASIC + type: number + default: 1 + + - name: TEST_SET + type: string + default: "" + + - name: DEPLOY_MG_EXTRA_PARAMS + type: string + default: "" + + - name: COMMON_EXTRA_PARAMS + type: string + default: "" + + - name: VM_TYPE + type: string + default: "ceos" + + - name: TESTBED_NAME + type: string + default: "" + + - name: IMAGE_URL + type: string + default: "" + + - name: HWSKU + type: string + default: "" + + - name: TEST_PLAN_TYPE + type: string + default: "" + + - name: PLATFORM + type: string + default: "" + + - name: SCRIPTS + type: string + default: "" + + - name: FEATURES + type: string + default: "" + + - name: SCRIPTS_EXCLUDE + type: string + default: "" + + - name: FEATURES_EXCLUDE + type: string + default: "" + + - name: REPO_NAME + type: string + default: "" + + - name: MGMT_BRANCH + type: string + default: "" + + - name: MGMT_URL + type: string + default: "https://raw.githubusercontent.com/sonic-net/sonic-mgmt" + + - name: STOP_ON_FAILURE + type: string + 
default: "" + + - name: RETRY_TIMES + type: string + default: "" + + - name: DUMP_KVM_IF_FAIL + type: string + default: "True" + values: + - "True" + - "False" + + - name: REQUESTER + type: string + default: "" + + - name: MAX_RUN_TEST_MINUTES + type: number + default: 480 + + - name: KVM_IMAGE_BRANCH + type: string + default: "" + + - name: EXPECTED_RESULT + type: string + default: "" + + +steps: + - ${{ if not(contains(variables['BUILD.REPOSITORY.NAME'], 'sonic-mgmt')) }}: + - script: | + # If not sonic-mgmt/sonic-mgmt-int repo, need to download test_plan.py and pr_test_scripts.yaml + set -ex + + curl "https://raw.githubusercontent.com/sonic-net/sonic-mgmt/master/.azure-pipelines/test_plan.py" -o ./.azure-pipelines/test_plan.py + displayName: "Download test plan script" + - script: | + # If not sonic-mgmt/sonic-mgmt-int repo, need to download pr_test_scripts.yaml + set -ex + + # If public build image repo, download pr test scripts from public sonic-mgmt repo + if [[ "$(BUILD.REPOSITORY.NAME)" = "sonic-net/sonic-buildimage" ]]; then + curl "${{ parameters.MGMT_URL }}/${{ parameters.MGMT_BRANCH }}/.azure-pipelines/pr_test_scripts.yaml" -o ./.azure-pipelines/pr_test_scripts.yaml + + # Else, internal build image repo, download from internal sonic-mgmt repo + else + curl -u $(AZP_REPO_ACCESS_TOKEN) "${{ parameters.MGMT_URL }}&commitOrBranch=${{ parameters.MGMT_BRANCH }}&api-version=5.0-preview.1&path=.azure-pipelines%2Fpr_test_scripts.yaml" -o ./.azure-pipelines/pr_test_scripts.yaml + fi + displayName: "Download pr script" + - ${{ else }}: + - ${{ if ne(parameters.MGMT_BRANCH, 'master') }}: + - script: | + # Else, sonic-mgmt repo, if not master branch, need to download test_plan.py + set -ex + curl "https://raw.githubusercontent.com/sonic-net/sonic-mgmt/master/.azure-pipelines/test_plan.py" -o ./.azure-pipelines/test_plan.py + displayName: "Download test plan script" + + - script: | + set -e + + pip install PyYAML + + rm -f new_test_plan_id.txt + + python ./.azure-pipelines/test_plan.py create \ + -t ${{ parameters.TOPOLOGY }} \ + -o new_test_plan_id.txt \ + --min-worker ${{ parameters.MIN_WORKER }} \ + --max-worker ${{ parameters.MAX_WORKER }} \ + --test-set ${{ parameters.TEST_SET }} \ + --kvm-build-id $(KVM_BUILD_ID) \ + --kvm-image-branch "${{ parameters.KVM_IMAGE_BRANCH }}" \ + --deploy-mg-extra-params "${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" \ + --common-extra-params "${{ parameters.COMMON_EXTRA_PARAMS }}" \ + --vm-type ${{ parameters.VM_TYPE }} --num-asic ${{ parameters.NUM_ASIC }} \ + --image_url ${{ parameters.IMAGE_URL }} \ + --hwsku ${{ parameters.HWSKU }} \ + --test-plan-type ${{ parameters.TEST_PLAN_TYPE }} \ + --platform ${{ parameters.PLATFORM }} \ + --testbed-name "${{ parameters.TESTBED_NAME }}" \ + --scripts "${{ parameters.SCRIPTS }}" \ + --features "${{ parameters.FEATURES }}" \ + --scripts-exclude "${{ parameters.SCRIPTS_EXCLUDE }}" \ + --features-exclude "${{ parameters.FEATURES_EXCLUDE }}" \ + --repo-name ${{ parameters.REPO_NAME }} \ + --mgmt-branch ${{ parameters.MGMT_BRANCH }} \ + --stop-on-failure ${{ parameters.STOP_ON_FAILURE }} \ + --retry-times ${{ parameters.RETRY_TIMES }} \ + --dump-kvm-if-fail ${{ parameters.DUMP_KVM_IF_FAIL }} \ + --requester "${{ parameters.REQUESTER }}" \ + --max-execute-seconds $((${{ parameters.MAX_RUN_TEST_MINUTES }} * 60)) + + TEST_PLAN_ID=`cat new_test_plan_id.txt` + + echo "Created test plan $TEST_PLAN_ID" + + echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to 
sonicelastictest@microsoft.com \033[0m"
+      echo -e -n "\033[33mPlease visit Elastictest page \033[0m"
+      echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID "
+      echo -e "\033[33mfor detailed test plan progress \033[0m"
+
+      echo "##vso[task.setvariable variable=TEST_PLAN_ID]$TEST_PLAN_ID"
+    displayName: "Trigger test"
+
+  - script: |
+      set -e
+      echo "Lock testbed"
+
+      echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m"
+      echo -e -n "\033[33mPlease visit Elastictest page \033[0m"
+      echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID "
+      echo -e "\033[33mfor detailed test plan progress \033[0m"
+
+      # When "LOCK_TESTBED" finishes, it changes into "PREPARE_TESTBED"
+      echo "##[group][test_plan.py] poll LOCK_TESTBED status"
+      python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --expected-state LOCK_TESTBED
+      echo "##[endgroup]"
+    displayName: "Lock testbed"
+
+  - script: |
+      set -e
+      echo "Prepare testbed"
+      echo "Preparing the testbed (add-topo, deploy-mg) may take 15-30 minutes. Before the testbed is ready, the test plan progress stays at 0, please be patient"
+
+      echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m"
+      echo -e -n "\033[33mPlease visit Elastictest page \033[0m"
+      echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID "
+      echo -e "\033[33mfor detailed test plan progress \033[0m"
+
+      # When "PREPARE_TESTBED" finishes, it changes into "EXECUTING"
+      echo "##[group][test_plan.py] poll PREPARE_TESTBED status"
+      python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --expected-state PREPARE_TESTBED
+      echo "##[endgroup]"
+    displayName: "Prepare testbed"
+
+  - script: |
+      set -e
+      echo "Run test"
+
+      echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m"
+      echo -e -n "\033[33mPlease visit Elastictest page \033[0m"
+      echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID "
+      echo -e "\033[33mfor detailed test plan progress \033[0m"
+
+      # When "EXECUTING" finishes, it changes into "KVMDUMP", "FAILED", "CANCELLED" or "FINISHED"
+      echo "##[group][test_plan.py] poll EXECUTING status"
+      python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --expected-state EXECUTING --expected-result ${{ parameters.EXPECTED_RESULT }}
+      echo "##[endgroup]"
+    displayName: "Run test"
+    timeoutInMinutes: ${{ parameters.MAX_RUN_TEST_MINUTES }}
+
+  - ${{ if eq(parameters.DUMP_KVM_IF_FAIL, 'True') }}:
+    - script: |
+        set -e
+        echo "KVM dump"
+
+        echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m"
+        echo -e -n "\033[33mPlease visit Elastictest page \033[0m"
+        echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID "
+        echo -e "\033[33mfor detailed test plan progress \033[0m"
+
+        # When "KVMDUMP" finishes, it changes into "FAILED", "CANCELLED" or "FINISHED"
+        echo "##[group][test_plan.py] poll KVMDUMP status"
+        python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --expected-state KVMDUMP
+        echo "##[endgroup]"
+      condition: succeededOrFailed()
+      displayName: "KVM dump"
+
+  - script: |
+      set -e
+      echo "Trying to cancel test plan $TEST_PLAN_ID; cancelling a finished test plan has no effect."
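+      # For reference, test_plan.py drives a simple state machine:
+      # LOCK_TESTBED -> PREPARE_TESTBED -> EXECUTING -> KVMDUMP -> FINISHED/FAILED/CANCELLED.
+      # A hypothetical standalone invocation that waits for the EXECUTING step to
+      # finish and also checks the final result would look like:
+      #   python ./.azure-pipelines/test_plan.py poll -i "$TEST_PLAN_ID" \
+      #     --expected-state EXECUTING --expected-result SUCCESS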
+      python ./.azure-pipelines/test_plan.py cancel -i "$(TEST_PLAN_ID)"
+    condition: always()
+    displayName: "Finalize running test plan"
diff --git a/.azure-pipelines/run-test-scheduler-template.yml b/.azure-pipelines/run-test-scheduler-template.yml
index 96f6d5c4f6..f1c2479fe9 100644
--- a/.azure-pipelines/run-test-scheduler-template.yml
+++ b/.azure-pipelines/run-test-scheduler-template.yml
@@ -105,6 +105,10 @@ parameters:
   type: number
   default: 480
 
+- name: KVM_IMAGE_BRANCH
+  type: string
+  default: ""
+
 steps:
 - script: |
@@ -116,6 +120,7 @@ steps:
     python ./.azure-pipelines/test_plan.py create -t ${{ parameters.TOPOLOGY }} -o new_test_plan_id.txt --min-worker ${{ parameters.MIN_WORKER }} --max-worker ${{ parameters.MAX_WORKER }} \
       --test-set ${{ parameters.TEST_SET }} --kvm-build-id $(KVM_BUILD_ID) \
+      --kvm-image-branch "${{ parameters.KVM_IMAGE_BRANCH }}" \
       --deploy-mg-extra-params "${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" \
       --common-extra-params "${{ parameters.COMMON_EXTRA_PARAMS }}" \
       --vm-type ${{ parameters.VM_TYPE }} --num-asic ${{ parameters.NUM_ASIC }} \
diff --git a/.azure-pipelines/test_plan.py b/.azure-pipelines/test_plan.py
index 6dfcda5f11..86849dc10c 100644
--- a/.azure-pipelines/test_plan.py
+++ b/.azure-pipelines/test_plan.py
@@ -128,9 +128,9 @@ def __init__(self):
         super(FinishStatus, self).__init__(TestPlanStatus.FINISHED)
 
 
-def get_scope(testbed_tools_url):
+def get_scope(elastictest_url):
     scope = "api://sonic-testbed-tools-dev/.default"
-    if testbed_tools_url in [
+    if elastictest_url in [
         "http://sonic-testbed2-scheduler-backend.azurewebsites.net",
         "https://sonic-testbed2-scheduler-backend.azurewebsites.net",
         "http://sonic-elastictest-prod-scheduler-backend-webapp.azurewebsites.net",
@@ -226,8 +226,10 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params
 
         # If triggered by buildimage repo, use image built from the buildId
         kvm_image_build_id = kvm_build_id
+        kvm_image_branch = kwargs.get("kvm_image_branch", "")
         if BUILDIMAGE_REPO_FLAG in kwargs.get("source_repo"):
             kvm_image_build_id = build_id
+            kvm_image_branch = ""
 
         payload = json.dumps({
             "name": test_plan_name,
@@ -253,7 +255,8 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params
             "image": {
                 "url": image_url,
                 "release": "",
-                "kvm_image_build_id": kvm_image_build_id
+                "kvm_image_build_id": kvm_image_build_id,
+                "kvm_image_branch": kvm_image_branch
             },
             "sonic_mgmt": {
                 "repo_url": sonic_mgmt_repo_url,
@@ -335,7 +338,7 @@ def cancel(self, test_plan_id):
         print("Result of cancelling test plan at {}:".format(tp_url))
         print(str(resp["data"]))
 
-    def poll(self, test_plan_id, interval=60, timeout=-1, expected_state=""):
+    def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expected_result=None):
         print("Polling progress and status of test plan at {}/scheduler/testplan/{}"
               .format(self.frontend_url, test_plan_id))
         print("Polling interval: {} seconds".format(interval))
@@ -396,9 +399,18 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state=""):
                                    .format(test_plan_id, step_status, result, time.time() - start_time,
                                            self.frontend_url, test_plan_id))
-                else:
-                    print("Current status is {}".format(step_status))
-                    return
+                if expected_result:
+                    if result != expected_result:
+                        raise Exception("Test plan id: {}, status: {}, result: {} does not match expected result: {}, "
+                                        "Elapsed {:.0f} seconds. "
+                                        "Check {}/scheduler/testplan/{} for test plan status"
+                                        .format(test_plan_id, step_status, result,
+                                                expected_result, time.time() - start_time,
+                                                self.frontend_url,
+                                                test_plan_id))
+
+                print("Current status is {}".format(step_status))
+                return
             else:
                 print("Current state is {}, waiting for the state {}".format(status, expected_state))
@@ -471,6 +483,16 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state=""):
         required=False,
         help="Deploy minigraph extra params"
     )
+    parser_create.add_argument(
+        "--kvm-image-branch",
+        type=str,
+        dest="kvm_image_branch",
+        nargs='?',
+        const="",
+        default="",
+        required=False,
+        help="KVM image branch."
+    )
     parser_create.add_argument(
         "--kvm-build-id",
         type=str,
@@ -723,6 +745,17 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state=""):
         help="Expected state.",
         default=""
     )
+    parser_poll.add_argument(
+        "--expected-result",
+        type=str,
+        dest="expected_result",
+        nargs='?',
+        const=None,
+        default=None,
+        required=False,
+        choices=['PENDING', 'EXECUTING', 'SUCCESS', 'FAILED', 'CANCELLED'],
+        help="If specified, check the test plan result after the expected state is matched."
+    )
     parser_poll.add_argument(
         "--interval",
         type=int,
@@ -753,17 +786,17 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state=""):
     print("Test plan utils parameters: {}".format(args))
     auth_env = ["TENANT_ID", "CLIENT_ID", "CLIENT_SECRET"]
-    required_env = ["TESTBED_TOOLS_URL"]
+    required_env = ["ELASTICTEST_SCHEDULER_BACKEND_URL"]
 
     if args.action in ["create", "cancel"]:
         required_env.extend(auth_env)
 
     env = {
-        "testbed_tools_url": os.environ.get("TESTBED_TOOLS_URL"),
-        "tenant_id": os.environ.get("TENANT_ID"),
-        "client_id": os.environ.get("CLIENT_ID"),
-        "client_secret": os.environ.get("CLIENT_SECRET"),
-        "frontend_url": os.environ.get("FRONTEND_URL", "https://www.testbed-tools.org"),
+        "elastictest_scheduler_backend_url": os.environ.get("ELASTICTEST_SCHEDULER_BACKEND_URL"),
+        "tenant_id": os.environ.get("ELASTICTEST_MSAL_TENANT_ID"),
+        "client_id": os.environ.get("ELASTICTEST_MSAL_CLIENT_ID"),
+        "client_secret": os.environ.get("ELASTICTEST_MSAL_CLIENT_SECRET"),
+        "frontend_url": os.environ.get("ELASTICTEST_FRONTEND_URL", "https://elastictest.org"),
     }
     env_missing = [k.upper() for k, v in env.items() if k.upper() in required_env and not v]
     if env_missing:
@@ -772,7 +805,7 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state=""):
     try:
         tp = TestPlanManager(
-            env["testbed_tools_url"],
+            env["elastictest_scheduler_backend_url"],
             env["frontend_url"],
             env["tenant_id"],
             env["client_id"],
@@ -808,6 +841,7 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state=""):
             test_plan_name=test_plan_name,
             deploy_mg_extra_params=args.deploy_mg_extra_params,
             kvm_build_id=args.kvm_build_id,
+            kvm_image_branch=args.kvm_image_branch,
             min_worker=args.min_worker,
             max_worker=args.max_worker,
             pr_id=pr_id,
@@ -837,7 +871,7 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state=""):
             max_execute_seconds=args.max_execute_seconds,
         )
     elif args.action == "poll":
-        tp.poll(args.test_plan_id, args.interval, args.timeout, args.expected_state)
+        tp.poll(args.test_plan_id, args.interval, args.timeout, args.expected_state, args.expected_result)
     elif args.action == "cancel":
         tp.cancel(args.test_plan_id)
     sys.exit(0)
diff --git a/.azure-pipelines/upgrade_image.py b/.azure-pipelines/upgrade_image.py
old mode 100644
new mode 100755
index cbc5ac2fb9..fa203fe4d0
--- a/.azure-pipelines/upgrade_image.py
+++ 
b/.azure-pipelines/upgrade_image.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + """Script for upgrading SONiC image for nightly tests. Main purpose of this script is to upgrade SONiC image for nightly tests. Based on the arguments passed in, the script @@ -23,8 +25,8 @@ sys.path.append(ansible_path) -from devutil.devices import init_localhost, init_testbed_sonichosts # noqa E402 -from devutil.sonic_helpers import upgrade_image # noqa E402 +from devutil.devices.factory import init_localhost, init_testbed_sonichosts # noqa E402 +from devutil.devices.sonic import upgrade_image # noqa E402 from tests.common.plugins.pdu_controller.pdu_manager import pdu_manager_factory # noqa E402 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b6ef1d058b..e837adb443 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,6 +15,10 @@ repos: hooks: - id: flake8 args: ["--max-line-length=120"] + exclude: ^spytest/ + - id: flake8 + files: ^spytest/.* + args: ["--max-line-length=120", "--ignore=E1,E2,E3,E5,E7,W5"] - repo: https://github.com/sonic-net/sonic-mgmt rev: 1.0.0+pre_commit diff --git a/ansible/TestbedProcessing.py b/ansible/TestbedProcessing.py old mode 100644 new mode 100755 index 6d6e479468..b999e07049 --- a/ansible/TestbedProcessing.py +++ b/ansible/TestbedProcessing.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + from shutil import copyfile import yaml import datetime @@ -50,7 +52,7 @@ main_file = "group_vars/vm_host/main.yml" vmHostCreds_file = "group_vars/vm_host/creds.yml" labLinks_file = "files/sonic_lab_links.csv" -testbed_file = "testbed.csv" +testbed_file = "testbed.yaml" devices_file = "files/sonic_lab_devices.csv" eosCred_file = "group_vars/eos/creds.yml" fanoutSecrets_file = "group_vars/fanout/secrets.yml" @@ -237,7 +239,7 @@ def makeSonicLabDevices(data, outfile): makeTestbed(data, outfile) @:parameter data - the dictionary to look through (devices dictionary) @:parameter outfile - the file to write to -generates /testbed.csv by pulling confName, groupName, topo, ptf_image_name, +generates /testbed.yaml by pulling confName, groupName, topo, ptf_image_name, ptf_ip, ptf_ipv6, server, vm_base, dut, and comment error handling: checks if attribute values are None type or string "None" """ @@ -296,7 +298,7 @@ def makeTestbed(data, outfile): "," + vm_base + "," + dut + "," + comment f.write(row + "\n") except IOError: - print("I/O error: issue creating testbed.csv") + print("I/O error: issue creating testbed.yaml") """ @@ -1022,7 +1024,7 @@ def main(): # Generate sonic_lab_devices.csv (DEVICES) makeSonicLabDevices(devices, args.basedir + devices_file) print("\tCREATING TEST BED: " + args.basedir + testbed_file) - # Generate testbed.csv (TESTBED) + # Generate testbed.yaml (TESTBED) makeTestbed(testbed, args.basedir + testbed_file) print("\tCREATING VM_HOST/CREDS: " + args.basedir + vmHostCreds_file) # Generate vm_host\creds.yml (CREDS) diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg index a2328c3de4..0a8665d56d 100644 --- a/ansible/ansible.cfg +++ b/ansible/ansible.cfg @@ -14,7 +14,7 @@ inventory = /etc/ansible/hosts library = library:library/ixia module_utils = module_utils -remote_tmp = $HOME/.ansible/tmp +remote_tmp = /tmp/.ansible-$USER pattern = * forks = 5 poll_interval = 15 diff --git a/ansible/config_sonic_basedon_testbed.yml b/ansible/config_sonic_basedon_testbed.yml index b180695f0c..3ce37cd3f6 100644 --- a/ansible/config_sonic_basedon_testbed.yml +++ b/ansible/config_sonic_basedon_testbed.yml @@ -38,7 +38,7 @@ - block: - name: set default testbed 
file set_fact: - testbed_file: testbed.csv + testbed_file: testbed.yaml when: testbed_file is not defined - name: Gathering testbed information diff --git a/ansible/devutil/conn_graph_helper.py b/ansible/devutil/conn_graph_helper.py index f6f74392be..f18d7aac3e 100644 --- a/ansible/devutil/conn_graph_helper.py +++ b/ansible/devutil/conn_graph_helper.py @@ -25,8 +25,7 @@ def get_conn_graph_facts(hostnames): utils.debug_fname = CONN_GRAPH_LOG lab_graph = utils.find_graph(hostnames=hostnames, part=True) - succeed, results = utils.build_results( - lab_graph=lab_graph, hostnames=hostnames, ignore_error=True) + succeed, results = lab_graph.build_results(hostnames=hostnames, ignore_error=True) if not succeed: print("Parse conn graph failes msg = {}".format(results)) return {'device_pdu_info': {}, 'device_pdu_links': {}} diff --git a/tests/snappi/__init__.py b/ansible/devutil/devices/__init__.py similarity index 100% rename from tests/snappi/__init__.py rename to ansible/devutil/devices/__init__.py diff --git a/ansible/devutil/ansible_hosts.py b/ansible/devutil/devices/ansible_hosts.py similarity index 98% rename from ansible/devutil/ansible_hosts.py rename to ansible/devutil/devices/ansible_hosts.py index 40913b2314..3a605686c8 100644 --- a/ansible/devutil/ansible_hosts.py +++ b/ansible/devutil/devices/ansible_hosts.py @@ -199,16 +199,18 @@ def __init__( hostvars (dict, optional): Additional ansible variables for ansible hosts. Similar as using `-e` argument of ansible-playbook command line to specify additional host variables. Defaults to {}. """ - # Check existence of inventories - if isinstance(inventories, list): - for inventory in inventories: - if not os.path.exists(inventory): - raise FileNotFoundError("Inventory file {} not found.".format(inventory)) - else: - if not os.path.exists(inventories): - raise FileNotFoundError("Inventory file {} not found.".format(inventories)) - self.inventories = inventories + + # Check existence of inventories only when host_pattern is not "localhost" + if host_pattern != "localhost": + if isinstance(self.inventories, list): + for inventory in self.inventories: + if not os.path.exists(inventory): + raise FileNotFoundError("Inventory file {} not found.".format(inventory)) + else: + if not os.path.exists(self.inventories): + raise FileNotFoundError("Inventory file {} not found.".format(self.inventories)) + self.host_pattern = host_pattern if loader: self.loader = loader @@ -216,10 +218,10 @@ def __init__( self.loader = DataLoader() if inventory_manager: - if isinstance(inventories, list): - sources = inventories + if isinstance(self.inventories, list): + sources = self.inventories else: - sources = [inventories] + sources = [self.inventories] if set(sources) != set(inventory_manager._sources): inventory_manager._sources = sources inventory_manager.parse_sources() diff --git a/ansible/devutil/devices.py b/ansible/devutil/devices/factory.py similarity index 69% rename from ansible/devutil/devices.py rename to ansible/devutil/devices/factory.py index eded3190ea..bb6f070947 100644 --- a/ansible/devutil/devices.py +++ b/ansible/devutil/devices/factory.py @@ -3,35 +3,19 @@ import os import yaml -from ansible_hosts import AnsibleHosts, AnsibleHost -from devutil.ansible_hosts import NoAnsibleHostError, MultipleAnsibleHostsError +from .ansible_hosts import AnsibleHost +from .ansible_hosts import AnsibleHosts +from .ansible_hosts import NoAnsibleHostError +from .ansible_hosts import MultipleAnsibleHostsError +from .sonic import SonicHosts logger = 
logging.getLogger(__name__) _self_dir = os.path.dirname(os.path.abspath(__file__)) -ansible_path = os.path.realpath(os.path.join(_self_dir, "../")) +ansible_path = os.path.realpath(os.path.join(_self_dir, "../../")) -class SonicHosts(AnsibleHosts): - SUPPORTED_UPGRADE_TYPES = ["onie", "sonic"] - - def __init__(self, inventories, host_pattern, options={}, hostvars={}): - super(SonicHosts, self).__init__(inventories, host_pattern, options=options.copy(), hostvars=hostvars.copy()) - - @property - def sonic_version(self): - try: - output = self.command("cat /etc/sonic/sonic_version.yml") - versions = {} - for hostname in self.hostnames: - versions[hostname] = yaml.safe_load(output[hostname]["stdout"]) - return versions - except Exception as e: - logger.error("Failed to run `cat /etc/sonic/sonic_version.yml`: {}".format(repr(e))) - return {} - - -def init_localhost(inventories, options={}, hostvars={}): +def init_localhost(inventories=None, options={}, hostvars={}): try: return AnsibleHost(inventories, "localhost", options=options.copy(), hostvars=hostvars.copy()) except (NoAnsibleHostError, MultipleAnsibleHostsError) as e: @@ -41,6 +25,21 @@ def init_localhost(inventories, options={}, hostvars={}): return None +def init_host(inventories, host_pattern, options={}, hostvars={}): + try: + return AnsibleHost(inventories, host_pattern, options=options.copy(), hostvars=hostvars.copy()) + except NoAnsibleHostError as e: + logger.error( + "No host '{}' in inventories '{}', exception: {}".format(host_pattern, inventories, repr(e)) + ) + return None + except MultipleAnsibleHostsError as e: + logger.error( + "Multiple hosts '{}' in inventories '{}', exception: {}".format(host_pattern, inventories, repr(e)) + ) + return None + + def init_hosts(inventories, host_pattern, options={}, hostvars={}): try: return AnsibleHosts(inventories, host_pattern, options=options.copy(), hostvars=hostvars.copy()) diff --git a/ansible/devutil/sonic_helpers.py b/ansible/devutil/devices/sonic.py similarity index 89% rename from ansible/devutil/sonic_helpers.py rename to ansible/devutil/devices/sonic.py index 1992146c96..4665daee8d 100644 --- a/ansible/devutil/sonic_helpers.py +++ b/ansible/devutil/devices/sonic.py @@ -1,12 +1,29 @@ import logging -import os +import yaml -from devutil.ansible_hosts import RunAnsibleModuleFailed +from .ansible_hosts import AnsibleHosts +from .ansible_hosts import RunAnsibleModuleFailed logger = logging.getLogger(__name__) -_self_dir = os.path.dirname(os.path.abspath(__file__)) -ansible_path = os.path.realpath(os.path.join(_self_dir, "../")) + +class SonicHosts(AnsibleHosts): + SUPPORTED_UPGRADE_TYPES = ["onie", "sonic"] + + def __init__(self, inventories, host_pattern, options={}, hostvars={}): + super(SonicHosts, self).__init__(inventories, host_pattern, options=options.copy(), hostvars=hostvars.copy()) + + @property + def sonic_version(self): + try: + output = self.command("cat /etc/sonic/sonic_version.yml") + versions = {} + for hostname in self.hostnames: + versions[hostname] = yaml.safe_load(output[hostname]["stdout"]) + return versions + except Exception as e: + logger.error("Failed to run `cat /etc/sonic/sonic_version.yml`: {}".format(repr(e))) + return {} def upgrade_by_sonic(sonichosts, image_url, disk_used_percent): diff --git a/ansible/devutils b/ansible/devutils index b8c519cd7f..01e08c96da 100755 --- a/ansible/devutils +++ b/ansible/devutils @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Supress warning import sys diff --git 
a/ansible/dualtor/nic_simulator/nic_simulator_grpc_mgmt_service.proto b/ansible/dualtor/nic_simulator/nic_simulator_grpc_mgmt_service.proto index 16f6d1bdcd..29cb235811 100644 --- a/ansible/dualtor/nic_simulator/nic_simulator_grpc_mgmt_service.proto +++ b/ansible/dualtor/nic_simulator/nic_simulator_grpc_mgmt_service.proto @@ -1,5 +1,5 @@ -import "nic_simulator_grpc_service.proto" -syntax = "proto3" +syntax = "proto3"; +import "nic_simulator_grpc_service.proto"; service DualTorMgmtService { @@ -15,42 +15,42 @@ service DualTorMgmtService { } message ListOfAdminRequest { - repeated string nic_addresses = 1 - repeated AdminRequest admin_requests = 2 + repeated string nic_addresses = 1; + repeated AdminRequest admin_requests = 2; } message ListOfAdminReply { - repeated string nic_addresses = 1 - repeated AdminReply admin_replies = 2 + repeated string nic_addresses = 1; + repeated AdminReply admin_replies = 2; } message ListOfOperationRequest { - repeated string nic_addresses = 1 - repeated OperationRequest operation_requests = 2 + repeated string nic_addresses = 1; + repeated OperationRequest operation_requests = 2; } message ListOfOperationReply { - repeated string nic_addresses = 1 - repeated OperationReply operation_replies = 2 + repeated string nic_addresses = 1; + repeated OperationReply operation_replies = 2; } message ListOfDropRequest { - repeated string nic_addresses = 1 - repeated DropRequest drop_requests = 2 + repeated string nic_addresses = 1; + repeated DropRequest drop_requests = 2; } message ListOfDropReply { - repeated string nic_addresses = 1 - repeated DropReply drop_replies = 2 + repeated string nic_addresses = 1; + repeated DropReply drop_replies = 2; } message ListOfNiCServerAdminStateRequest { - repeated string nic_addresses = 1 - repeated bool admin_states = 2 + repeated string nic_addresses = 1; + repeated bool admin_states = 2; } message ListOfNiCServerAdminStateReply { - repeated string nic_addresses = 1 - repeated bool admin_states = 2 - repeated bool successes = 3 + repeated string nic_addresses = 1; + repeated bool admin_states = 2; + repeated bool successes = 3; } diff --git a/ansible/dualtor/nic_simulator/nic_simulator_grpc_service.proto b/ansible/dualtor/nic_simulator/nic_simulator_grpc_service.proto index 9908126fa9..83dc5ac0cf 100644 --- a/ansible/dualtor/nic_simulator/nic_simulator_grpc_service.proto +++ b/ansible/dualtor/nic_simulator/nic_simulator_grpc_service.proto @@ -1,4 +1,4 @@ -syntax = "proto3" +syntax = "proto3"; service DualToRActive { rpc QueryAdminForwardingPortState(AdminRequest) returns(AdminReply) {} @@ -15,48 +15,48 @@ service DualToRActive { } message AdminRequest { - repeated int32 portid = 1 - repeated bool state = 2 + repeated int32 portid = 1; + repeated bool state = 2; } message AdminReply { - repeated int32 portid = 1 - repeated bool state = 2 + repeated int32 portid = 1; + repeated bool state = 2; } message OperationRequest { - repeated int32 portid = 1 + repeated int32 portid = 1; } message OperationReply { - repeated int32 portid = 1 - repeated bool state = 2 + repeated int32 portid = 1; + repeated bool state = 2; } message LinkStateRequest { - repeated int32 portid = 1 + repeated int32 portid = 1; } message LinkStateReply { - repeated int32 portid = 1 - repeated bool state = 2 + repeated int32 portid = 1; + repeated bool state = 2; } message ServerVersionRequest { - string version = 1 + string version = 1; } message ServerVersionReply { - string version = 1 + string version = 1; } message DropRequest { - repeated int32 portid = 1 - 
repeated int32 direction = 2 - bool recover = 3 + repeated int32 portid = 1; + repeated int32 direction = 2; + bool recover = 3; } message DropReply { - repeated int32 portid = 1 - repeated bool success = 2 + repeated int32 portid = 1; + repeated bool success = 2; } diff --git a/ansible/files/check_testbed_and_inventory_file.py b/ansible/files/check_testbed_and_inventory_file.py old mode 100644 new mode 100755 index 99eebdbbe0..369f03e56c --- a/ansible/files/check_testbed_and_inventory_file.py +++ b/ansible/files/check_testbed_and_inventory_file.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + """Tool for checking testbed file and related inventory file. When we may make some mistakes while adding or updating testbed configurations in testbed file and inventory files. diff --git a/ansible/files/creategraph.py b/ansible/files/creategraph.py deleted file mode 100755 index b1f81127ea..0000000000 --- a/ansible/files/creategraph.py +++ /dev/null @@ -1,321 +0,0 @@ -#!/usr/bin/env python - -import csv -import sys -import os -import argparse -from lxml import etree - -try: - from ansible.module_utils.port_utils import get_port_alias_to_name_map -except ImportError: - # Add parent dir for using outside Ansible - sys.path.append('..') - from module_utils.port_utils import get_port_alias_to_name_map - -DEFAULT_DEVICECSV = 'sonic_lab_devices.csv' -DEFAULT_LINKCSV = 'sonic_lab_links.csv' -DEFAULT_CONSOLECSV = 'sonic_lab_console_links.csv' -DEFAULT_BMCCSV = 'sonic_lab_bmc_links.csv' -DEFAULT_PDUCSV = 'sonic_lab_pdu_links.csv' - -LAB_CONNECTION_GRAPH_ROOT_NAME = 'LabConnectionGraph' -LAB_CONNECTION_GRAPH_DPGL2_NAME = 'DevicesL2Info' - - -class LabGraph(object): - - """ - This is used to create "graph" file of lab for all connections and vlan info from csv file - We(both engineer and lab technician) maintian and modify the csv file to keep track of the lab - infrastucture for Sonic development and testing environment. - """ - - def __init__(self, dev_csvfile=None, link_csvfile=None, cons_csvfile=None, - bmc_csvfile=None, pdu_csvfile=None, graph_xmlfile=None): - self.devices = {} - self.links = [] - self.consoles = [] - self.bmcs = [] - self.pdus = [] - self.devcsv = dev_csvfile - self.linkcsv = link_csvfile - self.conscsv = cons_csvfile - self.bmccsv = bmc_csvfile - self.pducsv = pdu_csvfile - self.png_xmlfile = 'str_sonic_png.xml' - self.dpg_xmlfile = 'str_sonic_dpg.xml' - self.one_xmlfile = graph_xmlfile - self._cache_port_name_to_alias = {} - self._cache_port_alias_to_name = {} - self.pngroot = etree.Element('PhysicalNetworkGraphDeclaration') - self.dpgroot = etree.Element('DataPlaneGraph') - self.csgroot = etree.Element('ConsoleGraphDeclaration') - self.bmcgroot = etree.Element('BmcGraphDeclaration') - self.pcgroot = etree.Element('PowerControlGraphDeclaration') - - def _get_port_alias_to_name_map(self, hwsku): - """ - Retrive port alias to name map for specific hwsku. - """ - if hwsku in self._cache_port_alias_to_name: - return self._cache_port_alias_to_name[hwsku] - port_alias_to_name_map, _, _ = get_port_alias_to_name_map(hwsku) - self._cache_port_alias_to_name[hwsku] = port_alias_to_name_map - return port_alias_to_name_map - - def _get_port_name_to_alias_map(self, hwsku): - """ - Retrive port name to alias map for specific hwsku. 
- """ - if hwsku in self._cache_port_name_to_alias: - return self._cache_port_name_to_alias[hwsku] - port_alias_to_name_map = self._get_port_alias_to_name_map(hwsku) - port_name_to_alias_map = dict([(name, alias) for alias, name in port_alias_to_name_map.items()]) - self._cache_port_name_to_alias[hwsku] = port_name_to_alias_map - return port_name_to_alias_map - - def _get_port_name_set(self, device_hostname): - """ - Retrive port name set of a specific hwsku. - """ - hwsku = self.devices[device_hostname]['HwSku'] - return set(self._get_port_name_to_alias_map(hwsku).keys()) - - def _get_port_alias_set(self, device_hostname): - """ - Retrive port alias set of a specific hwsku. - """ - hwsku = self.devices[device_hostname]['HwSku'] - return set(self._get_port_alias_to_name_map(hwsku).keys()) - - def _convert_port_alias_to_name(self, device_hostname, port_alias): - """ - Given the device hostname and port alias, return the corresponding port name. - """ - os = self.devices[device_hostname].get('Os', '').lower() - if os != 'sonic': - raise Exception("Cannot convert port alias to name for non-SONiC device {}".format(device_hostname)) - hwsku = self.devices[device_hostname]['HwSku'] - port_alias_to_name_map = self._get_port_alias_to_name_map(hwsku) - return port_alias_to_name_map[port_alias] - - def read_devices(self): - with open(self.devcsv) as csv_dev: - csv_devices = csv.DictReader(filter(lambda row: row[0] != '#' and len(row.strip()) != 0, csv_dev)) - devices_root = etree.SubElement(self.pngroot, 'Devices') - pdus_root = etree.SubElement(self.pcgroot, 'DevicesPowerControlInfo') - cons_root = etree.SubElement(self.csgroot, 'DevicesConsoleInfo') - bmc_root = etree.SubElement(self.bmcgroot, 'DevicesBmcInfo') - for row in csv_devices: - attrs = {} - self.devices[row['Hostname']] = row - devtype = row['Type'].lower() - if 'pdu' in devtype: - for key in row: - attrs[key] = row[key].decode('utf-8') - etree.SubElement(pdus_root, 'DevicePowerControlInfo', attrs) - elif 'consoleserver' in devtype: - for key in row: - attrs[key] = row[key].decode('utf-8') - etree.SubElement(cons_root, 'DeviceConsoleInfo', attrs) - elif 'mgmttstorrouter' in devtype: - for key in row: - attrs[key] = row[key].decode('utf-8') - etree.SubElement(cons_root, 'DeviceConsoleInfo', attrs) - etree.SubElement(bmc_root, 'DeviceBmcInfo', attrs) - else: - for key in row: - if key.lower() != 'managementip' and key.lower() != 'protocol': - attrs[key] = row[key].decode('utf-8') - etree.SubElement(devices_root, 'Device', attrs) - - def read_links(self): - # Read and parse link.csv file - with open(self.linkcsv) as csv_file: - csv_links = csv.DictReader(filter(lambda row: row[0] != '#' and len(row.strip()) != 0, csv_file)) - links_group_by_devices = {} - for link in csv_links: - self.links.append(link) - if link['StartDevice'] not in links_group_by_devices: - links_group_by_devices[link['StartDevice']] = [] - links_group_by_devices[link['StartDevice']].append(link) - if link['EndDevice'] not in links_group_by_devices: - links_group_by_devices[link['EndDevice']] = [] - links_group_by_devices[link['EndDevice']].append(link) - - # For SONiC devices (DUT/Fanout), convert port alias to port name. Updates in `links_group_by_devices` will - # also be reflected in `self.links`, because they are holding reference to the same underlying `link` variable. 
- for device, links in links_group_by_devices.items(): - os = self.devices[device].get('Os', '').lower() - if os != 'sonic': - continue - ports = [] - for link in links: - if device == link['StartDevice']: - ports.append(link['StartPort']) - elif device == link['EndDevice']: - ports.append(link['EndPort']) - if any([port not in self._get_port_alias_set(device).union(self._get_port_name_set(device)) - for port in ports]): - # If any port of a device is neither port name nor port alias, skip conversion for this device. - continue - if all([port in self._get_port_alias_set(device) for port in ports]): - # If all ports of a device are port alias, convert them to port name. - for link in links: - if device == link['StartDevice']: - link['StartPort'] = self._convert_port_alias_to_name(device, link['StartPort']) - elif device == link['EndDevice']: - link['EndPort'] = self._convert_port_alias_to_name(device, link['EndPort']) - elif not all([port in self._get_port_name_set(device) for port in ports]): - # If some ports use port name and others use port alias, raise an Exception. - raise Exception("[Failed] For device {}, please check {} and ensure all ports use port name, " - "or ensure all ports use port alias.".format(device, self.linkcsv)) - - # Generate DeviceInterfaceLink XML nodes for connection graph - links_root = etree.SubElement(self.pngroot, 'DeviceInterfaceLinks') - for link in self.links: - attrs = {} - for key in link: - if key.lower() != 'vlanid' and key.lower() != 'vlanmode': - attrs[key] = link[key].decode('utf-8') - etree.SubElement(links_root, 'DeviceInterfaceLink', attrs) - - def read_consolelinks(self): - if not os.path.exists(self.conscsv): - return - with open(self.conscsv) as csv_file: - csv_cons = csv.DictReader(csv_file) - conslinks_root = etree.SubElement(self.csgroot, 'ConsoleLinksInfo') - for cons in csv_cons: - attrs = {} - for key in cons: - attrs[key] = cons[key].decode('utf-8') - etree.SubElement(conslinks_root, 'ConsoleLinkInfo', attrs) - self.consoles.append(cons) - - def read_bmclinks(self): - if not os.path.exists(self.bmccsv): - return - with open(self.bmccsv) as csv_file: - csv_bmc = csv.DictReader(csv_file) - bmclinks_root = etree.SubElement(self.bmcgroot, 'BmcLinksInfo') - for bmc in csv_bmc: - attrs = {} - for key in bmc: - attrs[key] = bmc[key].decode('utf-8') - etree.SubElement(bmclinks_root, 'BmcLinkInfo', attrs) - self.bmcs.append(bmc) - - def read_pdulinks(self): - if not os.path.exists(self.pducsv): - return - with open(self.pducsv) as csv_file: - csv_pdus = csv.DictReader(csv_file) - pduslinks_root = etree.SubElement(self.pcgroot, 'PowerControlLinksInfo') - for pdu_link in csv_pdus: - attrs = {} - for key in pdu_link: - attrs[key] = pdu_link[key].decode('utf-8') - etree.SubElement(pduslinks_root, 'PowerControlLinkInfo', attrs) - self.pdus.append(pdu_link) - - def generate_dpg(self): - for hostname in self.devices: - managementip = self.devices[hostname].get('ManagementIp', '') - devtype = self.devices[hostname]['Type'].lower() - if not hostname: - continue - if devtype in ('server', 'devsonic'): - # Build Management interface IP for server and DUT - l3inforoot = etree.SubElement(self.dpgroot, 'DevicesL3Info', {'Hostname': hostname}) - etree.SubElement(l3inforoot, 'ManagementIPInterface', {'Name': 'ManagementIp', 'Prefix': managementip}) - elif 'fanout' in devtype or 'ixiachassis' in devtype: - # Build Management interface IP here, - # if we create each device indivial minigraph file, we may comment this out - l3inforoot = 
etree.SubElement(self.dpgroot, 'DevicesL3Info', {'Hostname': hostname}) - etree.SubElement(l3inforoot, 'ManagementIPInterface', {'Name': 'ManagementIp', 'Prefix': managementip}) - # Build L2 information Here - l2inforoot = etree.SubElement(self.dpgroot, LAB_CONNECTION_GRAPH_DPGL2_NAME, {'Hostname': hostname}) - vlanattr = {} - for link in self.links: - if link['StartDevice'] == hostname: - vlanattr['portname'] = link['StartPort'] - if link['EndDevice'] == hostname: - vlanattr['portname'] = link['EndPort'] - if link['StartDevice'] == hostname or link['EndDevice'] == hostname: - vlanattr['vlanids'] = link['VlanID'] - vlanattr['mode'] = link['VlanMode'] - etree.SubElement(l2inforoot, 'InterfaceVlan', vlanattr) - - def create_xml(self): - ''' - - if two seperate file of png and dpg needed, uncomment these part - - pngxml = open(self.png_xmlfile, 'w') - png = etree.tostring(self.pngroot, pretty_print=True) - pngxml.write(png) - - pngxml = open(self.dpg_xmlfile, 'w') - dpg = etree.tostring(self.dpgroot, pretty_print=True) - pngxml.write(dpg) - ''' - - onexml = open(self.one_xmlfile, 'w') - root = etree.Element(LAB_CONNECTION_GRAPH_ROOT_NAME) - root.append(self.pngroot) - root.append(self.dpgroot) - root.append(self.csgroot) - root.append(self.bmcgroot) - root.append(self.pcgroot) - result = etree.tostring(root, pretty_print=True) - onexml.write(result) - - -def get_file_names(args): - if not args.inventory: - device, links, console, bmc, pdu = args.device, args.links, args.console, args.bmc, args.pdu - else: - device = 'sonic_{}_devices.csv'.format(args.inventory) - links = 'sonic_{}_links.csv'.format(args.inventory) - console = 'sonic_{}_console_links.csv'.format(args.inventory) - bmc = 'sonic_{}_bmc_links.csv'.format(args.inventory) - pdu = 'sonic_{}_pdu_links.csv'.format(args.inventory) - - return device, links, console, bmc, pdu - - -def main(): - - parser = argparse.ArgumentParser() - parser.add_argument("-d", "--device", help="device file [deprecate warning: use -i instead]", - default=DEFAULT_DEVICECSV) - parser.add_argument("-l", "--links", help="link file [deprecate warning: use -i instead]", - default=DEFAULT_LINKCSV) - parser.add_argument("-c", "--console", help="console connection file [deprecate warning: use -i instead]", - default=DEFAULT_CONSOLECSV) - parser.add_argument("-b", "--bmc", help="bmc connection file [deprecate warning: use -i instead]", - default=DEFAULT_BMCCSV) - parser.add_argument("-p", "--pdu", help="pdu connection file [deprecate warning: use -i instead]", - default=DEFAULT_PDUCSV) - parser.add_argument("-i", "--inventory", - help="specify inventory namei to generate device/link/console/pdu file names, default none", - default=None) - parser.add_argument("-o", "--output", help="output xml file", required=True) - args = parser.parse_args() - - device, links, console, bmc, pdu = get_file_names(args) - mygraph = LabGraph(device, links, console, bmc, pdu, args.output) - - mygraph.read_devices() - mygraph.read_links() - mygraph.read_consolelinks() - mygraph.read_bmclinks() - mygraph.read_pdulinks() - mygraph.generate_dpg() - mygraph.create_xml() - - -if __name__ == '__main__': - main() diff --git a/ansible/files/empty_graph.xml b/ansible/files/empty_graph.xml deleted file mode 100644 index 77f07d05d3..0000000000 --- a/ansible/files/empty_graph.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - diff --git a/ansible/files/example_ixia_connection_graph.xml b/ansible/files/example_ixia_connection_graph.xml deleted file mode 100644 index e6f7c704ce..0000000000 --- 
a/ansible/files/example_ixia_connection_graph.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/ansible/files/graph_files.yml b/ansible/files/graph_files.yml deleted file mode 100644 index 866000c777..0000000000 --- a/ansible/files/graph_files.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- - # Public graph files - - lab_connection_graph.xml - - example_ixia_connection_graph.xml - - - # Private graph files diff --git a/ansible/files/graph_groups.yml b/ansible/files/graph_groups.yml new file mode 100644 index 0000000000..b3ef72b824 --- /dev/null +++ b/ansible/files/graph_groups.yml @@ -0,0 +1,3 @@ +--- + - lab + - example_ixia diff --git a/ansible/files/lab_connection_graph.xml b/ansible/files/lab_connection_graph.xml deleted file mode 100644 index 73d62cfe77..0000000000 --- a/ansible/files/lab_connection_graph.xml +++ /dev/null @@ -1,136 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/ansible/group_vars/all/ceos.yml b/ansible/group_vars/all/ceos.yml index 1bbc1c7d03..61559a09f8 100644 --- a/ansible/group_vars/all/ceos.yml +++ b/ansible/group_vars/all/ceos.yml @@ -3,7 +3,10 @@ #ceos_image_filename: cEOS64-lab-4.23.2F.tar.xz #ceos_image_orig: ceosimage:4.23.2F #ceos_image: ceosimage:4.23.2F-1 -ceos_image_filename: cEOS64-lab-4.25.5.1M.tar -ceos_image_orig: ceosimage:4.25.5.1M -ceos_image: ceosimage:4.25.5.1M-1 +#ceos_image_filename: cEOS64-lab-4.25.5.1M.tar +#ceos_image_orig: ceosimage:4.25.5.1M +#ceos_image: ceosimage:4.25.5.1M-1 +ceos_image_filename: cEOS64-lab-4.29.3M.tar +ceos_image_orig: ceosimage:4.29.3M +ceos_image: ceosimage:4.29.3M-1 skip_ceos_image_downloading: false diff --git a/ansible/group_vars/sonic/variables b/ansible/group_vars/sonic/variables index 0782d144c2..89706062ae 100644 --- a/ansible/group_vars/sonic/variables +++ b/ansible/group_vars/sonic/variables @@ -27,9 +27,9 @@ barefoot_hwskus: [ "montara", "mavericks", "Arista-7170-64C", "newport", "Arista marvell_hwskus: [ "et6448m" ] innovium_tl7_hwskus: ["Wistron_sw_to3200k_32x100" , "Wistron_sw_to3200k"] -cisco_hwskus: ["Cisco-8102-C64"] +cisco_hwskus: ["Cisco-8102-C64", "Cisco-8111-O32", "Cisco-8111-O64"] cisco-8000_gb_hwskus: ["Cisco-8102-C64"] - +cisco-8000_gr_hwskus: ["Cisco-8111-O32", "Cisco-8111-O64"] ## Note: ## Docker volumes should be list instead of dict. 
However, if we want to keep code DRY, we
## need to merge dictionaries, and convert them to list
diff --git a/ansible/health_checker.py b/ansible/health_checker.py
old mode 100644
new mode 100755
index 3c979e93d1..d8f4d739b4
--- a/ansible/health_checker.py
+++ b/ansible/health_checker.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 import imp
 import os
 import logging
diff --git a/ansible/library/announce_routes.py b/ansible/library/announce_routes.py
index af46a719f3..a1ead7a9a4 100644
--- a/ansible/library/announce_routes.py
+++ b/ansible/library/announce_routes.py
@@ -113,7 +113,7 @@ def wait_for_http(host_ip, http_port, timeout=10):
 
 def get_topo_type(topo_name):
     pattern = re.compile(
-        r'^(t0-mclag|t0|t1|ptf|fullmesh|dualtor|t2|mgmttor|m0|mc0|mx)')
+        r'^(t0-mclag|t0|t1|ptf|fullmesh|dualtor|t2|mgmttor|m0|mc0|mx|appliance)')
     match = pattern.match(topo_name)
     if not match:
         return "unsupported"
@@ -956,6 +956,27 @@ def fib_t0_mclag(topo, ptf_ip, action="announce"):
         change_routes(action, ptf_ip, port6, routes_v6)
 
 
+def fib_appliance(topo, ptf_ip, action="announce"):
+    common_config = topo['configuration_properties'].get('common', {})
+    nhipv4 = common_config.get("nhipv4", NHIPV4)
+    nhipv6 = common_config.get("nhipv6", NHIPV6)
+
+    routes_v4 = []
+    routes_v6 = []
+    routes_v4.append(("0.0.0.0/0", nhipv4, None))
+    routes_v6.append(("::/0", nhipv6, None))
+    vms = topo['topology']['VMs']
+    all_vms = sorted(vms.keys())
+
+    for vm in all_vms:
+        vm_offset = vms[vm]['vm_offset']
+        port = IPV4_BASE_PORT + vm_offset
+        port6 = IPV6_BASE_PORT + vm_offset
+
+        change_routes(action, ptf_ip, port, routes_v4)
+        change_routes(action, ptf_ip, port6, routes_v6)
+
+
 def main():
     module = AnsibleModule(
         argument_spec=dict(
@@ -1000,6 +1021,9 @@ def main():
         elif topo_type == "mx":
             fib_mx(topo, ptf_ip, action=action)
             module.exit_json(changed=True)
+        elif topo_type == "appliance":
+            fib_appliance(topo, ptf_ip, action=action)
+            module.exit_json(changed=True)
         else:
             module.exit_json(
                 msg='Unsupported topology "{}" - skipping announcing routes'.format(topo_name))
diff --git a/ansible/library/conn_graph_facts.py b/ansible/library/conn_graph_facts.py
index c0a64f1a1e..67bbade16f 100755
--- a/ansible/library/conn_graph_facts.py
+++ b/ansible/library/conn_graph_facts.py
@@ -1,14 +1,15 @@
 #!/usr/bin/env python
+import csv
+
 from ansible.module_utils.basic import AnsibleModule
-import lxml.etree as ET
 import yaml
 import os
 import logging
 import traceback
-import ipaddr as ipaddress
+import ipaddress
+import six
 from operator import itemgetter
 from itertools import groupby
-from collections import defaultdict
 from natsort import natsorted
 
 try:
@@ -27,11 +28,9 @@
 
 DOCUMENTATION = '''
 module: conn_graph_facts.py
-version_added: 2.0
-short_description: Retrive lab fanout switches physical and vlan connections
+short_description: Retrieve lab device and physical connection information.
 Description:
-    Retrive lab fanout switches physical and vlan connections
-    add to Ansible facts
+    Retrieve lab device information and the physical connections between the devices.
 options:
     host: [fanout switch name|Server name|Sonic Switch Name]
@@ -47,11 +46,14 @@
               the root fanout switch.
         required: False
     filepath:
-        Path of the connection graph xml file. Override the default path for looking up connection graph xml file.
-        required: False
-    filename:
-        Name of the connection graph xml file. Override the behavior of looking up connection graph xml file. When
-        this option is specified, always use the specified connection graph xml file.
+        Folder of the csv graph files.
+
+    group:
+        The csv files are organized in multiple groups. Each group has a set of csv files describing the connections
+        and devices connected to the same root fanout switch. Usually devices within the same group are also tracked
+        in a dedicated inventory file under the `ansible` folder.
+        When the group is not supplied, this module will try to find the group based on the supplied
+        host/hosts/anchor information.
         required: False
 
     Mutually exclusive options: host, hosts, anchor
@@ -105,41 +107,61 @@
     },......
 }
-
 '''
 
 
-class Parse_Lab_Graph():
-    """
-    Parse the generated lab physical connection graph and insert Ansible fact of the graph
-    for deploying fanout switches and dynamically configure vlan mapping to hook up EOS VMs
-    and ptf docker for lab testing
-
-    There is a creategraph.py under ansible/files to create the png and
-    dpg like graph file for lab devices from csv file
-    The 2 csv files under ansible/files are csv files to list all devices and device links for Sonic testbed
-    There is a sonic_server_links.yml file to describe the connections between servers port and Sonic devices
-    This module conn_graph_file also parse the server links to have
-    a full root fanout switches template for deployment.
-    """
+LAB_GRAPHFILE_PATH = "files/"
+LAB_GRAPH_GROUPS_FILE = "graph_groups.yml"
+
+
+class LabGraph(object):
+
+    SUPPORTED_CSV_FILES = {
+        "devices": "sonic_{}_devices.csv",
+        "links": "sonic_{}_links.csv",
+        "pdu_links": "sonic_{}_pdu_links.csv",
+        "console_links": "sonic_{}_console_links.csv",
+        "bmc_links": "sonic_{}_bmc_links.csv",
+    }
+
+    def __init__(self, path, group):
+        self.path = path
+        self.group = group
+        self.csv_files = {k: os.path.join(self.path, v.format(group)) for k, v in self.SUPPORTED_CSV_FILES.items()}
+
+        self._cache_port_alias_to_name = {}
+
+        self.csv_facts = {}
+        self.read_csv_files()
 
-    def __init__(self, xmlfile):
-        self.root = ET.parse(xmlfile)
-        self.devices = {}
-        self.vlanport = {}
-        self.vlanrange = {}
-        self.links = {}
-        self.consolelinks = {}
-        self.bmclinks = {}
-        self.pdulinks = {}
-        self.server = defaultdict(dict)
-        self.pngtag = 'PhysicalNetworkGraphDeclaration'
-        self.dpgtag = 'DataPlaneGraph'
-        self.pcgtag = 'PowerControlGraphDeclaration'
-        self.csgtag = 'ConsoleGraphDeclaration'
-        self.bmcgtag = 'BmcGraphDeclaration'
-
-    def port_vlanlist(self, vlanrange):
+        self.graph_facts = {}
+        self.csv_to_graph_facts()
+
+    def read_csv_files(self):
+        for k, v in self.csv_files.items():
+            if os.path.exists(v):
+                self.csv_facts[k] = self.read_csv_file(v)
+            else:
+                logging.debug("Missing file {}".format(v))
+                self.csv_facts[k] = {}
+
+    def read_csv_file(self, v):
+        with open(v) as csvfile:
+            reader = csv.DictReader(csvfile)
+            return [row for row in reader]
+
+    def _port_vlanlist(self, vlanrange):
+        """Convert vlan range string to list of vlan ids
+
+        Args:
+            vlanrange (str): vlan range string, e.g. "1-10,20,30-40"
+
+        Raises:
+            ValueError: Unexpected vlanrange string.
+ + Returns: + list: list of vlan ids + """ vlans = [] for vlanid in list(map(str.strip, vlanrange.split(','))): if vlanid.isdigit(): @@ -147,512 +169,263 @@ def port_vlanlist(self, vlanrange): continue elif '-' in vlanid: vlanlist = list(map(str.strip, vlanid.split('-'))) - vlans.extend(list(range(int(vlanlist[0]), int(vlanlist[1])+1))) + vlans.extend(list(range(int(vlanlist[0]), int(vlanlist[1]) + 1))) continue elif vlanid != '': - raise ValueError('vlan range error "%s"' % vlanrange) + raise ValueError('vlan range error "{}"'.format(vlanrange)) vlans = sorted(set(vlans)) return vlans - def parse_graph(self): - """ - Parse the xml graph file - """ - deviceinfo = {} - deviceroot = self.root.find(self.pngtag).find('Devices') - devices = deviceroot.findall('Device') - if devices is not None: - for dev in devices: - attributes = dev.attrib - hostname = attributes['Hostname'] - if hostname is not None: - deviceinfo[hostname] = {} - deviceinfo[hostname]["Hostname"] = hostname - deviceinfo[hostname]['HwSku'] = attributes.get('HwSku') - deviceinfo[hostname]['Type'] = attributes.get('Type') - deviceinfo[hostname]['CardType'] = attributes.get( - 'CardType', 'Linecard') - deviceinfo[hostname]['HwSkuType'] = attributes.get( - 'HwSkuType', 'predefined') - deviceinfo[hostname]['Os'] = attributes.get('Os') - self.links[hostname] = {} - devicel2info = {} - devicel3s = self.root.find(self.dpgtag).findall('DevicesL3Info') - devicel2s = self.root.find(self.dpgtag).findall('DevicesL2Info') - if devicel2s is not None: - for l2info in devicel2s: - hostname = l2info.attrib['Hostname'] - if hostname is not None: - devicel2info[hostname] = {} - vlans = l2info.findall('InterfaceVlan') - for vlan in vlans: - portname = vlan.attrib['portname'] - portmode = vlan.attrib['mode'] - portvlanid = vlan.attrib['vlanids'] - portvlanlist = self.port_vlanlist(portvlanid) - devicel2info[hostname][portname] = { - 'mode': portmode, 'vlanids': portvlanid, 'vlanlist': portvlanlist} - if devicel3s is not None: - for l3info in devicel3s: - hostname = l3info.attrib['Hostname'] - if hostname is not None: - deviceinfo[hostname]["Hostname"] = hostname - management_ip = l3info.find( - 'ManagementIPInterface').attrib['Prefix'] - deviceinfo[hostname]['ManagementIp'] = management_ip - mgmtip = ipaddress.IPNetwork(management_ip) - deviceinfo[hostname]['mgmtip'] = str(mgmtip.ip) - management_gw = str(mgmtip.network+1) - deviceinfo[hostname]['ManagementGw'] = management_gw - allinks = self.root.find(self.pngtag).find( - 'DeviceInterfaceLinks').findall('DeviceInterfaceLink') - if allinks is not None: - for link in allinks: - start_dev = link.attrib['StartDevice'] - end_dev = link.attrib['EndDevice'] - if start_dev: - self.links[start_dev][link.attrib['StartPort']] = { - 'peerdevice': link.attrib['EndDevice'], 'peerport': link.attrib['EndPort'], - 'speed': link.attrib['BandWidth']} - if end_dev: - self.links[end_dev][link.attrib['EndPort']] = { - 'peerdevice': link.attrib['StartDevice'], 'peerport': link.attrib['StartPort'], - 'speed': link.attrib['BandWidth']} - console_root = self.root.find(self.csgtag) - if console_root: - devicecsgroot = console_root.find('DevicesConsoleInfo') - devicescsg = devicecsgroot.findall('DeviceConsoleInfo') - if devicescsg is not None: - for dev in devicescsg: - attributes = dev.attrib - hostname = attributes['Hostname'] - if hostname is not None: - deviceinfo[hostname] = {} - deviceinfo[hostname]["Hostname"] = hostname - deviceinfo[hostname]['HwSku'] = attributes.get('HwSku') - deviceinfo[hostname]['Type'] = 
attributes.get('Type') - deviceinfo[hostname]['Protocol'] = attributes.get( - 'Protocol') - deviceinfo[hostname]['Os'] = attributes.get('Os') - mgmt_ip = attributes.get('ManagementIp') - management_gw = str( - ipaddress.IPNetwork(mgmt_ip).network+1) - deviceinfo[hostname]['ManagementIp'] = mgmt_ip - deviceinfo[hostname]['ManagementGw'] = management_gw - self.consolelinks[hostname] = {} - console_link_root = console_root.find('ConsoleLinksInfo') - if console_link_root: - allconsolelinks = console_link_root.findall('ConsoleLinkInfo') - if allconsolelinks is not None: - for consolelink in allconsolelinks: - attributes = consolelink.attrib - start_dev = attributes.get('StartDevice') - start_port = attributes.get('StartPort') - end_dev = attributes.get('EndDevice') - end_port = 'ConsolePort' - console_proxy = attributes.get('Proxy') - console_type = attributes.get('Console_type') - baud_rate = attributes.get('BaudRate') - - if start_dev: - if start_dev not in self.consolelinks: - self.consolelinks.update({start_dev: {}}) - self.consolelinks[start_dev][start_port] = { - 'peerdevice': end_dev, - 'peerport': end_port, - 'proxy': console_proxy, - 'type': console_type, - 'baud_rate': baud_rate - } - if end_dev: - if end_dev not in self.consolelinks: - self.consolelinks.update({end_dev: {}}) - self.consolelinks[end_dev][end_port] = { - 'peerdevice': start_dev, - 'peerport': start_port, - 'proxy': console_proxy, - 'type': console_type, - 'baud_rate': baud_rate - } - bmc_root = self.root.find(self.bmcgtag) - if bmc_root: - devicebmcgroot = bmc_root.find('DevicesBmcInfo') - devicesbmcg = devicebmcgroot.findall('DeviceBmcInfo') - if devicesbmcg is not None: - for dev in devicesbmcg: - attributes = dev.attrib - hostname = attributes['Hostname'] - if hostname is not None: - deviceinfo[hostname] = {} - deviceinfo[hostname]["Hostname"] = hostname - deviceinfo[hostname]['HwSku'] = attributes.get('HwSku') - deviceinfo[hostname]['Type'] = attributes.get('Type') - deviceinfo[hostname]['Protocol'] = attributes.get( - 'Protocol') - deviceinfo[hostname]['Os'] = attributes.get('Os') - mgmt_ip = attributes.get('ManagementIp') - management_gw = str( - ipaddress.IPNetwork(mgmt_ip).network+1) - deviceinfo[hostname]['ManagementIp'] = mgmt_ip - deviceinfo[hostname]['ManagementGw'] = management_gw - self.bmclinks[hostname] = {} - bmc_link_root = bmc_root.find('BmcLinksInfo') - if bmc_link_root: - allbmclinks = bmc_link_root.findall('BmcLinkInfo') - if allbmclinks is not None: - for bmclink in allbmclinks: - attributes = bmclink.attrib - start_dev = attributes.get('StartDevice') - start_port = attributes.get('StartPort') - end_dev = attributes.get('EndDevice') - end_port = attributes.get('EndPort') - bmc_ip = attributes.get("BmcIp") - if start_dev: - if start_dev not in self.bmclinks: - self.bmclinks.update({start_dev: {}}) - self.bmclinks[start_dev][start_port] = { - 'peerdevice': end_dev, - 'peerport': end_port, - 'bmc_ip': bmc_ip - } - if end_dev: - if end_dev not in self.bmclinks: - self.bmclinks.update({end_dev: {}}) - self.bmclinks[end_dev][end_port] = { - 'peerdevice': start_dev, - 'peerport': start_port, - 'bmc_ip': bmc_ip - } - - pdu_root = self.root.find(self.pcgtag) - if pdu_root: - devicepcgroot = pdu_root.find('DevicesPowerControlInfo') - devicespcsg = devicepcgroot.findall('DevicePowerControlInfo') - if devicespcsg is not None: - for dev in devicespcsg: - hostname = dev.attrib['Hostname'] - if hostname is not None: - deviceinfo[hostname] = {} - deviceinfo[hostname]["Hostname"] = hostname - hwsku = 
dev.attrib['HwSku'] - devtype = dev.attrib['Type'] - protocol = dev.attrib['Protocol'] - mgmt_ip = dev.attrib['ManagementIp'] - deviceinfo[hostname]['HwSku'] = hwsku - deviceinfo[hostname]['Type'] = devtype - deviceinfo[hostname]['Protocol'] = protocol - deviceinfo[hostname]['ManagementIp'] = mgmt_ip - self.pdulinks[hostname] = {} - pdu_link_root = pdu_root.find('PowerControlLinksInfo') - if pdu_link_root: - allpdulinks = pdu_link_root.findall('PowerControlLinkInfo') - if allpdulinks is not None: - for pdulink in allpdulinks: - start_dev = pdulink.attrib['StartDevice'] - end_dev = pdulink.attrib['EndDevice'] - logging.debug("pdulink {}".format(pdulink.attrib)) - logging.debug("self.pdulinks {}".format(self.pdulinks)) - if start_dev: - if start_dev not in self.pdulinks: - self.pdulinks.update({start_dev: {}}) - self.pdulinks[start_dev][pdulink.attrib['StartPort']] = { - 'peerdevice': pdulink.attrib['EndDevice'], 'peerport': pdulink.attrib['EndPort']} - if end_dev: - if end_dev not in self.pdulinks: - self.pdulinks.update({end_dev: {}}) - self.pdulinks[end_dev][pdulink.attrib['EndPort']] = { - 'peerdevice': pdulink.attrib['StartDevice'], 'peerport': pdulink.attrib['StartPort']} - self.devices = deviceinfo - self.vlanport = devicel2info - - def convert_list2range(self, list_name): - """ - common module to convert a list to range for easier vlan configuration generation + def _convert_list2range(self, vlans): + """Convert list of vlan ids to vlan range string """ - ranges = [] - sl = sorted(set(list_name)) + vlan_ranges = [] + sl = sorted(set(vlans)) for _, g in groupby(enumerate(sl), lambda t: t[0] - t[1]): group = list(map(itemgetter(1), g)) if len(group) == 1: - ranges.append(str(group[0])) + vlan_ranges.append(str(group[0])) else: - ranges.append(str(group[0])+'-'+str(group[-1])) - return ranges - - def get_server_links(self): - return self.server + vlan_ranges.append(str(group[0]) + '-' + str(group[-1])) + return vlan_ranges + + def _get_port_alias_to_name_map(self, hwsku): + if hwsku in self._cache_port_alias_to_name: + return self._cache_port_alias_to_name[hwsku] + port_alias_to_name_map, _, _ = get_port_alias_to_name_map(hwsku) + self._cache_port_alias_to_name[hwsku] = port_alias_to_name_map + return port_alias_to_name_map + + def _port_alias_to_name(self, device, port): + hwsku = self.graph_facts["devices"][device]["HwSku"] + if self.graph_facts["devices"][device].get("Os", "").lower() != "sonic": + return port + return self._get_port_alias_to_name_map(hwsku).get(port, port) + + def _get_sorted_port_name_list(self, hwsku): + return natsorted(self._get_port_alias_to_name_map(hwsku).values()) + + def csv_to_graph_facts(self): + devices = {} + for entry in self.csv_facts["devices"]: + management_ip = entry["ManagementIp"] + if len(management_ip.split("/")) > 1: + iface = ipaddress.ip_interface(six.text_type(management_ip)) + entry["mgmtip"] = str(iface.ip) + entry["ManagementGw"] = str(iface.network.network_address + 1) + + if entry["Type"].lower() not in ["pdu", "consoleserver", "mgmttstorrouter"]: + if "CardType" not in entry: + entry["CardType"] = "Linecard" + if "HwSkuType" not in entry: + entry["HwSkuType"] = "predefined" + devices[entry["Hostname"]] = entry + self.graph_facts["devices"] = devices + + links = {} + port_vlans = {} + for entry in self.csv_facts["links"]: + start_device = entry["StartDevice"] + start_port = self._port_alias_to_name(start_device, entry["StartPort"]) + end_device = entry["EndDevice"] + end_port = self._port_alias_to_name(end_device, entry["EndPort"]) + 
band_width = entry["BandWidth"] + vlan_ID = entry["VlanID"] + vlan_mode = entry["VlanMode"] + + if start_device not in links: + links[start_device] = {} + links[start_device][start_port] = { + "peerdevice": end_device, + "peerport": end_port, + "speed": band_width, + } - def get_host_vlan(self, hostname): - """ - Calculate dpg vlan data for each link(port) and return a Switch/Device total Vlan range - """ + if end_device not in links: + links[end_device] = {} + links[end_device][end_port] = { + "peerdevice": start_device, + "peerport": start_port, + "speed": band_width, + } - if hostname in self.devices and self.devices[hostname]['Type'].lower() == 'devsonic': - self.vlanport[hostname] = {} - for port in self.links[hostname]: - peerdevice = self.links[hostname][port]['peerdevice'] - if self.devices[peerdevice]["Type"].lower() == "devsonic": - continue - peerport = self.links[hostname][port]['peerport'] - peerportmode = self.vlanport[peerdevice][peerport]['mode'] - peervlanids = self.vlanport[peerdevice][peerport]['vlanids'] - peervlanlist = self.vlanport[peerdevice][peerport]['vlanlist'] - self.vlanport[hostname][port] = { - 'mode': peerportmode, 'vlanids': peervlanids, 'vlanlist': peervlanlist} - - if hostname in self.vlanport: - dpgvlans = self.vlanport[hostname] - vlans = [] - for intf in dpgvlans: - vlans += dpgvlans[intf]['vlanlist'] - self.vlanrange = self.convert_list2range(vlans) - return {'VlanRange': self.vlanrange, 'VlanList': vlans} - - def get_host_device_info(self, hostname): - """ - return the given hostname device info of hwsku and type - """ - return self.devices.get(hostname) + if start_device not in port_vlans: + port_vlans[start_device] = {} + port_vlans[start_device][start_port] = { + "mode": vlan_mode, + "vlanids": vlan_ID, + "vlanlist": self._port_vlanlist(vlan_ID), + } - def get_host_port_vlans(self, hostname): - """ - return the given hostname device vlan port information - """ - return self.vlanport.get(hostname) + if end_device not in port_vlans: + port_vlans[end_device] = {} + port_vlans[end_device][end_port] = { + "mode": vlan_mode, + "vlanids": vlan_ID, + "vlanlist": self._port_vlanlist(vlan_ID), + } - def get_host_connections(self, hostname): - """ - return the given hostname device each individual connection - """ - return self.links.get(hostname) + self.graph_facts["links"] = links + self.graph_facts["port_vlans"] = port_vlans + + console_links = {} + for entry in self.csv_facts["console_links"]: + start_device = entry["EndDevice"] + if start_device not in console_links: + console_links[start_device] = {} + console_links[start_device] = { + "ConsolePort": { + "baud_rate": entry.get("BaudRate", None), + "peerdevice": entry["StartDevice"], + "peerport": entry["StartPort"], + "proxy": entry["Proxy"], + "type": entry["Console_type"], + } + } + self.graph_facts["console_links"] = console_links + + pdu_links = {} + for entry in self.csv_facts["pdu_links"]: + start_device = entry["EndDevice"] + if start_device not in pdu_links: + pdu_links[start_device] = {} + pdu_links[start_device][entry["EndPort"]] = { + "peerdevice": entry["StartDevice"], + "peerport": entry["StartPort"], + } + self.graph_facts["pdu_links"] = pdu_links + + bmc_links = {} + for entry in self.csv_facts["bmc_links"]: + start_device = entry["EndDevice"] + if start_device not in bmc_links: + bmc_links[start_device] = {} + bmc_links[start_device][entry["EndPort"]] = { + "peerdevice": entry["StartDevice"], + "peerport": entry["StartPort"], + "bmc_ip": entry["BmcIp"], + } + 
self.graph_facts["bmc_links"] = bmc_links + + def build_results(self, hostnames, ignore_error=False): + device_info = {} + device_conn = {} + device_port_vlans = {} + device_vlan_list = {} + device_vlan_range = {} + device_vlan_map_list = {} + device_console_link = {} + device_console_info = {} + device_pdu_links = {} + device_pdu_info = {} + device_bmc_link = {} + device_bmc_info = {} + msg = "" - def contains_hosts(self, hostnames, part): - if not part: - return set(hostnames) <= set(self.devices) - # It's possible that not all devices are found in connect_graph when using in devutil - THRESHOLD = 0.8 - count = 0 for hostname in hostnames: - if hostname in self.devices.keys(): - count += 1 - return hostnames and (count * 1.0 / len(hostnames) >= THRESHOLD) - - # get the console of a device, if it exists, host is being managed by the returned device - def get_host_console_info(self, hostname): - """ - return the given hostname console info of mgmtip, protocol, hwsku and type - """ - if hostname in self.devices: - try: - ret = self.devices[self.consolelinks[hostname] - ['ConsolePort']['peerdevice']] - except KeyError: - ret = {} - return ret - else: - """ - Please be noted that an empty dict is returned when hostname is not found - The behavior is different with get_host_vlan. devutils script will check if the returned dict - is empty to determine if console info exists for given hostname. - """ - return {} - - # return the list of devices that is managed by host through console - def get_host_console_link(self, hostname): - """ - return the given hostname console link info of console server and port - """ - if hostname in self.consolelinks: - return self.consolelinks[hostname] - else: - # Please be noted that an empty dict is returned when hostname is not found - return {} - - # get the bmc of a device, if it exists, host is being managed by the returned device - def get_host_bmc_info(self, hostname): - """ - return the given hostname bmc info of mgmtip, protocol, hwsku and type - """ - if hostname in self.devices: - try: - # currently we only support end port iDRAC - ret = self.devices[self.bmclinks[hostname] - ['iDRAC']['peerdevice']] - except KeyError: - ret = {} - return ret - else: - """ - Please be noted that an empty dict is returned when hostname is not found - The behavior is different with get_host_vlan. 
- """ - return {} - - # return the list of devices that is managed by host through bmc - def get_host_bmc_link(self, hostname): - """ - return the given hostname bmc link info of management server and port - """ - if hostname in self.bmclinks: - return self.bmclinks[hostname] - else: - # Please be noted that an empty dict is returned when hostname is not found - return {} - - def get_host_pdu_info(self, hostname): - """ - return the given hostname pdu info of mgmtip, protocol, hwsku and type - """ - if hostname in self.devices: - ret = {} - if hostname in self.pdulinks: - for key in self.pdulinks[hostname].keys(): - try: - ret.update( - {key: self.devices[self.pdulinks[hostname][key]['peerdevice']]}) - except KeyError: - pass - return ret - else: - # Please be noted that an empty dict is returned when hostname is not found - return {} - - def get_host_pdu_links(self, hostname): - """ - return the given hostname pdu links info of pdu servers and ports - """ - if hostname in self.pdulinks: - return self.pdulinks[hostname] - else: - # Please be noted that an empty dict is returned when hostname is not found - return {} - - -LAB_CONNECTION_GRAPH_FILE = 'graph_files.yml' -EMPTY_GRAPH_FILE = 'empty_graph.xml' -LAB_GRAPHFILE_PATH = 'files/' - - -def find_graph(hostnames, part=False): - """ - Find a graph file contains all devices in testbed. - duts are spcified by hostnames - - Parameters: - hostnames: list of duts in the target testbed. - part: select the graph file if over 80% of hosts are found in conn_graph when part is True - """ - filename = os.path.join(LAB_GRAPHFILE_PATH, LAB_CONNECTION_GRAPH_FILE) - with open(filename) as fd: - file_list = yaml.safe_load(fd) - - # Finding the graph file contains all duts from hostnames, - for fn in file_list: - logging.debug("Looking at conn graph file: %s for hosts %s" % - (fn, hostnames)) - filename = os.path.join(LAB_GRAPHFILE_PATH, fn) - lab_graph = Parse_Lab_Graph(filename) - lab_graph.parse_graph() - logging.debug("For file %s, got hostnames %s" % - (fn, lab_graph.devices)) - if lab_graph.contains_hosts(hostnames, part): - logging.debug( - "Returning lab graph from conn graph file: %s for hosts %s" % (fn, hostnames)) - return lab_graph - # Fallback to return an empty connection graph, this is - # needed to bridge the kvm test needs. The KVM test needs - # A graph file, which used to be whatever hardcoded file. - # Here we provide one empty file for the purpose. - lab_graph = Parse_Lab_Graph(os.path.join( - LAB_GRAPHFILE_PATH, EMPTY_GRAPH_FILE)) - lab_graph.parse_graph() - return lab_graph - - -def get_port_name_list(hwsku): - # Create a map of SONiC port name to physical port index - # Start by creating a list of all port names - port_alias_to_name_map, _, _ = get_port_alias_to_name_map(hwsku) - - # Create a map of SONiC port name to physical port index - # Start by creating a list of all port names - port_name_list = port_alias_to_name_map.values() - # Sort the list in natural order, because SONiC port names, when - # sorted in natural sort order, match the phyical port index order - port_name_list_sorted = natsorted(port_name_list) - return port_name_list_sorted - - -def build_results(lab_graph, hostnames, ignore_error=False): - """ - Refactor code for building json results. 
- Code is refactored because same logic is needed in devutil - """ - device_info = {} - device_conn = {} - device_port_vlans = {} - device_vlan_range = {} - device_vlan_list = {} - device_vlan_map_list = {} - device_console_info = {} - device_console_link = {} - device_bmc_info = {} - device_bmc_link = {} - device_pdu_info = {} - device_pdu_links = {} - msg = {} - for hostname in hostnames: - dev = lab_graph.get_host_device_info(hostname) - if dev is None and not ignore_error: - msg = "cannot find info for %s" % hostname - return (False, msg) - device_info[hostname] = dev - device_conn[hostname] = lab_graph.get_host_connections(hostname) - host_vlan = lab_graph.get_host_vlan(hostname) - port_vlans = lab_graph.get_host_port_vlans(hostname) - # for multi-DUTs, must ensure all have vlan configured. - if host_vlan: - device_vlan_range[hostname] = host_vlan["VlanRange"] - device_vlan_list[hostname] = host_vlan["VlanList"] - if dev["Type"].lower() != "devsonic": - device_vlan_map_list[hostname] = host_vlan["VlanList"] + device = self.graph_facts["devices"].get(hostname, None) + if device is None and not ignore_error: + msg = "Cannot find device {}, check if it is in {}".format(hostname, self.csv_files["devices"]) + return (False, msg) + device_info[hostname] = device + device_conn[hostname] = self.graph_facts["links"].get(hostname, {}) + + device_port_vlans[hostname] = self.graph_facts["port_vlans"].get(hostname, {}) + + vlan_list = [] + for port_info in device_port_vlans[hostname].values(): + vlan_list.extend(port_info["vlanlist"]) + vlan_list = natsorted(vlan_list) + device_vlan_list[hostname] = vlan_list + device_vlan_range[hostname] = self._convert_list2range(vlan_list) + + if device["Type"].lower() != "devsonic": + device_vlan_map_list[hostname] = vlan_list else: device_vlan_map_list[hostname] = {} - port_name_list_sorted = get_port_name_list(dev['HwSku']) - logging.debug("For %s with hwsku %s, port_name_list is %s" % ( - hostname, dev['HwSku'], port_name_list_sorted)) - for a_host_vlan in host_vlan["VlanList"]: - # Get the corresponding port for this vlan from the port vlan list for this hostname + sorted_port_name_list = self._get_sorted_port_name_list(device["HwSku"]) + + for host_vlan in vlan_list: found_port_for_vlan = False - for a_port in port_vlans: - if a_host_vlan in port_vlans[a_port]['vlanlist']: - if a_port in port_name_list_sorted: - port_index = port_name_list_sorted.index( - a_port) - device_vlan_map_list[hostname][port_index] = a_host_vlan + for port_name, port_info in device_port_vlans[hostname].items(): + if host_vlan in port_info["vlanlist"]: + if port_name in sorted_port_name_list: + port_index = sorted_port_name_list.index(port_name) + device_vlan_map_list[hostname][port_index] = host_vlan found_port_for_vlan = True - break elif not ignore_error: - msg = "Did not find port for %s in the ports based on hwsku '%s' for host %s" % ( - a_port, dev['HwSku'], hostname) + msg = "Did not find port for '{}' in the ports based on hwsku '{}' for host '{}'"\ + .format(port_name, device["HwSku"], hostname) + logging.error("Sorted port name list: {}".format(sorted_port_name_list)) + logging.error("port_vlans of host {}: {}".format(hostname, device_port_vlans[hostname])) return (False, msg) if not found_port_for_vlan and not ignore_error: - msg = "Did not find corresponding link for vlan %d in %s for host %s" % ( - a_host_vlan, port_vlans, hostname) + msg = "Did not find corresponding link for vlan {} in {} for host {}"\ + .format(host_vlan, device_port_vlans[hostname], hostname) 
return (False, msg) - device_port_vlans[hostname] = port_vlans - device_console_info[hostname] = lab_graph.get_host_console_info( - hostname) - device_console_link[hostname] = lab_graph.get_host_console_link( - hostname) - device_bmc_info[hostname] = lab_graph.get_host_bmc_info(hostname) - device_bmc_link[hostname] = lab_graph.get_host_bmc_link(hostname) - device_pdu_info[hostname] = lab_graph.get_host_pdu_info(hostname) - device_pdu_links[hostname] = lab_graph.get_host_pdu_links(hostname) - results = {k: v for k, v in locals().items() - if (k.startswith("device_") and v)} - return (True, results) + device_console_link[hostname] = self.graph_facts["console_links"].get(hostname, {}) + device_console_info[hostname] = self.graph_facts["devices"].get( + device_console_link[hostname].get("ConsolePort", {}).get("peerdevice"), + {} + ) + device_pdu_links[hostname] = self.graph_facts["pdu_links"].get(hostname, {}) + device_pdu_info[hostname] = {} + for psu, psu_info in device_pdu_links[hostname].items(): + pdu_hostname = psu_info.get("peerdevice") + device_pdu_info[hostname][psu] = self.graph_facts["devices"].get(pdu_hostname, {}) + + device_bmc_link[hostname] = self.graph_facts["bmc_links"].get(hostname, {}) + device_bmc_info[hostname] = {} + for _, bmc_link in device_bmc_link[hostname].items(): + bmc_hostname = bmc_link.get("peerdevice") + device_bmc_info[hostname] = self.graph_facts["devices"].get(bmc_hostname, {}) + break + results = {k: v for k, v in locals().items() if (k.startswith("device_") and v)} + + return (True, results) + + +def find_graph(hostnames, part=False): + """Find the graph file for the target device + + Args: + hostnames (list): List of hostnames + part (bool, optional): Select the graph file if over 80% of hosts are found in conn_graph when part is True. + Defaults to False. + + Returns: + obj: Instance of LabGraph or None if no graph file is found. + """ + graph_group_file = os.path.join(LAB_GRAPHFILE_PATH, LAB_GRAPH_GROUPS_FILE) + with open(graph_group_file) as fd: + graph_groups = yaml.safe_load(fd) + + target_graph = None + target_group = None + for group in graph_groups: + logging.debug("Looking at graph files of group {} for hosts {}".format(group, hostnames)) + lab_graph = LabGraph(LAB_GRAPHFILE_PATH, group) + graph_hostnames = set(lab_graph.graph_facts["devices"].keys()) + logging.debug("For graph group {}, got hostnames {}".format(group, graph_hostnames)) + + if not part: + if set(hostnames) <= graph_hostnames: + target_graph = lab_graph + target_group = group + break + else: + THRESHOLD = 0.8 + in_graph_hostnames = set(hostnames).intersection(graph_hostnames) + if len(in_graph_hostnames) * 1.0 / len(hostnames) >= THRESHOLD: + target_graph = lab_graph + target_group = group + break + + if target_graph is not None: + logging.debug("Returning lab graph of group {} for hosts {}".format(target_group, hostnames)) + + return target_graph def main(): @@ -660,8 +433,8 @@ def main(): argument_spec=dict( host=dict(required=False), hosts=dict(required=False, type='list'), - filename=dict(required=False), filepath=dict(required=False), + group=dict(required=False), anchor=dict(required=False, type='list'), ignore_errors=dict(required=False, type='bool', default=False), ), @@ -677,18 +450,17 @@ def main(): else: # return the whole graph hostnames = [] + try: # When called by pytest, the file path is obscured to /tmp/.../. # we need the caller to tell us where the graph files are with # filepath argument. 
-        if m_args['filepath']:
+        if m_args["filepath"]:
            global LAB_GRAPHFILE_PATH
            LAB_GRAPHFILE_PATH = m_args['filepath']

-        if m_args['filename']:
-            filename = os.path.join(LAB_GRAPHFILE_PATH, m_args['filename'])
-            lab_graph = Parse_Lab_Graph(filename)
-            lab_graph.parse_graph()
+        if m_args["group"]:
+            lab_graph = LabGraph(LAB_GRAPHFILE_PATH, m_args["group"])
        else:
            # When calling passed in anchor instead of hostnames,
            # the caller is asking to return the whole graph. This
@@ -696,26 +468,29 @@
            target = anchor if anchor else hostnames
            lab_graph = find_graph(target)

-        # early return for the whole graph or empty graph file(vtestbed)
-        if (
-            not hostnames or
-            not lab_graph.devices and not lab_graph.links and not lab_graph.vlanport
-        ):
+        if not lab_graph:
+            results = {
+                'device_info': {},
+                'device_conn': {},
+                'device_port_vlans': {},
+            }
+            module.exit_json(ansible_facts=results)
+
+        # early return for the whole graph
+        if not hostnames:
            results = {
-                'device_info': lab_graph.devices,
-                'device_conn': lab_graph.links,
-                'device_port_vlans': lab_graph.vlanport,
+                'device_info': lab_graph.graph_facts["devices"],
+                'device_conn': lab_graph.graph_facts["links"],
+                'device_port_vlans': lab_graph.graph_facts["port_vlans"]
            }
            module.exit_json(ansible_facts=results)

-        succeed, results = build_results(
-            lab_graph, hostnames, m_args['ignore_errors'])
+        succeed, results = lab_graph.build_results(hostnames, m_args['ignore_errors'])
        if succeed:
            module.exit_json(ansible_facts=results)
        else:
            module.fail_json(msg=results)
    except (IOError, OSError):
-        module.fail_json(
-            msg="Can not find lab graph file under {}".format(LAB_GRAPHFILE_PATH))
+        module.fail_json(msg="Cannot find required file, exception: {}".format(traceback.format_exc()))
    except Exception:
        module.fail_json(msg=traceback.format_exc())
diff --git a/ansible/library/shell_cmds.py b/ansible/library/shell_cmds.py
index 59f85c6044..bfae69369a 100644
--- a/ansible/library/shell_cmds.py
+++ b/ansible/library/shell_cmds.py
@@ -21,7 +21,10 @@
#            "admin"
#        ],
#        "cmd": "ls /home",
-#        "rc": 0
+#        "cmd_with_timeout": "",
+#        "rc": 0,
+#        "timeout": 0,
+#        "err_msg": ""
#    },
#    {
#        "stderr_lines": [],
@@ -31,7 +34,10 @@
#            "/home/admin"
#        ],
#        "cmd": "pwd",
-#        "rc": 0
+#        "cmd_with_timeout": "",
+#        "rc": 0,
+#        "timeout": 0,
+#        "err_msg": ""
#    }
#    ],
#    "cmds": [
@@ -66,6 +72,7 @@
options:
    cmds: List of commands. Each command should be a string.
    continue_on_fail: Bool. Specify whether to continue running the rest of the commands if any of the commands failed.
+    timeout: Integer. Specify a time limit (in seconds) for each command. 0 means no limit. Default value is 0.
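+        Note: a command containing a single quote cannot be wrapped with timeout; it runs without a time limit and a warning is recorded in err_msg.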
''' EXAMPLES = r''' @@ -76,19 +83,34 @@ - ls /home - pwd continue_on_fail: False + timeout: 30 ''' -def run_cmd(module, cmd): +def run_cmd(module, cmd, timeout): + cmd_with_timeout = '' + err_msg = '' + + if int(timeout) != 0 and "'" in cmd: + err_msg = "[WARNING] timeout is not supported for command contains single quote, ran without time limit" + timeout = 0 + + if int(timeout) == 0: + rc, out, err = module.run_command(cmd, use_unsafe_shell=True) + else: + cmd_with_timeout = "echo '{}' | timeout --preserve-status {} bash".format(cmd, timeout) + rc, out, err = module.run_command(cmd_with_timeout, use_unsafe_shell=True) - rc, out, err = module.run_command(cmd, use_unsafe_shell=True) result = dict( cmd=cmd, + cmd_with_timeout=cmd_with_timeout, + err_msg=err_msg, rc=rc, stdout=out, stderr=err, stdout_lines=out.splitlines(), - stderr_lines=err.splitlines() + stderr_lines=err.splitlines(), + timeout=timeout ) return result @@ -98,18 +120,20 @@ def main(): module = AnsibleModule( argument_spec=dict( cmds=dict(type='list', required=True), - continue_on_fail=dict(type='bool', default=True) + continue_on_fail=dict(type='bool', default=True), + timeout=dict(type='int', default=0) ) ) cmds = module.params['cmds'] continue_on_fail = module.params['continue_on_fail'] + timeout = module.params['timeout'] startd = datetime.datetime.now() results = [] for cmd in cmds: - result = run_cmd(module, cmd) + result = run_cmd(module, cmd, timeout) results.append(result) if result['rc'] != 0 and not continue_on_fail: break diff --git a/ansible/library/test_facts.py b/ansible/library/test_facts.py index 400dbf62a9..8f263fe8b2 100644 --- a/ansible/library/test_facts.py +++ b/ansible/library/test_facts.py @@ -100,7 +100,7 @@ ''' # Default testbed file name -TESTBED_FILE = 'testbed.csv' +TESTBED_FILE = 'testbed.yaml' TESTCASE_FILE = 'roles/test/vars/testcases.yml' diff --git a/ansible/linkstate/testbed_inv.ini b/ansible/linkstate/testbed_inv.ini index 6034ece56f..a8784662d4 100644 --- a/ansible/linkstate/testbed_inv.ini +++ b/ansible/linkstate/testbed_inv.ini @@ -1,5 +1,5 @@ [Global] -testbed_configuration = testbed.csv +testbed_configuration = testbed.yaml vm_inventory = veos lab_inventory = lab lab_links = files/sonic_lab_links.csv diff --git a/ansible/linkstate/testbed_inv.py b/ansible/linkstate/testbed_inv.py index 225aedfbf3..b7387f879c 100755 --- a/ansible/linkstate/testbed_inv.py +++ b/ansible/linkstate/testbed_inv.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import sys import json diff --git a/ansible/module_utils/port_utils.py b/ansible/module_utils/port_utils.py index ec996f4521..a0eacfcda1 100644 --- a/ansible/module_utils/port_utils.py +++ b/ansible/module_utils/port_utils.py @@ -19,7 +19,9 @@ def get_port_alias_to_name_map(hwsku, asic_name=None): port_alias_to_name_map = {} port_alias_asic_map = {} port_name_to_index_map = {} - HWSKU_WITH_PORT_INDEX_FROM_PORT_CONFIG = ["8800-LC-48H-O", "88-LC0-36FH-MO"] + HWSKU_WITH_PORT_INDEX_FROM_PORT_CONFIG = ["Cisco-88-LC0-36FH-M-O36", + "Cisco-88-LC0-36FH-O36", + "Cisco-8800-LC-48H-C48"] try: from sonic_py_common import multi_asic from ansible.module_utils.multi_asic_utils import load_db_config @@ -253,10 +255,10 @@ def get_port_alias_to_name_map(hwsku, asic_name=None): elif hwsku == "Cisco-8101-C64": for i in range(0, 64): port_alias_to_name_map["etp%d" % i] = "Ethernet%d" % (i * 4) - elif hwsku in ["8800-LC-48H-O"]: + elif hwsku in ["Cisco-8800-LC-48H-C48"]: for i in range(0, 48, 1): port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % (i 
* 4) - elif hwsku in ["88-LC0-36FH-MO"]: + elif hwsku in ["Cisco-88-LC0-36FH-M-O36", "Cisco-88-LC0-36FH-O36"]: for i in range(0, 36, 1): port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % (i * 8) elif hwsku in ["msft_multi_asic_vs"]: diff --git a/ansible/recover_server.py b/ansible/recover_server.py index 491921f731..636d8dd88f 100755 --- a/ansible/recover_server.py +++ b/ansible/recover_server.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Script used to recover testbed servers after reboot/upgrade/black-out. - Cleanup server @@ -324,12 +324,12 @@ def _join_all(threads): parser = argparse.ArgumentParser(description='Recover testbed servers.') parser.add_argument('--testbed-servers', action='append', type=str, required=True, help='testbed server to recover') - parser.add_argument('--testbed', default='testbed.csv', - help='testbed file(default: testbed.csv)') + parser.add_argument('--testbed', default='testbed.yaml', + help='testbed file(default: testbed.yaml)') parser.add_argument('--vm-file', default='veos', help='vm inventory file(default: veos)') - parser.add_argument('--vm-type', default='veos', choices=[ - 'veos', 'ceos', 'vsonic'], help='vm type (veos|ceos|vsonic, default: veos)') + parser.add_argument('--vm-type', default='ceos', choices=[ + 'veos', 'ceos', 'vsonic'], help='vm type (veos|ceos|vsonic, default: ceos)') parser.add_argument( '--inventory', help='Deprecated. Inventory info is already in testbed.(csv|yaml), no need to specify in argument') diff --git a/ansible/restart_nightly_ptf.py b/ansible/restart_nightly_ptf.py old mode 100644 new mode 100755 index f366f68326..c0ca92006a --- a/ansible/restart_nightly_ptf.py +++ b/ansible/restart_nightly_ptf.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import argparse import logging import imp diff --git a/ansible/roles/eos/templates/dualtor-64-leaf.j2 b/ansible/roles/eos/templates/dualtor-64-leaf.j2 new file mode 120000 index 0000000000..0312b84bdd --- /dev/null +++ b/ansible/roles/eos/templates/dualtor-64-leaf.j2 @@ -0,0 +1 @@ +dualtor-leaf.j2 \ No newline at end of file diff --git a/ansible/roles/fanout/tasks/sonic/fanout_sonic_202205.yml b/ansible/roles/fanout/tasks/sonic/fanout_sonic_202205.yml index 7f8712e6c2..f7f0e2b9a1 100644 --- a/ansible/roles/fanout/tasks/sonic/fanout_sonic_202205.yml +++ b/ansible/roles/fanout/tasks/sonic/fanout_sonic_202205.yml @@ -20,6 +20,17 @@ dest: "/usr/share/sonic/templates/copp_cfg.j2" become: yes +- name: Disable feature teamd and remove teamd container (avoid swss crash after config reload) + block: + - name: Check if teamd container exists + shell: "docker ps -a -q -f name=teamd" + register: teamd_container + + - name: disable feature teamd and remove container + shell: "config feature state teamd disabled && sleep 10 && docker rm teamd" + become: true + when: teamd_container.stdout != "" + - name: SONiC update config db shell: config reload -y -f become: true diff --git a/ansible/roles/fanout/templates/sonic_mlnx_copp_cfg.j2 b/ansible/roles/fanout/templates/sonic_mlnx_copp_cfg.j2 index 3ccf8d3408..c54dbf02d7 100644 --- a/ansible/roles/fanout/templates/sonic_mlnx_copp_cfg.j2 +++ b/ansible/roles/fanout/templates/sonic_mlnx_copp_cfg.j2 @@ -83,7 +83,7 @@ "trap_group": "queue4_group3", "always_enabled": "true" }, -{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] is not in ['ToRRouter', 'EPMS', 'MgmtTsToR', 'MgmtToRRouter', 'BmcMgmtToRRouter']) 
%} +{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] not in ['ToRRouter', 'EPMS', 'MgmtTsToR', 'MgmtToRRouter', 'BmcMgmtToRRouter']) %} "dhcp_relay": { "trap_ids": "dhcp,dhcpv6", "trap_group": "queue4_group3" diff --git a/ansible/roles/test/files/helpers/arp_responder.py b/ansible/roles/test/files/helpers/arp_responder.py index 057dd6398d..ed951ed13a 100644 --- a/ansible/roles/test/files/helpers/arp_responder.py +++ b/ansible/roles/test/files/helpers/arp_responder.py @@ -10,6 +10,7 @@ import logging logging.getLogger("scapy.runtime").setLevel(logging.ERROR) scapy2.conf.use_pcap = True +import scapy.arch.pcapdnet # noqa F401 def hexdump(data): diff --git a/ansible/roles/test/files/helpers/config_service_acls.sh b/ansible/roles/test/files/helpers/config_service_acls.sh index 1ba81fe9ca..27b680da7f 100755 --- a/ansible/roles/test/files/helpers/config_service_acls.sh +++ b/ansible/roles/test/files/helpers/config_service_acls.sh @@ -105,7 +105,7 @@ logger -t cacltest "added cacl test rules" iptables -nL | logger -t cacltest # Sleep to allow Ansible playbook ample time to attempt to connect and timeout -sleep 180 +sleep 150 # Delete the test ACL config file rm -rf /tmp/testacl.json diff --git a/ansible/roles/test/files/ptftests/IP_decap_test.py b/ansible/roles/test/files/ptftests/IP_decap_test.py index 51433a755e..fe6012274f 100644 --- a/ansible/roles/test/files/ptftests/IP_decap_test.py +++ b/ansible/roles/test/files/ptftests/IP_decap_test.py @@ -520,9 +520,9 @@ def run_encap_combination_test(self, outer_pkt_type, inner_pkt_type): def check_range(self, ip_range, outer_pkt_type, inner_pkt_type, dut_index): dst_ips = [] dst_ips.append(ip_range.get_first_ip()) - if ip_range.length > 1: + if ip_range.length() > 1: dst_ips.append(ip_range.get_last_ip()) - if ip_range.length > 2: + if ip_range.length() > 2: dst_ips.append(ip_range.get_random_ip()) logging.info('Checking dst_ips={}'.format(dst_ips)) diff --git a/ansible/roles/test/files/ptftests/advanced-reboot.py b/ansible/roles/test/files/ptftests/advanced-reboot.py index 7c8bf96110..fe22891073 100644 --- a/ansible/roles/test/files/ptftests/advanced-reboot.py +++ b/ansible/roles/test/files/ptftests/advanced-reboot.py @@ -239,10 +239,7 @@ def __init__(self): # Inter-packet interval, to be used in send_in_background method. # Improve this interval to gain more precision of disruptions. self.send_interval = 0.0035 - # How many packets to be sent in send_in_background method - self.packets_to_send = min( - int(self.time_to_listen / (self.send_interval + 0.0015)), 45000) - + self.sent_packet_count = 0 # Thread pool for background watching operations self.pool = ThreadPool(processes=3) @@ -255,6 +252,7 @@ def __init__(self): self.asic_state_time = {} # Recording last asic state entering time self.asic_vlan_reach = [] # Recording asic vlan reachability self.recording = False # Knob for recording asic_vlan_reach + self.finalizer_state = '' # light_probe: # True : when one direction probe fails, don't probe another. # False: when one direction probe fails, continue probe another. @@ -710,13 +708,6 @@ def setUp(self): if 'warm-reboot' in self.reboot_type: self.log(self.get_sad_info()) - # Pre-generate list of packets to be sent in send_in_background method. 
- generate_start = datetime.datetime.now() - if not self.vnet: - self.generate_bidirectional() - self.log("%d packets are ready after: %s" % ( - len(self.packets_list), str(datetime.datetime.now() - generate_start))) - self.dataplane = ptf.dataplane_instance for p in self.dataplane.ports.values(): port = p.get_packet_source() @@ -897,36 +888,6 @@ def generate_arp_ping_packet(self): self.arp_resp.set_do_not_care_scapy(scapy.ARP, 'hwsrc') self.arp_src_port = src_port - def generate_bidirectional(self): - """ - This method is used to pre-generate packets to be sent in background thread. - Packets are composed into a list, and present a bidirectional flow as next: - five packet from T1, one packet from vlan. - Each packet has sequential TCP Payload - to be identified later. - """ - - self.send_interval = self.time_to_listen / self.packets_to_send - self.packets_list = [] - from_t1_iter = itertools.cycle(self.from_t1) - sent_count_vlan_to_t1 = 0 - sent_count_t1_to_vlan = 0 - for i in range(self.packets_to_send): - payload = '0' * 60 + str(i) - if (i % 5) == 0: # From vlan to T1. - packet = scapyall.Ether(self.from_vlan_packet) - packet.load = payload - from_port = self.from_server_src_port - sent_count_vlan_to_t1 += 1 - else: # From T1 to vlan. - src_port, packet = next(from_t1_iter) - packet = scapyall.Ether(packet) - packet.load = payload - from_port = src_port - sent_count_t1_to_vlan += 1 - self.packets_list.append((from_port, str(packet))) - self.log("Sent prep count vlan to t1: {}".format(sent_count_vlan_to_t1)) - self.log("Sent prep count t1 to vlan: {}".format(sent_count_t1_to_vlan)) - def put_nowait(self, queue, data): try: queue.put_nowait(data) @@ -1008,9 +969,9 @@ def check_warmboot_finalizer(self, finalizer_timeout): self.wait_until_control_plane_up() dut_datetime = self.get_now_time() self.log('waiting for warmboot-finalizer service to become activating') - finalizer_state = self.get_warmboot_finalizer_state() + self.finalizer_state = self.get_warmboot_finalizer_state() - while finalizer_state != 'activating': + while self.finalizer_state != 'activating': time.sleep(1) dut_datetime_after_ssh = self.get_now_time() time_passed = float(dut_datetime_after_ssh.strftime( @@ -1019,15 +980,15 @@ def check_warmboot_finalizer(self, finalizer_timeout): self.fails['dut'].add( 'warmboot-finalizer never reached state "activating"') raise TimeoutError - finalizer_state = self.get_warmboot_finalizer_state() + self.finalizer_state = self.get_warmboot_finalizer_state() self.log('waiting for warmboot-finalizer service to finish') - finalizer_state = self.get_warmboot_finalizer_state() - self.log('warmboot finalizer service state {}'.format(finalizer_state)) + self.finalizer_state = self.get_warmboot_finalizer_state() + self.log('warmboot finalizer service state {}'.format(self.finalizer_state)) count = 0 - while finalizer_state == 'activating': - finalizer_state = self.get_warmboot_finalizer_state() - self.log('warmboot finalizer service state {}'.format(finalizer_state)) + while self.finalizer_state == 'activating': + self.finalizer_state = self.get_warmboot_finalizer_state() + self.log('warmboot finalizer service state {}'.format(self.finalizer_state)) time.sleep(10) if count * 10 > int(self.test_params['warm_up_timeout_secs']): self.fails['dut'].add( @@ -1487,17 +1448,44 @@ def extract_no_cpu_replies(self, arr): else: return non_zero[-1] + def get_teamd_state(self): + stdout, stderr, _ = self.dut_connection.execCommand( + 'sudo systemctl is-active teamd.service') + if stderr: + 
self.fails['dut'].add("Error collecting teamd state. stderr: {}, stdout:{}".format( + str(stderr), str(stdout))) + raise Exception("Error collecting teamd state. stderr: {}, stdout:{}".format( + str(stderr), str(stdout))) + if not stdout: + self.log('teamd state not returned from DUT') + return '' + + teamd_state = stdout[0].strip() + return teamd_state + + def wait_until_teamd_goes_down(self): + self.log('Waiting for teamd service to go down') + teamd_state = self.get_teamd_state() + self.log('teamd service state: {}'.format(teamd_state)) + dut_datetime = self.get_now_time() + teamd_shutdown_timeout = 300 + + while teamd_state == 'active': + time.sleep(1) + dut_datetime_during_shutdown = self.get_now_time() + time_passed = float(dut_datetime_during_shutdown.strftime( + "%s")) - float(dut_datetime.strftime("%s")) + if time_passed > teamd_shutdown_timeout: + self.fails['dut'].add( + 'Teamd service did not go down') + raise TimeoutError + teamd_state = self.get_teamd_state() + + self.log('teamd service state: {}'.format(teamd_state)) + def reboot_dut(self): time.sleep(self.reboot_delay) - if not self.kvm_test and\ - (self.reboot_type == 'fast-reboot' or 'warm-reboot' in - self.reboot_type or 'service-warm-restart' in self.reboot_type): - # Event for the sniff_in_background status. - self.sniffer_started = threading.Event() - self.sniff_thr.start() - self.sender_thr.start() - self.log("Rebooting remote side") if self.reboot_type != 'service-warm-restart' and self.test_params['other_vendor_flag'] is False: # Check to see if the warm-reboot script knows about the retry count feature @@ -1505,24 +1493,44 @@ def reboot_dut(self): "sudo " + self.reboot_type + " -h", timeout=5) if "retry count" in stdout: if self.test_params['neighbor_type'] == "sonic": - stdout, stderr, return_code = self.dut_connection.execCommand( - "sudo " + self.reboot_type + " -N", timeout=30) + reboot_command = self.reboot_type + " -N" else: - stdout, stderr, return_code = self.dut_connection.execCommand( - "sudo " + self.reboot_type + " -n", timeout=30) + reboot_command = self.reboot_type + " -n" else: - stdout, stderr, return_code = self.dut_connection.execCommand( - "sudo " + self.reboot_type, timeout=30) + reboot_command = self.reboot_type + + # create an empty log file to capture output of reboot command + reboot_log_file = "/host/{}.log".format(reboot_command.replace(' ', '')) + self.dut_connection.execCommand("sudo touch {}; sudo chmod 666 {}".format( + reboot_log_file, reboot_log_file)) + + # execute reboot command w/ nohup so that when the execCommand times-out: + # 1. there is a reader/writer for any bash commands using PIPE + # 2. the output and error of CLI still gets written to log file + stdout, stderr, return_code = self.dut_connection.execCommand( + "nohup sudo {} -v &> {}".format( + reboot_command, reboot_log_file), timeout=10) elif self.test_params['other_vendor_flag'] is True: ignore_db_integrity_check = " -d" stdout, stderr, return_code = self.dut_connection.execCommand( - "sudo " + self.reboot_type + ignore_db_integrity_check, timeout=30) + "sudo " + self.reboot_type + ignore_db_integrity_check, timeout=10) else: self.restart_service() return + if not self.kvm_test and\ + (self.reboot_type == 'fast-reboot' or 'warm-reboot' in + self.reboot_type or 'service-warm-restart' in self.reboot_type): + # Event for the sniff_in_background status. 
+ self.sniffer_started = threading.Event() + + self.wait_until_teamd_goes_down() + + self.sniff_thr.start() + self.sender_thr.start() + if stdout != []: self.log("stdout from %s: %s" % (self.reboot_type, str(stdout))) if stderr != []: @@ -1661,17 +1669,14 @@ def apply_filter_all_ports(self, filter_expression): port = p.get_packet_source() scapyall.attach_filter(port.socket, filter_expression) - def send_in_background(self, packets_list=None, interval=None): + def send_in_background(self, packets_list=None): """ This method sends predefined list of packets with predefined interval. """ - if not interval: - interval = self.send_interval if not packets_list: packets_list = self.packets_list self.sniffer_started.wait(timeout=10) with self.dataplane_io_lock: - sent_packet_count = 0 # While running fast data plane sender thread there are two reasons for filter to be applied # 1. filter out data plane traffic which is tcp to free up the load # on PTF socket (sniffer thread is using a different one) @@ -1681,17 +1686,37 @@ def send_in_background(self, packets_list=None, interval=None): 'not (arp and ether src {}) and not tcp'.format(self.test_params['dut_mac'])) sender_start = datetime.datetime.now() self.log("Sender started at %s" % str(sender_start)) - for entry in packets_list: - time.sleep(interval) - if self.vnet: - testutils.send_packet( - self, entry[0], entry[1].decode("base64")) - else: - testutils.send_packet(self, *entry) - sent_packet_count += 1 + + self.packets_list = [] + from_t1_iter = itertools.cycle(self.from_t1) + sent_count_vlan_to_t1 = 0 + sent_count_t1_to_vlan = 0 + + while True: + time.sleep(self.send_interval) + if self.reboot_start and self.finalizer_state == "inactive": + # keep sending packets until device reboots and finalizer enters inactive state + break + payload = '0' * 60 + str(self.sent_packet_count) + if (self.sent_packet_count % 5) == 0: # From vlan to T1. + packet = scapyall.Ether(self.from_vlan_packet) + packet.load = payload + from_port = self.from_server_src_port + sent_count_vlan_to_t1 += 1 + else: # From T1 to vlan. + src_port, packet = next(from_t1_iter) + packet = scapyall.Ether(packet) + packet.load = payload + from_port = src_port + sent_count_t1_to_vlan += 1 + testutils.send_packet(self, from_port, str(packet)) + self.sent_packet_count = self.sent_packet_count + 1 + + self.log("Sent count vlan to t1: {}".format(sent_count_vlan_to_t1)) + self.log("Sent count t1 to vlan: {}".format(sent_count_t1_to_vlan)) self.log("Sender has been running for %s" % str(datetime.datetime.now() - sender_start)) - self.log("Total sent packets by sender: {}".format(sent_packet_count)) + self.log("Total sent packets by sender: {}".format(self.sent_packet_count)) # Signal sniffer thread to allow early finish. # Without this signalling mechanism, the sniffer thread can continue for a hardcoded max time. 
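The rewritten `send_in_background` above no longer replays a pre-generated `packets_list`; it composes each packet on the fly and keeps sending until the reboot has started and the warmboot finalizer reaches `inactive`. A minimal sketch of the payload-numbering scheme it relies on for loss detection (helper names are illustrative, not part of the patch):

```python
# Sketch only: each packet carries a monotonically increasing counter in its
# TCP payload, so gaps in the received sequence reveal the disruption window.
def payload(counter):
    # Fixed '0' padding keeps the payload length stable; the suffix is the counter.
    return '0' * 60 + str(counter)

def direction(counter):
    # Every 5th packet flows from the server (vlan) side toward T1; the other
    # four flow from T1 toward the vlan side, matching send_in_background.
    return 'vlan_to_t1' if counter % 5 == 0 else 't1_to_vlan'

print([(i, direction(i)) for i in range(6)])
# [(0, 'vlan_to_t1'), (1, 't1_to_vlan'), (2, 't1_to_vlan'),
#  (3, 't1_to_vlan'), (4, 't1_to_vlan'), (5, 'vlan_to_t1')]
```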
@@ -1734,14 +1759,18 @@ def tcpdump_sniff(self, wait=300, sniff_filter=''): wait (int): Duration in seconds to sniff the traffic sniff_filter (str): Filter that tcpdump will use to collect only relevant packets """ - capture_pcap = ("/tmp/capture_%s.pcap" % self.logfile_suffix - if self.logfile_suffix is not None else "/tmp/capture.pcap") - subprocess.call(["rm", "-rf", capture_pcap]) # remove old capture - self.kill_sniffer = False - self.start_sniffer(capture_pcap, sniff_filter, wait) - self.create_single_pcap(capture_pcap) - self.packets = scapyall.rdpcap(capture_pcap) - self.log("Number of all packets captured: {}".format(len(self.packets))) + try: + capture_pcap = ("/tmp/capture_%s.pcap" % self.logfile_suffix + if self.logfile_suffix is not None else "/tmp/capture.pcap") + subprocess.call(["rm", "-rf", capture_pcap]) # remove old capture + self.kill_sniffer = False + self.start_sniffer(capture_pcap, sniff_filter, wait) + self.create_single_pcap(capture_pcap) + self.packets = scapyall.rdpcap(capture_pcap) + self.log("Number of all packets captured: {}".format(len(self.packets))) + except Exception: + traceback_msg = traceback.format_exc() + self.log("Error in tcpdump_sniff: {}".format(traceback_msg)) def start_sniffer(self, pcap_path, tcpdump_filter, timeout): """ @@ -1771,6 +1800,8 @@ def start_sniffer(self, pcap_path, tcpdump_filter, timeout): for process in processes_list: process.join() + self.log("Killed all tcpdump processes by SIGINT") + def start_dump_process(self, iface, pcap_path, tcpdump_filter): """ Start tcpdump on specific interface and save data to pcap file @@ -1827,12 +1858,11 @@ def convert_pcapng_to_pcap(self, pcap_path, pcapng_full_capture): def check_tcp_payload(self, packet): """ This method is used by examine_flow() method. - It returns True if a packet is not corrupted and has a valid TCP sequential TCP Payload, - as created by generate_bidirectional() method'. + It returns True if a packet is not corrupted and has a valid TCP sequential TCP Payload """ try: int(str(packet[scapyall.TCP].payload) - ) in range(self.packets_to_send) + ) in range(self.sent_packet_count) return True except Exception: return False @@ -2012,7 +2042,7 @@ def examine_flow(self, filename=None): self.fails["dut"].add("Data traffic loss not found but reboot test type is '%s' which " "must have data traffic loss" % self.reboot_type) - if len(self.packets_list) > sent_counter: + if self.sent_packet_count > sent_counter: self.dataplane_loss_checked_successfully = False self.fails["dut"].add("Not all sent packets counted by receiver process. " "Could be issue with sniffer performance") @@ -2026,7 +2056,7 @@ def examine_flow(self, filename=None): self.fails["dut"].add("Unexpected count of sent packets available in pcap file. " "Could be issue with DUT flooding for original packets which was sent to DUT") - if prev_payload != (self.packets_to_send - 1): + if prev_payload != (self.sent_packet_count - 1): # Specific case when packet loss started but final lost packet not detected self.dataplane_loss_checked_successfully = False message = "Unable to calculate the dataplane traffic loss time. 
The traffic did not restore after " \ diff --git a/ansible/roles/test/files/ptftests/pfc_wd.py b/ansible/roles/test/files/ptftests/pfc_wd.py index cfcfb9769d..b579572618 100644 --- a/ansible/roles/test/files/ptftests/pfc_wd.py +++ b/ansible/roles/test/files/ptftests/pfc_wd.py @@ -20,6 +20,7 @@ def __init__(self): def setUp(self): self.dataplane = ptf.dataplane_instance self.router_mac = self.test_params['router_mac'] + self.vlan_mac = self.test_params.get('vlan_mac', self.router_mac) self.queue_index = int(self.test_params['queue_index']) self.pkt_count = int(self.test_params['pkt_count']) self.port_src = int(self.test_params['port_src']) @@ -106,7 +107,7 @@ def runTest(self): ip_src = "1.1.1.1" pkt_args = { - 'eth_dst': self.router_mac, + 'eth_dst': self.vlan_mac, 'eth_src': src_mac, 'ip_src': ip_src, 'ip_dst': self.ip_dst, diff --git a/ansible/roles/test/files/ptftests/py3/dhcpv6_relay_test.py b/ansible/roles/test/files/ptftests/py3/dhcpv6_relay_test.py index b6da399087..b6d700bbef 100644 --- a/ansible/roles/test/files/ptftests/py3/dhcpv6_relay_test.py +++ b/ansible/roles/test/files/ptftests/py3/dhcpv6_relay_test.py @@ -28,6 +28,8 @@ DHCP6OptElapsedTime = scapy.layers.dhcp6.DHCP6OptElapsedTime DHCP6OptIA_NA = scapy.layers.dhcp6.DHCP6OptIA_NA DUID_LLT = scapy.layers.dhcp6.DUID_LLT +DHCP6OptIfaceId = scapy.layers.dhcp6.DHCP6OptIfaceId +DHCP6OptServerId = scapy.layers.dhcp6.DHCP6OptServerId class DataplaneBaseTest(BaseTest): @@ -262,6 +264,7 @@ def create_dhcp_reply_relay_reply_packet(self): peeraddr=self.client_link_local) reply_relay_reply_packet /= DHCP6OptRelayMsg( message=[DHCP6_Reply(trid=12345)]) + reply_relay_reply_packet /= DHCP6OptIfaceId(ifaceid=self.vlan_ip) return reply_relay_reply_packet @@ -276,6 +279,7 @@ def create_dhcp_relay_forward_packet(self): msgtype=12, linkaddr=self.vlan_ip, peeraddr=self.client_link_local) relay_forward_packet /= DHCP6OptRelayMsg( message=[DHCP6_Solicit(trid=12345)]) + relay_forward_packet /= DHCP6OptElapsedTime(elapsedtime=0) return relay_forward_packet @@ -287,6 +291,7 @@ def create_dhcp_relayed_relay_packet(self): packet_inside = DHCP6_RelayForward( msgtype=12, linkaddr=self.vlan_ip, peeraddr=self.client_link_local) packet_inside /= DHCP6OptRelayMsg(message=[DHCP6_Solicit(trid=12345)]) + packet_inside /= DHCP6OptElapsedTime(elapsedtime=0) relayed_relay_packet /= DHCP6_RelayForward(msgtype=12, hopcount=1, linkaddr=self.relay_linkaddr, peeraddr=self.client_link_local) relayed_relay_packet /= DHCP6OptRelayMsg(message=[packet_inside]) @@ -304,6 +309,7 @@ def create_dhcp_relay_relay_reply_packet(self): packet_inside = DHCP6_RelayReply( msgtype=13, linkaddr=self.vlan_ip, peeraddr=self.client_link_local) packet_inside /= DHCP6OptRelayMsg(message=[DHCP6_Reply(trid=12345)]) + relay_relay_reply_packet /= DHCP6OptServerId(duid=DUID_LLT(lladdr="00:11:22:33:44:55")) relay_relay_reply_packet /= DHCP6OptRelayMsg(message=[packet_inside]) return relay_relay_reply_packet diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py b/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py index b8bdd03e66..7eab6ef987 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py @@ -53,6 +53,11 @@ err_end_ignore_marker = -7 err_start_ignore_marker = -8 +# -- Max log message length +# The default maximum length of a single log message. Any line longer than MAX_LOG_MESSAGE_LENGTH +# will not be picked up by the analyzer. 
+MAX_LOG_MESSAGE_LENGTH = 1000 + class AnsibleLogAnalyzer: ''' @@ -404,7 +409,8 @@ def line_is_expected(self, str, expect_messages_regex): return ret_code - def analyze_file(self, log_file_path, match_messages_regex, ignore_messages_regex, expect_messages_regex): + def analyze_file(self, log_file_path, match_messages_regex, ignore_messages_regex, expect_messages_regex, + maximum_log_length=None): ''' @summary: Analyze input file content for messages matching input regex expressions. See line_matches() for details on matching criteria. @@ -422,6 +428,8 @@ def analyze_file(self, log_file_path, match_messages_regex, ignore_messages_rege @param end_marker_regex - end marker + @param maximum_log_length - The long log message (length > maximum_log_length) will be dropped by LogAnalyzer. + @return: List of strings match search criteria. ''' @@ -500,8 +508,13 @@ def analyze_file(self, log_file_path, match_messages_regex, ignore_messages_rege # Skip long logs in sairedis recording since most likely # they are bulk set operations for non-default routes # without much insight while they are time consuming to analyze - if not check_marker and len(rev_line) > 1000: + # In advanced_reboot test, we need to analyze the bulk operations for mac learning + # So we need to allow long lines + if maximum_log_length is None: + maximum_log_length = MAX_LOG_MESSAGE_LENGTH + if not check_marker and len(rev_line) > maximum_log_length: continue + if self.line_is_expected(rev_line, expect_messages_regex): expected_lines.append(rev_line) @@ -521,7 +534,8 @@ def analyze_file(self, log_file_path, match_messages_regex, ignore_messages_rege return matching_lines, expected_lines # --------------------------------------------------------------------- - def analyze_file_list(self, log_file_list, match_messages_regex, ignore_messages_regex, expect_messages_regex): + def analyze_file_list(self, log_file_list, match_messages_regex, ignore_messages_regex, expect_messages_regex, + maximum_log_length=None): ''' @summary: Analyze input files messages matching input regex expressions. See line_matches() for details on matching criteria. @@ -537,6 +551,9 @@ def analyze_file_list(self, log_file_list, match_messages_regex, ignore_messages @param expect_messages_regex: regex class instance containing messages that are expected to appear in logfile. + @param maximum_log_length + The maximum length of the log message. 
If the length of the log message is greater than this value, the log message will be dropped.
+
 
        @return: Returns map
        '''
        res = {}

        for log_file in log_file_list:
            if not len(log_file):
                continue
-            match_strings, expect_strings = self.analyze_file(
-                log_file, match_messages_regex, ignore_messages_regex, expect_messages_regex)
+            match_strings, expect_strings = self.analyze_file(log_file, match_messages_regex, ignore_messages_regex,
+                                                              expect_messages_regex,
+                                                              maximum_log_length=maximum_log_length)

            match_strings.reverse()
            expect_strings.reverse()
diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt
index 4f2eae9b7b..d253cc9bd0 100644
--- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt
+++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt
@@ -177,3 +177,6 @@
r, ".* ERR .*CounterCheck: Invalid port oid.*"
# https://msazure.visualstudio.com/One/_workitems/edit/17617756
# https://msazure.visualstudio.com/One/_workitems/edit/17863895
r, ".* ERR syncd\d*#syncd.*SAI_API_ACL:_brcm_sai_acl_entry_bind.*"
+
+# https://msazure.visualstudio.com/One/_workitems/edit/24444744/
+r, ".* ERR syncd\d*#syncd.*SAI_API_UNSPECIFIED:sai_bulk_object_get_stats.*"
diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_match.txt b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_match.txt
index 31f1068877..eea5169953 100644
--- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_match.txt
+++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_match.txt
@@ -1,6 +1,6 @@
r, "\.ERR", "crash"
r, "kernel:.*Oops", "kernel:.*hung", "kernel.*oom\s"
-r, "kernel:.*scheduling", "kernel:.*atomic", "kernel:.*panic"
+r, "kernel:.*scheduling", "kernel:.*panic"
r, "kernel:.*\serr", "kernel:.*allocation", "kernel:.*kill"
r, "kernel:.*kmemleak.*", "kernel:.* Err:"
s, " ERR "
diff --git a/ansible/roles/test/tasks/sonic.yml b/ansible/roles/test/tasks/sonic.yml
index 64eddff540..001c1f2400 100644
--- a/ansible/roles/test/tasks/sonic.yml
+++ b/ansible/roles/test/tasks/sonic.yml
@@ -52,7 +52,7 @@

- name: set default testbed file
  set_fact:
-    testbed_file: testbed.csv
+    testbed_file: testbed.yaml
  when: testbed_file is not defined

###############################################
diff --git a/ansible/roles/test/templates/arp_responder.conf.j2 b/ansible/roles/test/templates/arp_responder.conf.j2
index 7d6dcb3062..4cd308e9fc 100644
--- a/ansible/roles/test/templates/arp_responder.conf.j2
+++ b/ansible/roles/test/templates/arp_responder.conf.j2
@@ -1,5 +1,5 @@
[program:arp_responder]
-command=/usr/bin/python /opt/arp_responder.py {{ arp_responder_args }}
+command=/root/env-python3/bin/python3 /opt/arp_responder.py {{ arp_responder_args }}
process_name=arp_responder
stdout_logfile=/tmp/arp_responder.out.log
stderr_logfile=/tmp/arp_responder.err.log
diff --git a/ansible/roles/vm_set/files/mux_simulator.md b/ansible/roles/vm_set/files/mux_simulator.md
index 12b70d9d04..956ad0081f 100644
--- a/ansible/roles/vm_set/files/mux_simulator.md
+++ b/ansible/roles/vm_set/files/mux_simulator.md
@@ -257,6 +257,36 @@
No json data required in POST. This API is to recover flows of all the mux bridges.

Response: `all_mux_status`

+### POST `/mux/<vm_set>/output`
+
+Set flow action of all mux bridges belonging to `vm_set` to `output`.
+
+Format of json data required in POST:
+```
+{
+    "out_sides": ["nic", "upper_tor", "lower_tor"]
+}
+```
+
+* `out_sides` is a list. It can contain one or more items from: `nic`, `upper_tor`, `lower_tor`.
+
+Response: `all_mux_status`
+
+### POST `/mux/<vm_set>/drop`
+
+Set flow action of all mux bridges belonging to `vm_set` to `drop`.
+
+Format of json data required in POST:
+```
+{
+    "out_sides": ["nic", "upper_tor", "lower_tor"]
+}
+```
+
+* `out_sides` is a list. It can contain one or more items from: `nic`, `upper_tor`, `lower_tor`.
+
+Response: `all_mux_status`
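+
+As an illustration, a hypothetical `curl` invocation for either endpoint (the mux simulator address, port and vm_set name are placeholders):
+```
+curl -X POST -H "Content-Type: application/json" \
+    -d '{"out_sides": ["nic"]}' \
+    http://10.250.0.245:8080/mux/vms17-8/drop
+```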
+
 ### GET `/mux/<vm_set>/<port_index>/flap_counter`

 Get flap counter of bridge specified by `vm_set` and `port_index`.
diff --git a/ansible/roles/vm_set/files/mux_simulator.py b/ansible/roles/vm_set/files/mux_simulator.py
index 4797b9379b..81c7470078 100644
--- a/ansible/roles/vm_set/files/mux_simulator.py
+++ b/ansible/roles/vm_set/files/mux_simulator.py
@@ -809,6 +809,46 @@ def reset_flow_handler(vm_set):
     return g_muxes.reset_flows()

+@app.route('/mux/<vm_set>/output', methods=['POST'])
+def output_flow_handler(vm_set):
+    """Handler for updating flow action to output
+
+    Args:
+        vm_set (string): The vm_set of test setup. Parsed by flask from request URL.
+
+    Posted json data should be like:
+        {"out_sides": [<side>, <side>, ...]}
+    where <side> could be "nic", "upper_tor" or "lower_tor".
+
+    Returns:
+        object: Return a flask response object.
+    """
+    _validate_vm_set(vm_set)
+    data = _validate_out_sides(request)
+    app.logger.info('===== {} POST {} with {} ====='.format(request.remote_addr, request.url, json.dumps(data)))
+    return g_muxes.update_flows('output', data['out_sides'])
+
+
+@app.route('/mux/<vm_set>/drop', methods=['POST'])
+def drop_flow_handler(vm_set):
+    """Handler for updating all flows to drop
+
+    Args:
+        vm_set (string): The vm_set of test setup. Parsed by flask from request URL.
+
+    Posted json data should be like:
+        {"out_sides": [<side>, <side>, ...]}
+    where <side> could be "nic", "upper_tor" or "lower_tor".
+
+    Returns:
+        object: Return a flask response object.
+    """
+    _validate_vm_set(vm_set)
+    data = _validate_out_sides(request)
+    app.logger.info('===== {} POST {} with {} ====='.format(request.remote_addr, request.url, json.dumps(data)))
+    return g_muxes.update_flows('drop', data['out_sides'])
+
+
 @app.route('/mux/<vm_set>/<port_index>/flap_counter', methods=['GET'])
 def flap_counter_port(vm_set, port_index):
     """
diff --git a/ansible/roles/vm_set/library/vm_topology.py b/ansible/roles/vm_set/library/vm_topology.py
index 463881aedc..b769c63023 100644
--- a/ansible/roles/vm_set/library/vm_topology.py
+++ b/ansible/roles/vm_set/library/vm_topology.py
@@ -12,6 +12,7 @@
 import logging
 import docker
 import ipaddress
+import six

 from ansible.module_utils.basic import AnsibleModule
@@ -1241,7 +1242,7 @@ def get_existing_rt_tables():
                 "Kernel only supports up to 252 additional routing tables")
             rt_name = ns_if
             ns_if_addr = ipaddress.ip_interface(
-                self.mux_cable_facts[host_ifindex]["soc_ipv4"].decode())
+                six.ensure_text(self.mux_cable_facts[host_ifindex]["soc_ipv4"]))
             gateway_addr = str(ns_if_addr.network.network_address + 1)
             if rt_slot not in rt_tables:
                 # add route table mapping, use interface name as route table name
@@ -1251,8 +1252,11 @@ def get_existing_rt_tables():
                 self.netns, ns_if, rt_name))
             VMTopology.cmd("ip netns exec %s ip rule add from %s table %s" % (
                 self.netns, ns_if_addr.ip, rt_name))
+            # issue: https://www.mail-archive.com/debian-bugs-dist@lists.debian.org/msg1811241.html
+            # When the route table is empty, the ip route flush command will fail,
+            # so ignore the error here.
             VMTopology.cmd(
-                "ip netns exec %s ip route flush table %s" % (self.netns, rt_name))
+                "ip netns exec %s ip route flush table %s" % (self.netns, rt_name), ignore_errors=True)
             VMTopology.cmd("ip netns exec %s ip route add %s dev %s table %s" % (
                 self.netns, ns_if_addr.network, ns_if, rt_name))
             VMTopology.cmd("ip netns exec %s ip route add default via %s dev %s table %s" % (
@@ -1388,7 +1392,7 @@ def iface_disable_txoff(iface_name, pid=None):
             return VMTopology.cmd('nsenter -t %s -n ethtool -K %s tx off' % (pid, iface_name))

     @staticmethod
-    def cmd(cmdline, grep_cmd=None, retry=1, negative=False, shell=False, split_cmd=True):
+    def cmd(cmdline, grep_cmd=None, retry=1, negative=False, shell=False, split_cmd=True, ignore_errors=False):
         """Execute a command and return the output

         Args:
@@ -1396,6 +1400,7 @@ def cmd(cmdline, grep_cmd=None, retry=1, negative=False, shell=False, split_cmd=
             grep_cmd (str, optional): Grep command line. Defaults to None.
             retry (int, optional): Max number of retry if command result is unexpected. Defaults to 1.
             negative (bool, optional): If negative is True, expect the command to fail. Defaults to False.
+            ignore_errors (bool, optional): If ignore_errors is True, return the output even if the command fails.

         Raises:
             Exception: If command result is unexpected after max number of retries, raise an exception.
@@ -1457,10 +1462,13 @@ def cmd(cmdline, grep_cmd=None, retry=1, negative=False, shell=False, split_cmd=
                 # Result is unexpected, need to retry
                 continue

-            # Reached max retry, fail with exception
-            err_msg = 'ret_code=%d, error message="%s". cmd="%s%s"' \
-                % (ret_code, err, cmdline_ori, ' | ' + grep_cmd_ori if grep_cmd_ori else '')
-            raise Exception(err_msg)
+            if ignore_errors:
+                return out
+            else:
+                # Reached max retry, fail with exception
+                err_msg = 'ret_code=%d, error message="%s".
cmd="%s%s"' \ + % (ret_code, err, cmdline_ori, ' | ' + grep_cmd_ori if grep_cmd_ori else '') + raise Exception(err_msg) @staticmethod def get_ovs_br_ports(bridge): diff --git a/ansible/roles/vm_set/tasks/main.yml b/ansible/roles/vm_set/tasks/main.yml index 4aebdc28b9..8f03fddbe6 100644 --- a/ansible/roles/vm_set/tasks/main.yml +++ b/ansible/roles/vm_set/tasks/main.yml @@ -231,9 +231,9 @@ register: vm_list_paused become: true -- name: Require VMs as VEOS by default +- name: Require VMs as CEOS by default set_fact: - vm_type: "veos" + vm_type: "ceos" when: vm_type is not defined - name: Check VM type diff --git a/ansible/roles/vm_set/tasks/renumber_topo.yml b/ansible/roles/vm_set/tasks/renumber_topo.yml index 9954edb127..a32f744c84 100644 --- a/ansible/roles/vm_set/tasks/renumber_topo.yml +++ b/ansible/roles/vm_set/tasks/renumber_topo.yml @@ -137,6 +137,7 @@ vm_type: "{{ vm_type }}" vm_properties: "{{ vm_properties if vm_properties is defined else omit }}" ptf_mgmt_ip_addr: "{{ ptf_ip }}" + ptf_extra_mgmt_ip_addr: "{{ ptf_extra_mgmt_ip.split(',') | default([]) }}" ptf_mgmt_ipv6_addr: "{{ ptf_ipv6 }}" ptf_mgmt_ip_gw: "{{ mgmt_gw }}" ptf_mgmt_ipv6_gw: "{{ mgmt_gw_v6 | default(None) }}" diff --git a/ansible/roles/vm_set/tasks/start.yml b/ansible/roles/vm_set/tasks/start.yml index 903ab2a71a..8f12fcdaca 100644 --- a/ansible/roles/vm_set/tasks/start.yml +++ b/ansible/roles/vm_set/tasks/start.yml @@ -1,6 +1,6 @@ -- name: Require VMs as VEOS by default +- name: Require VMs as CEOS by default set_fact: - vm_type: "veos" + vm_type: "ceos" when: vm_type is not defined - name: Load topo variables diff --git a/ansible/setup-management-network.sh b/ansible/setup-management-network.sh index 072de43b8c..3819390e9e 100755 --- a/ansible/setup-management-network.sh +++ b/ansible/setup-management-network.sh @@ -46,6 +46,16 @@ if ! ifconfig br1; then echo "br1 not found, creating bridge network" brctl addbr br1 brctl show br1 +else + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" + echo + echo " br1 exists, possibly lab server, are you sure you want to continue?" + echo + echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
+    echo
+    echo
+    echo "Please double check and manually configure IP for br1 to avoid breaking lab server connectivity"
+    exit 0
 fi

 echo
diff --git a/ansible/templates/minigraph_png.j2 b/ansible/templates/minigraph_png.j2
index b6abe3a072..8eda4b3f58 100644
--- a/ansible/templates/minigraph_png.j2
+++ b/ansible/templates/minigraph_png.j2
@@ -184,7 +184,11 @@
 {% elif 'T2' in dev %}
 {% set dev_type = 'SpineRouter' %}
 {% elif 'T3' in dev %}
-{% set dev_type = 'CoreRouter' %}
+{% if loop.index|int % 2 %}
+{% set dev_type = 'RegionalHub' %}
+{% else %}
+{% set dev_type = 'AZNGHub' %}
+{% endif %}
 {% elif 'T0' in dev %}
 {% set dev_type = 'ToRRouter' %}
 {% elif 'M1' in dev %}
diff --git a/ansible/testbed-cli.sh b/ansible/testbed-cli.sh
index 674b4199c5..6ba8954978 100755
--- a/ansible/testbed-cli.sh
+++ b/ansible/testbed-cli.sh
@@ -23,9 +23,9 @@ function usage
  echo " $0 [options] collect-show-tech "
  echo
  echo "Options:"
-  echo " -t : testbed CSV file name (default: 'testbed.csv')"
+  echo " -t : testbed file name (default: 'testbed.yaml')"
  echo " -m : virtual machine file name (default: 'veos')"
-  echo " -k : vm type (veos|ceos|vsonic|vcisco) (default: 'veos')"
+  echo " -k : vm type (veos|ceos|vsonic|vcisco) (default: 'ceos')"
  echo " -n : vm num (default: 0)"
  echo " -s : master set identifier on specified (default: 1)"
  echo " -d : sonic vm directory (default: $HOME/sonic-vm)"
@@ -184,7 +184,7 @@ function read_file
 function start_vms
 {
   if [[ $vm_type == ceos ]]; then
-    echo "VM type is ceos. No need to run start-vms. Please specify VM type using the -k option. Example: -k veos"
+    echo "VM type is ceos. No need to run start-vms. Please specify VM type using the -k option. Example: -k ceos"
     exit
   fi
   server=$1
@@ -200,7 +200,7 @@ function start_vms
 function stop_vms
 {
   if [[ $vm_type == ceos ]]; then
-    echo "VM type is ceos. No need to run stop-vms. Please specify VM type using the -k option. Example: -k veos"
+    echo "VM type is ceos. No need to run stop-vms. Please specify VM type using the -k option. Example: -k ceos"
     exit
   fi
   server=$1
@@ -215,7 +215,7 @@ function stop_vms
 function start_topo_vms
 {
   if [[ $vm_type == ceos ]]; then
-    echo "VM type is ceos. No need to run start-topo-vms. Please specify VM type using the -k option. Example: -k veos"
+    echo "VM type is ceos. No need to run start-topo-vms. Please specify VM type using the -k option. Example: -k ceos"
     exit
   fi
   testbed_name=$1
@@ -233,7 +233,7 @@ function start_topo_vms
 function stop_topo_vms
 {
   if [[ $vm_type == ceos ]]; then
-    echo "VM type is ceos. No need to run stop-topo-vms. Please specify VM type using the -k option. Example: -k veos"
+    echo "VM type is ceos. No need to run stop-topo-vms. Please specify VM type using the -k option.
Example: -k ceos" exit fi testbed_name=$1 @@ -268,7 +268,7 @@ function add_topo ansible_options+=" -e eos_batch_size=1" fi - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_add_vm_topology.yml --vault-password-file="${passwd}" -l "$server" \ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile -i ${inv_name} testbed_add_vm_topology.yml --vault-password-file="${passwd}" -l "$server" \ -e testbed_name="$testbed_name" -e duts_name="$duts" -e VM_base="$vm_base" \ -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$vm_set_name" \ -e ptf_imagename="$ptf_imagename" -e vm_type="$vm_type" -e ptf_ipv6="$ptf_ipv6" \ @@ -313,7 +313,7 @@ function remove_topo ansible_options="-e sonic_vm_storage_location=$sonic_vm_dir" fi - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_remove_vm_topology.yml --vault-password-file="${passwd}" -l "$server" \ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile -i ${inv_name} testbed_remove_vm_topology.yml --vault-password-file="${passwd}" -l "$server" \ -e testbed_name="$testbed_name" -e duts_name="$duts" -e VM_base="$vm_base" \ -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$vm_set_name" \ -e ptf_imagename="$ptf_imagename" -e vm_type="$vm_type" -e ptf_ipv6="$ptf_ipv6" \ @@ -744,8 +744,8 @@ function deploy_topo_with_cache } vmfile=veos -tbfile=testbed.csv -vm_type=veos +tbfile=testbed.yaml +vm_type=ceos vm_num=0 msetnumber=1 sonic_vm_dir="" diff --git a/ansible/testbed_add_vm_topology.yml b/ansible/testbed_add_vm_topology.yml index 29d94c3357..3b3ca2ed92 100644 --- a/ansible/testbed_add_vm_topology.yml +++ b/ansible/testbed_add_vm_topology.yml @@ -122,9 +122,9 @@ - set_fact: base_topo: "{{ topo.split('_') | first }}" - - name: Require VMs as VEOS by default + - name: Require VMs as CEOS by default set_fact: - vm_type: "veos" + vm_type: "ceos" when: vm_type is not defined - name: Check if it is a known topology diff --git a/ansible/upgrade_sonic.py b/ansible/upgrade_sonic.py old mode 100644 new mode 100755 index 651dcf1673..45d94e3cf5 --- a/ansible/upgrade_sonic.py +++ b/ansible/upgrade_sonic.py @@ -1,9 +1,11 @@ +#!/usr/bin/env python3 + import argparse import logging import sys -from devutil.devices import init_localhost, init_sonichosts, init_testbed_sonichosts -from devutil.sonic_helpers import upgrade_image +from devutil.devices.factory import init_localhost, init_sonichosts, init_testbed_sonichosts +from devutil.devices.sonic import upgrade_image logging.basicConfig( stream=sys.stdout, diff --git a/ansible/upgrade_sonic.yml b/ansible/upgrade_sonic.yml index 2483897960..e93ebeda41 100644 --- a/ansible/upgrade_sonic.yml +++ b/ansible/upgrade_sonic.yml @@ -14,7 +14,7 @@ block: - name: Set default testbed file set_fact: - testbed_file: testbed.csv + testbed_file: testbed.yaml when: testbed_file is not defined - name: Gather testbed information diff --git a/ansible/vars/topo_dualtor-64.yml b/ansible/vars/topo_dualtor-64.yml new file mode 100644 index 0000000000..7db7c92ed3 --- /dev/null +++ b/ansible/vars/topo_dualtor-64.yml @@ -0,0 +1,362 @@ +topology: + dut_num: 2 + host_interfaces: + - 0.2@0,1.2@0 + - 0.3@3,1.3@3 + - 0.6@6,1.6@6 + - 0.7@7,1.7@7 + - 0.8@8,1.8@8 + - 0.9@9,1.9@9 + - 0.10@10,1.10@10 + - 0.11@11,1.11@11 + - 0.12@12,1.12@12 + - 0.13@13,1.13@13 + - 0.14@14,1.14@14 + - 0.15@15,1.15@15 + - 0.18@18,1.18@18 + - 0.19@19,1.19@19 + - 0.22@22,1.22@22 + - 0.23@23,1.23@23 + - 0.24@24,1.24@24 + - 0.25@25,1.25@25 + - 0.26@26,1.26@26 + - 0.27@27,1.27@27 + - 0.28@28,1.28@28 + - 0.29@29,1.29@29 + - 0.30@30,1.30@30 + - 0.31@31,1.31@31 + - 0.32@32,1.32@32 
+ - 0.33@33,1.33@33 + - 0.34@34,1.34@34 + - 0.35@35,1.35@35 + - 0.36@36,1.36@36 + - 0.37@37,1.37@37 + - 0.38@38,1.38@38 + - 0.39@39,1.39@39 + - 0.40@40,1.40@40 + - 0.41@41,1.41@41 + - 0.42@42,1.42@42 + - 0.43@43,1.43@43 + - 0.44@44,1.44@44 + - 0.45@45,1.45@45 + - 0.46@46,1.46@46 + - 0.47@47,1.47@47 + - 0.48@48,1.48@48 + - 0.49@49,1.49@49 + - 0.50@50,1.50@50 + - 0.51@51,1.51@51 + - 0.52@52,1.52@52 + - 0.53@53,1.53@53 + - 0.54@54,1.54@54 + - 0.55@55,1.55@55 + - 0.56@56,1.56@56 + - 0.57@57,1.57@57 + - 0.58@58,1.58@58 + - 0.59@59,1.59@59 + - 0.60@60,1.60@60 + - 0.61@61,1.61@61 + - 0.62@62,1.62@62 + - 0.63@63,1.63@63 + disabled_host_interfaces: + - 0.2@0,1.2@0 + - 0.3@3,1.3@3 + - 0.18@18,1.18@18 + - 0.19@19,1.19@19 + - 0.33@33,1.33@33 + - 0.34@34,1.34@34 + - 0.35@35,1.35@35 + - 0.43@43,1.43@43 + - 0.44@44,1.44@44 + - 0.45@45,1.45@45 + - 0.46@46,1.46@46 + - 0.47@47,1.47@47 + - 0.49@49,1.49@49 + - 0.50@50,1.50@50 + - 0.51@51,1.51@51 + - 0.59@59,1.59@59 + - 0.60@60,1.60@60 + - 0.61@61,1.61@61 + - 0.62@62,1.62@62 + - 0.63@63,1.63@63 + VMs: + ARISTA01T1: + vlans: + - "0.0@64" + - "0.1@65" + - "1.0@66" + - "1.1@67" + vm_offset: 0 + ARISTA02T1: + vlans: + - "0.4@68" + - "0.5@69" + - "1.4@70" + - "1.5@71" + vm_offset: 1 + ARISTA03T1: + vlans: + - "0.16@72" + - "0.17@73" + - "1.16@74" + - "1.17@75" + vm_offset: 2 + ARISTA04T1: + vlans: + - "0.20@76" + - "0.21@77" + - "1.20@78" + - "1.21@79" + vm_offset: 3 + DUT: + loopback: + ipv4: + - 10.1.0.32/32 + - 10.1.0.33/32 + ipv6: + - FC00:1::32/128 + - FC00:1::33/128 + loopback1: + ipv4: + - 10.1.0.34/32 + - 10.1.0.35/32 + ipv6: + - FC00:1::34/128 + - FC00:1::35/128 + loopback2: + ipv4: + - 10.1.0.36/32 + - 10.1.0.36/32 + ipv6: + - FC00:1::36/128 + - FC00:1::36/128 + loopback3: + ipv4: + - 10.1.0.38/32 + - 10.1.0.39/32 + ipv6: + - FC00:1::38/128 + - FC00:1::39/128 + vlan_configs: + default_vlan_config: one_vlan_a + one_vlan_a: + Vlan1000: + id: 1000 + intfs: [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 36, 37, 38, 39, 40, 41, 42, 48, 52, 53, 54, 55, 56, 57, 58] + prefix: 192.168.0.1/21 + prefix_v6: fc02:1000::1/64 + tag: 1000 + mac: 00:aa:bb:cc:dd:ee + two_vlan_a: + Vlan100: + id: 100 + intfs: [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 22, 23, 24, 25, 26, 27, 28, 29] + prefix: 192.168.0.1/22 + prefix_v6: fc02:100::1/64 + tag: 100 + Vlan200: + id: 200 + intfs: [30, 31, 32, 36, 37, 38, 39, 40, 41, 42, 48, 52, 53, 54, 55, 56, 57, 58] + prefix: 192.168.4.1/22 + prefix_v6: fc02:200::1/64 + tag: 200 + four_vlan_a: + Vlan1000: + id: 1000 + intfs: [6, 7, 8, 9, 10, 11, 12, 13, 14] + prefix: 192.168.0.1/23 + prefix_v6: fc02:400::1/64 + tag: 1000 + Vlan2000: + id: 2000 + intfs: [15, 22, 23, 24, 25, 26, 27, 28, 29] + prefix: 192.168.2.1/23 + prefix_v6: fc02:401::1/64 + tag: 2000 + Vlan3000: + id: 3000 + intfs: [30, 31, 32, 36, 37, 38, 39, 40, 41] + prefix: 192.168.4.1/23 + prefix_v6: fc02:402::1/64 + tag: 3000 + Vlan4000: + id: 4000 + intfs: [42, 48, 52, 53, 54, 55, 56, 57, 58] + prefix: 192.168.6.1/23 + prefix_v6: fc02:403::1/64 + tag: 4000 + tunnel_configs: + default_tunnel_config: tunnel_ipinip + tunnel_ipinip: + MuxTunnel0: + type: IPInIP + attach_to: Loopback0 + dscp: uniform + ecn_encap: standard + ecn_decap: copy_from_outer + ttl_mode: pipe + +configuration_properties: + common: + dut_asn: 65100 + dut_type: ToRRouter + swrole: leaf + nhipv4: 10.10.246.254 + nhipv6: FC0A::FF + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + spine_asn: 65534 + leaf_asn_start: 64600 + 
tor_asn_start: 65500 + failure_rate: 0 + +configuration: + ARISTA01T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.56 + - FC00::71 + - 10.0.1.56 + - FC00::1:71 + interfaces: + Loopback0: + ipv4: 100.1.0.29/32 + ipv6: 2064:100::1d/128 + Ethernet1: + lacp: 1 + dut_index: 0 + Ethernet2: + lacp: 1 + dut_index: 0 + Ethernet3: + lacp: 2 + dut_index: 1 + Ethernet4: + lacp: 2 + dut_index: 1 + Port-Channel1: + ipv4: 10.0.0.57/31 + ipv6: fc00::72/126 + Port-Channel2: + ipv4: 10.0.1.57/31 + ipv6: fc00::1:72/126 + bp_interface: + ipv4: 10.10.246.29/24 + ipv6: fc0a::1d/64 + + ARISTA02T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.58 + - FC00::75 + - 10.0.1.58 + - FC00::1:75 + interfaces: + Loopback0: + ipv4: 100.1.0.30/32 + ipv6: 2064:100::1e/128 + Ethernet1: + lacp: 1 + dut_index: 0 + Ethernet2: + lacp: 1 + dut_index: 0 + Ethernet3: + lacp: 2 + dut_index: 1 + Ethernet4: + lacp: 2 + dut_index: 1 + Port-Channel1: + ipv4: 10.0.0.59/31 + ipv6: fc00::76/126 + Port-Channel2: + ipv4: 10.0.1.59/31 + ipv6: fc00::1:76/126 + bp_interface: + ipv4: 10.10.246.30/24 + ipv6: fc0a::1e/64 + + ARISTA03T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.60 + - FC00::79 + - 10.0.1.60 + - FC00::1:79 + interfaces: + Loopback0: + ipv4: 100.1.0.31/32 + ipv6: 2064:100::1f/128 + Ethernet1: + lacp: 1 + dut_index: 0 + Ethernet2: + lacp: 1 + dut_index: 0 + Ethernet3: + lacp: 2 + dut_index: 1 + Ethernet4: + lacp: 2 + dut_index: 1 + Port-Channel1: + ipv4: 10.0.0.61/31 + ipv6: fc00::7a/126 + Port-Channel2: + ipv4: 10.0.1.61/31 + ipv6: fc00::1:7a/126 + bp_interface: + ipv4: 10.10.246.31/24 + ipv6: fc0a::1f/64 + + ARISTA04T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.62 + - FC00::7D + - 10.0.1.62 + - FC00::1:7D + interfaces: + Loopback0: + ipv4: 100.1.0.32/32 + ipv6: 2064:100::20/128 + Ethernet1: + lacp: 1 + dut_index: 0 + Ethernet2: + lacp: 1 + dut_index: 0 + Ethernet3: + lacp: 2 + dut_index: 1 + Ethernet4: + lacp: 2 + dut_index: 1 + Port-Channel1: + ipv4: 10.0.0.63/31 + ipv6: fc00::7e/126 + Port-Channel2: + ipv4: 10.0.1.63/31 + ipv6: fc00::1:7e/126 + bp_interface: + ipv4: 10.10.246.32/24 + ipv6: fc0a::20/64 diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 52412dde69..7808fd3747 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -57,11 +57,13 @@ stages: continueOnError: false pool: ubuntu-20.04 steps: - - template: .azure-pipelines/run-test-scheduler-template.yml + - template: .azure-pipelines/run-test-elastictest-template.yml parameters: TOPOLOGY: t0 MIN_WORKER: $(T0_INSTANCE_NUM) MAX_WORKER: $(T0_INSTANCE_NUM) + KVM_IMAGE_BRANCH: "master" + MGMT_BRANCH: "master" - job: t0_2vlans_elastictest displayName: "kvmtest-t0-2vlans by Elastictest" @@ -69,13 +71,15 @@ stages: continueOnError: false pool: ubuntu-20.04 steps: - - template: .azure-pipelines/run-test-scheduler-template.yml + - template: .azure-pipelines/run-test-elastictest-template.yml parameters: TOPOLOGY: t0 TEST_SET: t0-2vlans MIN_WORKER: $(T0_2VLANS_INSTANCE_NUM) MAX_WORKER: $(T0_2VLANS_INSTANCE_NUM) DEPLOY_MG_EXTRA_PARAMS: "-e vlan_config=two_vlan_a" + KVM_IMAGE_BRANCH: "master" + MGMT_BRANCH: "master" - job: t1_lag_elastictest displayName: "kvmtest-t1-lag by Elastictest" @@ -83,11 +87,13 @@ stages: continueOnError: false pool: ubuntu-20.04 steps: - - template: .azure-pipelines/run-test-scheduler-template.yml + - template: .azure-pipelines/run-test-elastictest-template.yml parameters: TOPOLOGY: t1-lag MIN_WORKER: $(T1_LAG_INSTANCE_NUM) 
          MAX_WORKER: $(T1_LAG_INSTANCE_NUM)
+         KVM_IMAGE_BRANCH: "master"
+         MGMT_BRANCH: "master"

  - job: dualtor_elastictest
    displayName: "kvmtest-dualtor-t0 by Elastictest"
@@ -95,12 +101,14 @@ stages:
    continueOnError: false
    pool: ubuntu-20.04
    steps:
-      - template: .azure-pipelines/run-test-scheduler-template.yml
+      - template: .azure-pipelines/run-test-elastictest-template.yml
        parameters:
          TOPOLOGY: dualtor
          MIN_WORKER: $(T0_DUALTOR_INSTANCE_NUM)
          MAX_WORKER: $(T0_DUALTOR_INSTANCE_NUM)
          COMMON_EXTRA_PARAMS: "--disable_loganalyzer "
+         KVM_IMAGE_BRANCH: "master"
+         MGMT_BRANCH: "master"

  - job: multi_asic_elastictest
    displayName: "kvmtest-multi-asic-t1-lag by Elastictest"
@@ -108,26 +116,15 @@ stages:
    continueOnError: false
    pool: ubuntu-20.04
    steps:
-      - template: .azure-pipelines/run-test-scheduler-template.yml
+      - template: .azure-pipelines/run-test-elastictest-template.yml
        parameters:
          TOPOLOGY: t1-8-lag
          TEST_SET: multi-asic-t1-lag
          MIN_WORKER: $(MULTI_ASIC_INSTANCE_NUM)
          MAX_WORKER: $(MULTI_ASIC_INSTANCE_NUM)
          NUM_ASIC: 4
-
-# - job: wan_elastictest
-#   displayName: "kvmtest-wan by Elastictest"
-#   timeoutInMinutes: 240
-#   continueOnError: false
-#   pool: ubuntu-20.04
-#   steps:
-#     - template: .azure-pipelines/run-test-scheduler-template.yml
-#       parameters:
-#         TOPOLOGY: wan-pub
-#         MIN_WORKER: $(WAN_INSTANCE_NUM)
-#         MAX_WORKER: $(WAN_INSTANCE_NUM)
-#         COMMON_EXTRA_PARAMS: "--skip_sanity "
+         KVM_IMAGE_BRANCH: "master"
+         MGMT_BRANCH: "master"

  - job: sonic_t0_elastictest
    displayName: "kvmtest-t0-sonic by Elastictest"
@@ -135,7 +132,7 @@ stages:
    continueOnError: false
    pool: ubuntu-20.04
    steps:
-      - template: .azure-pipelines/run-test-scheduler-template.yml
+      - template: .azure-pipelines/run-test-elastictest-template.yml
        parameters:
          TOPOLOGY: t0-64-32
          MIN_WORKER: $(T0_SONIC_INSTANCE_NUM)
@@ -143,3 +140,18 @@ stages:
          TEST_SET: t0-sonic
          COMMON_EXTRA_PARAMS: "--neighbor_type=sonic "
          VM_TYPE: vsonic
+         KVM_IMAGE_BRANCH: "master"
+         MGMT_BRANCH: "master"
+
+# - job: wan_elastictest
+#   displayName: "kvmtest-wan by Elastictest"
+#   timeoutInMinutes: 240
+#   continueOnError: false
+#   pool: ubuntu-20.04
+#   steps:
+#     - template: .azure-pipelines/run-test-elastictest-template.yml
+#       parameters:
+#         TOPOLOGY: wan-pub
+#         MIN_WORKER: $(WAN_INSTANCE_NUM)
+#         MAX_WORKER: $(WAN_INSTANCE_NUM)
+#         COMMON_EXTRA_PARAMS: "--skip_sanity "
diff --git a/docs/api_wiki/ansible_methods/shell_cmds.md b/docs/api_wiki/ansible_methods/shell_cmds.md
index d11373f86e..c002db3fc1 100644
--- a/docs/api_wiki/ansible_methods/shell_cmds.md
+++ b/docs/api_wiki/ansible_methods/shell_cmds.md
@@ -25,12 +25,16 @@ def test_fun(duthosts, rand_one_dut_hostname):
   - Required: `False`
   - Type: `Boolean`
   - Default: `True`
+- `timeout` - Specify the time limit (in seconds) for each command. 0 means no limit.
+  - Required: `False`
+  - Type: `Integer`
+  - Default: `0`

 ## Expected Output
 A dictionary with results from commands run. The dictionary hierarchy is described below, with each indentation describing a sub-dictionary:
 - `end` - Datetime for when the commands finished running
-- `cmds` - the list of commands that were run
+- `cmds` - the list of commands that the user input
 - `start` - Datetime for when the commands started running
 - `delta` - difference between `start` and `end`
 - `results` - List of dictionaries, each corresponding to the results for one of the commands run
@@ -38,5 +42,7 @@ A dictionary with results from commands run.
   - `stderr` - What was printed to stderr (as one string) during execution of command
   - `stdout_lines` - What was printed to stdout (split by line) during execution of command
   - `stdout` - What was printed to stdout (as one string) during execution of command
-  - `cmd` - command that was run
+  - `cmd` - command that the user input. It's what actually ran if `timeout == 0`.
+  - `cmd_with_timeout` - command wrapped with `timeout`. It's what actually ran if `timeout != 0`.
   - `rc` - return code
+  - `timeout` - time limit (in seconds) for each command. 0 means no limit.
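+
+A minimal usage sketch of the new `timeout` argument (the command strings are examples only):
+```
+def test_fun(duthosts, rand_one_dut_hostname):
+    duthost = duthosts[rand_one_dut_hostname]
+    # Allow each command at most 30 seconds; 0 (the default) means no limit
+    out = duthost.shell_cmds(cmds=["show version", "redis-cli ping"], timeout=30)
+    for res in out["results"]:
+        assert res["rc"] == 0, "'{}' failed with rc {}".format(res["cmd"], res["rc"])
+```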
diff --git a/docs/testbed/README.testbed.Keysight.md b/docs/testbed/README.testbed.Keysight.md
index 034455e1a3..b8daaa0f27 100644
--- a/docs/testbed/README.testbed.Keysight.md
+++ b/docs/testbed/README.testbed.Keysight.md
@@ -17,6 +17,8 @@ Based on test need there may be multiple topologies possible as shown below :
 - Multiple IxNetwork Topology
 ![](img/multiple-ixnetwork.PNG)

+## Virtual Topology
+![](img/IxNetwork_Virtual_Topology.png)

 ## Topology Description
 ### Ixia Chassis (IxNetwork)
@@ -91,3 +93,148 @@ Note : The folders within /opt/container/one/ should be created with read and
 ```
 6. Launch IxNetworkWeb using browser `https://container ip`
+
+
+## Additional steps required for the Virtual Topology
+### Deploy Ixia_Virtual_Chassis
+
+1. Download the Ixia_Virtual_Chassis image from:
+https://downloads.ixiacom.com/support/downloads_and_updates/public/IxVM/9.30/9.30.0.328/Ixia_Virtual_Chassis_9.30_KVM.qcow2.tar.bz2
+2. Start the VM:
+
+The example is for the image located in /vms
+   ```
+   cd /vms
+   sudo tar xjf Ixia_Virtual_Chassis_9.30_KVM.qcow2.tar.bz2
+   virt-install --name IxChassis --memory 16000 --vcpus 8 --disk /vms/Ixia_Virtual_Chassis_9.30_KVM.qcow2,bus=sata --import --os-variant centos7.0 --network bridge=br1,model=virtio --noautoconsole
+   ```
+3. If a DHCP server is present, we can observe the assigned IP:
+```
+   Welcome to Ixia Virtual Chassis
+   CentOS Linux 7
+   Kernel 3.10 on x86_64
+   Management IPv4: 10.36.78.217/22
+   IxOS Version: 9.30.3001.12
+   IxNetwork Protocol Version: 9.30.2212.1
+```
+Note: If the Ixia Virtual Chassis does not get an IP from the DHCP server, these solutions might help:
+- Disable firewall
+```
+sudo ufw disable
+```
+- Instead of the command in step 2
+```
+virt-install --name IxChassis --memory 16000 --vcpus 8 --disk /vms/Ixia_Virtual_Chassis_9.30_KVM.qcow2,bus=sata --import --os-variant centos7.0 --network bridge=br1,model=virtio --noautoconsole
+```
+try to use this
+```
+virt-install --name IxChassis --memory 16000 --vcpus 8 --disk /vms/Ixia_Virtual_Chassis_9.30_KVM.qcow2,bus=sata --import --osinfo detect=on,require=off --network bridge=br1,model=virtio --noautoconsole
+```
+
+### Deploy two Ixia Virtual Load Modules
+#### Prerequisites
+1. For PCI passthrough, SR-IOV and IOMMU must be enabled in the BIOS
+2. On an Ubuntu server the file /etc/default/grub must be edited. Add the arguments "intel_iommu=on iommu=pt" to the GRUB_CMDLINE_LINUX_DEFAULT line
+```
+GRUB_CMDLINE_LINUX_DEFAULT="quiet splash intel_iommu=on iommu=pt"
+```
+Example of the file:
+```
+GRUB_DEFAULT=0
+GRUB_TIMEOUT_STYLE=hidden
+GRUB_TIMEOUT=0
+GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
+GRUB_CMDLINE_LINUX_DEFAULT="quiet splash intel_iommu=on iommu=pt"
+GRUB_CMDLINE_LINUX=""
+```
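+
+After editing the file, regenerate the GRUB configuration and reboot the server so that the IOMMU settings take effect (standard Ubuntu procedure):
+```
+sudo update-grub
+sudo reboot
+```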
+
+#### Identify the PCI device designated for passthrough to the Load Modules
+1. Get the PCI number of the device designated for passthrough:
+```
+lspci | grep Ethernet
+```
+Output example:
+```
+04:00.0 Ethernet controller: Intel Corporation I210 Gigabit Network Connection (rev 03)
+05:00.0 Ethernet controller: Intel Corporation I210 Gigabit Network Connection (rev 03)
+21:00.0 Ethernet controller: Mellanox Technologies MT27700 Family [ConnectX-4]
+21:00.1 Ethernet controller: Mellanox Technologies MT27700 Family [ConnectX-4]
+```
+So in this case the devices designated for passthrough to the Load Modules are:
+
+21:00.0 for Load Module 1 (virt-install requires a different syntax: 21:00.0 -> pci_0000_21_00_0)
+
+21:00.1 for Load Module 2 (virt-install requires a different syntax: 21:00.1 -> pci_0000_21_00_1)
+
+
+#### Load Module 1
+1. Download the Ixia_Load_Module image from:
+ https://downloads.ixiacom.com/support/downloads_and_updates/public/IxVM/9.30/9.30.0.328/Ixia_Virtual_Load_Module_IXN_9.30_KVM.qcow2.tar.bz2
+2. Start the VM:
+
+The example is for the image located in /vms
+```
+cd /vms
+sudo tar xjf Ixia_Virtual_Load_Module_IXN_9.30_KVM.qcow2.tar.bz2
+mv Ixia_Virtual_Load_Module_IXN_9.30_KVM.qcow2 IxLM1.qcow2
+
+# Change pci_0000_21_00_0 to your value from the "Identify the PCI device designated for passthrough to the Load Modules" section
+sudo virt-install --name IxLM1 \
+--ram 4096 \
+--vcpus 4 \
+--network bridge=br1,model=virtio \
+--host-device=pci_0000_21_00_0 \
+--serial pty \
+--serial unix,path=/tmp/Virtual_Load_Module_1 \
+--disk path=/vms/IxLM1.qcow2,device=disk,bus=sata,format=qcow2 \
+--channel unix,target_type=virtio,name=org.qemu.guest_agent.0 \
+--boot hd \
+--vnc \
+--noautoconsole \
+--osinfo detect=on,require=off \
+--force
+
+```
+3. If a DHCP server is present, we can observe the assigned IP:
+```
+Welcome to Ixia Virtual Load Module
+CentOS Linux 7
+Kernel 3.10 on x86_64
+Management IPv4: 10.36.78.31/22
+IxOS Version: 9.30.3001.12
+IxVM Status: Active: activating (start) since Fri 2023-06-16 13:54:35 PDT; 1s ago
+```
+
+#### Load Module 2
+1. Start the VM:
+
+The example is for the image located in /vms
+```
+cd /vms
+sudo tar xjf Ixia_Virtual_Load_Module_IXN_9.30_KVM.qcow2.tar.bz2
+mv Ixia_Virtual_Load_Module_IXN_9.30_KVM.qcow2 IxLM2.qcow2
+
+# Change pci_0000_21_00_1 to your value from the "Identify the PCI device designated for passthrough to the Load Modules" section
+sudo virt-install --name IxLM2 \
+--ram 4096 \
+--vcpus 4 \
+--network bridge=br1,model=virtio \
+--host-device=pci_0000_21_00_1 \
+--serial pty \
+--serial unix,path=/tmp/Virtual_Load_Module_2 \
+--disk path=/vms/IxLM2.qcow2,device=disk,bus=sata,format=qcow2 \
+--channel unix,target_type=virtio,name=org.qemu.guest_agent.0 \
+--boot hd \
+--vnc \
+--noautoconsole \
+--osinfo detect=on,require=off \
+--force
+```
+2. If a DHCP server is present, we can observe the assigned IP:
+```
+Welcome to Ixia Virtual Load Module
+CentOS Linux 7
+Kernel 3.10 on x86_64
+Management IPv4: 10.36.78.219/22
+IxOS Version: 9.30.3001.12
+IxVM Status: Active: activating (start) since Fri 2023-06-16 16:42:40 PDT; 1s ago
+```
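+
+To verify that the deployed VMs are running, and to reach a console when no DHCP address is shown, the standard libvirt tooling can be used (the VM names match the `--name` arguments above):
+```
+virsh list --all
+virsh console IxLM1
+```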
diff --git a/docs/testbed/README.testbed.Setup.md b/docs/testbed/README.testbed.Setup.md
index 354e93d831..7452407cb4 100644
--- a/docs/testbed/README.testbed.Setup.md
+++ b/docs/testbed/README.testbed.Setup.md
@@ -56,6 +56,11 @@ This document describes the steps to setup the testbed and deploy a topology.
   - reboot
     - at minimum terminate ssh connection or log out and log back in
     - this is needed for the permissions to be updated, otherwise the next step will fail
+
+  - Disable firewall (optional)
+    ```
+    sudo ufw disable
+    ```

 ## Download a cEOS VM image
 We use EOS-based VMs or SONiC VMs to simulate neighboring devices in both virtual and physical testbeds. You can use a vEOS or SONiC image as neighbor devices; this method can be found in [vEOS (KVM-based) image](https://github.com/sonic-net/sonic-mgmt/blob/master/docs/testbed/README.testbed.VsSetup.md#option-1-veos-kvm-based-image) and [SONiC image](https://github.com/sonic-net/sonic-mgmt/blob/master/docs/testbed/README.testbed.VsSetup.md#option-3-use-sonic-image-as-neighboring-devices). But for the physical testbed, we recommend using cEOS for **its lower consumption of memory and lighter interaction with the kernel**. To achieve the use of cEOS as neighbor devices, we need to do several steps.

 1. Pull debian jessie
@@ -190,8 +195,8 @@ Managing the testbed and running tests requires various dependencies to be insta
 5. Create a `docker-sonic-mgmt` container. Note that you must mount your clone of `sonic-mgmt` inside the container to access the deployment and testing scripts:
    ```
    docker load < docker-sonic-mgmt.gz
-   docker run -v $PWD:/data -it docker-sonic-mgmt bash
-   cd /data/sonic-mgmt
+   docker run -v $PWD:/var/AzDevOps -it docker-sonic-mgmt bash
+   cd /var/AzDevOps/sonic-mgmt
    ```

**NOTE: From this point on, all steps are run inside the `docker-sonic-mgmt` container.**
diff --git a/docs/testbed/img/IxNetwork_Virtual_Topology.png b/docs/testbed/img/IxNetwork_Virtual_Topology.png
new file mode 100644
index 0000000000..0bf14ff7b7
Binary files /dev/null and b/docs/testbed/img/IxNetwork_Virtual_Topology.png differ
diff --git a/spytest/Doc/intro.md b/spytest/Doc/intro.md
index fbb6c25a72..2b00d19134 100755
--- a/spytest/Doc/intro.md
+++ b/spytest/Doc/intro.md
@@ -1,65 +1,183 @@
-Overview
-========
-The SPyTest is test automation for validating SONiC. It is based on **PyTest** and is developed leveraging open source Python packages available for device access/interaction and CLI output parsing.
+

-SPyTest constitutes of following components.
+- [Revision](#revision) +- [Scope](#scope) +- [Definitions/Abbreviations](#definitionsabbreviations) +- [Overview](#overview) + - [Framework](#framework) + - [TGen APIs](#tgen-apis) + - [Feature APIs](#feature-apis) + - [Utility APIs](#utility-apis) + - [TextFSM Templates](#textfsm-templates) + - [Test Scripts](#test-scripts) + - [Packaging](#packaging) + - [Testbed](#testbed) + - [Sample topology](#sample-topology) +- [Traffic Generation](#traffic-generation) + - [Ixia](#ixia) + - [Spirent](#spirent) + - [HLTAPI](#hltapi) + - [Scapy](#scapy) +- [Execution Modes](#execution-modes) + - [PTF Mode](#ptf-mode) + - [Standalone Mode](#standalone-mode) + - [Virtual SONiC](#virtual-sonic) +- [Environment](#environment) + - [PTF](#ptf) + - [Standalone](#standalone) +- [Test Execution](#test-execution) + - [Running test script(s)](#running-test-scripts) + - [Running tests using PyTest marker](#running-tests-using-pytest-marker) + - [Running tests using suite name](#running-tests-using-suite-name) + - [Execution Results and Logs](#execution-results-and-logs) + - [Command line arguments](#command-line-arguments) +- [Log Files](#log-files) +- [Dashboard](#dashboard) +- [Internals](#internals) + - [Init sequence](#init-sequence) + - [Base Configuration](#base-configuration) + - [Module Configuration](#module-configuration) + - [Customize Error Patterns](#customize-error-patterns) + - [Syslog Error Patterns](#syslog-error-patterns) + - [Batch Processing](#batch-processing) + - [Static Analysis](#static-analysis) +- [Test Suites](#test-suites) - * Framework - * TGen API - * Feature API - * Utility API - * TextFSM Templates - * Test Scripts + -Please read [ROOT] as [git-repo-clone]/spytest in this document. +## Revision -##### Framework -Please refer to [ROOT]/spytest/infra.py for list of functions. +Rev | RevDate | Author(s) | Change Description +---- | ---------- | -------------------------- | ------------------ +v1.00 | Apr 14, 2020 | Ram Sasthri, Kristipati | Initial Version +v2.00 | May 01, 2023 | Ram Sasthri, Kristipati | Added Table of Contents +v2.10 | Jun 01, 2023 | Ram Sasthri, Kristipati | Updates +v2.11 | Jun 08, 2023 | Ram Sasthri, Kristipati | Added test Suites Section +v2.12 | Jun 09, 2023 | Ram Sasthri, Kristipati | Added syslog patterns -These functions are expected to be called from feature API and they abstract the device interaction and other common operations like below. +## Scope -* Error Pattern Detection and Result Classification -* Crash Detection and Recovery -* Power cycle operations using Remote Power Supply (RPS) +This document describes the details for SPyTest Framework. -##### TGen API +## Definitions/Abbreviations -The SPyTest uses **HLTAPI** to interface commercial traffic generators (TGen) like Ixia and Spirent. -The same API are implemented using Scapy to generate traffic in PTF environment. More details are in **Traffic Generation** section of this document. +* **TGEN** Traffic Generator +* **PyTest** Open Source general purpose automation [framework](https://github.com/pytest-dev/pytest) +* **SCAPY** Scapy is a powerful Python-based interactive packet manipulation program and [library](https://github.com/secdev/scapy) +* **[ROOT]** Refers to [git-repo-clone]/spytest in this document -##### Feature API -Please refer to [ROOT]/apis/***/*.py for list of functions. +## Overview -These functions are expected to be called from test scripts and they abstract the UI and version differences. +The SPyTest is a test automation framework designed to validate SONiC. 
It utilizes PyTest as its foundation and leverages various open-source Python packages for tasks such as device access, CLI output parsing, and traffic generation.
+
+The components of SPyTest include:
+
+* Framework: This forms the core of the automation framework, providing the infrastructure and functionality needed to author test scripts, execute them, and generate test reports.
+
+* TGen APIs: The Traffic Generator APIs enable the generation and control of network traffic for testing purposes. They allow users to configure and manipulate traffic patterns, perform packet-level operations, and measure network performance.
+
+* Feature APIs: The Feature APIs provide a set of functions and methods that allow testers to interact with specific features and functionalities of SONiC. This component simplifies the testing process by providing a higher-level abstraction layer for validating individual features.
+
+* Utility APIs: The Utility APIs offer a collection of utility functions that assist in various testing operations.
+
+* TextFSM Templates: TextFSM is a powerful framework for parsing and extracting structured data from unstructured text outputs, such as command line outputs. SPyTest utilizes TextFSM templates, which define the patterns and rules for extracting relevant information from the CLI outputs of devices under test.
+
+* Test Scripts: Test scripts are the actual test cases written using the SPyTest framework. These scripts combine the functionalities provided by the aforementioned components to define the test scenarios and validate the behavior of SONiC.
+
+### Framework
+
+ Please refer to [ROOT]/spytest/infra.py for the list of functions.
+
+These functions are designed to be called from the feature API, providing a higher-level abstraction for device interaction and handling common operations. They abstract various tasks, including:
+
+* Logging: In the SPyTest framework, logging functions are provided to ensure a consistent logging mechanism for users. These functions allow users to generate log messages at various levels of severity, including INFO, DEBUG, WARNING, and ERROR, giving detailed information about the test execution flow and any issues encountered during the testing process. The logging functionality covers several aspects: overall log files that capture the execution details of the entire test suite or test run, per-device log files that contain logging information specific to the individual devices involved in the testing (which helps in isolating and analyzing device-specific issues or behaviors), and per-module log files that capture log messages specific to the module or component being tested, aiding in the identification of issues or errors within that module.
+
+* Error Pattern Detection and Result Classification: These functions are responsible for detecting error patterns in the output of device interactions and classifying the results accordingly. By analyzing the device responses, they can identify specific error conditions or anomalies, allowing for appropriate actions or reporting.
+
+* Crash Detection and Recovery: These functions are aimed at detecting crashes or abnormal behavior in the device. They monitor the device state and log files to identify signs of a crash, such as system reboots or error messages. Upon detection, they can trigger recovery mechanisms or initiate further investigation to mitigate the impact of the crash.
+
+* Power Cycle Operations using Remote Power Supply (RPS): These functions facilitate power cycle operations on the device using a Remote Power Supply (RPS). They provide an interface to remotely control the power supply unit connected to the device. This enables the automation of power cycling, allowing for scenarios such as device reboot or troubleshooting power-related issues.
+
+Feature APIs can leverage these functions to facilitate device interactions and manage common operations without the need to directly address low-level implementation details. As a result, feature APIs become modular, reusable, and more easily maintainable.
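+
+For illustration, a test function typically reaches these helpers through the `st` facade. A minimal sketch (the helper names follow the [ROOT]/spytest/infra.py conventions; the message identifier is a placeholder that must exist in the framework's message catalog):
+
+    from spytest import st
+
+    def test_sample_feature():
+        dut = st.get_dut_names()[0]
+        st.log("validating sample feature on %s" % dut)
+        st.report_pass("msgid_sample_feature_pass")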
+### TGen APIs
+
+The SPyTest framework utilizes HLTAPI (High-Level Traffic Application Programming Interface) to interface with commercial traffic generators such as Ixia and Spirent for traffic generation. By leveraging HLTAPI, SPyTest can seamlessly interact with these traffic generators, enabling the configuration and control of network traffic for testing scenarios.
+
+SPyTest also provides an alternative implementation of the same API using SCAPY, a powerful packet manipulation library in Python. This SCAPY-based implementation is specifically designed to generate traffic within the PTF (Packet Test Framework) environment and is also applicable for testing virtual SONiC.
+
+The "Traffic Generation" section of this document provides more details about how traffic generation is handled within the SPyTest framework.
+
+By offering support for both HLTAPI and Scapy, SPyTest provides flexibility in choosing the appropriate traffic generation method based on the testing environment, the availability of commercial traffic generators, or the need for custom traffic generation using Scapy.
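+
+For instance, the HLTAPI traffic_config/traffic_control calls surface as tg_traffic_config/tg_traffic_control wrappers. A sketch (the port handle and rate are placeholders):
+
+    stream = tg.tg_traffic_config(port_handle=tg_ph_1, mode='create',
+                                  transmit_mode='continuous', rate_pps=1000,
+                                  l2_encap='ethernet_ii')
+    tg.tg_traffic_control(action='run', handle=stream['stream_id'])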
+### Feature APIs
+
+ Please refer to [ROOT]/apis/***/*.py for the list of functions.
+
+These functions are designed to be called from test scripts, providing an abstraction layer for handling user interface (UI) interactions and managing version differences. They abstract various tasks, including:
+
+* UI Interaction: These functions handle interactions with the user interface of the system or application being tested. They provide a simplified and standardized way to interact with UI elements such as buttons, menus, forms, and dialogs. By abstracting the UI interactions, these functions make it easier to write test scripts that are not dependent on specific UI implementation details.
+
+* Version Differences: These functions handle version differences in the system or application being tested. They provide a mechanism to identify the current version of the software and adapt the test script's behavior accordingly. This allows for conditional execution of specific test steps or variations in test logic based on the software version. By abstracting version differences, these functions ensure test scripts can be more flexible and compatible across different software versions.
+
+The abstraction provided by these functions helps in creating more maintainable and reusable test scripts. Test scripts can focus on the test logic and flow, while relying on these functions to handle the intricacies of UI interactions and version-specific variations. This abstraction layer reduces the effort required to update test scripts when UI elements change or when working with different software versions, leading to more efficient and robust test automation.
+
+### Utility APIs
+
+ Please refer to [ROOT]/utilities/*.py for the list of functions.
+
+The Utility APIs in the SPyTest framework consist of various commonly used utility functions that aim to prevent code duplication. These utility functions can be found in the [ROOT]/utilities/*.py files.
+
+The purpose of these utility functions is to provide a centralized and reusable set of functionalities that can be utilized across different test scripts. By encapsulating commonly used operations in utility functions, it avoids duplicating code and promotes cleaner and more efficient test script development.
+
+It's important to note that when adding functions to the Utility API, it is recommended to avoid including Device Under Test (DUT) specific functions. The Utility API should focus on providing general-purpose utilities that can be utilized across different devices or test scenarios. This ensures that the utility functions remain versatile and can be used in various testing environments without being tightly coupled to specific DUTs.
+
+### TextFSM Templates
+
+The SPyTest framework utilizes the SONiC Command Line Interface (CLI) to interact with the Devices Under Test (DUTs). It leverages the Netmiko library, which provides a unified interface for executing commands on telnet or SSH connections to the DUTs. This allows SPyTest to establish a connection with the DUTs and execute CLI commands programmatically.
+
+To process the CLI output and extract structured data, SPyTest employs TextFSM templates. These templates define the patterns and rules for parsing the CLI output and converting it into a structured format, typically a Python dictionary. By using TextFSM, SPyTest can convert unstructured CLI output into a more manageable and machine-readable format, facilitating further processing and analysis.
+
+To incorporate TextFSM templates into the SPyTest framework, the templates should be added to the [ROOT]/templates directory. Additionally, the index file in the same directory needs to be updated to include the newly added templates. This allows the framework to locate and utilize the appropriate template for parsing specific CLI commands.
+
+Sample TextFSM templates for a wide range of commands can be found in the [ntc-templates](https://github.com/networktocode/ntc-templates) GitHub repository. These templates can serve as a reference or starting point for creating or customizing templates within the SPyTest framework.
+
+For more detailed information about TextFSM and its usage, you can refer to the [TEXTFSM](https://github.com/google/textfsm/wiki/TextFSM) documentation on GitHub. This documentation provides insights into the features, syntax, and functionality of TextFSM, helping users understand how to create and modify templates effectively within the SPyTest framework.
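+
+As an illustrative sketch (not an actual template shipped in [ROOT]/templates), a template for a simple two-column show command output could look like:
+
+    Value INTERFACE (\S+)
+    Value OPER_STATE (up|down)
+
+    Start
+      ^${INTERFACE}\s+${OPER_STATE} -> Record
+
+Each CLI output line matching the rule yields one record with INTERFACE and OPER_STATE values in the parsed result.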
+### Test Scripts
+
+A Test Script, also known as a module, is a logical collection of discrete test functions that are grouped together based on functionality within the SPyTest framework. It serves as a container for organizing and executing individual test functions, each responsible for verifying one or more test cases.
+
+In SPyTest, a Test Script typically consists of multiple test functions, each dedicated to validating specific aspects of the system or application being tested. These test functions encapsulate the necessary logic, assertions, and steps to execute the test cases and verify the expected behavior.
+
+By grouping related test functions within a Test Script, it becomes easier to manage and organize the test suite. Test Scripts can be structured based on functional areas, specific features, or any other logical grouping that aligns with the testing requirements. This allows for better modularity, reusability, and maintainability of the test codebase.
+
+Test Scripts serve as an entry point for executing the associated test functions. They can be executed individually or as part of a larger test suite. The framework provides mechanisms for running specific Test Scripts or executing all available Test Scripts, depending on the testing needs.
+
+### Packaging
+
+SPyTest extends its functionality by integrating open-source packages: libraries, frameworks, and tools that provide the additional features and capabilities SPyTest needs.

 ![Image](arch.jpg "icon")

-Testbed
-=======
-The testbed file specifies the topology information as described below and it is mandatory input to SPyTest run.
+### Testbed
+
+The testbed file is an essential input required to run SPyTest. It specifies the topology information necessary for the test execution. The testbed file serves as a configuration file that describes the devices, their connections, and various attributes related to the test environment.
+
+Typically, the testbed file is written in a structured format, such as YAML or JSON, and contains the following information:
+
+* Device Information: The testbed file provides details about the devices involved in the test. This includes information such as the device name, IP address, management interface, and authentication credentials required to establish a connection.
+
+* Connection Details: It specifies the connections between devices in the topology. This information outlines the connectivity between devices and the network interfaces used for communication. It defines how devices are interconnected, enabling SPyTest to understand the network topology and perform tests accordingly.
+
+* Additional Attributes: The testbed file may include additional attributes or metadata associated with each device.
These attributes can include vendor-specific details, configuration parameters, or any other relevant information needed by test scripts.
+
+By providing the topology information through the testbed file, SPyTest gains a clear understanding of the test environment, allowing it to establish connections to devices, execute test functions, and validate the desired behavior across the network.
+
+It is important to ensure the accuracy and completeness of the testbed file, as any inaccuracies or missing information may lead to test execution issues or incorrect results.

-#### Sample topology
+### Sample topology

 ![Image](topo.png "icon")

@@ -92,13 +210,7 @@ testbed file content for this topology is given below.
     rps: {model: Raritan, ip: 1.2.3.5, outlet: 11, username: admin, password: admin}
   ixia-01:
     device_type: TGEN
-    properties: {type: ixia, version: "8.42", ip: 1.2.3.6, ix_server: 1.2.3.7}
-  stc-01:
-    device_type: TGEN
-    properties: {type: stc, version: 4.91, ip: 1.2.3.8}
-  scapy-01:
-    device_type: TGEN
-    properties: {type: scapy, version: 1.0, ip: 1.2.3.8}
+    properties: {type: ixia, version: "9.31", ip: 1.2.3.6, ix_server: 1.2.3.7}

 topology:
   DUT-01:
@@ -142,123 +254,63 @@ Each child of this node represents a single device, which can be DUT or TGen as

 The details of DUT attributes are as given below:

-* **access**
-    * DUT access details
-    * **protocol**
-        * DUT access protocol
-        * currently supported access protocols [telnet, ssh]
-    * **ip**
-        * IP address for telnet/ssh connection to DUT
-        * Only IPv4 address is currently supported
-    * **port**
-        * TCP port for telnet/ssh connection to DUT
-
-* **credentials**
-    * DUT access credentials
-    * **username**
-        * DUT access username
-    * **password**
-        * DUT access password
-    * **altpassword**
-        * DUT access alternative password
-        * This is useful if we need to change the default password on first boot
-
-* **properties**
-    * DUT properties
-    * **config**
-        * Configuration profile name to be applied before executing test scripts
-        * The profile details read from yaml section name matching with this name under **configs**
-        * Refer to testbeds/sonic_configs.yaml for more details
-    * **build**
-        * Build profile name to be applied before executing test scripts
-        * The profile details read from yaml section name matching with this name under **builds**
-        * Refer to testbeds/sonic_builds.yaml for more details
-    * **services**
-        * Services profile name to be used for external services like radius/tacacs
-        * The profile details read from yaml section name matching with this name under **services**
-        * Refer to testbeds/sonic_services.yaml for more details
-        * TODO: add mode details
-    * **speed**
-        * Speed profile name to be applied before executing test scripts
-        * The profile details read from yaml section name matching with this name under **speeds**
-        * Refer to testbeds/sonic_speeds.yaml for sample
-
-* **breakout**
-    * Static port breakout configuration
-    * This is essentially list of interface-name, breakout-mode pairs.
-
-* **rps**
-    * Remote Power Supply (PDU) details
-    * **model**
-        * RPS Model
-        * currently supported models [Raritan, ServerTech, Avocent]
-        * Telnet protocol to interface with RPS
-    * **ip**
-        * IP address of RPS
-        * Only IPv4 address is currently supported
-    * **outlet**
-        * RPS outlet identification
-    * **username**
-        * RPS telnet username
-    * **password**
-        * RPS telnet password
+* **access** DUT access details
+    * **protocol** DUT access protocol. Currently supported access protocols: [telnet, ssh]
+    * **ip** IP address for telnet/ssh connection to DUT. Only IPv4 addresses are currently supported
+    * **port** TCP port for telnet/ssh connection to DUT
+

+
+* **credentials** DUT access credentials
+    * **username** DUT access username
+    * **password** DUT access password
+    * **altpassword** DUT access alternative password. This is useful if we need to change the default password on first boot
+
+* **properties** DUT properties
+    * **config** Configuration profile name to be applied before executing test scripts. The profile details are read from the yaml section with this name under **configs**. Refer to testbeds/sonic_configs.yaml for more details
+    * **build** Build profile name to be applied before executing test scripts. The profile details are read from the yaml section with this name under **builds**. Refer to testbeds/sonic_builds.yaml for more details
+    * **services** Services profile name to be used for external services like radius/tacacs. The profile details are read from the yaml section with this name under **services**. Refer to testbeds/sonic_services.yaml for more details
+    * **speed** Speed profile name to be applied before executing test scripts. The profile details are read from the yaml section with this name under **speeds**. Refer to testbeds/sonic_speeds.yaml for a sample
+
+* **breakout** Port breakout configuration. This is essentially a list of interface-name, breakout-mode pairs.
+
+* **rps** Remote Power Supply (PDU) details
+    * **model** RPS model. Currently supported models: [Raritan, ServerTech, Avocent]. The Telnet protocol is used to interface with the RPS
+    * **ip** IP address of RPS. Only IPv4 addresses are currently supported
+    * **outlet** RPS outlet identification
+    * **username** RPS telnet username
+    * **password** RPS telnet password

 The details of TGen attributes are as given below:

-* **properties**
-    * TGen properties
-    * **type**
-        * Traffic Generator Type
-        * Currently supported TGen types [ixia, stc, scapy]
-    * **version**
-        * Traffic Generator version
-        * Supported versions:
-            * ixia 8.42 to 9.20u2
-            * stc 4.91
-            * scapy 1.0 [scapy TGEN version is just a place holder and not used]
-    * **ip**
-        * Traffic Generator chassis IP address
-        * Only IPv4 address is currently supported
-    * **ix_server**
-        * This is only applicable for Ixia
-        * This is IxNetwork Server IP Address
-        * Only IPv4 address is currently supported
-        * TODO: Add section on IxNetwork
+* **properties** TGen properties
+    * **type** Traffic Generator type. Currently supported TGen types: [ixia, stc, scapy]
+    * **version** Traffic Generator version. Supported versions are ixia 8.42 to 9.31, stc 4.91 and scapy 1.0 [the scapy TGEN version is just a placeholder and not used]
+    * **ip** Traffic Generator chassis IP address. Only IPv4 addresses are currently supported
+    * **ix_server** This is only applicable for Ixia and should point to the IxNetwork Server IP address. Only IPv4 addresses are currently supported.

 The **topology** section gives interconnect details between DUTs as well as interconnect between each device with TGen. Each child of this node represents a topology element and should be a DUT name from the **devices** section. The interconnections are specified in the **interfaces** child of each topology element. Each connected interface will have **EndDevice** and **EndPort** attributes representing the partner and its link.

-
-Traffic Generation
-==================
+## Traffic Generation

 ![Image](tgen.jpg "icon")

-The SPyTest supports Ixia and Spirent third party traffic generators, which provide client libraries to talk to hardware.
+SPyTest supports the integration of Ixia and Spirent as third-party traffic generators.
These traffic generators provide client libraries that enable communication with the underlying hardware.

-##### Ixia
+### Ixia

-* Ixia is supported in the IxNetwork Server mode
-* User should use an intermediate server to host the IxNetwork Server
-* Start IxNetwork API server in the server where the IxNetwork Server is installed
-* The IxNetwork Server IP address needs to be given in setup file as "ix_server"
-* Currently SPyTest is verified with below IxNetwork versions:
-  * 8.42 to 9.20u2
-* There are differences in the way to install and launch these versions.
-  Please consult the Ixia documentation for more details.
+For Ixia, SPyTest supports the IxNetwork Server mode. To utilize this mode, users need to set up an intermediate server to host the IxNetwork Server. The IxNetwork API server should be started on the server where the IxNetwork Server is installed. In the setup file, the IP address of the IxNetwork Server should be specified as "ix_server". SPyTest has been verified with IxNetwork versions ranging from 8.42 to 9.31. However, please note that there may be differences in the installation and launch procedures for different versions, so it is advisable to consult the Ixia documentation for more detailed instructions.

-##### Spirent
+### Spirent

-* Spirent is supported in Spirent Testcenter client mode
-* Currently SPyTest is verified with below Spirent versions
-  * 4.91
+For Spirent, SPyTest supports the Spirent TestCenter client mode. SPyTest has been verified with Spirent version 4.91.

-##### API
+### HLTAPI

 All the HLTAPIs are exposed as wrapper functions in the format "tg_[HLTAPI]". There are a few differences between Ixia and Spirent which are handled in these wrapper functions. As and when any new differences are identified, we should be able to add them easily in these wrapper functions.

 Users can refer to either the Ixia or Spirent HLTAPI reference guides and invoke the tg_[HLTAPI].

-##### Scapy
+### Scapy

 ![Image](ptf.jpg "icon")

@@ -290,28 +342,31 @@ Users can refer to either the Ixia or Spirent HLTAPI reference guides and invoke

 * Currently Not supported fully
 * Only basic BGP neighborship is unit tested

-Execution Modes
-===============
+## Execution Modes

 SPyTest supports executing tests in standalone and PTF environments.

-#### PTF Mode
+### PTF Mode

 Refer to [README.testbed.md](https://github.com/sonic-net/sonic-mgmt/blob/master/ansible/README.testbed.md) for setting up PTF-32 or PTF-64 topology.

-#### Standalone Mode
+### Standalone Mode

 In standalone mode, the DUTs can be connected to each other and TGen.

-Environment - PTF Mode
-==============================
+### Virtual SONiC
+
+Refer to [VSNet](https://github.com/ramakristipati/sonic-mgmt/blob/vsnet/spytest/Doc/vsnet.md) for creating a virtual SONiC network. Once the topology is created, VSNet provides command line options to execute the SPyTest tests.
+
+## Environment
+
+### PTF

 Refer to [README.testbed.Overview.md](https://github.com/sonic-net/sonic-mgmt/blob/master/ansible/doc/README.testbed.Overview.md) for setting up PTF environment details.

-Environment - Standalone Mode
-==============================
+### Standalone

-SPyTest currently supports Python2 and pip. The needed packages can be installed using
+SPyTest currently supports Python3.
The needed packages can be installed using [ROOT]/bin/upgrade_requirements.sh @@ -329,24 +384,23 @@ The traffic generator libraries installation should look similar to content in b *Please refer to install.md for more details on installation* -Executing Tests -=============== +## Test Execution First step is to create the testbed file with physical connection details. -#### Running test script(s) +### Running test script(s) [ROOT]/bin/spytest --testbed testbed_file.yaml \ [ROOT]/tests/sanity/test_sanity_l2.py \ [ROOT]/tests/sanity/test_sanity_l3.py \ --logs-path -#### Running tests using PyTest marker +### Running tests using PyTest marker [ROOT]/bin/spytest --testbed testbed_file.yaml \ -m community_pass --logs-path -#### Running tests using suite name +### Running tests using suite name [ROOT]/bin/spytest --testbed testbed_file.yaml \ --test-suite --logs-path @@ -354,7 +408,7 @@ First step is to create the testbed file with physical connection details. The test suite files are expected to be present in [ROOT]/reporting/suites folder. *Please refer to community-ptf for example suite definition. -#### Execution Results and Logs +### Execution Results and Logs The results are stored in a CSV file with the date (YYYY-MM-DD) and time (HH-MM-SS) included in the file name e.g. results_2020_04_04_15_27_result.csv @@ -362,177 +416,184 @@ e.g. results_2020_04_04_15_27_result.csv The log messages are stored in a log file with the date (YYYY-MM-DD) and time (HH-MM-SS) included in the file name e.g. results_2020_04_04_15_27_logs.log -#### Command line arguments - -The following custom command line options are added to SPyTest in addition to existing PyTest [options](https://docs.pytest.org/en/latest/) - -* --testbed-file=[file path] - * testbed file path -- default: ./testbed.yaml -* --tclist-file=[file path] - * file with test function names -* --tclist-csv=[csv] - * comma separated list of test functions -* --logs-path=[logs folder path] - * logs folder -- default: [current directory] -* --email=EMAIL - * Email address(es) to send report to -* --port-defaults={breakout,speed,both,none} - * set port defaults -- default: none -* --load-image={installer,onie,none} - * Loading image before and after execution using specified method -- default: onie -* --memory-check={test,module,none} - * read memory usage default: none - * The setting determines when the logs need to be collected - * none - never - * test - at the end of test function - * module - at the end of module -* --syslog-check={emerg,alert,crit,err,warning,notice,info,debug,none} - * read syslog messages of given level at the end of every module. 
default: err -* --save-sairedis={none,test,module} - * Fetch the sairedis logs from DUT to logs location - * The setting determines when the logs need to be collected - * none - never - * test - at the end of test function - * module - at the end of module -* --port-init-wait=PORT_INIT_WAIT - * Wait time in seconds for ports to come up after clearing configurationn -- default: 300 -* --fetch-core-files={always,onfail,none,onerror,session,onfail-epilog,module-always,module-onfail,module-onerror} - * Fetch the core files from DUT to logs location -- default: session - * The setting determines when the core files need to be collected - * always - at the end of every test function - * onfail - at the end of every failed test function - * none - never - * onerror - at the end of every test function if it is failed with errors as given in sonic_errors.yaml - * session - at the end of entire run - * module-always - at the end of every module - * module-onfail - at the end of every module if it has at least one test function is failed - * module-onerror - at the end of every module if it has at least one test function is failed with errors as given in sonic_errors.yaml -* --get-tech-support={always,onfail,none,onerror,session,onfail-epilog,module-always,module-onfail,module-onerror} - * Get the tech-support information from DUT to logs location -- default: onfail-epilog - * The setting determines when the tech support need to be collected - * always - at the end of every test function - * onfail - at the end of every failed test function - * onfail-epilog - at the end of every failed test function before cleanup - * none - never - * onerror - at the end of every test function if it is failed with errors as given in sonic_errors.yaml - * session - at the end of entire run - * module-always - at the end of every module - * module-onfail - at the end of every module if it has at least one test function is failed - * module-onerror - at the end of every module if it has at least one test function is failed with errors as given in sonic_errors.yaml -* --tc-max-timeout=TC_MAX_TIMEOUT - * Max time that a testcase can take to execute -- default: 600 -* --module-init-max-timeout=MODULE_INIT_MAX_TIMEOUT - * Max time that a module initialization can take to execute -- default: 1200 -* --random-order={0,1} - * Enable executing tests in random order -- default: 1 -* --community-build={none,master,201911} - * Community build support -- default: none - -Log Files -========= -List of logs files generated are as given below where [PREFIX] = "results_%Y_%m_%d_%H_%M_%S" - -* [PREFIX]_dlog-[DUTID]-[DUTNAME].log - * This contains per DUT log, where DUTID is D1,D2 etc and DUTNAME is as given in testbed file - * One file will be generated for each DUT in the testbed file -* [PREFIX]_logs.log - * This is consolidate log for all the entire run -* [PREFIX]_stdout.log - * This is same as [PREFIX]_logs.log except that any stdout/stderr messages from SPyTest and dependent libraries also get logged -* [PREFIX]_summary.txt - * This contains final summary of run with how many tests executed, time taken, pass rate etc. -* [PREFIX]_functions.csv - * This contains result of each test function executed in the run - * It also contain result, description, time taken etc. -* [PREFIX]_functions.html - * This is same as [PREFIX]_functions.csv in HTML table for readily viewing in browser. 
-* [PREFIX]_testcases.csv
-  * This contains result of each test case executed in the run
-  * As mentioned in the beginning each test function may have one or more test cases
-  * It also contain result, description, time taken etc.
-* [PREFIX]_testcases.html
-  * This is same as [PREFIX]_testcases.csv in HTML table for readily viewing in browser.
-* [PREFIX]_modules.csv
-  * This contains result counts (number of test functions) in various categories per test module and the time taken for each module
-* [PREFIX]_modules.html
-  * This is same as [PREFIX]_modules.csv in HTML table for readily viewing in browser.
-* [PREFIX]_features.csv
-  * This contains result counts (number of test cases) in various categories per test component and the time taken for each
-  * Please refer to [ROOT]/reporting/tcmap.csv for test cases association to components
-  * Example component names: Regression, NAT
-* [PREFIX]_features.html
-  * This is same as [PREFIX]_features.csv in HTML table for readily viewing in browser.
-* [PREFIX]_stats.txt
-  * This contains statistics on time spent in each CLI command and TGen operation for each module
-* [PREFIX]_stats.csv
-  * This contains statistics on total time spent in CLI and TGen for each module
-* [PREFIX]_stats.html
-  * This is same as [PREFIX]_stats.csv in HTML table for readily viewing in browser.
-* [PREFIX]_syslog.csv
-  * This contains syslog messages collected on all DUTs in each test module
-  * Please check for --syslog-check command line option to configure the severity of messages to be collected and frequency
-* [PREFIX]_syslog.html
-  * This is same as [PREFIX]_syslog.csv in HTML table for readily viewing in browser.
-* [PREFIX]_mlog_[module].log
-  * This is same as [PREFIX]_logs.log but per module
-* [PREFIX]_tgen
-  * This contains TGen specific debug logs
-
-Dashboard
-===============
-The dashboard.html contains links to various files generated
-
-Internals
-===============
+### Command line arguments
+
+To obtain a comprehensive list of the command line options available in SPyTest, use the following command:
+
+    [ROOT]/bin/spytest --help
+
+Executing this command displays all the command line options along with their associated help strings, which explain the purpose and functionality of each option.
+
+## Log Files
+
+Below is the collection of log files generated during SPyTest execution. Each log file's name is prefixed with [PREFIX], representing the timestamp in the format "results_%Y_%m_%d_%H_%M_%S". In the list provided below, the actual [PREFIX] has been omitted to avoid redundancy.
+
+* dlog-[DUTID]-[DUTNAME].log: This file contains the per DUT log, where [DUTID] represents the DUT identifier (e.g., D1, D2) and [DUTNAME] is the name specified in the testbed file. One file is generated for each DUT in the testbed file.
+* logs.log: This file is a consolidated log for the entire test run.
+* stdout.log: Similar to logs.log, this file additionally includes any stdout/stderr messages from SPyTest and its dependent libraries.
+* summary.txt: This file provides the final summary of the test run, including the number of executed tests, time taken, pass rate, etc.
+* functions.csv: This file contains the result of each executed test function, including the result, description, time taken, etc.
+* functions.html: This file is an HTML version of functions.csv for easy viewing in a browser.
+* testcases.csv: This file contains the result of each executed test case. Test functions may have one or more test cases associated with them. The file includes the result, description, time taken, etc., for each test case.
+* testcases.html: This file is an HTML version of testcases.csv.
+* modules.csv: This file contains the result counts (number of test functions) in various categories per test module and the time taken for each module.
+* modules.html: This file is an HTML version of modules.csv.
+* features.csv: This file contains the result counts (number of test cases) in various categories per test component and the time taken for each. The association between test cases and components can be found in [ROOT]/reporting/tcmap.csv. Example component names include "Regression" and "NAT".
+* features.html: This file is an HTML version of features.csv.
+* stats.txt: This file contains statistics on the time spent on each CLI command and TGen operation for each module.
+* stats.csv: This file contains statistics on the total time spent on CLI and TGen operations for each module.
+* stats.html: This file is an HTML version of stats.csv.
+* syslog.csv: This file contains syslog messages collected from all DUTs in each test module. The severity of messages collected and their frequency can be configured using the --syslog-check command-line option.
+* syslog.html: This file is an HTML version of syslog.csv.
+* mlog_[module].log: This file is similar to logs.log but specific to each module.
+* tgen: This directory contains TGen specific debug logs.
+
+These log files provide valuable information for analyzing test results, debugging issues, and gaining insights into the test execution process. They offer a comprehensive view of the test run, including detailed logs, summaries, statistics, and associated data.
+
+## Dashboard
+
+The dashboard.html file serves as a summary report that provides links to the various files generated during the SPyTest execution. It offers a centralized, user-friendly interface that allows users to easily access and review the test results and associated files.
+
+Some of the files that are commonly linked in the dashboard.html file include:
+
+* Test Results: A detailed test report containing information about the test cases executed, their pass/fail status, and any associated logs or screenshots.
+* Logs: System logs, device logs, test framework logs, and any other relevant logs generated during the test execution.
+* Artifacts: Any additional artifacts generated during the test execution, such as captured packets, configuration files, or debug information.
+* Screenshots: If any visual elements or UI testing are involved, screenshots may be captured during the test execution and linked in the dashboard for easy access and review.
+
+The dashboard.html file acts as a centralized hub to access and navigate through these files, providing a comprehensive view of the test execution and its associated artifacts.
It simplifies the process of reviewing and analyzing the test results and enables users to quickly access the specific files they need for further investigation or reporting purposes.
+
+By leveraging the dashboard.html file, users can efficiently explore the generated files, access relevant information, and gain insights into the test execution without having to manually locate and open each individual file.
+
+## Internals
+
 ### Init sequence

-SPyTest performs below operations before executing the test modules.
+Before executing the test modules, SPyTest performs several operations to ensure the test environment is properly set up. These operations include:
+
+* Validation of Testbed File: SPyTest validates the specified testbed file to ensure its correctness and completeness. This validation ensures that the required information about devices, connections, and other configuration details is accurately specified in the testbed file.
+
+* Device and TGen Connection: SPyTest establishes connections to all the devices and Traffic Generators (TGen) specified in the testbed file. This step enables SPyTest to interact with the devices and TGens during test execution.
+
+* Software Upgrade: If specified in the command line arguments or the testbed build profile, SPyTest upgrades the software on the Devices Under Test (DUTs) to the specified version. This ensures that the test environment is running the desired software version for testing.
+
+* Configuration Database Cleanup: SPyTest removes all entries from the configuration database (config_db.json), except for the "DEVICE_METADATA", "MGMT_INTERFACE" and "PORT" entries. This cleanup operation ensures that the configuration database is in a clean state before applying the test-specific configurations (a minimal sketch of this filtering is shown after the Base Configuration heading below).
-* Validate the testbed file specified.
-* Connect to all devices and TGen specified in testbed file.
-* Upgrade the software on DUTs as specified in command line arguments
-  or as given in testbed build profile.
-* Remove all entries except for "DEVICE_METADATA" and "PORT" entries in config_db.json
-* Perform static port breakout as specified in testbed file
-* Configure port speeds as given in testbed speed profile
-* Apply the configuration as given in the testbed config profile
-* Save the resultant configuration as base configuration
+* Port Breakout: If specified in the testbed file, SPyTest performs a static port breakout operation. This operation configures the port breakout mode for specific ports, allowing them to be used in different breakout configurations.
+
+* Port Speed Configuration: SPyTest configures the port speeds according to the specified speed profile in the testbed file. This step ensures that the ports are set to the desired speeds for testing.
+
+* Configuration Profile Application: SPyTest applies the configuration given in the testbed config profile before the tests start.
+
+* Base Configuration Saving: After applying the configurations, SPyTest saves the resultant configuration as the base configuration. This serves as a reference point for future test executions and enables comparison to track any configuration changes caused by the tests.
+
+By performing these operations, SPyTest ensures that the test environment is properly set up and configured according to the test requirements, providing a consistent and controlled testing environment for the execution of test modules.

 ### Base Configuration

-As described in **Init Sequence** section, base configuration is created during the SPyTest initialization. The framework ensures that the system is brought to base configuration before starting any test modules.
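+
+The configuration database cleanup performed during the init sequence can be pictured with the minimal Python sketch below. It assumes a local copy of config_db.json and a hypothetical helper name; SPyTest's actual implementation may differ:
+
+    import json
+
+    # Tables preserved by the cleanup described in the init sequence
+    KEEP = {"DEVICE_METADATA", "MGMT_INTERFACE", "PORT"}
+
+    def clean_config_db(path="config_db.json"):
+        # Load the current configuration database
+        with open(path) as fp:
+            cfg = json.load(fp)
+        # Drop every table except the preserved ones
+        cleaned = {name: table for name, table in cfg.items() if name in KEEP}
+        # Write the cleaned configuration back
+        with open(path, "w") as fp:
+            json.dump(cleaned, fp, indent=4)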
+ +As described in the "Init Sequence" section, the SPyTest framework creates a base configuration during its initialization process. This base configuration serves as a reference point and represents the desired initial state of the system before any test modules are executed. + +The framework ensures that the system is brought to the base configuration state before starting the execution of any test modules. This ensures a consistent starting point for the tests and helps in achieving reliable and reproducible test results. + +The base configuration is typically defined based on the testbed file and any additional configuration profiles or settings specified. It includes configurations related to the network devices, interfaces, protocols, services, and any other relevant aspects of the test environment. + +By ensuring the system is in the base configuration state, SPyTest provides a controlled environment for running test modules. It helps in eliminating any unwanted side effects or residual configurations from previous tests, ensuring that each test module starts from a clean and predictable state. + +This initialization step plays a crucial role in maintaining the integrity and reliability of the test execution process, allowing test modules to focus on specific functionalities or test cases without being influenced by the system's previous state. ### Module Configuration -The test modules are expected to configure the device and TGen in the module prologue and clean it up in module epilogue. Below example shows the way modules register these hooks. + +In SPyTest, test modules are responsible for configuring the device and Traffic Generators (TGen) within their respective module prologue and cleaning up any configurations or resources in the module epilogue. This ensures that the necessary setup and teardown operations are performed before and after each test module execution. @pytest.fixture(scope="module", autouse=True) - def sanity_l2_module_hooks(request): + def module_hooks(request): ########### module prologue ################# yield ########### module epilogue ################# -The framework will ensure to call prologue before any test functions in the module are executed. Similarly epilogue is executed after all test functions are executed. +The module prologue function will be automatically called at the beginning of the module execution, allowing you to perform device and TGen configuration. The module epilogue function is executed after all test functions have been executed within the module, ensuring that any necessary cleanup operations are performed. + +By leveraging these fixture functions with the appropriate scope and settings, SPyTest ensures the orderly execution of the module prologue before any test functions and the module epilogue after all test functions in the module. ### Customize Error Patterns - TODO + +The [ROOT]/testbeds/sonic_errors.yaml contains regular expressions of errors and corresponding actions to be performed when those errors are encountered. This configuration file is used to define error patterns and specify the appropriate action to be taken when those patterns match with the encountered errors. + +### Syslog Error Patterns + +SPyTest collects and consolidates syslog messages after each test. The categorization of syslog messages is based on the information provided in the [ROOT]/reporting/syslogs.yaml file. This file serves as a reference for organizing the messages into different categories. 
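+
+As an illustration only, a category in this file could map to a list of regular expressions, along the following hypothetical lines (as noted below, the categories initially ship empty):
+
+    red:
+      - ".*(segfault|core dumped).*"
+    green:
+      - ".*harmless third-party noise.*"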
+
+The syslog messages are categorized as follows:
+
+* Yellow: Messages in this category are reported only once for every module. They are considered important and are not discarded.
+* Green: Messages in this category are silently discarded. They are typically not relevant for test analysis or reporting.
+* Red: If a syslog message matches a pattern specified in this category, the corresponding test is marked as failed. The first matched message is treated as a DUTIssue, indicating a potential software issue.
+
+By leveraging the [ROOT]/reporting/syslogs.yaml file, SPyTest effectively organizes syslog messages, enabling efficient analysis and reporting of test results.
+
+The introduction of the red and green categories provides a streamlined approach for handling different types of syslog messages. The red category helps automate the identification and analysis of failures related to software issues, while the green category allows for the exclusion of syslog messages that are not pertinent to the analysis or reporting process.
+
+The initial implementation of these categories is currently empty to align with the existing behavior. However, there is room for future contributions to expand the list of regular expressions and categories. This collaborative effort can enhance the failure analysis capabilities and cover a wider range of scenarios.
+
+In summary, this approach facilitates the automation of failure analysis, particularly in identifying software-related issues through the red category. Additionally, it aids in eliminating known noise syslog messages, particularly those originating from third-party code.
+
+In addition to the syslog categorization, SPyTest saves the collected syslog messages from all Device Under Test (DUT) devices in each test module into a syslog.csv file. This file serves as a comprehensive record of the syslog messages and includes information about their severity and frequency. The content of the syslog.csv file can be further customized by configuring the --syslog-check command-line option.
+
+Furthermore, SPyTest provides a syslog.html file, which is an HTML version of the syslog messages. This file offers a user-friendly and visually appealing representation of the syslog data. It can be opened in a web browser, facilitating easy navigation and exploration of the syslog messages. The syslog.html file presents the syslog categories in a more organized and accessible manner, enhancing the usability of the information.

 ### Batch Processing

-    TODO
-### Scheduling
-    TODO
+The SPyTest framework is designed to handle the execution of a large number of test modules, which may require various types of topologies based on the features being tested. These topologies can range from single-device setups to more complex configurations involving multiple interconnected devices. To efficiently execute these test modules in parallel, the SPyTest framework employs a batch processing technique.
+
+The devices used in the testing environment are typically organized into racks or pods, with interconnecting cables. The batch processing capability allows users to divide these pods into smaller topologies known as gateway (gw) nodes. Users can specify the desired number of DUT topologies for each bucket using command line arguments. For example, specifying "-tclist-buckets 1,2,4" indicates the intention to have 4, 2, and 1 DUT topologies respectively.
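+
+For example, a hypothetical invocation enabling these buckets might look like the following (illustration only; verify the exact option spelling with "[ROOT]/bin/spytest --help"):
+
+    [ROOT]/bin/spytest --testbed testbed_file.yaml \
+        -tclist-buckets 1,2,4 --logs-path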
+ +Users can further customize the devices used in each bucket by providing the necessary configuration through command line arguments. The configuration can be specified using the "--env SPYTEST_TOPO_4" flag, where "4" represents the bucket number. The selected devices and their interconnections are defined using a specific syntax, with "|" separating groups and "||" indicating multiple groups. + +To determine the topology requirements for each module, the SPyTest framework reads the "modules.csv" file. This information is used to associate the appropriate modules with the corresponding nodes created based on the bucket arguments. + +Once the nodes and module associations are established, the SPyTest framework starts executing the test modules on the higher bucket nodes first. Before dispatching the actual test modules, a framework test module is launched on these nodes to perform necessary initializations such as image upgrades or breakout configurations. + +The framework continues executing the test modules within the current bucket until they are completed. It then progresses to the next lower bucket, repeating the process until all the test cases have been executed. This approach ensures optimal resource utilization and facilitates faster execution of the extensive suite of test modules. ### Static Analysis - [ROOT]/bin/lint.sh script can be used to perform static analysis. - This invokes PyLint with flags to disable some of unsupported options -### Best Practices - TODO +To perform static analysis on the SPyTest codebase, you can utilize the lint.sh script located in the [ROOT]/bin directory. This script uses PyLint, a popular Python static code analyzer, to analyze the code and provide feedback on potential issues and code quality improvements. The lint.sh script includes flags that are specifically configured to disable certain unsupported options and settings in PyLint. This ensures that the analysis is performed with the appropriate configuration for the SPyTest codebase. + +## Test Suites + +The test suites can be found in the directory [ROOT]/reporting/suites. To access the topology requirements for all test modules, one can refer to the file [ROOT]/reporting/modules_stc.csv. + +This CSV file provides detailed information about each test module and their corresponding topology requirements. Blank lines and comments (lines starting with '#') are ignored during processing and can be disregarded. + +The CSV file contains the following columns: + +* TopoBucket: Indicates the number of devices needed for the test module. It represents the bucket or group to which the test module belongs based on the required network topology. + +* OrderBucket: Specifies the execution order within each bucket. Test modules with higher values in this column are executed first within their respective bucket. + +* Preference (Optional): An optional column that specifies the preference value for the test module. If not specified, the preference value is considered as 1. If there are multiple lines with different preference values, one line is randomly selected. + +* Module: Contains the name of the test module relative to the tests folder. It represents the test module's file name or path. + +* Topology: Defines the topology requirements for the test module. It is similar to the ensure_minimum_topology notation and describes the specific network configurations or dependencies needed to run the test module successfully. + +To illustrate, let's examine the topology requirements for the sanity suite. 
+
+    2,4,sanity/test_sanity_l2.py,D1D2:4 D1T1:3 D2T1:1
+    2,2,sanity/test_sanity_l3.py,D1D2:1 D1T1:1 D2T1:1
+    1,2,sanity/test_sanity_sys.py,D1
+
+The "test_sanity_l2.py" module requires two devices connected with 4 links, with specific link requirements for each device and the traffic generator. The "test_sanity_l3.py" module also requires two devices connected with 1 link, with each device needing a link to the traffic generator. The "test_sanity_sys.py" module requires a single device.
+
+To verify the physical devices' topology and validate network connectivity, either Ixia or Spirent can be used as the traffic generator. The latest supported versions for Ixia and Spirent are 9.31 and 4.91, respectively. These versions are recommended for optimal compatibility and performance.
+
+Once the testbed file is created, the command below can be used to execute the sanity suite.

-### Known Issues
-    TODO
+    [ROOT]/bin/spytest --testbed testbed_file.yaml \
+        --test-suite dev-sanity --logs-path

-### Known Limitations
-    TODO
+As mentioned earlier, the results can be checked by referring to the xxx_functions.csv or xxx_functions.html files. These files contain information related to the test functions and their corresponding outcomes.

-### Roadmap
-    TODO
+As an alternative, for a more convenient way to access the results, the /dashboard.html file can be opened and the modules link in the left pane can be used. Clicking on the modules link loads /results_modules.html in the right pane, which provides a comprehensive view of the results for all modules. The results can be sorted on any column of this page, and clicking on a module name opens the corresponding module log file for detailed analysis. We can search for " Report(" to reach the individual function results.

diff --git a/spytest/bin/clean.sh b/spytest/bin/clean.sh
index 34999ea4f4..04920c8bdf 100755
--- a/spytest/bin/clean.sh
+++ b/spytest/bin/clean.sh
@@ -5,4 +5,3 @@ cd $(dirname $0)/..
 find . -name __pycache__ | xargs rm -rf
 find . -name .pytest_cache | xargs rm -rf
 find . -name "*.pyc" | xargs rm -f
-
diff --git a/spytest/bin/env b/spytest/bin/env
index b72d0810ab..e8b97176af 100755
--- a/spytest/bin/env
+++ b/spytest/bin/env
@@ -1,33 +1,38 @@
 #!/bin/sh

-if [ -z "$SCID" ]; then
+if [ -z "$SCID" -o ! -d "$SCID" ]; then
    export SCID=/opt/projects/scid
    if [ ! -d $SCID ]; then
        export SCID=/projects/scid
-    else
-        echo "================== USING LOCAL SPYTEST TOOLS ================="
    fi
 fi

 if [ "$SCID_PYTHON_BIN" != "" ]; then
-    echo "USING Python From $SCID_PYTHON_BIN"
-elif [ "$SPYTEST_PYTHON_VERSION" = "3.6.6" ]; then
-    export SCID_PYTHON_BIN=$SCID/tools/ActivPython/3.6.6/bin
-elif [ "$SPYTEST_PYTHON_VERSION" = "3.7.1" ]; then
-    export SCID_PYTHON_BIN=$SCID/tools/ActivPython/3.7.1/bin
+    echo "# Python From $SCID_PYTHON_BIN"
+elif [ -z "$SPYTEST_PYTHON_VERSION" -o "$SPYTEST_PYTHON_VERSION" = "current" ]; then
+    export SCID_PYTHON_BIN=$SCID/tools/Python-3.8.12/bin
+    if [ !
-f $SCID_PYTHON_BIN/python ]; then
+        export SCID_PYTHON_BIN=$SCID/tools/Python-3.8/bin
+    fi
+    #echo "# Branch default Python From $SCID_PYTHON_BIN"
+elif [ -d $SCID/tools/Python-$SPYTEST_PYTHON_VERSION/bin ]; then
+    export SCID_PYTHON_BIN=$SCID/tools/Python-$SPYTEST_PYTHON_VERSION/bin
+elif [ -d $SCID/tools/ActivPython/$SPYTEST_PYTHON_VERSION/bin ]; then
+    export SCID_PYTHON_BIN=$SCID/tools/ActivPython/$SPYTEST_PYTHON_VERSION/bin
+elif [ -d $SCID/tools/ActivPython/venv/$SPYTEST_PYTHON_VERSION/bin ]; then
+    export SCID_PYTHON_BIN=$SCID/tools/ActivPython/venv/$SPYTEST_PYTHON_VERSION/bin
 else
-    export SCID_PYTHON_BIN=$SCID/tools/ActivPython/venv/3.8.0/bin
-    export SCID_PYTHON_BIN=$SCID/tools/ActivPython/3.7.1/bin
-    export SCID_PYTHON_BIN=$SCID/tools/ActivPython/3.6.6/bin
-    export SCID_PYTHON_BIN=$SCID/tools/ActivPython/current/bin
-fi
-
-if [ -z "$SCID_TGEN_PATH=" ]; then
-    export SCID_TGEN_PATH=$SCID/tgen
+    export SCID_PYTHON_BIN=$SCID/tools/ActivPython/venv/3.8.0/bin
+    export SCID_PYTHON_BIN=$SCID/tools/ActivPython/current/bin
 fi

-if [ -z "$SPYTEST_PYTHON" ]; then
-    export SPYTEST_PYTHON=$SCID_PYTHON_BIN/python
+export SCID_TGEN_PATH=$SCID/tgen
+if [ -z "$SPYTEST_PYTHON" -o -n "$SPYTEST_PYTHON_VERSION" ]; then
+    if [ -f $SCID_PYTHON_BIN/python$SPYTEST_PYTHON_VERSION ]; then
+        export SPYTEST_PYTHON=$SCID_PYTHON_BIN/python$SPYTEST_PYTHON_VERSION
+    else
+        export SPYTEST_PYTHON=$SCID_PYTHON_BIN/python
+    fi
 fi

 export PATH=$SCID/tools/bin:$PATH
@@ -41,7 +46,10 @@ if [ -z "$SCID_TCL85_BIN" ]; then
 fi
 if [ ! -d $SCID_TCL85_BIN ]; then
    # use native tcl 8.5
-    SCID_TCL85_BIN=$(dirname $(which tclsh8.5))
+    tclsh85=$(which tclsh8.5)
+    if [ -n "$tclsh85" ]; then
+        SCID_TCL85_BIN=$(dirname $tclsh85)
+    fi
 fi
 export SCID_TCL84_BIN=$SCID/tools/tcl/8.4.20/bin
-
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SCID/tools/lib
diff --git a/spytest/bin/generate_api_docs.sh b/spytest/bin/generate_api_docs.sh
deleted file mode 100755
index cb03474ebe..0000000000
--- a/spytest/bin/generate_api_docs.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-dir=$(dirname $0)
-dir=$(cd $dir;pwd -P)
-ddir=$(cd $dir/..;pwd -P)
-
-pushd $ddir/docs/source
-# create rst files
-$dir/python -m sphinx.apidoc -f -o . ../..
-# create html documents
-$dir/python -m sphinx $ddir/docs/source $ddir/docs/build
-# open index in default browser
-xdg-open $ddir/docs/build/index.html
-
diff --git a/spytest/bin/lint.sh b/spytest/bin/lint.sh
index 0414f4b984..f76d4d7e8a 100755
--- a/spytest/bin/lint.sh
+++ b/spytest/bin/lint.sh
@@ -1,11 +1,12 @@
-#!/bin/sh
+#!/bin/bash

 dir=$(dirname $0)
 dir=$(cd $dir;pwd -P)
 ddir=$(cd $dir/..;pwd -P)

 IGNORE1=""
-IGNORE2=""
+IGNORE2=""
+IGNORE3=""

 IGNORE1="$IGNORE1 --disable=W0311" #bad-indentation

@@ -13,106 +14,206 @@ IGNORE1="$IGNORE1 --disable=C0103" #invalid-name
 IGNORE1="$IGNORE1 --disable=C0111" #missing-docstring
 IGNORE1="$IGNORE1 --disable=C0305" #trailing-newlines
 IGNORE1="$IGNORE1 --disable=C0325" #superfluous-parens
-IGNORE1="$IGNORE1 --disable=C0326" #bad-whitespace
+IGNORE2="$IGNORE2 --disable=C0326" #bad-whitespace
 IGNORE1="$IGNORE1 --disable=C0410" #multiple-imports
 IGNORE1="$IGNORE1 --disable=C0413" #wrong-import-position
 IGNORE1="$IGNORE1 --disable=C0415" #import-outside-toplevel

-IGNORE2="$IGNORE2 --disable=W0102" #dangerous-default-value
-IGNORE2="$IGNORE2 --disable=W0105" #pointless-string-statement
-#IGNORE2="$IGNORE2 --disable=W0106" #expression-not-assigned
-IGNORE2="$IGNORE2 --disable=W0107" #unnecessary-pass
-IGNORE2="$IGNORE2 --disable=W0122" #exec-used
-IGNORE2="$IGNORE2 --disable=W0123" #eval-used
-IGNORE2="$IGNORE2 --disable=W0201" #attribute-defined-outside-init
-IGNORE2="$IGNORE2 --disable=W0212" #protected-access
+IGNORE1="$IGNORE1 --disable=W0102" #dangerous-default-value
+IGNORE1="$IGNORE1 --disable=W0105" #pointless-string-statement
+#IGNORE1="$IGNORE1 --disable=W0106" #expression-not-assigned
+IGNORE1="$IGNORE1 --disable=W0107" #unnecessary-pass
+IGNORE1="$IGNORE1 --disable=W0122" #exec-used
+IGNORE1="$IGNORE1 --disable=W0123" #eval-used
+IGNORE1="$IGNORE1 --disable=W0201" #attribute-defined-outside-init
+IGNORE1="$IGNORE1 --disable=W0212" #protected-access
 IGNORE2="$IGNORE2 --disable=W0232" #no-init
-IGNORE2="$IGNORE2 --disable=W0301" #unnecessary-semicolon
+IGNORE1="$IGNORE1 --disable=W0237" #arguments-renamed
+IGNORE1="$IGNORE1 --disable=W0301" #unnecessary-semicolon
 IGNORE2="$IGNORE2 --disable=W0312" #mixed-indentation
-IGNORE2="$IGNORE2 --disable=W0401" #wildcard-import
-IGNORE2="$IGNORE2 --disable=W0404" #reimported
-IGNORE2="$IGNORE2 --disable=W0511" #fixme
-IGNORE2="$IGNORE2 --disable=W0601" #global-variable-undefined
-IGNORE2="$IGNORE2 --disable=W0603" #global-statement
-IGNORE2="$IGNORE2 --disable=W0604" #global-at-module-level
-#IGNORE2="$IGNORE2 --disable=W0611" #unused-import
-#IGNORE2="$IGNORE2 --disable=W0612" #unused-variable
-IGNORE2="$IGNORE2 --disable=W0613" #unused-argument
-IGNORE2="$IGNORE2 --disable=W0614" #unused-wildcard-import
-IGNORE2="$IGNORE2 --disable=W0621" #redefined-outer-name
-IGNORE2="$IGNORE2 --disable=W0622" #redefined-builtin
-IGNORE2="$IGNORE2 --disable=W0702" #bare-except
-IGNORE2="$IGNORE2 --disable=W0703" #broad-except
-
-IGNORE2="$IGNORE2 --disable=C0112" #empty-docstring
-IGNORE2="$IGNORE2 --disable=C0113" #unneeded-not
-#IGNORE2="$IGNORE2 --disable=C0121" #singleton-comparison
-IGNORE2="$IGNORE2 --disable=C0123" #unidiomatic-typecheck
-IGNORE2="$IGNORE2 --disable=C0200" #consider-using-enumerate
-IGNORE2="$IGNORE2 --disable=C0201" #consider-iterating-dictionary
-IGNORE2="$IGNORE2 --disable=C0301" #line-too-long
-IGNORE2="$IGNORE2 --disable=C0302" #too-many-lines
-IGNORE2="$IGNORE2 --disable=C0303" #trailing-whitespace
-IGNORE2="$IGNORE2 --disable=C0304" #missing-final-newline
-IGNORE2="$IGNORE2
--disable=C0321" #multiple-statements +IGNORE1="$IGNORE1 --disable=W0401" #wildcard-import +#IGNORE1="$IGNORE1 --disable=W0404" #reimported +IGNORE1="$IGNORE1 --disable=W0511" #fixme +IGNORE1="$IGNORE1 --disable=W0601" #global-variable-undefined +IGNORE1="$IGNORE1 --disable=W0602" #global-variable-not-assigned +IGNORE1="$IGNORE1 --disable=W0603" #global-statement +IGNORE1="$IGNORE1 --disable=W0604" #global-at-module-level +#IGNORE1="$IGNORE1 --disable=W0611" #unused-import +#IGNORE1="$IGNORE1 --disable=W0612" #unused-variable +IGNORE1="$IGNORE1 --disable=W0613" #unused-argument +IGNORE1="$IGNORE1 --disable=W0614" #unused-wildcard-import +IGNORE1="$IGNORE1 --disable=W0621" #redefined-outer-name +IGNORE1="$IGNORE1 --disable=W0622" #redefined-builtin +IGNORE1="$IGNORE1 --disable=W0702" #bare-except +IGNORE1="$IGNORE1 --disable=W0703" #broad-except +IGNORE3="$IGNORE3 --disable=W0707" #raise-missing-from + +IGNORE1="$IGNORE1 --disable=C0112" #empty-docstring +#IGNORE1="$IGNORE1 --disable=C0113" #unneeded-not +#IGNORE1="$IGNORE1 --disable=C0121" #singleton-comparison +IGNORE1="$IGNORE1 --disable=C0123" #unidiomatic-typecheck +IGNORE1="$IGNORE1 --disable=C0200" #consider-using-enumerate +IGNORE1="$IGNORE1 --disable=C0201" #consider-iterating-dictionary +IGNORE1="$IGNORE1 --disable=C0301" #line-too-long +IGNORE1="$IGNORE1 --disable=C0302" #too-many-lines +IGNORE1="$IGNORE1 --disable=C0303" #trailing-whitespace +IGNORE1="$IGNORE1 --disable=C0304" #missing-final-newline +IGNORE1="$IGNORE1 --disable=C0321" #multiple-statements IGNORE2="$IGNORE2 --disable=C0330" #bad-continuation -IGNORE2="$IGNORE2 --disable=C0411" #wrong-import-order -IGNORE2="$IGNORE2 --disable=C0412" #ungrouped-imports +IGNORE1="$IGNORE1 --disable=C0411" #wrong-import-order +IGNORE1="$IGNORE1 --disable=C0412" #ungrouped-imports IGNORE2="$IGNORE2 --disable=C1001" #old-style-class -IGNORE2="$IGNORE2 --disable=C1801" #len-as-condition - -#IGNORE2="$IGNORE2 --disable=E0102" #function-redefined -IGNORE2="$IGNORE2 --disable=E0632" #unbalanced-tuple-unpacking -IGNORE2="$IGNORE2 --disable=E1128" #assignment-from-none -#IGNORE2="$IGNORE2 --disable=E1305" #too-many-format-args - -IGNORE2="$IGNORE2 --disable=R0101" #too-many-nested-blocks -IGNORE2="$IGNORE2 --disable=R0102" #simplifiable-if-statement -IGNORE2="$IGNORE2 --disable=R0201" #no-self-use -IGNORE2="$IGNORE2 --disable=R0205" #useless-object-inheritance -IGNORE2="$IGNORE2 --disable=R0902" #too-many-instance-attributes -IGNORE2="$IGNORE2 --disable=R0903" #too-few-public-methods -IGNORE2="$IGNORE2 --disable=R0904" #too-many-public-methods -IGNORE2="$IGNORE2 --disable=R0911" #too-many-return-statements -IGNORE2="$IGNORE2 --disable=R0912" #too-many-branches -IGNORE2="$IGNORE2 --disable=R0913" #too-many-arguments -IGNORE2="$IGNORE2 --disable=R0914" #too-many-locals -IGNORE2="$IGNORE2 --disable=R0915" #too-many-statements -IGNORE2="$IGNORE2 --disable=R0916" #too-many-boolean-expressions -IGNORE2="$IGNORE2 --disable=R1705" #no-else-return -IGNORE2="$IGNORE2 --disable=R1710" #inconsistent-return-statements -IGNORE2="$IGNORE2 --disable=R1711" #useless-return - -IGNORE2="$IGNORE2 --disable=R1714" #consider-using-in -IGNORE2="$IGNORE2 --disable=R1716" #chained-comparison -IGNORE2="$IGNORE2 --disable=R1719" #simplifiable-if-expression -IGNORE2="$IGNORE2 --disable=R1720" #no-else-raise -IGNORE2="$IGNORE2 --disable=R1723" #no-else-break -IGNORE2="$IGNORE2 --disable=R1724" #no-else-continue - -exclude="__init__.py scheduler/" -LINT_PYVER=0 -ERR_TEMP=$(mktemp) -REPORT=lint_report.log 
+IGNORE1="$IGNORE1 --disable=C1801" #len-as-condition + +#IGNORE1="$IGNORE1 --disable=E0102" #function-redefined +IGNORE1="$IGNORE1 --disable=E0632" #unbalanced-tuple-unpacking +IGNORE1="$IGNORE1 --disable=E1128" #assignment-from-none +#IGNORE1="$IGNORE1 --disable=E1305" #too-many-format-args + +IGNORE1="$IGNORE1 --disable=R0022" #useless-option-value +IGNORE1="$IGNORE1 --disable=R0101" #too-many-nested-blocks +IGNORE1="$IGNORE1 --disable=R0102" #simplifiable-if-statement +IGNORE1="$IGNORE1 --disable=R0201" #no-self-use +IGNORE1="$IGNORE1 --disable=R0205" #useless-object-inheritance +IGNORE1="$IGNORE1 --disable=R0902" #too-many-instance-attributes +IGNORE1="$IGNORE1 --disable=R0903" #too-few-public-methods +IGNORE1="$IGNORE1 --disable=R0904" #too-many-public-methods +IGNORE1="$IGNORE1 --disable=R0911" #too-many-return-statements +IGNORE1="$IGNORE1 --disable=R0912" #too-many-branches +IGNORE1="$IGNORE1 --disable=R0913" #too-many-arguments +IGNORE1="$IGNORE1 --disable=R0914" #too-many-locals +IGNORE1="$IGNORE1 --disable=R0915" #too-many-statements +IGNORE1="$IGNORE1 --disable=R0916" #too-many-boolean-expressions +IGNORE1="$IGNORE1 --disable=R1705" #no-else-return +IGNORE1="$IGNORE1 --disable=R1710" #inconsistent-return-statements +IGNORE1="$IGNORE1 --disable=R1711" #useless-return +IGNORE1="$IGNORE1 --disable=R1714" #consider-using-in +IGNORE1="$IGNORE1 --disable=R1716" #chained-comparison +IGNORE1="$IGNORE1 --disable=R1719" #simplifiable-if-expression +IGNORE1="$IGNORE1 --disable=R1720" #no-else-raise +IGNORE1="$IGNORE1 --disable=R1723" #no-else-break +IGNORE1="$IGNORE1 --disable=R1724" #no-else-continue +IGNORE3="$IGNORE3 --disable=R1725" #super-with-arguments +IGNORE3="$IGNORE3 --disable=R1732" #consider-using-with + +if [ "$LINT_IGNORE_UNUSED" == "1" ]; then + IGNORE1="$IGNORE1 --disable=W0611" #unused-import + IGNORE1="$IGNORE1 --disable=W0612" #unused-variable +fi + +IGNORE2="$IGNORE2 --disable=C0122" #misplaced-comparison-constant +IGNORE1="$IGNORE1 --disable=W1308" #duplicate-string-formatting-argument +IGNORE1="$IGNORE1 --disable=W1309" #f-string-without-interpolation +IGNORE1="$IGNORE1 --disable=R1715" #consider-using-get +IGNORE1="$IGNORE1 --disable=R1718" #consider-using-set-comprehension +IGNORE1="$IGNORE1 --disable=R1721" #unnecessary-comprehension +IGNORE1="$IGNORE1 --disable=R1728" #consider-using-generator +IGNORE1="$IGNORE1 --disable=W1401" #anomalous-backslash-in-string + +IGNORE3="$IGNORE3 --disable=W1514" #unspecified-encoding +IGNORE3="$IGNORE3 --disable=R1734" #use-list-literal +IGNORE3="$IGNORE3 --disable=R1735" #use-dict-literal +IGNORE3="$IGNORE3 --disable=C0206" #consider-using-dict-items +IGNORE3="$IGNORE3 --disable=C0209" #consider-using-f-string +IGNORE3="$IGNORE3 --disable=C0207" #use-maxsplit-arg +IGNORE3="$IGNORE3 --disable=R0402" #consider-using-from-import +IGNORE3="$IGNORE3 --disable=W1406" #redundant-u-string-prefix +IGNORE3="$IGNORE3 --disable=R1729" #use-a-generator + +IGNORE3="$IGNORE3 --disable=C3001" #unnecessary-lambda-assignment + +exclude="apis/gnmi/openconfig apis/gnmi unused/" +exclude="$exclude apis/yang/autogen/bindings" +exclude="$exclude apis/yang/codegen/bindings" +exclude="$exclude apis/yang/codegen/test.py" +exclude="$exclude apis/yang/codegen/gnoi_bindings" +exclude="$exclude spytest/ddm/third-party" +exclude="$exclude tests/dell/infra/bgpcfgd_test.py" + +if [ "$LINT_EXCLUDE_KNOWN_FAILS" == "1" ]; then + exclude="$exclude apis/yang/codegen" + exclude="$exclude tests/dell" + exclude="$exclude tests/ut/acl_fbs" + exclude="$exclude 
tests/infra_ut/data_driven" + exclude="$exclude tests/systb" +fi + +LINT_PYVER=${LINT_PYVER:=3} +TMP_FOLD=$(mktemp -d) +trap "rm -rf $TMP_FOLD" EXIT +ERR_TEMP=$TMP_FOLD/err +DBG_FILE=lint_debug.log ERR_FILE=lint_errors.log +REP_FILE=lint_report.log + +if [ "$LINT_MODIFIED" == "1" ]; then + DBG_FILE=modified_lint_debug.log + ERR_FILE=modified_lint_errors.log + REP_FILE=modified_lint_report.log + #exclude="" + exclude="$exclude __init__.py" +fi + +#IGNORE1="$IGNORE1 --disable=W0612" #unused-variable if [ "$LINT_DAILY" == "1" ]; then - REPORT=daily_lint_report.log + DBG_FILE=daily_lint_debug.log ERR_FILE=daily_lint_errors.log - #IGNORE2="$IGNORE2 --disable=W0611" #unused-import - #IGNORE2="$IGNORE2 --disable=W0612" #unused-variable - IGNORE2="$IGNORE2 --disable=W0106" #expression-not-assigned - exclude="__init__.py ddm/ tests/ut/ tests/systb/ scheduler/ tests/dell" + REP_FILE=daily_lint_report.log + #IGNORE1="$IGNORE1 --disable=W0612" #unused-variable fi +IGNORE1="$IGNORE1 --disable=W0106" #expression-not-assigned +IGNORE1="$IGNORE1 --disable=W0631" #undefined-loop-variable +IGNORE1="$IGNORE1 --disable=R1704" #redefined-argument-from-local -IGNORE="$IGNORE1" -IGNORE="$IGNORE1 $IGNORE2" +RUFF_OPTS="$RUFF_OPTS --ignore E401" #multiple-imports-on-one-line +RUFF_OPTS="$RUFF_OPTS --ignore E401" #multiple-imports-on-one-line +RUFF_OPTS="$RUFF_OPTS --ignore E402" #module-import-not-at-top-of-file +RUFF_OPTS="$RUFF_OPTS --ignore E501" #line-too-long +RUFF_OPTS="$RUFF_OPTS --ignore E701" #multiple-statements +RUFF_OPTS="$RUFF_OPTS --ignore E702" #multiple-statements-on-one-line-semicolon +RUFF_OPTS="$RUFF_OPTS --ignore E703" #useless-semicolon +RUFF_OPTS="$RUFF_OPTS --ignore E713" #not-in-test +RUFF_OPTS="$RUFF_OPTS --ignore E722" #bare-except +RUFF_OPTS="$RUFF_OPTS --ignore E731" #lambda-assignment +RUFF_OPTS="$RUFF_OPTS --ignore E741" #ambiguous-variable-name +RUFF_OPTS="$RUFF_OPTS --ignore F541" #f-string-missing-placeholders +RUFF_OPTS="$RUFF_OPTS --ignore W191" #Indentation contains tabs +RUFF_OPTS="$RUFF_OPTS --ignore W292" #No newline at end of file +RUFF_OPTS="$RUFF_OPTS --ignore W191" #Indentation contains tabs +RUFF_OPTS="$RUFF_OPTS --ignore W291" #Trailing whitespace +RUFF_OPTS="$RUFF_OPTS --ignore W293" #Blank line contains whitespace +RUFF_OPTS="$RUFF_OPTS --ignore PLW2901" +RUFF_OPTS="$RUFF_OPTS --ignore PLW0603" +RUFF_OPTS="$RUFF_OPTS --ignore PLC1901" -LINT2="$dir/python -m pylint --rcfile=$dir/.pylintrc $IGNORE" -LINT3="$dir/python3 -m pylint --rcfile=$dir/.pylintrc $IGNORE" -#PYFLAKES="$dir/python -m pyflakes" -#FLAKE8="$dir/python -m flake8 --select F --ignore=F401,F841" -#FLAKE8="$dir/python -m flake8 --select F" +LINT_TOOL="${LINT_TOOL:-ruff}" +if [ "$LINT_TOOL" = "pylint" ]; then + LINT_PYVER=3 +elif [ "$LINT_TOOL" = "pylint2" ]; then + LINT_PYVER=2 +elif [ "$LINT_TOOL" = "pylint3" ]; then + LINT_PYVER=3 +elif [ "$LINT_TOOL" = "pyflakes" ]; then + PYFLAKES="$dir/python -m pyflakes" + LINT2=""; LINT3="" +elif [ "$LINT_TOOL" = "ruff" -o "$LINT_TOOL" = "ruff-only" ]; then + RUFF="$dir/python -m ruff --select=F,E,W,PLE,PLW,PLC" +fi + +if [ "$LINT_TOOL" != "pyflakes" ]; then + RCFILE=$dir/.pylintrc$LINT_PYVER + if [ ! 
-f $RCFILE ]; then + RCFILE=$TMP_FOLD/.pylintrc + touch $RCFILE + fi + + LINT2="timeout 300 $dir/python2 -m pylint --max-parents=8 --rcfile=$RCFILE $IGNORE1 $IGNORE2" + LINT3="timeout 300 $dir/python3 -m pylint --rcfile=$RCFILE $IGNORE1 $IGNORE3" + #LINT3="$LINT3 --load-plugins perflint --disable=W8205" #dotted-import-in-loop + #FLAKE8="$dir/python -m flake8 --select F --ignore=F401,F841" + #FLAKE8="$dir/python -m flake8 --select F" +fi if [ $# -eq 0 ]; then files1=$(find $ddir/spytest/ -name "*.py") @@ -134,63 +235,196 @@ else done fi +cmd="grep -q" +for ex in $exclude; do + cmd="$cmd -e $ex" +done files2="" for f in $files;do - skip=0 - for ex in $exclude; do - if grep -q $ex <<< $f; then - skip=1 - break - fi - done - [ $skip -eq 1 ] || files2="$files2 $f" + if [ -z "$exclude" ]; then + files2="$files2 $f" + elif ! $cmd <<< $f; then + files2="$files2 $f" + fi done -rm -f $REPORT $ERR_FILE $ERR_TEMP +rm -f $DBG_FILE $ERR_FILE $REP_FILE $ERR_TEMP $REP_FILE.err line="\--------------------------------------------------------------------" score="Your code has been rated at 10.00" using="Using config file " -date | tee -a $REPORT | tee -a $ERR_FILE + +print() +{ + echo $(date +'%Y-%d-%m %H:%M:%S') $* +} + +print "Start...." | tee -a $DBG_FILE | tee -a $ERR_FILE for f in $files2;do + OPT_LINT_PYVER=${LINT_PYVER:=3} + + OPTS=""; OPTS2=""; OPTS3="" + if [[ $f == *utilities/common.py ]]; then + OPTS3="$OPTS3 --disable=W1515" + fi + + if [[ $f == */yang/* || $f == */infra_ut/data_driven/* ]]; then + OPT_LINT_PYVER=3 + fi + + if [[ $f == */test_ut_boot_time.py ]]; then + OPT_LINT_PYVER=3 + fi + + if [[ $f == */ut/acl_fbs/* || $f == */ut/flexDpb/* || $f == */ut/gnmi/* ]]; then + OPT_LINT_PYVER=3 + fi + + if [[ $f == */yang/codegen/* || $f == */yang/autogen/* ]]; then + OPTS3="$OPTS3 --disable=E0401" + OPTS3="$OPTS3 --disable=W0404" #reimported + OPTS3="$OPTS3 --disable=W0611" #unused-import + OPTS3="$OPTS3 --disable=R1706" + OPTS3="$OPTS3 --disable=W0127" + RUFF_OPTS="$RUFF_OPTS --ignore F401" #unused-import + RUFF_OPTS="$RUFF_OPTS --ignore E101" #mixed indentation + RUFF_OPTS="$RUFF_OPTS --ignore W191" #Indentation contains tabs + RUFF_OPTS="$RUFF_OPTS --ignore W291" #Trailing whitespace + RUFF_OPTS="$RUFF_OPTS --ignore W293" #Blank line contains whitespace + RUFF_OPTS="$RUFF_OPTS --ignore F811" #reimported + fi + + if [[ $f == */dell/* ]]; then + export SPYLINT_PYTHONPATH=$ddir/tests/dell/platform + OPTS2="$OPTS2 --disable=W0403" #relative-import + OPTS="$OPTS --disable=W0611" #unused-import + OPTS="$OPTS --disable=W0612" #unused-variable + OPTS="$OPTS --disable=W0404" #reimported + OPTS="$OPTS --disable=C0121" #singleton-comparison + OPTS="$OPTS --disable=R0123" #literal-comparison + OPTS="$OPTS --disable=E1305" #too-many-format-args + OPTS="$OPTS --disable=C0113" #unneeded-not + RUFF_OPTS="$RUFF_OPTS --ignore E101" #mixed indentation + RUFF_OPTS="$RUFF_OPTS --ignore F401" #unused-import + RUFF_OPTS="$RUFF_OPTS --ignore F841" #unused-variable + RUFF_OPTS="$RUFF_OPTS --ignore E711" #none-comparison + RUFF_OPTS="$RUFF_OPTS --ignore E712" #true-false-comparison + RUFF_OPTS="$RUFF_OPTS --ignore E714" #true-false-comparison + RUFF_OPTS="$RUFF_OPTS --ignore F632" #literal-comparison + RUFF_OPTS="$RUFF_OPTS --ignore PLW0602" + fi + + if [[ $f == */systb/campus/* ]]; then + export SPYLINT_PYTHONPATH=$ddir/tests/systb/campus + basef=$(basename $f) + if [[ $basef != test_* ]]; then + continue + fi + OPTS2="$OPTS2 --disable=W0403" #relative-import + OPTS="$OPTS --disable=W0611" #unused-import + OPTS="$OPTS 
--disable=W0612" #unused-variable + OPTS="$OPTS --disable=W0404" #reimported + elif [[ $f == */systb/* ]]; then + basef=$(basename $f) + if [[ $basef != test_* ]]; then + continue + fi + if [[ $f == */systb/dc/vxlan/* ]]; then + systb_dc=$ddir/tests/systb/dc + systb_vxlan=$systb_dc/vxlan + export SPYLINT_PYTHONPATH=$systb_dc:$systb_vxlan + fi + #dirf1=$(cd $(dirname $f);pwd -P) + #dirf2=$(cd $(dirname $f)/..;pwd -P) + #export SPYLINT_PYTHONPATH=$dirf1:$dirf2:$dirf1/st_common:$dirf2/st_common + OPT_LINT_PYVER=3 + OPTS="$OPTS --disable=E0611" #no-name-in-module + OPTS2="$OPTS2 --disable=W0403" #relative-import + OPTS="$OPTS --disable=W0611" #unused-import + OPTS="$OPTS --disable=W0612" #unused-variable + OPTS="$OPTS --disable=W0404" #reimported + fi + + if [[ $f == */ddm/* ]]; then + OPTS="$OPTS --disable=C0121" #singleton-comparison + OPTS="$OPTS --disable=R0123" #literal-comparison + OPTS="$OPTS --disable=W0611" #unused-import + OPTS="$OPTS --disable=W0612" #unused-variable + OPTS="$OPTS --disable=W0404" #reimported + RUFF_OPTS="$RUFF_OPTS --ignore E101" #mixed indentation + RUFF_OPTS="$RUFF_OPTS --ignore E711" #none-comparison + RUFF_OPTS="$RUFF_OPTS --ignore E712" #true-false-comparison + RUFF_OPTS="$RUFF_OPTS --ignore F632" #literal-comparison + RUFF_OPTS="$RUFF_OPTS --ignore F401" #unused-import + RUFF_OPTS="$RUFF_OPTS --ignore F841" #unused-variable + RUFF_OPTS="$RUFF_OPTS --ignore F811" #reimported + fi + + if [ -n "$RUFF" ]; then + $RUFF $RUFF_OPTS $f 2>&1 > $ERR_TEMP + grep -q "may be undefined, or defined from star imports" $ERR_TEMP >/dev/null + if [ $? -ne 0 ]; then + print ================== RUFF $f | tee -a $DBG_FILE + lc=$(wc -l < $ERR_TEMP) + if [ $lc -gt 0 ]; then + print ================== RUFF $f | tee -a $ERR_FILE + cat $ERR_TEMP | tee -a $DBG_FILE $ERR_FILE + print $lc $f | tee -a $REP_FILE + fi + continue + elif [ "$LINT_TOOL" = "ruff-only" ]; then + echo "Try with pylint $f" | tee -a $REP_FILE.err + continue + else + echo "Using pylint $f" + fi + fi + if [ -n "$FLAKE8" ]; then - echo ================== FLAKES8 $f | tee -a $REPORT - $FLAKE8 $f 2>&1 | tee -a $REPORT + print ================== FLAKES8 $f | tee -a $DBG_FILE + $FLAKE8 $f 2>&1 | tee -a $DBG_FILE fi if [ -n "$PYFLAKES" ]; then - echo ================== PYFLAKES $f | tee -a $REPORT - $PYFLAKES $f 2>&1 | tee -a $REPORT + print ================== PYFLAKES $f | tee -a $DBG_FILE + $PYFLAKES $f 2>&1 | tee -a $DBG_FILE fi if [ -z "$LINT2" -a -z "$LINT3" ]; then continue fi - if [ $LINT_PYVER -eq 2 ]; then - echo ================== PYLINT2 $f | tee -a $REPORT - $LINT2 $f 2>&1 | grep -v "$using" | tee -a $REPORT - continue - fi - if [ $LINT_PYVER -eq 3 ]; then - echo ================== PYLINT3 $f | tee -a $REPORT - $LINT3 $f 2>&1 | grep -v "$using" | tee -a $REPORT - continue - fi - $LINT3 $f 2>&1 | grep -v "$using" | grep -v $line > $ERR_TEMP - grep -q "$score" $ERR_TEMP >/dev/null - if [ $? -ne 0 ]; then - echo ================== PYLINT3 $f | tee -a $REPORT - echo ================== PYLINT3 $f | tee -a $ERR_FILE - cat $ERR_TEMP | tee -a $REPORT $ERR_FILE - continue + if [ $OPT_LINT_PYVER -eq 3 -o $OPT_LINT_PYVER -eq 0 ]; then + #echo $LINT3 $OPTS $OPTS3 $f + $LINT3 $OPTS $OPTS3 $f 2>&1 | grep -v "$using" | grep -v $line > $ERR_TEMP + lc=$(wc -l < $ERR_TEMP) + grep -q "$score" $ERR_TEMP >/dev/null + if [ $? 
-ne 0 -a $lc -gt 0 ]; then + print ================== PYLINT3 $f | tee -a $DBG_FILE >/dev/null + print ================== PYLINT3 $f | tee -a $ERR_FILE + cat $ERR_TEMP | tee -a $DBG_FILE $ERR_FILE + print $lc $f | tee -a $REP_FILE + continue + fi fi - $LINT2 $f 2>&1 | grep -v "$using" | grep -v $line > $ERR_TEMP - grep -q "$score" $ERR_TEMP >/dev/null - if [ $? -ne 0 ]; then - echo ================== PYLINT2 $f | tee -a $REPORT - echo ================== PYLINT2 $f | tee -a $ERR_FILE - cat $ERR_TEMP | tee -a $REPORT $ERR_FILE - continue + if [ $OPT_LINT_PYVER -eq 2 -o $OPT_LINT_PYVER -eq 0 ]; then + #echo $LINT2 $OPTS $OPTS2 $f + $LINT2 $OPTS $OPTS2 $f 2>&1 | grep -v "$using" | grep -v $line > $ERR_TEMP + lc=$(wc -l < $ERR_TEMP) + grep -q "$score" $ERR_TEMP >/dev/null + if [ $? -ne 0 -a $lc -gt 0 ]; then + print ================== PYLINT2 $f | tee -a $DBG_FILE >/dev/null + print ================== PYLINT2 $f | tee -a $ERR_FILE + cat $ERR_TEMP | tee -a $DBG_FILE $ERR_FILE + print $lc $f | tee -a $REP_FILE + continue + fi fi - echo ================== PYLINT $f | tee -a $REPORT + print ================== PYLINT $f | tee -a $DBG_FILE done +if [ -f $REP_FILE ]; then + mv $REP_FILE $ERR_TEMP + sort -rnk3 $ERR_TEMP > $REP_FILE + err_count=$(awk -F" " '{x+=$3}END{print x}' $REP_FILE) +fi +print "================ COMPLETED $err_count ==================" >> $DBG_FILE +print "================ COMPLETED $err_count ==================" >> $ERR_FILE diff --git a/spytest/bin/python b/spytest/bin/python index de93f2602c..73023b5a76 100755 --- a/spytest/bin/python +++ b/spytest/bin/python @@ -4,14 +4,22 @@ dir=$(dirname $0) dir=$(cd $dir;pwd -P) ddir=$(cd $dir/..;pwd -P) -# sourde environment +# source environment . $dir/env -if [ "$SPYTEST_PYTHON_VERSION" != "3.6.6" -a "$SPYTEST_PYTHON_VERSION" != "3.7.1" ]; then - PYTHONPATH=$($SPYTEST_PYTHON -c 'import site; print(site.getsitepackages()[0])') +VER=$($SPYTEST_PYTHON --version 2>&1 | cut -d . 
-f1) +if [ "$VER" != "Python 3" ]; then + PYTHONPATH=$($SPYTEST_PYTHON -c 'import site; print(site.getsitepackages()[0])') fi -export PYTHONPATH=$PYTHONPATH:$ddir + +NEW_PYTHONPATH=$ddir +if [ -n "$SPYLINT_PYTHONPATH" ]; then + NEW_PYTHONPATH=$NEW_PYTHONPATH:$SPYLINT_PYTHONPATH +fi +if [ -n "$PYTHONPATH" ]; then + NEW_PYTHONPATH=$NEW_PYTHONPATH:$PYTHONPATH +fi +export PYTHONPATH=$NEW_PYTHONPATH export PATH=$dir:$PATH exec $SPYTEST_PYTHON "$@" - diff --git a/spytest/bin/python2 b/spytest/bin/python2 new file mode 100755 index 0000000000..2bc28c2c87 --- /dev/null +++ b/spytest/bin/python2 @@ -0,0 +1,4 @@ +#!/bin/sh + +export SPYTEST_PYTHON_VERSION=2.7.14 +exec $(dirname $0)/python "$@" diff --git a/spytest/bin/python3 b/spytest/bin/python3 index db356b709e..42f1eef454 100755 --- a/spytest/bin/python3 +++ b/spytest/bin/python3 @@ -1,5 +1,4 @@ #!/bin/sh -export SPYTEST_PYTHON_VERSION=3.7.1 -export SPYTEST_PYTHON_VERSION=3.6.6 +export SPYTEST_PYTHON_VERSION=3.8.12 exec $(dirname $0)/python "$@" diff --git a/spytest/bin/requirements0.txt b/spytest/bin/requirements0.txt deleted file mode 100644 index 52f7a24259..0000000000 --- a/spytest/bin/requirements0.txt +++ /dev/null @@ -1,24 +0,0 @@ -pyfiglet -pylint==1.8.1 -textfsm -netmiko==2.4.2 -pytest>=4.4.1,<=4.6.5 -pytest-timeout -pytest-xdist==1.28.0 -gitpython -ansible -jinja2 -future>=0.16.0 -psutil -prettytable -tabulate -pycryptodome -cryptography >= 2.5 -natsort -redis -requests -jsonpatch -rpyc -Pyro4 -scapy==2.4.3rc1 -netaddr diff --git a/spytest/bin/requirements1.txt b/spytest/bin/requirements1.txt deleted file mode 100644 index 95028ee64b..0000000000 --- a/spytest/bin/requirements1.txt +++ /dev/null @@ -1,13 +0,0 @@ -pyflakes -flake8 -autoflake -autopep8 -scapy==2.4.3rc1 -sphinx -python-jenkins -jira -pysnmp -pyang -mmh3 -Pyro4 -protobuf diff --git a/spytest/bin/spytest b/spytest/bin/spytest index ccbda289b9..f9d92cbc9e 100755 --- a/spytest/bin/spytest +++ b/spytest/bin/spytest @@ -2,18 +2,18 @@ ''':' export LD_LIBRARY_PATH=/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH -exec $(dirname $0)/python "$0" "$@" +exec $(dirname $0)/python $SPYTEST_PYTHON_ARGS "$0" "$@" ''' -import os, re, sys, warnings - -warnings.filterwarnings(action='ignore', module='.*paramiko.*') +import os, re, sys root = os.path.join(os.path.dirname(__file__), '..') root = os.path.abspath(root) +tests = os.path.join(root, "tests") sys.path.append(os.path.join(root)) sys.path.append(os.path.join(root, "apis")) os.environ["SPYTEST_USER_ROOT"] = os.path.abspath(".") +os.environ["PYTHONDONTWRITEBYTECODE"] = "1" if __name__ == '__main__': arg_list = [] @@ -27,7 +27,9 @@ if __name__ == '__main__': sys.argv.insert(1, "-c") sys.argv.insert(2, os.path.join(root, "spytest", "pytest.ini")) sys.argv.extend(["-p", "spytest.splugin"]) - - os.chdir(os.path.join(root, "tests")) + sys.argv.extend(["-W", "once::DeprecationWarning"]) + sys.argv.extend(["--rootdir", tests]) + + os.chdir(tests) from spytest.main import main sys.exit(main()) diff --git a/spytest/bin/spytest2 b/spytest/bin/spytest2 new file mode 100755 index 0000000000..07923844d6 --- /dev/null +++ b/spytest/bin/spytest2 @@ -0,0 +1,4 @@ +#!/bin/sh + +export SPYTEST_PYTHON_VERSION=2.7.14 +exec $(dirname $0)/spytest "$@" diff --git a/spytest/bin/spytest3 b/spytest/bin/spytest3 index 5dc2c7777e..f2ae764ee2 100755 --- a/spytest/bin/spytest3 +++ b/spytest/bin/spytest3 @@ -1,5 +1,4 @@ #!/bin/sh -export SPYTEST_PYTHON_VERSION=3.6.6 -export SPYTEST_PYTHON_VERSION=3.7.1 +export SPYTEST_PYTHON_VERSION=3.8.12 exec $(dirname $0)/spytest "$@" diff 
diff --git a/spytest/bin/tgen_folders.txt b/spytest/bin/tgen_folders.txt
index 932f5c5771..afff04363c 100644
--- a/spytest/bin/tgen_folders.txt
+++ b/spytest/bin/tgen_folders.txt
@@ -11,8 +11,6 @@
 /projects/scid/tgen/ixia/8.42/lib/hltapi/library/common/ixiangpf/perl
 /projects/scid/tgen/ixia/8.42/lib/hltapi/library/common/ixiangpf/python
 /projects/scid/tgen/ixia/8.42/lib/hltapi/library/common/ixiangpf/python/ixiangpf_commands
-/projects/scid/tgen/ixia/8.42/lib/hltapi/library/common/ixiangpf/python/ixiangpf_commands/__pycache__
-/projects/scid/tgen/ixia/8.42/lib/hltapi/library/common/ixiangpf/python/__pycache__
 /projects/scid/tgen/ixia/8.42/lib/hltapi/library/common/robot_plugin
 /projects/scid/tgen/ixia/8.42/lib/hltapi/library/utracker
 /projects/scid/tgen/ixia/8.42/lib/hltapi/bin
@@ -26,7 +24,6 @@
 /projects/scid/tgen/ixia/8.42/lib/ixtc
 /projects/scid/tgen/ixia/8.42/lib/sftp
 /projects/scid/tgen/ixia/8.42/lib/PythonApi
-/projects/scid/tgen/ixia/8.42/lib/PythonApi/__pycache__
 /projects/scid/tgen/ixia/8.42/lib/RubyApi
 /projects/scid/tgen/ixia/8.42/lib/IxTclProtocol
 /projects/scid/tgen/ixia/8.42/lib/IxTclProtocol/Generic
@@ -149,7 +146,6 @@
 /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode
 /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/hltapiPythonWrapper
 /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/hltapiForPython
-/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/hltapiForPython/__pycache__
 /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/sqllibraries
 /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/sqllibraries/SunOS
 /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/sqllibraries/Linux
@@ -800,7 +796,6 @@
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.00.1977.39/library/common/ixiangpf/perl
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.00.1977.39/library/common/ixiangpf/python
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.00.1977.39/library/common/ixiangpf/python/ixiangpf_commands
-/projects/scid/tgen/ixia/all/ixia/hlapi/9.00.1977.39/library/common/ixiangpf/python/ixiangpf_commands/__pycache__
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.00.1977.39/library/common/robot_plugin
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.00.1977.39/library/common/ixia_hl_lib-9.00
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.00.1977.39/library/utracker
@@ -815,11 +810,24 @@
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.10.2007.43/library/common/ixiangpf/perl
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.10.2007.43/library/common/ixiangpf/python
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.10.2007.43/library/common/ixiangpf/python/ixiangpf_commands
-/projects/scid/tgen/ixia/all/ixia/hlapi/9.10.2007.43/library/common/ixiangpf/python/ixiangpf_commands/__pycache__
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.10.2007.43/library/common/robot_plugin
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.10.2007.43/library/common/ixia_hl_lib-9.10
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.10.2007.43/library/utracker
 /projects/scid/tgen/ixia/all/ixia/hlapi/9.10.2007.43/bin
+/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8
+/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/library
+/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/library/dependencies
+/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/library/common
+/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/library/common/ixia_hl_lib-9.31 +/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/library/common/ixiangpf +/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/library/common/ixiangpf/tcl +/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/library/common/ixiangpf/documentation +/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/library/common/ixiangpf/perl +/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/library/common/ixiangpf/python +/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/library/common/ixiangpf/python/ixiangpf_commands +/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/library/common/robot_plugin +/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/library/utracker +/projects/scid/tgen/ixia/all/ixia/hlapi/9.31.2301.8/bin /projects/scid/tgen/ixia/all/ixia/hlapi/8.42.0.6 /projects/scid/tgen/ixia/all/ixia/hlapi/8.42.0.6/library /projects/scid/tgen/ixia/all/ixia/hlapi/8.42.0.6/library/dependencies @@ -846,7 +854,9 @@ /projects/scid/tgen/ixia/all/ixia/ixos-api/9.10.16.6/lib/ixTcl1.0 /projects/scid/tgen/ixia/all/ixia/ixos-api/9.10.16.6/lib/ixTcl1.0/Dap /projects/scid/tgen/ixia/all/ixia/ixos-api/9.10.16.6/lib/ixTcl1.0/Generic +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.10.16.6/Results /projects/scid/tgen/ixia/all/ixia/ixos-api/9.10.16.6/bin +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.10.16.6/Logs /projects/scid/tgen/ixia/all/ixia/ixos-api/9.00.0.20 /projects/scid/tgen/ixia/all/ixia/ixos-api/9.00.0.20/lib /projects/scid/tgen/ixia/all/ixia/ixos-api/9.00.0.20/lib/Scriptgen @@ -858,6 +868,18 @@ /projects/scid/tgen/ixia/all/ixia/ixos-api/9.00.0.20/lib/ixTcl1.0/Dap /projects/scid/tgen/ixia/all/ixia/ixos-api/9.00.0.20/lib/ixTcl1.0/Generic /projects/scid/tgen/ixia/all/ixia/ixos-api/9.00.0.20/bin +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.31.35.1 +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.31.35.1/lib +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.31.35.1/lib/sftp_openssh +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.31.35.1/lib/Scriptgen +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.31.35.1/lib/ssh +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.31.35.1/lib/sftp +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.31.35.1/lib/ixTclServices +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.31.35.1/lib/ixTclServices/Generic +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.31.35.1/lib/ixTcl1.0 +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.31.35.1/lib/ixTcl1.0/Dap +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.31.35.1/lib/ixTcl1.0/Generic +/projects/scid/tgen/ixia/all/ixia/ixos-api/9.31.35.1/bin /projects/scid/tgen/ixia/all/ixia/ixos-api/8.42.6.1 /projects/scid/tgen/ixia/all/ixia/ixos-api/8.42.6.1/lib /projects/scid/tgen/ixia/all/ixia/ixos-api/8.42.6.1/lib/Scriptgen @@ -1239,6 +1261,401 @@ /projects/scid/tgen/ixia/all/ixia/ixnetwork/9.00.1919.80/SampleScripts/IxNetwork/NGPF/REST/Multicast /projects/scid/tgen/ixia/all/ixia/ixnetwork/9.00.1919.80/SampleScripts/IxNetwork/NGPF/REST/Statistics /projects/scid/tgen/ixia/all/ixia/ixnetwork/9.00.1919.80/SampleScripts/IxNetwork/NGPF/REST/Interfaces +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/JSON +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/JSON/backportPP 
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/JSON/PP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/LWP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/LWP/Authen +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/LWP/Debug +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/LWP/Protocol +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/IPv6Sock +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/Try +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/HTTP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/HTTP/Cookies +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/HTTP/Headers +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/HTTP/Request +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/Time +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/Protocol +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/Protocol/WebSocket +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/Protocol/WebSocket/Cookie +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/Protocol/WebSocket/Handshake +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/URI +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/URI/urn +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PerlApi/dependencies/URI/file +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PythonApi +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PythonApi/dependencies +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PythonApi/dependencies/websocket +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PythonApi/dependencies/websocket/tests +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/PythonApi/dependencies/websocket/tests/data +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/RubyApi +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclNetwork +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclNetwork/dependencies +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclNetwork/dependencies/json +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclNetwork/dependencies/json/c +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclNetwork/dependencies/json/tests +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclNetwork/dependencies/sha1 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclNetwork/dependencies/websocket +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclNetwork/dependencies/base64 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclNetwork/dependencies/log +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclNetwork/dependencies/log/msgs +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclNetwork/Generic +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxPublisher 
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclNetworkConnector +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/ixtc +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclProtocol +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTclProtocol/Generic +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/IxTcl1.0 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/scriptgen-protocols +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/lib/TclApi/scriptgen-protocols/Protocols +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/bin +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/CommonUtils +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/AuthAccess +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/AuthAccess/PPP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/AuthAccess/TWAMP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/AuthAccess/IP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Traffic +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Traffic/QuickStreams +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Traffic/CP-DP Convergence +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Traffic/AdvancedTraffic +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Traffic/FrameRateOnTheFly +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Traffic/PacketEditor +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Traffic/API-TrafficItem +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Traffic/Dscp-Tos +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Traffic/SV-Api +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/SDN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/SDN/OpenFlow +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/RoutingSwitching +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/RoutingSwitching/ISIS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/RoutingSwitching/ISIS/isisMt_5.50 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/RoutingSwitching/BGP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/RoutingSwitching/BGP/bgpRouteImport +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/RoutingSwitching/BGP/bgpAd_5.50 
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/RoutingSwitching/CFM-ITU-PBB-TE +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/RoutingSwitching/LACP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/RoutingSwitching/LACP/polacp_5.50 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/RoutingSwitching/posm_5.50 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/RoutingSwitching/OSPF +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/RoutingSwitching/rateControl_5.50 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Framework +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Framework/ipv6gateway_5.50 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Framework/InterFace&PortManagement +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Framework/InterFace&PortManagement/Port-Management +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Framework/InterFace&PortManagement/ipv4-ipv6-interfaces +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Framework/AdvAES_5.50 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Framework/ProtocolStats+LearnedInfo +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/MPLS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/MPLS/RSVP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/MPLS/RSVP/rsvpEnh_5.50 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/MPLS/RSVP/RSVP-GR +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/MPLS/MVPN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/MPLS/MVPN/P2MP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/MPLS/MVPN/mvpnNew_5.50 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/MPLS/LDP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/MPLS/LDP/LDP-PW +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/IntegtatedTest +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/TestRun +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/IxReporter +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/QuickTest +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Multicast +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Multicast/MLD +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Multicast/MLD/Mld-Router +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Multicast/IGMP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Multicast/IGMP/Igmp-Router 
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Multicast/PIM-BSR +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Statistics +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Statistics/generic +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Tcl/Statistics/custom-view +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Perl +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Perl/RoutingSwitching +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Perl/RoutingSwitching/OSPF +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Python +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Python/RoutingSwitching +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Python/RoutingSwitching/OSPF +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Ruby +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Ruby/RoutingSwitching +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/Ruby/RoutingSwitching/OSPF +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/REST +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/REST/IxReporter +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/REST/Statistics +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/Classic/REST/Statistics/custom-view +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/VM +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/VM/REST +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/CarrierEthernet +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/CarrierEthernet/CFM +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Traffic +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/ISIS-SR-MS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/BGP-EPE +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/ISIS-SRLG-LINK-PROTECTION +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/TWAMP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/BIER +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/NGPF-OpenFlowController +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/BGP-Flowspec +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/Netconf +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/L3vpn-G-SRv6 
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/OSPFv3-SRv6 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/OSPF-SR +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/FLEXALGO-SRv6 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/BGP-SR-Policy +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/L3vpn-Over-SRv6 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/OVSDB-Controller +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/NGPF-OpenFlowSwitch +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/PCEP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/SRv6-OAM +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/BGP-LS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/gRIBI +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/BGP-SR +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/SDN/ISIS-SR +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/RoutingSwitching +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/RoutingSwitching/BFD +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/RoutingSwitching/ISIS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/RoutingSwitching/BGP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/RoutingSwitching/BFDoverVXLAN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/RoutingSwitching/OSPF +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Timing and Synchronization +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/MPLS-VPN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/MPLS-VPN/Inter-AS-option-C +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/MPLS-VPN/MPLSOAM +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/MPLS-VPN/MPLSOAM/S-BFD +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/MPLS-VPN/NG-MVPN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/MPLS-VPN/H-L3VPN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/MPLS-VPN/EVPN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Link Aggregation +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Access +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Access/PPP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Access/CUSP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Access/L2TP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Access/IEEE802.1x 
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Access/BondedGRE +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Access/TLV +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Access/DHCP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Framework +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/MPLS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/MPLS/BGP_RFC3107 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/MPLS/RSVP_P2MP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/MPLS/RSVP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/MPLS/LDP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/L2 Security +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/AVB +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/AVB/MSRP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/AVB/gPTP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/QuickTest +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Capture +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Multicast +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Multicast/MLD +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Multicast/IGMP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Multicast/PIM +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Multicast/IPTV +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Statistics +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/DataCenter +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/DataCenter/EVPN-VxLAN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/DataCenter/FabricPath +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/DataCenter/VxLAN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/DataCenter/TRILL +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/DataCenter/VxLANv6 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/Interfaces +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Tcl/ESMC +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/CarrierEthernet +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/CarrierEthernet/CFM +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Traffic +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN 
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/ISIS-SR-MS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/BGP-EPE +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/ISIS-SRLG-LINK-PROTECTION +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/BIER +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/NGPF-OpenFlowController +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/BGP-Flowspec +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/Netconf +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/NGPF_OpenFlowSwitch +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/OSPF-SR +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/BGP-SR-Policy +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/OVSDB-Controller +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/PCEP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/BGP-LS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/gRIBI +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/BGP-SR +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/SDN/ISIS-SR +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/RoutingSwitching +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/RoutingSwitching/BFD +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/RoutingSwitching/ISIS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/RoutingSwitching/BGP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/RoutingSwitching/OSPF +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Timing and Synchronization +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/MPLS-VPN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/MPLS-VPN/Inter-AS-option-C +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/MPLS-VPN/NG-MVPN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/MPLS-VPN/H-L3VPN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/MPLS-VPN/EVPN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Link Aggregation +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Access +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Access/PPP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Access/L2TP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Access/IEEE802.1x +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Access/BondedGRE 
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Access/TLV +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Access/DHCP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/MPLS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/MPLS/BGP_RFC3107 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/MPLS/RSVP_P2MP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/MPLS/RSVP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/MPLS/LDP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/L2 Security +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/AVB +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/AVB/MSRP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Capture +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Multicast +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Multicast/MLD +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Multicast/IGMP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Multicast/PIM +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Multicast/IPTV +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Statistics +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/DataCenter +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/DataCenter/EVPN-VxLAN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/DataCenter/FabricPath +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/DataCenter/VxLAN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/DataCenter/TRILL +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/DataCenter/VxLANv6 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/Interfaces +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Perl/ESMC +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/CarrierEthernet +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/CarrierEthernet/CFM +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Traffic +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/NGPF-OFSwitch +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/ISIS-SR-MS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/BGP-EPE +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/ISIS-SRLG-LINK-PROTECTION 
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/TWAMP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/BIER +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/BGP-Flowspec +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/NGPF-OFController +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/Netconf +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/L3vpn-G-SRv6 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/OSPFv3-SRv6 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/OSPF-SR +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/FLEXALGO-SRv6 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/BGP-SR-Policy +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/L3vpn-over-SRv6 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/OVSDB-Controller +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/PCEP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/SRv6-OAM +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/BGP-LS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/gRIBI +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/BGP-SR +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/SDN/ISIS-SR +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/RoutingSwitching +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/RoutingSwitching/BFD +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/RoutingSwitching/ISIS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/RoutingSwitching/BGP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/RoutingSwitching/BFDoverVXLAN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/RoutingSwitching/OSPF +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Timing and Synchronization +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/MPLS-VPN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/MPLS-VPN/Inter-AS-option-C +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/MPLS-VPN/MPLSOAM +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/MPLS-VPN/MPLSOAM/S-BFD +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/MPLS-VPN/NG-MVPN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/MPLS-VPN/H-L3VPN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/MPLS-VPN/EVPN 
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Link Aggregation +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Access +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Access/PPP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Access/ANCP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Access/CUSP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Access/IEEE802.1x +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Access/BondedGRE +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Access/TLV +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Access/DHCP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Framework +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Framework/multivalue +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/MPLS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/MPLS/BGP_RFC3107 +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/MPLS/RSVP_P2MP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/MPLS/RSVP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/MPLS/LDP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/L2 Security +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/AVB +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/AVB/MSRP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/AVB/gPTP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Capture +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Multicast +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Multicast/MLD +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Multicast/IGMP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Multicast/PIM +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Multicast/IPTV +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Statistics +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/DataCenter +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/DataCenter/EVPN-VxLAN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/DataCenter/FabricPath +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/DataCenter/VxLAN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/DataCenter/TRILL +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/DataCenter/VxLANv6 
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/Interfaces +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Python/ESMC +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Traffic +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/SDN +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/SDN/OSPF-SR +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/SDN/PCEP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/SDN/BGP-LS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/RoutingSwitching +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/RoutingSwitching/BFD +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/RoutingSwitching/ISIS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/RoutingSwitching/BGP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/RoutingSwitching/OSPF +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Link Aggregation +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Access +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Access/PPP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Access/ANCP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Access/TLV +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Access/DHCP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/MPLS +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/MPLS/RSVP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/MPLS/LDP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/AVB +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/AVB/MSRP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/AVB/gPTP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Capture +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Multicast +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Multicast/MLD +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Multicast/IGMP +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Multicast/PIM +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Multicast/IPTV +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Statistics +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/DataCenter +/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/DataCenter/FabricPath 
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/DataCenter/VxLAN
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/DataCenter/TRILL
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/Ruby/Interfaces
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/Traffic
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/SDN
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/SDN/gRIBI
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/RoutingSwitching
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/RoutingSwitching/BFD
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/RoutingSwitching/ISIS
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/RoutingSwitching/BGP
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/RoutingSwitching/OSPF
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/Link Aggregation
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/Access
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/Access/DHCP
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/capture
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/Multicast
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/Statistics
+/projects/scid/tgen/ixia/all/ixia/ixnetwork/9.31.2302.4/SampleScripts/IxNetwork/NGPF/REST/Interfaces
 /projects/scid/tgen/ixia/all/ixia/ixnetwork/8.42.1250.2
 /projects/scid/tgen/ixia/all/ixia/ixnetwork/8.42.1250.2/lib
 /projects/scid/tgen/ixia/all/ixia/ixnetwork/8.42.1250.2/lib/IxTclNetwork
diff --git a/spytest/bin/tools_install.sh b/spytest/bin/tools_install.sh
index f759066f32..00f9586f98 100755
--- a/spytest/bin/tools_install.sh
+++ b/spytest/bin/tools_install.sh
@@ -1,20 +1,23 @@
 #!/bin/bash
 
-set -x
+mkdir -p /tmp/$USER
+exec &> >(tee /tmp/$USER/tools_install.log)
+
+#export SCID=/tmp/projects/scid
 
 cat << EOF
     The following files are expected to be present in /project/scid/install
    ActiveTcl-8.5.19.8519-x86_64-linux-glibc-2.5-403583.tar.gz
    ActivePython-2.7.14.2717-linux-x86_64-glibc-2.12-404899.tar.gz
-   ActivePython-3.6.6.3606-linux-x86_64-glibc-2.12.tar.gz
-   ActivePython-3.7.1.0000-linux-x86_64-glibc-2.12-b2ae37a5.tar.gz
+   all_ixia.tar.gz
+   all_stc.tar.gz
 EOF
 
 dir=$(dirname $0)
 dir=$(cd $dir;pwd -P)
 scid=$(cd $dir/..;pwd -P)
 
-# sourde environment
+# source environment
 . $dir/env
 
 if [ -f $dir/.tools_env ]; then
@@ -26,13 +29,15 @@ pushd $SCID/install
 
 untar()
 {
-    file=$1
+    here=$PWD
+    file=$1;shift
+    in=$1
     if [ ! -f $file ]; then
         echo "$file not exists"
         if [ -f $dir/.tools_env ]; then
             bfile=$(basename $file)
             if [ -n "$PKG_URL" ]; then
-                wget -O /tmp/$bfile $PKG_URL/$file /tmp/$bfile
+                wget --inet4-only -O /tmp/$bfile $PKG_URL/$file /tmp/$bfile
             elif [ -n "$PKG_SERVER" ]; then
                 sshpass -p $PKG_PASS scp -o StrictHostKeyChecking=no $PKG_USER@$PKG_SERVER:$PKG_ROOT/$file /tmp/$bfile
             fi
@@ -43,7 +48,9 @@
             exit 1
         fi
     fi
-    tar -zxf $file
+    [ -n "$in" ] && pushd $in
+    tar -zxf $here/$file
+    [ -n "$in" ] && popd
 }
 
 install_tcl64_85()
@@ -77,139 +84,54 @@ install_python2()
         popd
         popd
         rm -rf ActivePython-2.7.14.2717-linux-x86_64-glibc-2.12-404899
+        export SCID_PYTHON_BIN=""
+        export SPYTEST_PYTHON_VERSION=2.7.14
         $dir/upgrade_requirements.sh
     fi
     popd
 }
 
-reinstall_python2()
-{
-    src="2.7.14"
-    pushd $SCID/tools/ActivPython
-    dst=$(readlink current)
-    rm -rf $src.old
-    mv $src $src.old
-    if [ "$dst" = "$src" ]; then
-        rm current; ln -s $src.old current
-    fi
-    popd
-    install_python2
-    pushd $SCID/tools/ActivPython
-    if [ "$dst" = "$src" ]; then
-        rm current;ln -s $src current
-    fi
-    rm -rf $src.old
-    popd
-    $dir/upgrade_requirements.sh
-}
-
-install_python366()
-{
-    pushd $SCID/install
-    INSTALL=$SCID/tools/ActivPython/3.6.6; rm -rf $INSTALL
-    untar ActivePython-3.6.6.3606-linux-x86_64-glibc-2.12.tar.gz
-    pushd ActivePython-3.6.6.3606-linux-x86_64-glibc-2.12-*
-    ./install.sh -v -I $INSTALL
-    pushd $SCID/tools/ActivPython
-    [ -f 3.6.6/bin/python ] || ln -s python3 3.6.6/bin/python
-    cp -rf $SCID/tools/ActivTcl/current/lib/tclx8.4/ 3.6.6/lib/
-    popd
-    popd
-    rm -rf ActivePython-3.6.6.3606-linux-x86_64-glibc-2.12-*
-    export SPYTEST_PYTHON_VERSION=3.6.6
-    $dir/upgrade_requirements.sh
-    popd
-}
-
-install_python371()
+install_python3()
 {
     pushd $SCID/install
-    INSTALL=$SCID/tools/ActivPython/3.7.1; rm -rf $INSTALL
-    untar ActivePython-3.7.1.0000-linux-x86_64-glibc-2.12-b2ae37a5.tar.gz
-    pushd ActivePython-3.7.1.0000-linux-x86_64-glibc-2.12-*
+    INSTALL=$SCID/tools/ActivPython/3.8.8; rm -rf $INSTALL
+    untar ActivePython-3.8.8.0000-linux-x86_64-glibc-2.17-5222f37a.tar.gz
+    pushd ActivePython-3.8.8.0000-linux-x86_64-glibc-2.17-*
     ./install.sh -v -I $INSTALL
     pushd $SCID/tools/ActivPython
-    [ -f 3.7.1/bin/python ] || ln -s python3 3.7.1/bin/python
-    cp -rf $SCID/tools/ActivTcl/current/lib/tclx8.4/ 3.7.1/lib/
+    [ -f 3.8.8/bin/python ] || ln -s python3 3.8.8/bin/python
+    cp -rf $SCID/tools/ActivTcl/current/lib/tclx8.4/ 3.8.8/lib/
     popd
     popd
-    rm -rf ActivePython-3.7.1.0000-linux-x86_64-glibc-2.12-*
-    export SPYTEST_PYTHON_VERSION=3.7.1
+    rm -rf ActivePython-3.8.8.0000-linux-x86_64-glibc-2.17-*
+    export SCID_PYTHON_BIN=""
+    export SPYTEST_PYTHON_VERSION=3.8.8
    $dir/upgrade_requirements.sh
     popd
 }
 
-install_python3()
-{
-    install_python366
-    #install_python371
-}
-
-reinstall_python3xx()
-{
-    src=$1
-    pushd $SCID/tools/ActivPython
-    dst=$(readlink current)
-    rm -rf $src.old
-    mv $src $src.old
-    if [ "$dst" = "$src" ]; then
-        rm current; ln -s $src.old current
-    fi
-    popd
-    [ src = "3.6.6" ] && install_python366
-    [ src = "3.7.1" ] && install_python371
-    pushd $SCID/tools/ActivPython
-    if [ "$dst" = "$src" ]; then
-        rm current;ln -s $src current
-    fi
-    rm -rf $src.old
-    popd
-}
-
-reinstall_python3()
-{
-    reinstall_python3xx "3.6.6"
-    #reinstall_python3xx "3.7.1"
-}
-
-install_ixia_842()
-{
-    mkdir -p $SCID/tgen/ixia/
-    pushd $SCID/tgen/ixia/
-    rm -f 8.42
-    untar IXIA_8.42EA.tar.gz
-    ln -s IXIA_8.42EA 8.42
-    popd
-}
-
 install_ixia_all()
 {
-    mkdir -p $SCID/tgen/ixia/
-    pushd $SCID/tgen/ixia/
-    rm -f all
-    untar all_ixia.tar.gz
+    pushd $SCID/install
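With the reworked untar helper, a second argument selects the extraction directory while the tarball path stays anchored to the caller's directory via $here; the "untar all_ixia.tar.gz $SCID/tgen/ixia" call below is thus roughly equivalent to using tar's -C flag:

    tar -zxf $SCID/install/all_ixia.tar.gz -C $SCID/tgen/ixia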
+    rm -rf $SCID/tgen/ixia/all
+    untar all_ixia.tar.gz $SCID/tgen/ixia
     popd
 }
 
-install_stc_491()
+install_stc_all()
 {
-    mkdir -p $SCID/tgen/stc/
-    pushd $SCID/tgen/stc/
-    untar Spirent_TestCenter_4.91.tar.gz
-    ln -s Spirent_TestCenter_4.91 4.91
+    pushd $SCID/install
+    rm -rf $SCID/tgen/stc
+    mkdir $SCID/tgen/stc
+    untar all_stc.tar.gz $SCID/tgen/stc
     popd
 }
 
 install_tcl64_85
 install_python2
-install_python3
+#install_python3
 
 if [ -f $dir/.tools_env ]; then
-    #install_ixia_842
     install_ixia_all
-    #install_stc_491
+    install_stc_all
 fi
-
-#reinstall_python3
-#reinstall_python2
-
diff --git a/spytest/bin/ubuntu18_deps.sh b/spytest/bin/ubuntu18_deps.sh
index 8bf34a53ed..126b70c1ca 100755
--- a/spytest/bin/ubuntu18_deps.sh
+++ b/spytest/bin/ubuntu18_deps.sh
@@ -3,3 +3,8 @@
 sudo apt install -y python python-pip
 sudo apt install -y tcl8.5 tclx
 sudo apt install -y iputils-ping
+sudo apt install -y snmp
+sudo apt install -y libnet-snmp-perl
+sudo apt install -y libcrypt-des-perl
+sudo apt install -y libdigest-hmac-perl
+sudo apt install -y libcrypt-rijndael-perl
diff --git a/spytest/bin/upgrade_requirements.sh b/spytest/bin/upgrade_requirements.sh
index 3639a26d6b..6e132a9c0f 100755
--- a/spytest/bin/upgrade_requirements.sh
+++ b/spytest/bin/upgrade_requirements.sh
@@ -1,22 +1,139 @@
-#!/bin/sh
+#!/bin/bash
 
-dir=$(dirname $0)
+#bsub -q sj-slx -m lc-lvn-0291 -Is bash
 
-# sourde environment
+mkdir -p /tmp/$USER
+exec &> >(tee /tmp/$USER/upgrade-$SPYTEST_PYTHON_VERSION-requirements.log)
+
+dir=$(cd $(dirname $0);pwd -P)
+
+# source environment
 . $dir/env
 
-export CC=gcc
-export CPP=cpp
-export CXX=c++
+export PATH=/tools/bin:$PATH
+export GCCVER=5.4.0
+export CC=${PREFIX}gcc
+export CPP=${PREFIX}cpp
+export CXX=${PREFIX}c++
 export LIBS=
-export LDSHARED="gcc -pthread -shared"
+export LDSHARED="${PREFIX}gcc -pthread -shared"
 export PYMSSQL_BUILD_WITH_BUNDLED_FREETDS=1
 
-$SPYTEST_PYTHON -m pip install -r $dir/requirements0.txt
-#$SPYTEST_PYTHON -m pip install -r $dir/requirements1.txt
+TMPFILE=$(mktemp)
+
+cat << EOF > $TMPFILE.1
+readline
+pyfiglet
+textfsm
+pytest-timeout
+gitpython
+ansible-core
+jinja2
+psutil
+prettytable
+tabulate
+pycryptodome
+natsort
+redis
+requests
+jsonpatch
+rpyc
+Pyro4
+netaddr
+# GNMI
+crc16
+autoflake
+pyang
+pyparsing
+pyangbind
+yapf
+#yabgp
+pexpect
+pytz
+docker==2.7.0
+pdbpp
+pylint-pytest
+pylint-protobuf
+ruff
+EOF
+
+cat << EOF > $TMPFILE.2
+pylint>=1.9.5
+paramiko==2.11.0
+netmiko==2.4.2
+pytest>=4.4.1,<=4.6.5
+pytest-xdist==1.28.0
+future>=0.16.0
+cryptography >= 2.5
+scapy==2.4.3rc1
+# GNMI
+protobuf>=3.15.6,<3.21
+deepdiff==3.3.0
+grpcio>=1.8.3,<=1.20.1
+grpcio-tools>=1.8.3,<=1.20.1
+regex<2022.1.18
+EOF
+
+cat << EOF > $TMPFILE.3
+pylint>=2.7.2
+netmiko>=3.3.3,<=3.4.0
+pytest<=6.2.5
+pytest-xdist<=2.5.0
+future>=0.16.0
+cryptography >= 2.5
+scapy>=2.4.4
+# GNMI
+protobuf>=3.15.6,<=3.20.3
+pyopenssl
+deepdiff>=5.5.0
+grpcio>=1.8.3
+grpcio-tools>=1.8.3
+black
+pyenchant
+sys-prctl
+openpyxl
+EOF
+
+$SPYTEST_PYTHON -V 2>&1 | grep "Python 3"
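The grep "Python 3" probe above drives the version split that follows; an equivalent branch that avoids the separate $? test (a sketch, assuming a POSIX shell) would be:

    case "$($SPYTEST_PYTHON -V 2>&1)" in
        "Python 3"*) is_v3=1 ;;
        *)           is_v3=0 ;;
    esac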
+
+$SPYTEST_PYTHON -V 2>&1 | grep "Python 3"
+if [ $? -eq 0 ]; then
+  is_v3=1
+else
+  is_v3=0
+fi
+
+if [ $is_v3 -eq 0 ]; then
+  cat $TMPFILE.1 $TMPFILE.2 > $TMPFILE.0
+else
+  cat $TMPFILE.1 $TMPFILE.3 > $TMPFILE.0
+fi
+
+#$SPYTEST_PYTHON -m pip install python-jenkins; exit 0
+
+#INSTALL_OPTS="--upgrade --force-reinstall"
+#INSTALL_OPTS="$INSTALL_OPTS --verbose"
+$SPYTEST_PYTHON -m pip install --upgrade pip
+#$SPYTEST_PYTHON -m pip install --upgrade pyopenssl
+#$SPYTEST_PYTHON -m pip uninstall -y regex bitarray pyang pyangbind
+$SPYTEST_PYTHON -m pip --no-cache-dir install $INSTALL_OPTS wheel
+$SPYTEST_PYTHON -m pip --no-cache-dir install $INSTALL_OPTS sqlite3
+$SPYTEST_PYTHON -m pip --no-cache-dir install $INSTALL_OPTS -r $TMPFILE.0
+
+if [ $is_v3 -ne 0 ]; then
+  $SPYTEST_PYTHON -m pip install --upgrade git+https://github.com/sachinholla/pyangbind.git@0.8.1+spytest.20220216#egg=pyangbind
+
+  VERSION=$($SPYTEST_PYTHON -V 2>&1 | cut -d\  -f 2)
+  VERSION=(${VERSION//./ })
+  if [[ ${VERSION[0]} -ge 3 ]] && [[ ${VERSION[1]} -ge 9 ]] ; then
+    $SPYTEST_PYTHON -m pip uninstall -y pytest pytest-xdist
+    $SPYTEST_PYTHON -m pip install --no-input pytest-xdist==1.28.0 pytest==5.4.3
+  else
+    $SPYTEST_PYTHON -m pip install --upgrade git+https://github.com/ramakristipatibrcm/pytest-xdist.git@spytest#egg=pytest-xdist
+  fi
+fi
+
+read -rs -n1 -t30 -p "Press any key or wait to continue ..."

 if [ -d $SCID_PYTHON_BIN ]; then
   $SPYTEST_PYTHON -m compileall $SCID_PYTHON_BIN/..
   chmod -R go+r $SCID_PYTHON_BIN/..
 fi
-
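Note: the interpreter gate above splits "python -V" output on dots and tests
the major and minor fields separately, so a hypothetical Python 4.0 would
fail the minor check. Inside Python the same decision is normally a tuple
comparison on sys.version_info; a sketch using the same pins as the script:

    import sys

    def pytest_pins():
        # Tuple comparison; unlike the field-wise bash test, 4.0 would pass
        if sys.version_info[:2] >= (3, 9):
            return ["pytest==5.4.3", "pytest-xdist==1.28.0"]
        # Older Python 3 interpreters get the patched pytest-xdist fork
        return ["git+https://github.com/ramakristipatibrcm/pytest-xdist.git"
                "@spytest#egg=pytest-xdist"]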
diff --git a/spytest/utilities/common.py b/spytest/utilities/common.py
index ea948ac696..979b90ddb9 100644
--- a/spytest/utilities/common.py
+++ b/spytest/utilities/common.py
@@ -3,6 +3,7 @@
 import sys
 import csv
 import glob
+import time
 import base64
 import random
 import socket
@@ -25,29 +26,26 @@
 from . import json_helpers as jsonutil
+from . import ctrl_chars

 if sys.version_info[0] >= 3:
     unicode = str
     basestring = str

-def to_ascii(msg):
-    msg = re.sub(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]', ' ', msg)
-    msg = re.sub(r'[^\x00-\x7F]+', ' ', msg)
-    try:
-        return msg.encode('ascii', 'ignore').decode('ascii')
-    except Exception as exp:
-        print(str(exp))
-        return "non-ascii characters"
-
-def list_files_tree(dir_path, pattern="*"):
+def list_files_tree(dir_path, pattern="*", recursive=True):
     matches = []
-    for root, _, filenames in os.walk(dir_path):
+    if recursive:
+        res = os.walk(dir_path)
+    else:
+        res = [next(os.walk(dir_path))]
+    for root, _, filenames in res:
         for filename in fnmatch.filter(filenames, pattern):
             matches.append(os.path.join(root, filename))
     return matches

-def list_files(entry, pattern="*"):
+def list_files(entry, pattern="*", recursive=True):
     if os.path.isdir(entry):
-        return list_files_tree(entry, pattern)
+        return list_files_tree(entry, pattern, recursive)
     if os.path.isfile(entry):
         return [entry]
     return glob.glob(entry)
@@ -63,14 +61,25 @@ def find_file(filename, paths=[]):
             return filename1
     return None

+def grep_file(filepath, regex, first=False):
+    regObj = re.compile(regex)
+    res = []
+    with open(filepath) as f:
+        for line in f:
+            if regObj.match(line):
+                res.append(line)
+                if first: break
+    return res
+
 def ensure_folder(path):
     path = os.path.abspath(path)
     if not os.path.exists(path):
         os.makedirs(path)
+    return path

 def ensure_parent(filename):
     path = os.path.dirname(filename)
-    ensure_folder(path)
+    return ensure_folder(path)

 def open_file(filename, mode="r"):
@@ -82,26 +91,41 @@
     return open(filename, mode, newline='')

+def delete_folder(folder):
+    try: shutil.rmtree(folder)
+    except Exception: pass
+
 def delete_file(filename):
     if os.path.exists(filename):
         os.remove(filename)
         return True
     return False

+def copyfile(src, dst, check=True):
+    if check: ensure_folder(dst)
+    shutil.copy2(src, dst)
+
+def copy_file(src, dst, check=True):
+    copyfile(src, dst, check)
+
+def rename_file(src, dst):
+    ensure_parent(dst)
+    if os.path.exists(src):
+        shutil.move(src, dst)
+
 def copytree(src, dst, symlinks=False, ignore=None):
-    ensure_folder(dst)
     for item in os.listdir(src):
         s = os.path.join(src, item)
         d = os.path.join(dst, item)
         if os.path.isdir(s):
             copytree(s, d, symlinks, ignore)
         else:
-            shutil.copy2(s, d)
+            copyfile(s, d, False)

 def write_file(filename, data, mode="w"):
     if not filename: return data
     ensure_parent(filename)
-    try: data2 = to_ascii(data)
+    try: data2 = ctrl_chars.tostring(data)
     except Exception: data2 = data
     fh = open(filename, mode)
     fh.write(data2)
@@ -129,7 +153,6 @@ def make_list2(*args):
             for a in arg:
                 if a is not None:
                     retval.append(a)
-            retval.extend(arg)
         else:
             retval.append(arg)
     return retval
@@ -168,9 +191,8 @@ def match_entry(ent, match):
 def select_entry(ent, select):
     newd = dict()
     for col in select:
-        if col not in ent:
-            return None
-        newd[col] = ent[col]
+        if col in ent:
+            newd[col] = ent[col]
     return newd

 # collect the matched/all entries
@@ -206,7 +228,7 @@ def sprint_data(d, msg=""):
     return rv

 def print_data(d, msg=""):
-    print(sprint_data(d, msg))
+    print (sprint_data(d, msg))

 def sprint_yaml(d, msg="", default_flow_style=False):
     rv = "========================{}===========================\n".format(msg)
@@ -215,7 +237,7 @@
     return rv

 def print_yaml(d, msg="", default_flow_style=False):
-    print(sprint_yaml(d, msg, default_flow_style))
+    print (sprint_yaml(d, msg, default_flow_style))

 def random_integer(min=0, max=10):
     return random.randint(min, max)
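Note: grep_file and the new recursive flag on list_files/list_files_tree are
small conveniences for test log handling; a usage sketch, assuming the module
is importable as utilities.common and the paths exist:

    from utilities.common import grep_file, list_files

    # Lines beginning with "ERROR" (grep_file anchors with re.match);
    # first=True stops at the first hit
    first_error = grep_file("/tmp/session.log", r"ERROR", first=True)

    # Log files directly under the folder, without walking subdirectories
    top_level = list_files("/tmp/logs", "*.log", recursive=False)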
@@ -236,7 +258,7 @@ def random_password(slen=10):

 def random_vlan_list(count=1, exclude=[]):
     retval = []
     while count > 0:
-        val = random.randint(2, 4094)
+        val = random.randint(2, 3966)
         if exclude and val in exclude:
             pass
         elif val not in retval:
@@ -247,12 +269,30 @@
 def get_proc_name():
     return sys._getframe(1).f_code.co_name

+def get_location(lvl=0):
+    callerframerecord = inspect.stack()[lvl+1]
+    frame = callerframerecord[0]
+    finfo = inspect.getframeinfo(frame)
+    return "{}:{}".format(os.path.basename(finfo.filename), finfo.lineno)
+
 def get_line_number(lvl=0):
     cf = inspect.currentframe()
     for _ in range(lvl):
         if cf.f_back:
             cf = cf.f_back
-    return cf.f_back.f_lineno
+    return cf.f_back.f_lineno if cf.f_back else 0
+
+def get_line_numbers(lvl=0, count=4):
+    lines = []
+    for _ in range(count):
+        lvl = lvl + 1
+        line = get_line_number(lvl)
+        if line == 0:
+            break
+        lines.append(get_line_number(lvl))
+    if count == 1:
+        return lines[0]
+    return "/".join([str(line) for line in lines])

 def trace(fmt, *args):
     sys.stdout.write(fmt % args)
@@ -278,43 +318,65 @@ def copy_items(src, dst, include=None, exclude=None):
             if k not in exclude:
                 dst[k] = v

+def is_unicode_string(arg):
+    return bool(isinstance(arg, (unicode, str, bytes)))
+
 def is_unicode(arg):
     return bool(isinstance(arg, unicode))

+def to_unicode(arg):
+    return unicode(arg)
+
 def is_basestring(arg):
     return bool(isinstance(arg, basestring))

 def do_eval(arg):
+    # nosemgrep-next-line
     return eval(arg)

-def ipcheck(addr):
+def ipcheck(addr, max_attempts=1, logf=None, addr_type="", sleep=0):
+    for attempt in range(1, max_attempts+1):
+        try:
+            subprocess.check_output(["ping", "-c", "2", "-w", "2", str(addr)])
+            return True
+        except subprocess.CalledProcessError as exp:
+            if logf:
+                msg = "{}IP {} is not reachable - attempt {} {}"
+                logf(msg.format(addr_type, addr, attempt, str(exp)))
+            if attempt <= max_attempts:
+                time.sleep(sleep)
+    return False
+
+def urlcheck(url):
+    data = parse_url(url)
+    from http.client import HTTPConnection
+    conn = HTTPConnection(data["netloc"])
     try:
-        subprocess.check_output(["ping", "-c", "1", "-w", "2", str(addr)])
-        return True
-    except subprocess.CalledProcessError:
-        return False
+        conn.request('HEAD', data["path"])
+        res = conn.getresponse()
+        return bool(res.status == 200), None
+    except Exception as exp:
+        return False, exp

 def sprintf(fmt, *args):
     return fmt % args

-def md5(fname):
-    hash_md5 = hashlib.md5()
-    with open(fname, "rb") as f:
-        for chunk in iter(lambda: f.read(4096), b""):
-            hash_md5.update(chunk)
+def md5(fname, data=None):
+    # nosemgrep-next-line
+    hash_md5 = hashlib.md5(data or b"")
+    if fname:
+        with open(fname, "rb") as f:
+            for chunk in iter(lambda: f.read(4096), b""):
+                hash_md5.update(chunk)
     return hash_md5.hexdigest()

-def str_encode(s):
-    if sys.version_info[0] >= 3:
-        rv = str.encode(s)
-        return rv
-    return s
+def str_encode(s, etype="ascii"):
+    try: return s.encode(etype)
+    except Exception: return s

-def str_decode(s):
-    if sys.version_info[0] >= 3:
-        rv = s.decode() if s else s
-        return rv
-    return s
+def str_decode(s, etype="ascii"):
+    try: return s.decode(etype)
+    except Exception: return s

 def b64encode(file_path):
     fh = open_file(file_path)
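Note: ipcheck above now retries the ping up to max_attempts times and can
report each failed attempt through a caller-supplied log function; a usage
sketch with a documentation-range address and an assumed import path:

    import logging
    from utilities.common import ipcheck

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("reachability")

    # Up to 3 attempts of 2 pings each, sleeping 5 seconds between attempts
    reachable = ipcheck("192.0.2.10", max_attempts=3, logf=log.warning,
                        addr_type="mgmt ", sleep=5)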
@@ -460,25 +522,60 @@ def split_byall(text, tostr=False, sep=",;"):
         retval.append(ent)
     return retval

-def read_lines(filepath, strip=True):
-    fh = open(filepath, 'r')
-    data = fh.readlines()
-    fh.close()
+def read_lines(filepath, strip=True, default=[]):
+    try:
+        fh = open(filepath, 'r')
+        data = fh.readlines()
+        fh.close()
+    except Exception:
+        data = default
     if strip:
         data = map(str.strip, data)
     else:
         data = map(str, data)
-    return data
+    return list(data)

 def find_duplicate(items):
     retval, unique = [], []
-    for item in items:
+    for item in items or []:
         if item not in unique:
             unique.append(item)
         else:
             retval.append(item)
     return retval, unique

+def remove_duplicates(*args):
+    for arg in args:
+        _, unique = find_duplicate(arg)
+        del arg[:]
+        arg.extend(unique)
+
+def list_flatten(l, rv=None):
+    rv = rv or []
+    for i in l:
+        if isinstance(i, list):
+            list_flatten(i, rv)
+        else:
+            rv.append(i)
+    return rv
+
+def list_insert(lst, *args):
+    rv = list(lst)
+    for arg in args:
+        if isinstance(arg, list):
+            tmp = list_flatten(arg)
+        else:
+            tmp = [arg]
+        for i in tmp:
+            if i not in rv:
+                rv.append(i)
+    return rv
+
+def list_append(lst, *args):
+    for arg in args:
+        if arg not in lst:
+            lst.append(arg)
+
 def read_csv(filepath):
     rows = []
     try:
@@ -501,6 +598,7 @@ def write_csv_writer(cols, rows, writer, append=False):
         writer.writerow(d)

 def write_csv_file(cols, rows, filepath, append=False):
+    ensure_parent(filepath)
     if sys.version_info.major < 3:
         mode = "ab" if append else "wb"
         fd = open(filepath, mode)
@@ -565,51 +663,120 @@ def write_html_table2(cols, rows, filepath=None, links=None, colors=None, color_
     html = j2_apply(template, cols=cols, rows=l_rows, row_css=row_css, cell_css=cell_css)
     return write_file(filepath, html)

+def get_cdn_base(cdn=None):
+    cdn0 = "https://cdn.datatables.net/v/dt/jq-3.6.0/dt-1.12.1/b-2.2.3/b-colvis-2.2.3/b-html5-2.2.3/fh-3.2.4/"
+    return cdn0 if cdn is None else cdn
+
+def copy_web_include(dst_path):
+    web_incl_path = os.path.join(os.path.dirname(__file__), "web")
+    copyfile(os.path.join(web_incl_path, "datatables.min.css"), dst_path)
+    copyfile(os.path.join(web_incl_path, "datatables.min.js"), dst_path)
+
 # links, colors and align are dictionaries or None
 # where key is column name or None and value is list of links/colors/align
 # None key is used for entire row
 # text-align None=center, True=Left, False=Right
-def write_html_table3(cols, rows, filepath=None, links=None, colors=None, align=None, total=True):
-    js_tmpl = textwrap.dedent("""\
+# total data in rows None: not present True: Last row False: First row
+# total_pos False: Head True: FOOT None: Hide
+def write_html_table3(cols, rows, filepath=None, links=None, colors=None,
+                      align=None, total=True, total_pos=False, addl_cols=None,
+                      cdn=None, fixedHeader=None):
+    cdn = get_cdn_base(cdn)
+    js_tmpl = textwrap.dedent(r"""
-
-
-
-
-
-
-
+
+
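Note: the write_html_table3 rework switches the report template to DataTables
served from the CDN base returned by get_cdn_base(); the template body is
truncated in this diff, so the exact include tags are not shown above. A
sketch of how such tags can be built from that base, assuming the bundle
exposes the datatables.min.css/js names that copy_web_include also ships:

    from utilities.common import get_cdn_base   # assumed import path

    cdn = get_cdn_base()   # or get_cdn_base("") when serving local copies
    css = '<link rel="stylesheet" type="text/css" href="{}datatables.min.css"/>'.format(cdn)
    js = '<script type="text/javascript" src="{}datatables.min.js"></script>'.format(cdn)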