From 781ae9f57174764d12fea752fcb313c7c701f2c1 Mon Sep 17 00:00:00 2001 From: Jing Kan Date: Mon, 18 Apr 2022 12:54:34 +0800 Subject: [PATCH 01/20] [config] Do not enable pfcwd for BmcMgmtToRRouter (#2136) Signed-off-by: Jing Kan jika@microsoft.com --- config/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/main.py b/config/main.py index ed38bcc841..c3415fbd11 100644 --- a/config/main.py +++ b/config/main.py @@ -1604,7 +1604,7 @@ def load_minigraph(db, no_service_restart): # get the device type device_type = _get_device_type() - if device_type != 'MgmtToRRouter' and device_type != 'MgmtTsToR' and device_type != 'EPMS': + if device_type != 'MgmtToRRouter' and device_type != 'MgmtTsToR' and device_type != 'BmcMgmtToRRouter' and device_type != 'EPMS': clicommon.run_command("pfcwd start_default", display_cmd=True) # Write latest db version string into db From f9dc6811dba5ac96136a98bab9cd7387de6bda9b Mon Sep 17 00:00:00 2001 From: Kebo Liu Date: Mon, 18 Apr 2022 13:15:27 +0800 Subject: [PATCH 02/20] [intfutil] Display RJ45 port and portchannel speed in 'M' instead of 'G' when it's <= 1000M (#2110) * enhance port speed for RJ45 ports Signed-off-by: Kebo Liu * fix speed notation for the speed of LAG port based on RJ45 ports * on RJ45 ports display speed with unit 'M' when it's <= 1000M Signed-off-by: Kebo Liu --- scripts/intfutil | 26 ++++++++++-- tests/intfutil_test.py | 28 +++++++++---- tests/mock_tables/appl_db.json | 46 ++++++++++++++++++++ tests/mock_tables/config_db.json | 10 ++--- tests/mock_tables/state_db.json | 72 ++++++++++++++++++++++++++++++++ tests/tpid_test.py | 4 ++ 6 files changed, 169 insertions(+), 17 deletions(-) diff --git a/scripts/intfutil b/scripts/intfutil index 5d8e743262..784140aced 100755 --- a/scripts/intfutil +++ b/scripts/intfutil @@ -48,6 +48,7 @@ PORT_ADV_SPEEDS = 'adv_speeds' PORT_INTERFACE_TYPE = 'interface_type' PORT_ADV_INTERFACE_TYPES = 'adv_interface_types' PORT_TPID = "tpid" +OPTICS_TYPE_RJ45 = 'RJ45' VLAN_SUB_INTERFACE_SEPARATOR = "." 
VLAN_SUB_INTERFACE_TYPE = "802.1q-encapsulation" @@ -131,6 +132,19 @@ def appl_db_sub_intf_keys_get(appl_db, sub_intf_list, sub_intf_name): return appl_db_sub_intf_keys +def appl_db_port_speed_parse(in_speed, optics_type): + """ + Parse the speed received from application DB + """ + # fetched speed is in megabits per second + speed = int(in_speed) + if optics_type == OPTICS_TYPE_RJ45 and speed <= 1000: + out_speed = '{}M'.format(speed) + else: + out_speed = '{}G'.format(int(speed/1000)) + + return out_speed + def appl_db_port_status_get(appl_db, intf_name, status_type): """ Get the port status @@ -140,12 +154,14 @@ def appl_db_port_status_get(appl_db, intf_name, status_type): if status is None: return "N/A" if status_type == PORT_SPEED and status != "N/A": - status = '{}G'.format(status[:-3]) + optics_type = state_db_port_optics_get(appl_db, intf_name, PORT_OPTICS_TYPE) + status = appl_db_port_speed_parse(status, optics_type) elif status_type == PORT_ADV_SPEEDS and status != "N/A" and status != "all": + optics_type = state_db_port_optics_get(appl_db, intf_name, PORT_OPTICS_TYPE) speed_list = status.split(',') new_speed_list = [] for s in natsorted(speed_list): - new_speed_list.append('{}G'.format(s[:-3])) + new_speed_list.append(appl_db_port_speed_parse(s, optics_type)) status = ','.join(new_speed_list) return status @@ -284,16 +300,18 @@ def po_speed_dict(po_int_dict, appl_db): # If no speed was returned, append None without format po_list.append(None) else: - interface_speed = '{}G'.format(interface_speed[:-3]) + optics_type = state_db_port_optics_get(appl_db, value[0], PORT_OPTICS_TYPE) + interface_speed = appl_db_port_speed_parse(interface_speed, optics_type) po_list.append(interface_speed) elif len(value) > 1: for intf in value: temp_speed = port_oper_speed_get_raw(appl_db, intf) + optics_type = state_db_port_optics_get(appl_db, intf, PORT_OPTICS_TYPE) temp_speed = int(temp_speed) if temp_speed else 0 agg_speed_list.append(temp_speed) interface_speed = sum(agg_speed_list) interface_speed = str(interface_speed) - interface_speed = '{}G'.format(interface_speed[:-3]) + interface_speed = appl_db_port_speed_parse(interface_speed, optics_type) po_list.append(interface_speed) po_speed_dict = dict(po_list[i:i+2] for i in range(0, len(po_list), 2)) return po_speed_dict diff --git a/tests/intfutil_test.py b/tests/intfutil_test.py index cfca1dbd6d..0129573881 100644 --- a/tests/intfutil_test.py +++ b/tests/intfutil_test.py @@ -14,7 +14,11 @@ Interface Lanes Speed MTU FEC Alias Vlan Oper Admin Type Asym PFC --------------- --------------- ------- ----- ----- --------- --------------- ------ ------- --------------- ---------- Ethernet0 0 25G 9100 rs Ethernet0 routed down up QSFP28 or later off + Ethernet16 16 100M 9100 N/A etp5 trunk up up RJ45 off + Ethernet24 24 1G 9100 N/A etp6 trunk up up QSFP28 or later off + Ethernet28 28 1000M 9100 N/A etp8 trunk up up RJ45 off Ethernet32 13,14,15,16 40G 9100 rs etp9 PortChannel1001 up up N/A off + Ethernet36 9,10,11,12 10M 9100 N/A etp10 routed up up RJ45 off Ethernet112 93,94,95,96 40G 9100 rs etp29 PortChannel0001 up up N/A off Ethernet116 89,90,91,92 40G 9100 rs etp30 PortChannel0002 up up N/A off Ethernet120 101,102,103,104 40G 9100 rs etp31 PortChannel0003 up up N/A off @@ -33,14 +37,18 @@ """ show_interface_description_output="""\ - Interface Oper Admin Alias Description ------------ ------ ------- --------- -------------------- - Ethernet0 down up Ethernet0 ARISTA01T2:Ethernet1 - Ethernet32 up up etp9 Servers7:eth0 -Ethernet112 up up etp29 
ARISTA01T1:Ethernet1 -Ethernet116 up up etp30 ARISTA02T1:Ethernet1 -Ethernet120 up up etp31 ARISTA03T1:Ethernet1 -Ethernet124 up up etp32 ARISTA04T1:Ethernet1 + Interface Oper Admin Alias Description +----------- ------ ------- --------- --------------------- + Ethernet0 down up Ethernet0 ARISTA01T2:Ethernet1 + Ethernet16 up up etp5 ARISTA04T1:Ethernet16 + Ethernet24 up up etp6 ARISTA02T1:Ethernet24 + Ethernet28 up up etp8 ARISTA03T1:Ethernet28 + Ethernet32 up up etp9 Servers7:eth0 + Ethernet36 up up etp10 Servers8:eth0 +Ethernet112 up up etp29 ARISTA01T1:Ethernet1 +Ethernet116 up up etp30 ARISTA02T1:Ethernet1 +Ethernet120 up up etp31 ARISTA03T1:Ethernet1 +Ethernet124 up up etp32 ARISTA04T1:Ethernet1 """ show_interface_description_Ethernet0_output="""\ @@ -66,7 +74,11 @@ Interface Auto-Neg Mode Speed Adv Speeds Type Adv Types Oper Admin ----------- --------------- ------- ------------ ------ ----------- ------ ------- Ethernet0 enabled 25G 10G,50G CR4 CR4,CR2 down up + Ethernet16 N/A 100M N/A N/A N/A up up + Ethernet24 N/A 1G N/A N/A N/A up up + Ethernet28 N/A 1000M N/A N/A N/A up up Ethernet32 disabled 40G all N/A all up up + Ethernet36 N/A 10M N/A N/A N/A up up Ethernet112 N/A 40G N/A N/A N/A up up Ethernet116 N/A 40G N/A N/A N/A up up Ethernet120 N/A 40G N/A N/A N/A up up diff --git a/tests/mock_tables/appl_db.json b/tests/mock_tables/appl_db.json index 0da2ec6c1d..81fa7c7297 100644 --- a/tests/mock_tables/appl_db.json +++ b/tests/mock_tables/appl_db.json @@ -41,6 +41,52 @@ "adv_interface_types": "CR4,CR2", "autoneg": "on" }, + "PORT_TABLE:Ethernet16": { + "index": "4", + "lanes": "16", + "alias": "etp5", + "description": "ARISTA04T1:Ethernet16", + "speed": "100", + "oper_status": "up", + "pfc_asym": "off", + "mtu": "9100", + "admin_status": "up" + }, + "PORT_TABLE:Ethernet36": { + "index": "9", + "lanes": "9,10,11,12", + "alias": "etp10", + "description": "Servers8:eth0", + "speed": "10", + "oper_status": "up", + "pfc_asym": "off", + "mtu": "9100", + "tpid": "0x8100", + "admin_status": "up" + }, + "PORT_TABLE:Ethernet24": { + "index": "6", + "lanes": "24", + "alias": "etp6", + "description": "ARISTA02T1:Ethernet24", + "speed": "1000", + "oper_status": "up", + "pfc_asym": "off", + "mtu": "9100", + "tpid": "0x8100", + "admin_status": "up" + }, + "PORT_TABLE:Ethernet28": { + "index": "7", + "lanes": "28", + "alias": "etp8", + "description": "ARISTA03T1:Ethernet28", + "speed": "1000", + "oper_status": "up", + "pfc_asym": "off", + "mtu": "9100", + "admin_status": "up" + }, "PORT_TABLE:Ethernet32": { "index": "8", "lanes": "13,14,15,16", diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 2f4002cb9f..4d01db96ad 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -70,11 +70,11 @@ "alias": "etp5", "description": "Servers3:eth0", "index": "4", - "lanes": "45,46,47,48", + "lanes": "16", "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", - "speed": "40000" + "speed": "100" }, "PORT|Ethernet20": { "admin_status": "up", @@ -96,7 +96,7 @@ "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", - "speed": "40000" + "speed": "1000" }, "PORT|Ethernet28": { "admin_status": "up", @@ -107,7 +107,7 @@ "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", - "speed": "40000" + "speed": "1000" }, "PORT|Ethernet32": { "admin_status": "up", @@ -129,7 +129,7 @@ "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", - "speed": "40000" + "speed": "10" }, "PORT|Ethernet40": { "admin_status": "up", diff --git a/tests/mock_tables/state_db.json 
b/tests/mock_tables/state_db.json index 958b7a8e03..2a5ab76181 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -133,6 +133,78 @@ "txpowerlowalarm": "-10.5012", "txpowerlowwarning": "-7.5007" }, + "TRANSCEIVER_INFO|Ethernet16": { + "type": "RJ45", + "hardware_rev": "N/A", + "serial": "N/A", + "manufacturer": "N/A", + "model": "N/A", + "vendor_oui": "N/A", + "vendor_date": "N/A", + "connector": "N/A", + "encoding": "N/A", + "ext_identifier": "N/A", + "ext_rateselect_compliance": "N/A", + "cable_type": "N/A", + "cable_length": "N/A", + "specification_compliance": "N/A", + "nominal_bit_rate": "N/A", + "application_advertisement": "N/A" + }, + "TRANSCEIVER_INFO|Ethernet36": { + "type": "RJ45", + "hardware_rev": "N/A", + "serial": "N/A", + "manufacturer": "N/A", + "model": "N/A", + "vendor_oui": "N/A", + "vendor_date": "N/A", + "connector": "N/A", + "encoding": "N/A", + "ext_identifier": "N/A", + "ext_rateselect_compliance": "N/A", + "cable_type": "N/A", + "cable_length": "N/A", + "specification_compliance": "N/A", + "nominal_bit_rate": "N/A", + "application_advertisement": "N/A" + }, + "TRANSCEIVER_INFO|Ethernet24": { + "type": "QSFP28 or later", + "hardware_rev": "AC", + "serial": "MT1706FT02066", + "manufacturer": "Mellanox", + "model": "MFA1A00-C003", + "vendor_oui": "00-02-c9", + "vendor_date": "2017-01-13 ", + "connector": "No separable connector", + "encoding": "64B66B", + "ext_identifier": "Power Class 3(2.5W max), CDR present in Rx Tx", + "ext_rateselect_compliance": "QSFP+ Rate Select Version 1", + "cable_type": "Length Cable Assembly(m)", + "cable_length": "3", + "specification_compliance": "{'10/40G Ethernet Compliance Code': '40G Active Cable (XLPPI)'}", + "nominal_bit_rate": "255", + "application_advertisement": "N/A" + }, + "TRANSCEIVER_INFO|Ethernet28": { + "type": "RJ45", + "hardware_rev": "N/A", + "serial": "N/A", + "manufacturer": "N/A", + "model": "N/A", + "vendor_oui": "N/A", + "vendor_date": "N/A", + "connector": "N/A", + "encoding": "N/A", + "ext_identifier": "N/A", + "ext_rateselect_compliance": "N/A", + "cable_type": "N/A", + "cable_length": "N/A", + "specification_compliance": "N/A", + "nominal_bit_rate": "N/A", + "application_advertisement": "N/A" + }, "TRANSCEIVER_STATUS|Ethernet0": { "status": "67", "error": "Blocking Error|High temperature" diff --git a/tests/tpid_test.py b/tests/tpid_test.py index 0c3ca4feee..b4c56ff084 100644 --- a/tests/tpid_test.py +++ b/tests/tpid_test.py @@ -48,7 +48,11 @@ Interface Alias Oper Admin TPID --------------- --------- ------ ------- ------ Ethernet0 Ethernet0 down up 0x9200 + Ethernet16 etp5 up up N/A + Ethernet24 etp6 up up 0x8100 + Ethernet28 etp8 up up N/A Ethernet32 etp9 up up 0x8100 + Ethernet36 etp10 up up 0x8100 Ethernet112 etp29 up up 0x8100 Ethernet116 etp30 up up 0x8100 Ethernet120 etp31 up up 0x8100 From 29771e7f8dd6bbf7caa56de92c03fe82e45364fb Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Mon, 18 Apr 2022 11:28:30 +0300 Subject: [PATCH 03/20] [techsupport] improve robustness (#2117) - What I did Execute the command in a separate bash process under timeout. With '--foreground', the child processes are not killed and still hold an open file descriptor to the piped process, which will basically hang forever.
From the "timeout" man page: --foreground when not running timeout directly from a shell prompt, allow COMMAND to read from the TTY and get TTY signals; in this mode, children of COMMAND will not be timed out. So, a command like this: timeout --foreground 5s some_process_that_spawn_childs_that_produce_output | tee some_file will actually hang forever. The '--foreground' option is still needed, so the proposed solution is to run that construct in another bash process: timeout --foreground 5s bash -c "some_process_that_spawn_childs_that_produce_output | tee some_file" Moved the hw-mgmt dump collection into the collect_mellanox function and executed it under timeout. Handle global timeout: when the global timeout is reached, SIGTERM is sent to the generate_dump process. Upon SIGTERM, generate_dump finalizes its work by creating a dump of what has already been collected, so a dump is provided even when the timeout is reached. generate_dump is killed in case it cannot create the dump after the initial SIGTERM. - How I did it I implemented the above-mentioned features. - How to verify it Artificially hang the FW, execute "show techsupport" and verify that the dump is generated even when the timeout is reached. Signed-off-by: Stepan Blyschak --- scripts/generate_dump | 130 +++++++++++++++++++++++++------------- show/main.py | 21 +++--- tests/techsupport_test.py | 24 +++++++ 3 files changed, 118 insertions(+), 57 deletions(-) create mode 100644 tests/techsupport_test.py diff --git a/scripts/generate_dump b/scripts/generate_dump index 85a7f6a057..9223314955 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -13,8 +13,12 @@ EXT_RECVSIG=3 EXT_RETRY=4 EXT_TAR_FAILED=5 EXT_PROCFS_SAVE_FAILED=6 +EXT_INTERRUPTED=7 +EXT_TERMINATED=8 EXT_INVALID_ARGUMENT=10 +TIMEOUT_EXIT_CODE=124 + TAR=tar MKDIR=mkdir RM=rm @@ -61,6 +65,8 @@ rm_lock_and_exit() handle_exit() { ECODE=$? + echo "Cleaning up working directory $TARDIR" + $RM -rf $TARDIR echo "Removing lock. Exit: $ECODE" >&2 $RM $V -rf ${LOCKDIR} # Echo the filename as the last statement if the generation succeeds @@ -69,11 +75,16 @@ handle_exit() fi } -handle_signal() +handle_sigint() { echo "Generate Dump received interrupt" >&2 - $RM $V -rf $TARDIR - exit $EXT_RECVSIG + exit $EXT_INTERRUPTED +} + +handle_sigterm() { + echo "Dump generation terminated" >&2 + finalize + exit $EXT_TERMINATED } handle_error() { @@ -83,6 +94,10 @@ handle_error() { fi } +escape_quotes() { + echo $1 | sed 's/\"/\\\"/g' +} + save_bcmcmd() { trap 'handle_error $? $LINENO' ERR local start_t=$(date +%s%3N) @@ -93,6 +108,7 @@ save_bcmcmd() { local do_gzip=${3:-false} local tarpath="${BASE}/dump/$filename" local timeout_cmd="timeout --foreground ${TIMEOUT_MIN}m" + local cmd=$(escape_quotes "$cmd") if [ ! -d $LOGDIR ]; then $MKDIR $V -p $LOGDIR fi @@ -106,12 +122,12 @@ save_bcmcmd() { # as one argument, e.g. vtysh -c "COMMAND HERE" needs to have # "COMMAND HERE" bunched together as 1 arg to vtysh -c if $NOOP; then - echo "${timeout_cmd} $cmd &> '${filepath}'" + echo "${timeout_cmd} bash -c \"${cmd}\" &> '${filepath}'" else ret=0 - eval "${timeout_cmd} $cmd" &> "${filepath}" || ret=$? + eval "${timeout_cmd} bash -c \"${cmd}\" &> '${filepath}'" || ret=$? if [ $ret -ne 0 ]; then - if [ $ret -eq 124 ]; then + if [ $ret -eq $TIMEOUT_EXIT_CODE ]; then echo "Command: $cmd timedout after ${TIMEOUT_MIN} minutes."
else RC=0 @@ -207,6 +223,8 @@ save_cmd() { redirect_eval="" fi + local cmd=$(escape_quotes "$cmd") + local cleanup_method_declration=$(declare -f $cleanup_method) # eval required here to re-evaluate the $cmd properly at runtime # This is required if $cmd has quoted strings that should be bunched # as one argument, e.g. vtysh -c "COMMAND HERE" needs to have @@ -215,25 +233,29 @@ save_cmd() { tarpath="${tarpath}.gz" filepath="${filepath}.gz" # cleanup_method will run in a sub-shell, need declare it first - local cleanup_method_declration=$(declare -f $cleanup_method) local cmds="$cleanup_method_declration; $cmd $redirect_eval | $cleanup_method | gzip -c > '${filepath}'" if $NOOP; then echo "${timeout_cmd} bash -c \"${cmds}\"" else RC=0 eval "${timeout_cmd} bash -c \"${cmds}\"" || RC=$? - if [ $RC -ne 0 ]; then + if [ $RC -eq $TIMEOUT_EXIT_CODE ]; then echo "Command: $cmds timedout after ${TIMEOUT_MIN} minutes." + elif [ $RC -ne 0 ]; then + echo "Command: $cmds failed with RC $RC" fi fi else + local cmds="$cleanup_method_declration; $cmd | $cleanup_method $redirect '$filepath'" if $NOOP; then - echo "${timeout_cmd} $cmd | $cleanup_method $redirect '$filepath'" + echo "${timeout_cmd} bash -c \"${cmds}\"" else RC=0 - eval "${timeout_cmd} $cmd | $cleanup_method" "$redirect" "$filepath" || RC=$? - if [ $RC -ne 0 ]; then - echo "Command: $cmd timedout after ${TIMEOUT_MIN} minutes." + eval "${timeout_cmd} bash -c \"${cmds}\"" || RC=$? + if [ $RC -eq $TIMEOUT_EXIT_CODE ]; then + echo "Command: $cmds timedout after ${TIMEOUT_MIN} minutes." + elif [ $RC -ne 0 ]; then + echo "Command: $cmds failed with RC $RC" fi fi fi @@ -484,20 +506,20 @@ save_bgp_neighbor() { local asic_id=${1:-""} local ns=$(get_vtysh_namespace $asic_id) - neighbor_list_v4=$(${timeout_cmd} vtysh $ns -c "show ip bgp neighbors" | grep "BGP neighbor is" | awk -F '[, ]' '{print $4}') + neighbor_list_v4=$(${timeout_cmd} bash -c "vtysh $ns -c 'show ip bgp neighbors' | grep 'BGP neighbor is' | awk -F '[, ]' '{print \$4}'") for word in $neighbor_list_v4; do save_cmd "vtysh $ns -c \"show ip bgp neighbors $word advertised-routes\"" "ip.bgp.neighbor.$word.adv$asic_id" save_cmd "vtysh $ns -c \"show ip bgp neighbors $word routes\"" "ip.bgp.neighbor.$word.rcv$asic_id" done - neighbor_list_v6=$(vtysh $ns -c "show bgp ipv6 neighbors" | grep "BGP neighbor is" | awk -F '[, ]' '{print $4}' | fgrep ':') + neighbor_list_v6=$(${timeout_cmd} bash -c "vtysh $ns -c 'show bgp ipv6 neighbors' | grep 'BGP neighbor is' | awk -F '[, ]' '{print \$4}' | fgrep ':'") for word in $neighbor_list_v6; do save_cmd "vtysh $ns -c \"show bgp ipv6 neighbors $word advertised-routes\"" "ipv6.bgp.neighbor.$word.adv$asic_id" save_cmd "vtysh $ns -c \"show bgp ipv6 neighbors $word routes\"" "ipv6.bgp.neighbor.$word.rcv$asic_id" done - vrf_list=`${timeout_cmd} vtysh $ns -c "show vrf" | awk -F" " '{print $2}'` + vrf_list=`${timeout_cmd} bash -c "vtysh $ns -c 'show vrf' | awk -F" " '{print \$2}'"` for vrf in $vrf_list; do - neighbor_list=`${timeout_cmd} vtysh $ns -c "show ip bgp vrf $vrf neighbors" | grep "BGP neighbor is" | awk -F '[, ]' '{print $4}'` + neighbor_list=`${timeout_cmd} bash -c "vtysh $ns -c 'show ip bgp vrf $vrf neighbors' | grep 'BGP neighbor is' | awk -F '[, ]' '{print \$4}'"` for word in $neighbor_list; do save_cmd "vtysh $ns -c \"show ip bgp vrf $vrf neighbors $word advertised-routes\"" "ip.bgp.neighbor.$vrf.$word.adv$asic_id" save_cmd "vtysh $ns -c \"show ip bgp vrf $vrf neighbors $word routes\"" "ip.bgp.neighbor.$vrf.$word.rcv$asic_id" @@ -737,7 +759,7 @@ 
save_platform_info() { save_cmd "show platform psustatus" "psustatus" save_cmd "show platform ssdhealth" "ssdhealth" save_cmd "show platform temperature" "temperature" - save_cmd "show platform fan" "fan" + save_cmd "show platform fan" "fan" fi } @@ -856,6 +878,7 @@ enable_logrotate() { ############################################################################### collect_mellanox() { trap 'handle_error $? $LINENO' ERR + local timeout_cmd="timeout --foreground ${TIMEOUT_MIN}m" local sai_dump_folder="/tmp/saisdkdump" local sai_dump_filename="${sai_dump_folder}/sai_sdk_dump_$(date +"%m_%d_%Y_%I_%M_%p")" @@ -865,12 +888,12 @@ collect_mellanox() { copy_from_docker syncd $sai_dump_folder $sai_dump_folder echo "$sai_dump_folder" for file in `ls $sai_dump_folder`; do - save_file ${sai_dump_folder}/${file} sai_sdk_dump true + save_file ${sai_dump_folder}/${file} sai_sdk_dump true done ${CMD_PREFIX}rm -rf $sai_dump_folder ${CMD_PREFIX}docker exec syncd rm -rf $sai_dump_folder - + # Save SDK error dumps local sdk_dump_path=`${CMD_PREFIX}docker exec syncd cat /tmp/sai.profile|grep "SAI_DUMP_STORE_PATH"|cut -d = -f2` if [[ -d $sdk_dump_path ]]; then @@ -880,6 +903,26 @@ collect_mellanox() { done rm -rf /tmp/sdk-dumps fi + + # run 'hw-management-generate-dump.sh' script and save the result file + HW_DUMP_FILE=/usr/bin/hw-management-generate-dump.sh + if [ -f "$HW_DUMP_FILE" ]; then + ${CMD_PREFIX}${timeout_cmd} /usr/bin/hw-management-generate-dump.sh $ALLOW_PROCESS_STOP + ret=$? + if [ $ret -ne 0 ]; then + if [ $ret -eq $TIMEOUT_EXIT_CODE ]; then + echo "hw-management dump timedout after ${TIMEOUT_MIN} minutes." + else + echo "hw-management dump failed ..." + fi + else + save_file "/tmp/hw-mgmt-dump*" "hw-mgmt" false + rm -f /tmp/hw-mgmt-dump* + fi + else + echo "HW Mgmt dump script $HW_DUMP_FILE does not exist" + fi + } ############################################################################### @@ -1087,12 +1130,11 @@ save_crash_files() { get_asic_count() { trap 'handle_error $? $LINENO' ERR local redirect_eval="2>&1" - if ! $SAVE_STDERR + if ! 
$SAVE_STDERR then redirect_eval="" fi - local cmd="show platform summary --json | python -c 'import sys, json; \ - print(json.load(sys.stdin)[\"asic_count\"])'" + local cmd="python -c 'from sonic_py_common.multi_asic import get_num_asics; print(get_num_asics())'" echo `eval ${cmd} ${redirect_eval}` } @@ -1199,6 +1241,11 @@ main() { end_t=$(date +%s%3N) echo "[ Capture Proc State ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO + # Save logs and cores early + save_log_files + save_crash_files + save_warmboot_files + # Save all the processes within each docker save_cmd "show services" services.summary @@ -1265,14 +1312,14 @@ main() { save_bfd_info save_redis_info - if $DEBUG_DUMP + if $DEBUG_DUMP then save_dump_state_all_ns fi save_cmd "docker ps -a" "docker.ps" save_cmd "docker top pmon" "docker.pmon" - + if [[ -d ${PLUGINS_DIR} ]]; then local -r dump_plugins="$(find ${PLUGINS_DIR} -type f -executable)" for plugin in $dump_plugins; do @@ -1333,25 +1380,16 @@ main() { end_t=$(date +%s%3N) echo "[ TAR /etc Files ] : $(($end_t-$start_t)) msec" >> $TECHSUPPORT_TIME_INFO - save_log_files - save_warmboot_files - save_crash_files + finalize +} - # run 'hw-management-generate-dump.sh' script and save the result file - HW_DUMP_FILE=/usr/bin/hw-management-generate-dump.sh - if [ -f "$HW_DUMP_FILE" ]; then - /usr/bin/hw-management-generate-dump.sh $ALLOW_PROCESS_STOP - save_file "/tmp/hw-mgmt-dump*" "hw-mgmt" false - rm -f /tmp/hw-mgmt-dump* - else - echo "HW Mgmt dump script $HW_DUMP_FILE does not exist" - fi +############################################################################### +# Finalize dump generation +############################################################################### +finalize() { # Save techsupport timing profile info save_file $TECHSUPPORT_TIME_INFO log false - # clean up working tar dir before compressing - $RM $V -rf $TARDIR - if $DO_COMPRESS; then RC=0 $GZIP $V $TARFILE || RC=$? @@ -1364,13 +1402,14 @@ main() { # Invoke the TechSupport Cleanup Hook setsid python3 /usr/local/bin/techsupport_cleanup.py ${TARFILE} &> /tmp/techsupport_cleanup.log & - + if ! $SAVE_STDERR then exit $RETURN_CODE fi } + ############################################################################### # Remove secret from pipeline inout and output result to pipeline. # Globals: @@ -1416,7 +1455,7 @@ remove_secret_from_etc_files() { # Remove snmp community string from snmp.yml sed -i -E 's/(\s*snmp_\S*community\s*:\s*)(\S*)/\1****/g' $dumppath/etc/sonic/snmp.yml - + # Remove secret from /etc/sonic/config_db.json cat $dumppath/etc/sonic/config_db.json | remove_secret_from_config_db_dump > $dumppath/etc/sonic/config_db.json.temp mv $dumppath/etc/sonic/config_db.json.temp $dumppath/etc/sonic/config_db.json @@ -1475,9 +1514,9 @@ OPTIONS "24 March", "yesterday", etc. -t TIMEOUT_MINS Command level timeout in minutes - -r + -r Redirect any intermediate errors to STDERR - -d + -d Collect the output of debug dump cli EOF } @@ -1527,7 +1566,7 @@ while getopts ":xnvhzas:t:r:d" opt; do r) SAVE_STDERR=false ;; - d) + d) DEBUG_DUMP=true ;; /?) 
@@ -1553,7 +1592,8 @@ if $MKDIR "${LOCKDIR}" &>/dev/null; then echo "$$" > "${PIDFILE}" # This handler will exit the script upon receiving these interrupts # Trap configured on EXIT will be triggered by the exit from handle_signal function - trap 'handle_signal' SIGINT SIGHUP SIGQUIT SIGTERM + trap 'handle_sigterm' SIGHUP SIGQUIT SIGTERM + trap 'handle_sigint' SIGINT echo "Lock succesfully accquired and installed signal handlers" # Proceed with the actual code if [[ ! -z "${V}" ]]; then diff --git a/show/main.py b/show/main.py index 9d50cbf15e..3f3e367463 100755 --- a/show/main.py +++ b/show/main.py @@ -68,25 +68,22 @@ GEARBOX_TABLE_PHY_PATTERN = r"_GEARBOX_TABLE:phy:*" +COMMAND_TIMEOUT = 300 + # To be enhanced. Routing-stack information should be collected from a global # location (configdb?), so that we prevent the continous execution of this # bash oneliner. To be revisited once routing-stack info is tracked somewhere. def get_routing_stack(): + result = None command = "sudo docker ps | grep bgp | awk '{print$2}' | cut -d'-' -f3 | cut -d':' -f1 | head -n 1" try: - proc = subprocess.Popen(command, - stdout=subprocess.PIPE, - shell=True, - text=True) - stdout = proc.communicate()[0] - proc.wait() + stdout = subprocess.check_output(command, shell=True, timeout=COMMAND_TIMEOUT) result = stdout.rstrip('\n') + except Exception as err: + click.echo('Failed to get routing stack: {}'.format(err), err=True) - except OSError as e: - raise OSError("Cannot detect routing-stack") - - return (result) + return result # Global Routing-Stack variable @@ -1149,7 +1146,7 @@ def users(verbose): @click.option('--redirect-stderr', '-r', is_flag=True, help="Redirect an intermediate errors to STDERR") def techsupport(since, global_timeout, cmd_timeout, verbose, allow_process_stop, silent, debug_dump, redirect_stderr): """Gather information for troubleshooting""" - cmd = "sudo timeout -s SIGTERM --foreground {}m".format(global_timeout) + cmd = "sudo timeout --kill-after={}s -s SIGTERM --foreground {}m".format(COMMAND_TIMEOUT, global_timeout) if allow_process_stop: cmd += " -a" @@ -1164,7 +1161,7 @@ def techsupport(since, global_timeout, cmd_timeout, verbose, allow_process_stop, cmd += " -s '{}'".format(since) if debug_dump: - cmd += " -d " + cmd += " -d" cmd += " -t {}".format(cmd_timeout) if redirect_stderr: diff --git a/tests/techsupport_test.py b/tests/techsupport_test.py new file mode 100644 index 0000000000..64bc133627 --- /dev/null +++ b/tests/techsupport_test.py @@ -0,0 +1,24 @@ +import pytest +import show.main +from unittest.mock import patch, Mock +from click.testing import CliRunner + +EXPECTED_BASE_COMMAND = 'sudo timeout --kill-after=300s -s SIGTERM --foreground ' + +@patch("show.main.run_command") +@pytest.mark.parametrize( + "cli_arguments,expected", + [ + ([], '30m generate_dump -v -t 5'), + (['--since', '2 days ago'], "30m generate_dump -v -s '2 days ago' -t 5"), + (['-g', '50'], '50m generate_dump -v -t 5'), + (['--allow-process-stop'], '30m -a generate_dump -v -t 5'), + (['--silent'], '30m generate_dump -t 5'), + (['--debug-dump', '--redirect-stderr'], '30m generate_dump -v -d -t 5 -r'), + ] +) +def test_techsupport(run_command, cli_arguments, expected): + runner = CliRunner() + result = runner.invoke(show.main.cli.commands['techsupport'], cli_arguments) + run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False) + From 3732ac5845780a7a19d6ff55c25f91853bf3a816 Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: 
Mon, 18 Apr 2022 21:31:44 +0800 Subject: [PATCH 04/20] Add CLI for route flow counter feature (#2031) HLD: https://github.com/Azure/SONiC/pull/908 Command reference : https://github.com/Azure/sonic-utilities/pull/2069 - What I did Add CLIs for route flow counter feature - How I did it Add show command show flowcnt-route config and command group show flowcnt-route stats Add config command group config flowcnt-route pattern Add clear command group sonic-clear flowcnt-route - How to verify it 1. Full unit test cover 2. Manual test 3. sonic-mgmt test cases --- clear/main.py | 49 ++ config/flow_counters.py | 158 +++++ config/main.py | 12 +- counterpoll/main.py | 39 ++ flow_counter_util/__init__.py | 0 flow_counter_util/route.py | 79 +++ scripts/flow_counters_stat | 318 +++++++++- setup.py | 1 + show/flow_counters.py | 71 +++ show/main.py | 1 + tests/counterpoll_input/config_db.json | 5 +- tests/counterpoll_test.py | 42 ++ tests/flow_counter_stats_test.py | 737 ++++++++++++++++++++++- tests/mock_tables/asic0/counters_db.json | 16 + tests/mock_tables/asic1/counters_db.json | 10 + tests/mock_tables/config_db.json | 4 + tests/mock_tables/counters_db.json | 28 + tests/mock_tables/state_db.json | 3 + utilities_common/cli.py | 39 +- 19 files changed, 1594 insertions(+), 18 deletions(-) create mode 100644 config/flow_counters.py create mode 100644 flow_counter_util/__init__.py create mode 100644 flow_counter_util/route.py diff --git a/clear/main.py b/clear/main.py index 1ad42ad786..6980bb8be1 100755 --- a/clear/main.py +++ b/clear/main.py @@ -4,7 +4,9 @@ import sys import click import utilities_common.cli as clicommon +import utilities_common.multi_asic as multi_asic_util +from flow_counter_util.route import exit_if_route_flow_counter_not_support from utilities_common import util_base from show.plugins.pbh import read_pbh_counters from config.plugins.pbh import serialize_pbh_counters @@ -484,6 +486,53 @@ def flowcnt_trap(): run_command(command) +# ("sonic-clear flowcnt-route") +@cli.group(invoke_without_command=True) +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') +@click.pass_context +def flowcnt_route(ctx, namespace): + """Clear all route flow counters""" + exit_if_route_flow_counter_not_support() + if ctx.invoked_subcommand is None: + command = "flow_counters_stat -c -t route" + # None namespace means default namespace + if namespace is not None: + command += " -n {}".format(namespace) + clicommon.run_command(command) + + +# ("sonic-clear flowcnt-route pattern") +@flowcnt_route.command() +@click.option('--vrf', help='VRF/VNET name or default VRF') +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') +@click.argument('prefix-pattern', required=True) +def pattern(prefix_pattern, vrf, namespace): + """Clear route flow counters by pattern""" + command = "flow_counters_stat -c -t route --prefix_pattern {}".format(prefix_pattern) + if vrf: + command += ' --vrf {}'.format(vrf) + # None namespace means default namespace + if namespace is not None: + command += " -n {}".format(namespace) + clicommon.run_command(command) + + +# ("sonic-clear flowcnt-route route") +@flowcnt_route.command() +@click.option('--vrf', help='VRF/VNET name or default VRF') +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), 
show_default=True, help='Namespace name or all') +@click.argument('prefix', required=True) +def route(prefix, vrf, namespace): + """Clear route flow counters by prefix""" + command = "flow_counters_stat -c -t route --prefix {}".format(prefix) + if vrf: + command += ' --vrf {}'.format(vrf) + # None namespace means default namespace + if namespace is not None: + command += " -n {}".format(namespace) + clicommon.run_command(command) + + # Load plugins and register them helper = util_base.UtilHelper() helper.load_and_register_plugins(plugins, cli) diff --git a/config/flow_counters.py b/config/flow_counters.py new file mode 100644 index 0000000000..51ef547434 --- /dev/null +++ b/config/flow_counters.py @@ -0,0 +1,158 @@ +import click +import ipaddress + +from flow_counter_util.route import FLOW_COUNTER_ROUTE_PATTERN_TABLE, FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD, DEFAULT_VRF, PATTERN_SEPARATOR +from flow_counter_util.route import build_route_pattern, extract_route_pattern, exit_if_route_flow_counter_not_support +from utilities_common.cli import AbbreviationGroup, pass_db +from utilities_common import cli # To make mock work in unit test + +# +# 'flowcnt-route' group ('config flowcnt-route ...') +# + + +@click.group(cls=AbbreviationGroup, invoke_without_command=False) +def flowcnt_route(): + """Route flow counter related configuration tasks""" + pass + + +@flowcnt_route.group() +def pattern(): + """Set pattern for route flow counter""" + pass + + +@pattern.command(name='add') +@click.option('-y', '--yes', is_flag=True) +@click.option('--vrf', help='VRF/VNET name or default VRF') +@click.option('--max', 'max_allowed_match', type=click.IntRange(1, 50), default=30, show_default=True, help='Max allowed match count') +@click.argument('prefix-pattern', required=True) +@pass_db +def pattern_add(db, yes, vrf, max_allowed_match, prefix_pattern): + """Add pattern for route flow counter""" + _update_route_flow_counter_config(db, vrf, max_allowed_match, prefix_pattern, True, yes) + + +@pattern.command(name='remove') +@click.option('--vrf', help='VRF/VNET name or default VRF') +@click.argument('prefix-pattern', required=True) +@pass_db +def pattern_remove(db, vrf, prefix_pattern): + """Remove pattern for route flow counter""" + _update_route_flow_counter_config(db, vrf, None, prefix_pattern, False) + + +def _update_route_flow_counter_config(db, vrf, max_allowed_match, prefix_pattern, add, yes=False): + """ + Update route flow counter config + :param db: db object + :param vrf: vrf string, empty vrf will be treated as default vrf + :param max_allowed_match: max allowed match count, $FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD will be used if not specified + :param prefix_pattern: route prefix pattern, automatically add prefix length if not specified + :param add: True to add/set the configuration, otherwise remove + :param yes: Don't ask question if True + :return: + """ + exit_if_route_flow_counter_not_support() + + if add: + try: + net = ipaddress.ip_network(prefix_pattern, strict=False) + except ValueError as e: + click.echo('Invalid prefix pattern: {}'.format(prefix_pattern)) + exit(1) + + if '/' not in prefix_pattern: + prefix_pattern += '/' + str(net.prefixlen) + + key = build_route_pattern(vrf, prefix_pattern) + for _, cfgdb in db.cfgdb_clients.items(): + if _try_find_existing_pattern_by_ip_type(cfgdb, net, key, yes): + entry_data = cfgdb.get_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, key) + old_max_allowed_match = entry_data.get(FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD) + if old_max_allowed_match is not None and 
int(old_max_allowed_match) == max_allowed_match: + click.echo('The route pattern already exists, nothing to be changed') + exit(1) + cfgdb.mod_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, + key, + {FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD: str(max_allowed_match)}) + else: + found = False + key = build_route_pattern(vrf, prefix_pattern) + for _, cfgdb in db.cfgdb_clients.items(): + pattern_table = cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + + for existing_key in pattern_table: + exist_vrf, existing_prefix = extract_route_pattern(existing_key) + if (exist_vrf == vrf or (vrf is None and exist_vrf == DEFAULT_VRF)) and existing_prefix == prefix_pattern: + found = True + cfgdb.set_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, key, None) + if not found: + click.echo("Failed to remove route pattern: {} does not exist".format(key)) + exit(1) + + +def _try_find_existing_pattern_by_ip_type(cfgdb, input_net, input_key, yes): + """Try to find a pattern of the same IP type in CONFIG DB. + 1. If a pattern with the same IP type is found but the patterns are not equal, ask the user whether to replace the old one with the new one + a. If the user types "yes", remove the old one, return False + b. If the user types "no", exit + 2. If a pattern with the same IP type is found and the patterns are equal, return True + 3. If no pattern with the same IP type is found, return False + + Args: + cfgdb (object): CONFIG DB object + input_net (object): Input ip_network object + input_key (str): Input key + yes (bool): If True, do not ask the user and assume the answer is yes + + Returns: + bool: True if the same pattern is found in CONFIG DB + """ + input_type = type(input_net) # IPv4 or IPv6 + found_invalid = [] + found = None + pattern_table = cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + for existing_key in pattern_table: + if isinstance(existing_key, tuple): + existing_prefix = existing_key[1] + existing_key = PATTERN_SEPARATOR.join(existing_key) + else: + _, existing_prefix = extract_route_pattern(existing_key) + + # In case user configures an invalid pattern via CONFIG DB. + if not existing_prefix: # Invalid pattern such as: "vrf1|" + click.echo('Detect invalid route pattern in existing configuration {}'.format(existing_key)) + found_invalid.append(existing_key) + continue + + try: + existing_net = ipaddress.ip_network(existing_prefix, strict=False) + except ValueError as e: # Invalid pattern such as: "vrf1|invalid" + click.echo('Detect invalid route pattern in existing configuration {}'.format(existing_key)) + found_invalid.append(existing_key) + continue + + if type(existing_net) == input_type: + found = existing_key + break + + if found == input_key: + return True + + if not found and found_invalid: + # If not found but there is an invalid one, ask user to replace the invalid one + found = found_invalid[0] + + if found: + if not yes: + answer = cli.query_yes_no('Only support 1 IPv4 route pattern and 1 IPv6 route pattern, remove existing pattern {}?'.format(found)) + else: + answer = True + if answer: + click.echo('Replacing existing route pattern {} with {}'.format(existing_key, input_key)) + cfgdb.set_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, existing_key, None) + else: + exit(0) + return False diff --git a/config/main.py b/config/main.py index c3415fbd11..197fb33662 100644 --- a/config/main.py +++ b/config/main.py @@ -35,6 +35,7 @@ from . import chassis_modules from . import console from . import feature +from . import flow_counters from . import kdump from . import kube from .
import muxcable @@ -789,7 +790,7 @@ def _per_namespace_swss_ready(service_name): return False def _swss_ready(): - list_of_swss = [] + list_of_swss = [] num_asics = multi_asic.get_num_asics() if num_asics == 1: list_of_swss.append("swss.service") @@ -802,7 +803,7 @@ def _swss_ready(): if _per_namespace_swss_ready(service_name) == False: return False - return True + return True def _is_system_starting(): out = clicommon.run_command("sudo systemctl is-system-running", return_cmd=True) @@ -1076,6 +1077,7 @@ def config(ctx): config.add_command(chassis_modules.chassis) config.add_command(console.console) config.add_command(feature.feature) +config.add_command(flow_counters.flowcnt_route) config.add_command(kdump.kdump) config.add_command(kube.kubernetes) config.add_command(muxcable.muxcable) @@ -1482,10 +1484,10 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, disable_arp_cach config_gen_opts = "" - + if os.path.isfile(INIT_CFG_FILE): config_gen_opts += " -j {} ".format(INIT_CFG_FILE) - + if file_format == 'config_db': config_gen_opts += ' -j {} '.format(file) else: @@ -6239,7 +6241,7 @@ def del_subinterface(ctx, subinterface_name): sub_intfs = [k for k,v in subintf_config_db.items() if type(k) != tuple] if subinterface_name not in sub_intfs: ctx.fail("{} does not exists".format(subinterface_name)) - + ips = {} ips = [ k[1] for k in config_db.get_table('VLAN_SUB_INTERFACE') if type(k) == tuple and k[0] == subinterface_name ] for ip in ips: diff --git a/counterpoll/main.py b/counterpoll/main.py index e23f4b9c59..f3befe1311 100644 --- a/counterpoll/main.py +++ b/counterpoll/main.py @@ -1,5 +1,6 @@ import click import json +from flow_counter_util.route import exit_if_route_flow_counter_not_support from swsscommon.swsscommon import ConfigDBConnector from tabulate import tabulate @@ -347,6 +348,40 @@ def disable(ctx): fc_info['FLEX_COUNTER_STATUS'] = 'disable' ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_TRAP", fc_info) +# Route flow counter commands +@cli.group() +@click.pass_context +def flowcnt_route(ctx): + """ Route flow counter commands """ + exit_if_route_flow_counter_not_support() + ctx.obj = ConfigDBConnector() + ctx.obj.connect() + +@flowcnt_route.command() +@click.argument('poll_interval', type=click.IntRange(1000, 30000)) +@click.pass_context +def interval(ctx, poll_interval): + """ Set route flow counter query interval """ + fc_info = {} + fc_info['POLL_INTERVAL'] = poll_interval + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_ROUTE", fc_info) + +@flowcnt_route.command() +@click.pass_context +def enable(ctx): + """ Enable route flow counter query """ + fc_info = {} + fc_info['FLEX_COUNTER_STATUS'] = 'enable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_ROUTE", fc_info) + +@flowcnt_route.command() +@click.pass_context +def disable(ctx): + """ Disable route flow counter query """ + fc_info = {} + fc_info['FLEX_COUNTER_STATUS'] = 'disable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_ROUTE", fc_info) + @cli.command() def show(): """ Show the counter configuration """ @@ -363,6 +398,7 @@ def show(): acl_info = configdb.get_entry('FLEX_COUNTER_TABLE', ACL) tunnel_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL') trap_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_TRAP') + route_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_ROUTE') header = ("Type", "Interval (in ms)", "Status") data = [] @@ -388,6 +424,9 @@ def show(): data.append(["TUNNEL_STAT", rif_info.get("POLL_INTERVAL", DEFLT_10_SEC), 
rif_info.get("FLEX_COUNTER_STATUS", DISABLE)]) if trap_info: data.append(["FLOW_CNT_TRAP_STAT", trap_info.get("POLL_INTERVAL", DEFLT_10_SEC), trap_info.get("FLEX_COUNTER_STATUS", DISABLE)]) + if route_info: + data.append(["FLOW_CNT_ROUTE_STAT", route_info.get("POLL_INTERVAL", DEFLT_10_SEC), + route_info.get("FLEX_COUNTER_STATUS", DISABLE)]) click.echo(tabulate(data, headers=header, tablefmt="simple", missingval="")) diff --git a/flow_counter_util/__init__.py b/flow_counter_util/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/flow_counter_util/route.py b/flow_counter_util/route.py new file mode 100644 index 0000000000..a905b646e7 --- /dev/null +++ b/flow_counter_util/route.py @@ -0,0 +1,79 @@ +import os +import sys +from swsscommon.swsscommon import SonicV2Connector + +try: + if os.environ["UTILITIES_UNIT_TESTING"] == "1" or os.environ["UTILITIES_UNIT_TESTING"] == "2": + modules_path = os.path.join(os.path.dirname(__file__), "..") + test_path = os.path.join(modules_path, "tests") + sys.path.insert(0, modules_path) + sys.path.insert(0, test_path) + import mock_tables.dbconnector # lgtm[py/unused-import] +except KeyError: + pass + + +COUNTERS_ROUTE_TO_PATTERN_MAP = 'COUNTERS_ROUTE_TO_PATTERN_MAP' +FLOW_COUNTER_CAPABILITY_TABLE = 'FLOW_COUNTER_CAPABILITY_TABLE' +FLOW_COUNTER_CAPABILITY_KEY = 'route' +FLOW_COUNTER_CAPABILITY_SUPPORT_FIELD = 'support' +FLOW_COUNTER_ROUTE_PATTERN_TABLE = 'FLOW_COUNTER_ROUTE_PATTERN' +FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD = 'max_match_count' +FLOW_COUNTER_ROUTE_CONFIG_HEADER = ['Route pattern', 'VRF', 'Max'] +DEFAULT_MAX_MATCH = 30 +DEFAULT_VRF = 'default' +PATTERN_SEPARATOR = '|' + + +def extract_route_pattern(route_pattern): + """Extract vrf and prefix from a route pattern. A route pattern shall be formatted like "Vrf_1|1.1.1.1/24" + or "1.1.1.1/24" + + Args: + route_pattern (str): route pattern string
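+ + Example (editor's illustration, matching the parsing logic below): "Vrf_1|1.1.1.1/24" yields ("Vrf_1", "1.1.1.1/24"), a bare "1.1.1.1/24" yields (DEFAULT_VRF, "1.1.1.1/24"), and a string with more than one separator yields (None, None).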
+ + Returns: + [tuple]: vrf and prefix + """ + if isinstance(route_pattern, tuple): + return route_pattern + items = route_pattern.split(PATTERN_SEPARATOR) + if len(items) == 1: + return DEFAULT_VRF, items[0] + elif len(items) == 2: + return items[0], items[1] + else: + return None, None + + +def build_route_pattern(vrf, prefix): + if vrf and vrf != 'default': + return '{}{}{}'.format(vrf, PATTERN_SEPARATOR, prefix) + else: + return prefix + + +def get_route_flow_counter_capability(): + state_db = SonicV2Connector(host="127.0.0.1") + state_db.connect(state_db.STATE_DB) + + return state_db.get_all(state_db.STATE_DB, '{}|{}'.format(FLOW_COUNTER_CAPABILITY_TABLE, FLOW_COUNTER_CAPABILITY_KEY)) + + +def exit_if_route_flow_counter_not_support(): + capabilities = get_route_flow_counter_capability() + if not capabilities: + print('Waiting for swss to initialize route flow counter capability, please try again later') + exit(1) + + support = capabilities.get(FLOW_COUNTER_CAPABILITY_SUPPORT_FIELD) + if support is None: + print('Waiting for swss to initialize route flow counter capability, please try again later') + exit(1) + + if support != 'true': + print('Route flow counter is not supported on this platform') + exit(1) + + return diff --git a/scripts/flow_counters_stat b/scripts/flow_counters_stat index 8901d92f66..61c754e333 100755 --- a/scripts/flow_counters_stat +++ b/scripts/flow_counters_stat @@ -24,13 +24,19 @@ except KeyError: pass import utilities_common.multi_asic as multi_asic_util +from flow_counter_util.route import build_route_pattern, extract_route_pattern, exit_if_route_flow_counter_not_support, DEFAULT_VRF, COUNTERS_ROUTE_TO_PATTERN_MAP +from utilities_common import constants from utilities_common.netstat import format_number_with_comma, table_as_json, ns_diff, format_prate # Flow counter meta data, new type of flow counters can extend this dictinary to reuse existing logic flow_counter_meta = { 'trap': { 'headers': ['Trap Name', 'Packets', 'Bytes', 'PPS'], - 'name_map': 'COUNTERS_TRAP_NAME_MAP', + 'name_map': 'COUNTERS_TRAP_NAME_MAP' + }, + 'route': { + 'headers': ['Route pattern', 'VRF', 'Matched routes', 'Packets', 'Bytes'], + 'name_map': 'COUNTERS_ROUTE_NAME_MAP' } } flow_counters_fields = ['SAI_COUNTER_STAT_PACKETS', 'SAI_COUNTER_STAT_BYTES'] @@ -41,7 +47,6 @@ diff_column_positions = set([0, 1]) FLOW_COUNTER_TABLE_PREFIX = "COUNTERS:" RATES_TABLE_PREFIX = 'RATES:' PPS_FIELD = 'RX_PPS' -STATUS_NA = 'N/A' class FlowCounterStats(object): @@ -150,7 +155,7 @@ class FlowCounterStats(object): full_table_id = RATES_TABLE_PREFIX + counter_oid counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, PPS_FIELD) - values.append(STATUS_NA if counter_data is None else counter_data) + values.append('0' if counter_data is None else counter_data) values.append(counter_oid) data[ns][name] = values return data @@ -168,7 +173,7 @@ class FlowCounterStats(object): full_table_id = FLOW_COUNTER_TABLE_PREFIX + counter_oid for field in flow_counters_fields: counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, field) - values.append(STATUS_NA if counter_data is None else counter_data) + values.append('0' if counter_data is None else counter_data) return values def _save(self, data): @@ -255,6 +260,295 @@ class FlowCounterStats(object): return need_update_cache +class RouteFlowCounterStats(FlowCounterStats): + SHOW_BY_PREFIX_HEADERS = ['Route', 'VRF', 'Route pattern', 'Packets', 'Bytes'] + + def __init__(self, args): + super(RouteFlowCounterStats,self).__init__(args) + + def 
_print_data(self, headers, table): + """Print statistic data based on output format + + Args: + headers (list): Table headers + table (list): Table data + """ + if self.args.json: + # The first column of the table might have duplicate value, have to + # add an extra index field to make table_as_json work + print(table_as_json(([i] + line for i, line in enumerate(table)), ['Index'] + headers)) + else: + print(tabulate(table, headers, tablefmt='simple', stralign='right')) + + def _prepare_show_data(self): + """Prepare table headers and table data for output. If "--prefix" is specified, fetch data that matches + the given prefix; if "--prefix_pattern" is specified, fetch data that matches the given pattern; + otherwise, fetch all data. + Returns: + headers (list): Table headers + table (list): Table data + """ + if self.args.prefix: + return self._prepare_show_data_by_prefix() + else: + return self._prepare_show_data_by_pattern() + + def _prepare_show_data_by_prefix(self): + """Prepare table header and table data by given prefix + Returns: + headers (list): Table headers + table (list): Table data + """ + table = [] + + headers = self._adjust_headers(self.SHOW_BY_PREFIX_HEADERS) + for ns, pattern_entry in self.data.items(): + if self.args.namespace is not None and self.args.namespace != ns: + continue + for route_pattern, prefix_entry in pattern_entry.items(): + if self.args.prefix in prefix_entry: + vrf, prefix_pattern = extract_route_pattern(route_pattern) + if vrf != self.args.vrf: + continue + + values = prefix_entry[self.args.prefix] + if self.multi_asic.is_multi_asic: + row = [ns] + else: + row = [] + row.extend([self.args.prefix, + self.args.vrf, + prefix_pattern, + format_number_with_comma(values[0]), + format_number_with_comma(values[1])]) + table.append(row) + + return headers, table + + def _prepare_show_data_by_pattern(self): + """Prepare table header and table data by given pattern. If pattern is not specified, show all data. 
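+ + Example (editor's illustration): for prefix_pattern "1.1.1.0/24" with the default VRF, only entries stored under that route pattern are shown; otherwise every stored route pattern is rendered, with the pattern and VRF cells printed once per group (or on every row in JSON mode).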
+ + Returns: + headers (list): Table headers + table (list): Table data + """ + table = [] + + headers = self._adjust_headers(self.headers) + for ns, pattern_entries in natsorted(self.data.items()): + if self.args.namespace is not None and self.args.namespace != ns: + continue + if self.args.prefix_pattern: + route_pattern = build_route_pattern(self.args.vrf, self.args.prefix_pattern) + if route_pattern in pattern_entries: + self._fill_table_for_prefix_pattern(table, ns, pattern_entries[route_pattern], self.args.prefix_pattern, self.args.vrf) + break + else: + for route_pattern, prefix_entries in natsorted(pattern_entries.items()): + vrf, prefix_pattern = extract_route_pattern(route_pattern) + self._fill_table_for_prefix_pattern(table, ns, prefix_entries, prefix_pattern, vrf) + + return headers, table + + def _fill_table_for_prefix_pattern(self, table, ns, prefix_entries, prefix_pattern, vrf): + """Fill table data for prefix pattern + Args: + table (list): Table data to fill + ns (str): Namespace + prefix_entries (dict): Prefix to value map + prefix_pattern (str): Prefix pattern + vrf (str): VRF + """ + is_first_row = True + for prefix, values in natsorted(prefix_entries.items()): + if self.multi_asic.is_multi_asic: + row = [ns if is_first_row or self.args.json else ''] + else: + row = [] + row.extend([prefix_pattern if is_first_row or self.args.json else '', + vrf if is_first_row or self.args.json else '', + prefix, + format_number_with_comma(values[0]), + format_number_with_comma(values[1])]) + table.append(row) + if is_first_row: + is_first_row = False + + def clear(self): + """Clear statistics based on arguments. + 1. If "--prefix" is specified, clear the data matching the prefix + 2. If "--prefix_pattern" is specified, clear the data matching the pattern + 3. Otherwise, clear all data + If "--namespace" is not specified, clear the data belonging to the default namespace. + If "--vrf" is not specified, use default VRF.
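+ + Example (editor's illustration, using the flags defined in main() below): "flow_counters_stat -c -t route --prefix 1.1.1.1/24 --vrf Vrf_1" clears only the counter bound to that prefix in VRF Vrf_1, while a plain "flow_counters_stat -c -t route" clears all route flow counters.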
+ """ + if self.args.prefix: + self.clear_by_prefix() + elif self.args.prefix_pattern: + self.clear_by_pattern() + else: + super(RouteFlowCounterStats, self).clear() + + @multi_asic_util.run_on_multi_asic + def clear_by_prefix(self): + """Clear the data matching the prefix + """ + ns = constants.DEFAULT_NAMESPACE if self.args.namespace is None else self.args.namespace + if ns != self.multi_asic.current_namespace: + return + + name_map = self.db.get_all(self.db.COUNTERS_DB, self.name_map) + prefix_vrf = build_route_pattern(self.args.vrf, self.args.prefix) + if not name_map or prefix_vrf not in name_map: + print('Cannot find {} in COUNTERS_DB {} table'.format(self.args.prefix, self.name_map)) + return + + route_to_pattern_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_ROUTE_TO_PATTERN_MAP) + if not route_to_pattern_map or prefix_vrf not in route_to_pattern_map: + print('Cannot find {} in {} table'.format(self.args.prefix, COUNTERS_ROUTE_TO_PATTERN_MAP)) + return + + self.data = self._load() + if not self.data: + self.data = {} + + if ns not in self.data: + self.data[ns] = {} + + route_pattern = route_to_pattern_map[prefix_vrf] + if route_pattern not in self.data[ns]: + self.data[ns][route_pattern] = {} + + counter_oid = name_map[prefix_vrf] + values = self._get_stats_value(counter_oid) + values.append(counter_oid) + self.data[ns][route_pattern][self.args.prefix] = values + self._save(self.data) + print('Flow Counters of the specified route were successfully cleared') + + @multi_asic_util.run_on_multi_asic + def clear_by_pattern(self): + """Clear the data matching the specified pattern + """ + ns = constants.DEFAULT_NAMESPACE if self.args.namespace is None else self.args.namespace + if ns != self.multi_asic.current_namespace: + return + + route_to_pattern_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_ROUTE_TO_PATTERN_MAP) + expect_route_pattern = build_route_pattern(self.args.vrf, self.args.prefix_pattern) + matching_prefix_vrf_list = [prefix_vrf for prefix_vrf, route_pattern in route_to_pattern_map.items() if route_pattern == expect_route_pattern] + if not matching_prefix_vrf_list: + print('Cannot find {} in COUNTERS_DB {} table'.format(self.args.prefix_pattern, COUNTERS_ROUTE_TO_PATTERN_MAP)) + return + + data_to_update = {} + name_map = self.db.get_all(self.db.COUNTERS_DB, self.name_map) + for prefix_vrf in matching_prefix_vrf_list: + if prefix_vrf not in name_map: + print('Warning: cannot find {} in {}'.format(prefix_vrf, self.name_map)) + continue + + counter_oid = name_map[prefix_vrf] + values = self._get_stats_value(counter_oid) + values.append(counter_oid) + _, prefix = extract_route_pattern(prefix_vrf) + data_to_update[prefix] = values + + self.data = self._load() + if not self.data: + self.data = {} + + if not self.data or ns not in self.data: + self.data[ns] = {} + + if expect_route_pattern not in self.data[ns]: + self.data[ns][expect_route_pattern] = {} + + self.data[ns][expect_route_pattern].update(data_to_update) + self._save(self.data) + + def _get_stats_from_db(self): + """Get flow counter statistic from DB. + Returns: + dict: A dictionary. 
E.g: {<namespace>: {<route_pattern>: {<prefix>: [<packets>, <bytes>, <counter_oid>]}}} + """ + ns = self.multi_asic.current_namespace + name_map = self.db.get_all(self.db.COUNTERS_DB, self.name_map) + data = {ns: {}} + if not name_map: + return data + + route_to_pattern_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_ROUTE_TO_PATTERN_MAP) + if not route_to_pattern_map: + return data + + for prefix_vrf, route_pattern in route_to_pattern_map.items(): + if route_pattern not in data[ns]: + data[ns][route_pattern] = {} + + counter_oid = name_map[prefix_vrf] + values = self._get_stats_value(counter_oid) + values.append(counter_oid) + _, prefix = extract_route_pattern(prefix_vrf) + data[ns][route_pattern][prefix] = values + + return data + + def _diff(self, old_data, new_data): + """Do a diff between new data and old data. + Args: + old_data (dict): E.g: {<namespace>: {<route_pattern>: {<prefix>: [<packets>, <bytes>, <counter_oid>]}}} + new_data (dict): E.g: {<namespace>: {<route_pattern>: {<prefix>: [<packets>, <bytes>, <counter_oid>]}}} + """ + need_update_cache = False + if not old_data: + return need_update_cache + + for ns, stats in new_data.items(): + if ns not in old_data: + continue + + old_stats = old_data[ns] + for route_pattern, prefix_entries in stats.items(): + if route_pattern not in old_stats: + continue + + old_prefix_entries = old_stats[route_pattern] + for name, values in prefix_entries.items(): + if name not in old_prefix_entries: + continue + + old_values = old_prefix_entries[name] + if values[-1] != old_values[-1]: + # Counter OID not equal means the generic counter was removed and added again. Removing a generic counter would cause + # the stats value to restart from 0. To avoid getting a negative value here, it should not do a diff in case the + # counter OID is changed. + old_values[-1] = values[-1] + for i in diff_column_positions: + old_values[i] = '0' + values[i] = ns_diff(values[i], old_values[i]) + need_update_cache = True + continue + + has_negative_diff = False + for i in diff_column_positions: + # If any diff has negative value, set all counter values to 0 and update cache + if int(values[i]) < int(old_values[i]): + has_negative_diff = True + break + + if has_negative_diff: + for i in diff_column_positions: + old_values[i] = '0' + values[i] = ns_diff(values[i], old_values[i]) + need_update_cache = True + continue + + for i in diff_column_positions: + values[i] = ns_diff(values[i], old_values[i]) + + return need_update_cache + + def main(): parser = argparse.ArgumentParser(description='Display the flow counters', formatter_class=argparse.RawTextHelpFormatter, @@ -268,11 +562,23 @@ Examples: parser.add_argument('-d', '--delete', action='store_true', help='Delete saved stats') parser.add_argument('-j', '--json', action='store_true', help='Display in JSON format') parser.add_argument('-n','--namespace', default=None, help='Display flow counters for specific namespace') - parser.add_argument('-t', '--type', required=True, choices=['trap'],help='Flow counters type') + parser.add_argument('-t', '--type', required=True, choices=['trap', 'route'],help='Flow counters type') + group = parser.add_mutually_exclusive_group() + group.add_argument('--prefix_pattern', help='Prefix pattern') # for route flow counter only, ignored by other type + group.add_argument('--prefix', help='Prefix') # for route flow counter only, ignored by other type + parser.add_argument('--vrf', help='VRF name', default=DEFAULT_VRF) # for route flow counter only, ignored by other type args = parser.parse_args() - stats = FlowCounterStats(args) + if args.type == 'trap': + stats = FlowCounterStats(args) + elif args.type == 'route': + exit_if_route_flow_counter_not_support() + stats =
RouteFlowCounterStats(args) + else: + print('Invalid flow counter type: {}'.format(args.type)) + exit(1) + if args.clear: stats.clear() else: diff --git a/setup.py b/setup.py index a8bf39170c..7ee1eb8574 100644 --- a/setup.py +++ b/setup.py @@ -40,6 +40,7 @@ 'ssdutil', 'pfc', 'psuutil', + 'flow_counter_util', 'fdbutil', 'fwutil', 'pcieutil', diff --git a/show/flow_counters.py b/show/flow_counters.py index 9870c83080..0767a2a9a5 100644 --- a/show/flow_counters.py +++ b/show/flow_counters.py @@ -2,6 +2,11 @@ import utilities_common.cli as clicommon import utilities_common.multi_asic as multi_asic_util +from tabulate import tabulate + +from flow_counter_util.route import FLOW_COUNTER_ROUTE_PATTERN_TABLE, FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD, FLOW_COUNTER_ROUTE_CONFIG_HEADER, DEFAULT_MAX_MATCH +from flow_counter_util.route import extract_route_pattern, exit_if_route_flow_counter_not_support + # # 'flowcnt-trap' group ### # @@ -20,3 +25,69 @@ def stats(verbose, namespace): if namespace is not None: cmd += " -n {}".format(namespace) clicommon.run_command(cmd, display_cmd=verbose) + +# +# 'flowcnt-route' group ### +# + +@click.group(cls=clicommon.AliasedGroup) +def flowcnt_route(): + """Show route flow counter related information""" + exit_if_route_flow_counter_not_support() + + +@flowcnt_route.command() +@clicommon.pass_db +def config(db): + """Show route flow counter configuration""" + route_pattern_table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + data = [] + for key, entry in route_pattern_table.items(): + vrf, prefix = extract_route_pattern(key) + max = entry.get(FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD, str(DEFAULT_MAX_MATCH)) + data.append([prefix, vrf, max]) + + click.echo(tabulate(data, headers=FLOW_COUNTER_ROUTE_CONFIG_HEADER, tablefmt="simple", missingval="")) + + +@flowcnt_route.group(invoke_without_command=True) +@click.option('--verbose', is_flag=True, help="Enable verbose output") +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') +@click.pass_context +def stats(ctx, verbose, namespace): + """Show statistics of all route flow counters""" + if ctx.invoked_subcommand is None: + command = "flow_counters_stat -t route" + if namespace is not None: + command += " -n {}".format(namespace) + clicommon.run_command(command, display_cmd=verbose) + + +@stats.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +@click.option('--vrf', help='VRF/VNET name or default VRF') +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') +@click.argument('prefix-pattern', required=True) +def pattern(prefix_pattern, vrf, verbose, namespace): + """Show statistics of route flow counters by pattern""" + command = "flow_counters_stat -t route --prefix_pattern \"{}\"".format(prefix_pattern) + if vrf: + command += ' --vrf {}'.format(vrf) + if namespace is not None: + command += " -n {}".format(namespace) + clicommon.run_command(command, display_cmd=verbose) + + +@stats.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +@click.option('--vrf', help='VRF/VNET name or default VRF') +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') +@click.argument('prefix', required=True) +def route(prefix, vrf, 
verbose, namespace): + """Show statistics of route flow counters by prefix""" + command = "flow_counters_stat -t route --prefix {}".format(prefix) + if vrf: + command += ' --vrf {}'.format(vrf) + if namespace is not None: + command += " -n {}".format(namespace) + clicommon.run_command(command, display_cmd=verbose) diff --git a/show/main.py b/show/main.py index 3f3e367463..01fd592c7a 100755 --- a/show/main.py +++ b/show/main.py @@ -177,6 +177,7 @@ def cli(ctx): cli.add_command(dropcounters.dropcounters) cli.add_command(feature.feature) cli.add_command(fgnhg.fgnhg) +cli.add_command(flow_counters.flowcnt_route) cli.add_command(flow_counters.flowcnt_trap) cli.add_command(kdump.kdump) cli.add_command(interfaces.interfaces) diff --git a/tests/counterpoll_input/config_db.json b/tests/counterpoll_input/config_db.json index 61ceb071c2..40ff750db6 100644 --- a/tests/counterpoll_input/config_db.json +++ b/tests/counterpoll_input/config_db.json @@ -787,6 +787,9 @@ }, "FLOW_CNT_TRAP": { "FLEX_COUNTER_STATUS": "enable" + }, + "FLOW_CNT_ROUTE": { + "FLEX_COUNTER_STATUS": "enable" } }, "PORT": { @@ -2666,4 +2669,4 @@ "size": "56368" } } -} +} \ No newline at end of file diff --git a/tests/counterpoll_test.py b/tests/counterpoll_test.py index 7f5251e998..7a8171825a 100644 --- a/tests/counterpoll_test.py +++ b/tests/counterpoll_test.py @@ -27,6 +27,7 @@ PG_DROP_STAT 10000 enable ACL 10000 enable FLOW_CNT_TRAP_STAT 10000 enable +FLOW_CNT_ROUTE_STAT 10000 enable """ class TestCounterpoll(object): @@ -155,6 +156,18 @@ def test_update_trap_counter_status(self, status): table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') assert status == table["FLOW_CNT_TRAP"]["FLEX_COUNTER_STATUS"] + @pytest.mark.parametrize("status", ["disable", "enable"]) + def test_update_route_flow_counter_status(self, status): + runner = CliRunner() + db = Db() + + result = runner.invoke(counterpoll.cli.commands["flowcnt-route"].commands[status], [], obj=db.cfgdb) + print(result.exit_code, result.output) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert status == table["FLOW_CNT_ROUTE"]["FLEX_COUNTER_STATUS"] + def test_update_trap_counter_interval(self): runner = CliRunner() db = Db() @@ -179,6 +192,35 @@ def test_update_trap_counter_interval(self): assert result.exit_code == 2 assert expected in result.output + def test_update_route_counter_interval(self): + runner = CliRunner() + db = Db() + test_interval = "20000" + + result = runner.invoke(counterpoll.cli.commands["flowcnt-route"].commands["interval"], [test_interval], + obj=db.cfgdb) + print(result.exit_code, result.output) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert test_interval == table["FLOW_CNT_ROUTE"]["POLL_INTERVAL"] + + test_interval = "500" + result = runner.invoke(counterpoll.cli.commands["flowcnt-route"].commands["interval"], [test_interval], + obj=db.cfgdb) + expected = "Invalid value for \"POLL_INTERVAL\": 500 is not in the valid range of 1000 to 30000." + assert result.exit_code == 2 + assert expected in result.output + + test_interval = "40000" + result = runner.invoke(counterpoll.cli.commands["flowcnt-route"].commands["interval"], [test_interval], + obj=db.cfgdb) + + expected = "Invalid value for \"POLL_INTERVAL\": 40000 is not in the valid range of 1000 to 30000." 
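+        # 40000 is above the allowed maximum (30000), so the command must fail with exit code 2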
+ assert result.exit_code == 2 + assert expected in result.output + + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/flow_counter_stats_test.py b/tests/flow_counter_stats_test.py index 807ed61223..dc5bb22dee 100644 --- a/tests/flow_counter_stats_test.py +++ b/tests/flow_counter_stats_test.py @@ -7,8 +7,11 @@ import show.main as show import clear.main as clear +import config.main as config from .utils import get_result_and_return_code +from flow_counter_util.route import FLOW_COUNTER_ROUTE_PATTERN_TABLE, FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD +from utilities_common.db import Db from utilities_common.general import load_module_from_source test_path = os.path.dirname(os.path.abspath(__file__)) @@ -72,9 +75,202 @@ asic1 dhcp 0 0 45.25/s """ +expect_show_route_pattern = """\ +Route pattern VRF Max +--------------- ------- ----- +1.1.0.0/24 Vrf1 30 +2000::1/64 default 30 +""" + +expect_show_all_route_stats = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ------- ---------------- --------- ------- + 1.1.1.0/24 default 1.1.1.1/31 100 2,000 + 1.1.1.2/31 1,000 2,000 + 2001::/64 default 2001::1/64 50 1,000 + 2001::/64 Vrf_1 2001::1/64 1,000 25,000 +""" + +expect_show_route_stats_by_pattern_v4 = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ------- ---------------- --------- ------- + 1.1.1.0/24 default 1.1.1.1/31 100 2,000 + 1.1.1.2/31 1,000 2,000 +""" + +expect_show_route_stats_by_pattern_v6 = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ------- ---------------- --------- ------- + 2001::/64 default 2001::1/64 50 1,000 +""" + +expect_show_route_stats_by_pattern_and_vrf_v6 = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ----- ---------------- --------- ------- + 2001::/64 Vrf_1 2001::1/64 1,000 25,000 +""" + +expect_show_route_stats_by_pattern_empty = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ----- ---------------- --------- ------- +""" + +expect_show_route_stats_by_route_v4 = """\ + Route VRF Route pattern Packets Bytes +---------- ------- --------------- --------- ------- +1.1.1.1/31 default 1.1.1.0/24 100 2,000 +""" + +expect_show_route_stats_by_route_v6 = """\ + Route VRF Route pattern Packets Bytes +---------- ------- --------------- --------- ------- +2001::1/64 default 2001::/64 50 1,000 +""" + +expect_show_route_stats_by_route_and_vrf_v6 = """\ + Route VRF Route pattern Packets Bytes +---------- ----- --------------- --------- ------- +2001::1/64 Vrf_1 2001::/64 1,000 25,000 +""" + +expect_after_clear_route_stats_all = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ------- ---------------- --------- ------- + 1.1.1.0/24 default 1.1.1.1/31 0 0 + 1.1.1.2/31 0 0 + 2001::/64 default 2001::1/64 0 0 + 2001::/64 Vrf_1 2001::1/64 0 0 +""" + +expect_after_clear_route_stats_by_pattern_v4 = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ------- ---------------- --------- ------- + 1.1.1.0/24 default 1.1.1.1/31 0 0 + 1.1.1.2/31 0 0 + 2001::/64 default 2001::1/64 50 1,000 + 2001::/64 Vrf_1 2001::1/64 1,000 25,000 +""" + +expect_after_clear_route_stats_by_pattern_v6 = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ------- ---------------- --------- ------- + 1.1.1.0/24 default 1.1.1.1/31 0 0 + 1.1.1.2/31 0 0 + 2001::/64 default 2001::1/64 0 0 + 2001::/64 Vrf_1 2001::1/64 1,000 25,000 +""" + +expect_show_route_stats_all_json = """\ +{ + "0": { + "Bytes": "2,000", + "Matched routes": 
"1.1.1.1/31", + "Packets": "100", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + }, + "1": { + "Bytes": "2,000", + "Matched routes": "1.1.1.2/31", + "Packets": "1,000", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + }, + "2": { + "Bytes": "1,000", + "Matched routes": "2001::1/64", + "Packets": "50", + "Route pattern": "2001::/64", + "VRF": "default" + }, + "3": { + "Bytes": "25,000", + "Matched routes": "2001::1/64", + "Packets": "1,000", + "Route pattern": "2001::/64", + "VRF": "Vrf_1" + } +} +""" + +expect_show_route_stats_by_pattern_v4_json = """\ +{ + "0": { + "Bytes": "2,000", + "Matched routes": "1.1.1.1/31", + "Packets": "100", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + }, + "1": { + "Bytes": "2,000", + "Matched routes": "1.1.1.2/31", + "Packets": "1,000", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + } +} +""" + +expect_show_route_stats_by_pattern_and_vrf_v6_json = """\ +{ + "0": { + "Bytes": "25,000", + "Packets": "1,000", + "Route": "2001::1/64", + "Route pattern": "2001::/64", + "VRF": "Vrf_1" + } +} +""" + +expect_show_route_stats_all_multi_asic = """\ + ASIC ID Route pattern VRF Matched routes Packets Bytes +--------- --------------- ------- ---------------- --------- ------- + asic0 1.1.1.0/24 default 1.1.1.1/31 100 2,000 + 1.1.1.3/31 200 4,000 + asic1 1.1.1.0/24 default 1.1.1.2/31 1,000 2,000 +""" -def delete_cache(): - cmd = 'flow_counters_stat -t trap -d' +expect_show_route_stats_all_json_multi_asic = """\ +{ + "0": { + "ASIC ID": "asic0", + "Bytes": "2,000", + "Matched routes": "1.1.1.1/31", + "Packets": "100", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + }, + "1": { + "ASIC ID": "asic0", + "Bytes": "4,000", + "Matched routes": "1.1.1.3/31", + "Packets": "200", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + }, + "2": { + "ASIC ID": "asic1", + "Bytes": "2,000", + "Matched routes": "1.1.1.2/31", + "Packets": "1,000", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + } +} +""" + +expect_after_clear_route_stats_all_multi_asic = """\ + ASIC ID Route pattern VRF Matched routes Packets Bytes +--------- --------------- ------- ---------------- --------- ------- + asic0 1.1.1.0/24 default 1.1.1.1/31 0 0 + 1.1.1.3/31 0 0 + asic1 1.1.1.0/24 default 1.1.1.2/31 0 0 +""" + +def delete_cache(stat_type='trap'): + cmd = 'flow_counters_stat -t {} -d'.format(stat_type) get_result_and_return_code(cmd) @@ -165,12 +361,12 @@ def setup_class(cls): @classmethod def teardown_class(cls): print("TEARDOWN") + delete_cache() os.environ["PATH"] = os.pathsep.join( os.environ["PATH"].split(os.pathsep)[:-1] ) os.environ["UTILITIES_UNIT_TESTING"] = "0" os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" - delete_cache() from .mock_tables import mock_single_asic importlib.reload(mock_single_asic) @@ -207,3 +403,538 @@ def test_clear(self): assert result.exit_code == 0 assert result.output == expect_show_output_multi_asic_after_clear + + +class TestConfigRoutePattern: + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1] + ) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + + @mock.patch('utilities_common.cli.query_yes_no') + def test_add_remove_pattern(self, mock_input): + runner = CliRunner() + db = Db() + prefix = '1.1.1.1/24' + result = runner.invoke( + 
config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert prefix in table + assert '30' == table[prefix][FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD] + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert '50' == table[prefix][FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD] + + mock_input.return_value = False + vrf = 'Vrf1' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', '--vrf', vrf, prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert prefix in table + + prefix_v6 = '2000::/64' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', prefix_v6], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert prefix_v6 in table + + mock_input.return_value = True + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', '--vrf', vrf, prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + + assert (vrf, prefix) in table + assert '50' == table[(vrf, prefix)][FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD] + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["remove"], + ['--vrf', vrf, prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert (vrf, prefix) not in table + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["remove"], + [prefix_v6], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert prefix_v6 not in table + + @mock.patch('utilities_common.cli.query_yes_no', mock.MagicMock(return_value=True)) + def test_replace_invalid_pattern(self): + runner = CliRunner() + db = Db() + db.cfgdb.mod_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, + 'vrf1|', + {FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD: '30'}) + prefix = '1.1.1.0/24' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert prefix in table + + db.cfgdb.set_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, prefix, None) + db.cfgdb.mod_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, + 'vrf1|invalid', + {FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD: '30'}) + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix], obj=db + ) + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert prefix in table + + def test_add_invalid_pattern(self): + runner = CliRunner() + prefix = 'invalid' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix] + ) + + print(result.output) + assert result.exit_code == 1 + + def test_remove_non_exist_pattern(self): + runner = CliRunner() + prefix = '1.1.1.1/24' + result = runner.invoke( + 
config.config.commands["flowcnt-route"].commands["pattern"].commands["remove"], + [prefix] + ) + + assert result.exit_code == 1 + assert 'Failed to remove route pattern: {} does not exist'.format(prefix) in result.output + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["remove"], + ['invalid'] + ) + assert result.exit_code == 1 + assert 'Failed to remove route pattern: {} does not exist'.format('invalid') in result.output + + def test_add_pattern_repeatly(self): + runner = CliRunner() + db = Db() + prefix = '1.1.1.1/24' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix], obj=db + ) + + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix], obj=db + ) + + print(result.output) + assert result.exit_code == 1 + assert 'already exists' in result.output + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--vrf', 'vnet1', '-y', prefix], obj=db + ) + + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--vrf', 'vnet1', '-y', prefix], obj=db + ) + + assert result.exit_code == 1 + assert 'already exists' in result.output + + prefix_v6 = '2000::/64' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', prefix_v6], obj=db + ) + + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', prefix_v6], obj=db + ) + + assert result.exit_code == 1 + assert 'already exists' in result.output + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', '--vrf', 'vrf1', '-y', prefix_v6], obj=db + ) + + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', '--vrf', 'vrf1', '-y', prefix_v6], obj=db + ) + + assert result.exit_code == 1 + assert 'already exists' in result.output + + + def test_add_pattern_without_prefix_length(self): + runner = CliRunner() + db = Db() + prefix = '1.1.0.0' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert (prefix + '/32') in table + + prefix_v6 = '2000::1' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix_v6], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert (prefix_v6 + '/128') in table + + def test_show_config(self): + runner = CliRunner() + db = Db() + prefix = '1.1.0.0/24' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--vrf', 'Vrf1', prefix], obj=db + ) + + prefix_v6 = '2000::1/64' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix_v6], obj=db + ) + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["config"], + [], obj=db, catch_exceptions=False + ) + + assert result.exit_code == 0 + print(result.output) + assert result.output == 
expect_show_route_pattern + + +class TestRouteStats: + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + delete_cache(stat_type='route') + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1] + ) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + + def test_show_all_stats(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + assert result.exit_code == 0 + assert expect_show_all_route_stats == result.output + + def test_show_by_pattern(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["pattern"], + ['1.1.1.0/24'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_pattern_v4 + print(result.output) + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["pattern"], + ['2001::/64'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_pattern_v6 + print(result.output) + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["pattern"], + ['--vrf', 'Vrf_1', '2001::/64'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_pattern_and_vrf_v6 + print(result.output) + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["pattern"], + ['invalid'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_pattern_empty + print(result.output) + + def test_show_by_route(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["route"], + ['1.1.1.1/31'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_route_v4 + print(result.output) + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["route"], + ['2001::1/64'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_route_v6 + print(result.output) + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["route"], + ['--vrf', 'Vrf_1', '2001::1/64'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_route_and_vrf_v6 + print(result.output) + + def test_show_json(self): + cmd = 'flow_counters_stat -t route -j' + return_code, result = get_result_and_return_code(cmd) + assert return_code == 0 + assert result == expect_show_route_stats_all_json + + cmd = 'flow_counters_stat -t route -j --prefix_pattern 1.1.1.0/24' + return_code, result = get_result_and_return_code(cmd) + assert return_code == 0 + assert result == expect_show_route_stats_by_pattern_v4_json + + cmd = 'flow_counters_stat -t route -j --prefix 2001::1/64 --vrf Vrf_1' + return_code, result = get_result_and_return_code(cmd) + assert return_code == 0 + assert result == expect_show_route_stats_by_pattern_and_vrf_v6_json + + def test_clear_all(self): + delete_cache(stat_type='route') + runner = CliRunner() + result = runner.invoke( + clear.cli.commands["flowcnt-route"], [] + ) + + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + assert result.exit_code == 0 
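+        # After clearing, every route flow counter should read back as zero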
+ assert expect_after_clear_route_stats_all == result.output + print(result.output) + + def test_clear_by_pattern(self): + delete_cache(stat_type='route') + runner = CliRunner() + result = runner.invoke( + clear.cli.commands["flowcnt-route"].commands['pattern'], + ['1.1.1.0/24'] + ) + + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + assert result.exit_code == 0 + assert expect_after_clear_route_stats_by_pattern_v4 == result.output + print(result.output) + + result = runner.invoke( + clear.cli.commands["flowcnt-route"].commands['pattern'], + ['2001::/64'] + ) + + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + assert result.exit_code == 0 + assert expect_after_clear_route_stats_by_pattern_v6 == result.output + print(result.output) + + result = runner.invoke( + clear.cli.commands["flowcnt-route"].commands['pattern'], + ['--vrf', 'Vrf_1', '2001::/64'] + ) + + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + assert result.exit_code == 0 + assert expect_after_clear_route_stats_all == result.output + print(result.output) + + def test_diff(self): + args = mock.MagicMock() + args.type = 'route' + args.delete = False + args.namespace = None + args.json = False + stats = flow_counters_stat.RouteFlowCounterStats(args) + stats._collect = mock.MagicMock() + old_data = { + '': { + '1.1.1.0/24': { + '1.1.1.1/24': ['100', '200', '1'], + '1.1.1.2/24': ['100', '100', '2'], + '1.1.1.3/24': ['100', '200', '3'] + } + } + } + stats._save(old_data) + stats.data = { + '': { + '1.1.1.0/24': { + '1.1.1.1/24': ['200', '300', '4'], + '1.1.1.2/24': ['100', '50', '2'], + '1.1.1.3/24': ['200', '300', '3'] + } + } + } + + stats._collect_and_diff() + cached_data = stats._load() + assert cached_data['']['1.1.1.0/24']['1.1.1.1/24'] == ['0', '0', '4'] + assert cached_data['']['1.1.1.0/24']['1.1.1.2/24'] == ['0', '0', '2'] + assert cached_data['']['1.1.1.0/24']['1.1.1.3/24'] == ['100', '200', '3'] + + +class TestRouteStatsMultiAsic: + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + delete_cache(stat_type='route') + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + delete_cache(stat_type='route') + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1] + ) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + + def test_show_all_stats(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + assert result.exit_code == 0 + print(result.output) + assert expect_show_route_stats_all_multi_asic == result.output + + def test_show_json(self): + cmd = 'flow_counters_stat -t route -j' + return_code, result = get_result_and_return_code(cmd) + assert return_code == 0 + assert result == expect_show_route_stats_all_json_multi_asic + + def test_clear_all(self): + delete_cache(stat_type='route') + runner = CliRunner() + result = runner.invoke( + clear.cli.commands["flowcnt-route"], [] + ) + + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + 
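+        # A multi-ASIC clear should zero the counters in every ASIC namespace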
assert result.exit_code == 0 + assert expect_after_clear_route_stats_all_multi_asic == result.output + print(result.output) diff --git a/tests/mock_tables/asic0/counters_db.json b/tests/mock_tables/asic0/counters_db.json index 9b1688c743..5c144393f2 100644 --- a/tests/mock_tables/asic0/counters_db.json +++ b/tests/mock_tables/asic0/counters_db.json @@ -1815,5 +1815,21 @@ }, "RATES:oid:0x1500000000034e":{ "RX_PPS": 50.25 + }, + "COUNTERS_ROUTE_NAME_MAP":{ + "1.1.1.3/31": "oid:0x1600000000034d", + "1.1.1.1/31": "oid:0x1600000000034e" + }, + "COUNTERS_ROUTE_TO_PATTERN_MAP": { + "1.1.1.1/31": "1.1.1.0/24", + "1.1.1.3/31": "1.1.1.0/24" + }, + "COUNTERS:oid:0x1600000000034e":{ + "SAI_COUNTER_STAT_PACKETS": 100, + "SAI_COUNTER_STAT_BYTES": 2000 + }, + "COUNTERS:oid:0x1600000000034d":{ + "SAI_COUNTER_STAT_PACKETS": 200, + "SAI_COUNTER_STAT_BYTES": 4000 } } diff --git a/tests/mock_tables/asic1/counters_db.json b/tests/mock_tables/asic1/counters_db.json index 720b0f099f..aed3b22b58 100644 --- a/tests/mock_tables/asic1/counters_db.json +++ b/tests/mock_tables/asic1/counters_db.json @@ -1010,5 +1010,15 @@ }, "RATES:oid:0x1500000000034e":{ "RX_PPS": 45.25 + }, + "COUNTERS_ROUTE_NAME_MAP":{ + "1.1.1.2/31": "oid:0x1600000000034f" + }, + "COUNTERS_ROUTE_TO_PATTERN_MAP": { + "1.1.1.2/31": "1.1.1.0/24" + }, + "COUNTERS:oid:0x1600000000034f":{ + "SAI_COUNTER_STAT_PACKETS": 1000, + "SAI_COUNTER_STAT_BYTES": 2000 } } diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 4d01db96ad..f416e4a941 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -1621,6 +1621,10 @@ "POLL_INTERVAL": "10000", "FLEX_COUNTER_STATUS": "enable" }, + "FLEX_COUNTER_TABLE|FLOW_CNT_ROUTE": { + "POLL_INTERVAL": "10000", + "FLEX_COUNTER_STATUS": "enable" + }, "PFC_WD|Ethernet0": { "action": "drop", "detection_time": "600", diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index b0b386303b..b79c839288 100644 --- a/tests/mock_tables/counters_db.json +++ b/tests/mock_tables/counters_db.json @@ -2041,5 +2041,33 @@ }, "RATES:oid:0x1500000000034e":{ "RX_PPS": 50.25 + }, + "COUNTERS_ROUTE_NAME_MAP":{ + "1.1.1.1/31": "oid:0x1600000000034e", + "1.1.1.2/31": "oid:0x1600000000034f", + "2001::1/64": "oid:0x1600000000035e", + "Vrf_1|2001::1/64": "oid:0x1600000000035f" + }, + "COUNTERS_ROUTE_TO_PATTERN_MAP": { + "1.1.1.1/31": "1.1.1.0/24", + "1.1.1.2/31": "1.1.1.0/24", + "2001::1/64": "2001::/64", + "Vrf_1|2001::1/64": "Vrf_1|2001::/64" + }, + "COUNTERS:oid:0x1600000000034e":{ + "SAI_COUNTER_STAT_PACKETS": 100, + "SAI_COUNTER_STAT_BYTES": 2000 + }, + "COUNTERS:oid:0x1600000000034f":{ + "SAI_COUNTER_STAT_PACKETS": 1000, + "SAI_COUNTER_STAT_BYTES": 2000 + }, + "COUNTERS:oid:0x1600000000035e":{ + "SAI_COUNTER_STAT_PACKETS": 50, + "SAI_COUNTER_STAT_BYTES": 1000 + }, + "COUNTERS:oid:0x1600000000035f":{ + "SAI_COUNTER_STAT_PACKETS": 1000, + "SAI_COUNTER_STAT_BYTES": 25000 } } diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 2a5ab76181..f9e4c54e2e 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -793,6 +793,9 @@ "xcvrd_switch_standby_end": "2021-May-13 10:01:15.696051", "xcvrd_switch_standby_start": "2021-May-13 10:01:15.690835" }, + "FLOW_COUNTER_CAPABILITY_TABLE|route": { + "support": "true" + }, "LINK_PROBE_STATS|Ethernet0": { "pck_loss_count": "612", "pck_expected_count": "840", diff --git a/utilities_common/cli.py b/utilities_common/cli.py index bd678ed2f0..c05069adcf 100644 --- 
a/utilities_common/cli.py
+++ b/utilities_common/cli.py
@@ -577,11 +577,11 @@ def json_dump(data):
         data, sort_keys=True, indent=2, ensure_ascii=False, default=json_serial
     )
 
-
+
 def interface_is_untagged_member(db, interface_name):
-    """ Check if interface is already untagged member"""
+    """ Check if interface is already untagged member"""
     vlan_member_table = db.get_table('VLAN_MEMBER')
-
+
     for key,val in vlan_member_table.items():
         if(key[1] == interface_name):
             if (val['tagging_mode'] == 'untagged'):
@@ -628,3 +628,36 @@ def handle_parse_result(self, ctx, opts, args):
                 "Illegal usage: %s is mutually exclusive with arguments %s" % (self.name, ', '.join(self.mutually_exclusive))
             )
         return super(MutuallyExclusiveOption, self).handle_parse_result(ctx, opts, args)
+
+
+def query_yes_no(question, default="yes"):
+    """Ask a yes/no question via input() and return their answer.
+
+    "question" is a string that is presented to the user.
+    "default" is the presumed answer if the user just hits <Enter>.
+    It must be "yes" (the default), "no" or None (meaning
+    an answer is required of the user).
+
+    The "answer" return value is True for "yes" or False for "no".
+    """
+    valid = {"yes": True, "y": True, "ye": True,
+             "no": False, "n": False}
+    if default is None:
+        prompt = " [y/n] "
+    elif default == "yes":
+        prompt = " [Y/n] "
+    elif default == "no":
+        prompt = " [y/N] "
+    else:
+        raise ValueError("invalid default answer: '%s'" % default)
+
+    while True:
+        sys.stdout.write(question + prompt)
+        choice = input().lower().strip()
+        if default is not None and choice == '':
+            return valid[default]
+        elif choice in valid:
+            return valid[choice]
+        else:
+            sys.stdout.write("Please respond with 'yes' or 'no' "
+                             "(or 'y' or 'n').\n")

From 154a801df929038a5d086723a5ab76b86cc5145d Mon Sep 17 00:00:00 2001
From: Kebo Liu
Date: Tue, 19 Apr 2022 13:45:12 +0800
Subject: [PATCH 05/20] Enhance "config interface type/advertised-type" to be
 blocked on RJ45 ports (#2112)

* Support type check for rj45

Signed-off-by: Stephen Sun

* Add test case for config interface type/advertised-types for RJ45

Signed-off-by: Stephen Sun

* Fix review comments

Signed-off-by: Stephen Sun

* Fix unit test issue in config advertised types

Signed-off-by: Stephen Sun

Co-authored-by: Stephen Sun
---
 scripts/portconfig      | 10 ++++++++++
 tests/config_an_test.py |  4 ++++
 2 files changed, 14 insertions(+)

diff --git a/scripts/portconfig b/scripts/portconfig
index 30321043da..25647cc45c 100755
--- a/scripts/portconfig
+++ b/scripts/portconfig
@@ -75,6 +75,7 @@ class portconfig(object):
         self.is_lag = False
         self.is_lag_mbr = False
         self.parent = port
+        self.is_rj45_port = False
         # Set up db connections
         if namespace is None:
             self.db = ConfigDBConnector()
@@ -90,6 +91,9 @@ class portconfig(object):
             lag_tables = self.db.get_table(PORT_CHANNEL_TABLE_NAME)
             lag_mbr_tables = self.db.get_table(PORT_CHANNEL_MBR_TABLE_NAME)
             if port in port_tables:
+                port_transceiver_info = self.state_db.get_all(self.state_db.STATE_DB, "TRANSCEIVER_INFO|{}".format(port))
+                if port_transceiver_info:
+                    self.is_rj45_port = True if port_transceiver_info.get("type") == "RJ45" else False
             for key in lag_mbr_tables:
                 if port == key[1]:
                     self.parent = key[0]
@@ -155,6 +159,9 @@ class portconfig(object):
         self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_ADV_SPEEDS_CONFIG_FIELD_NAME: adv_speeds})
 
     def set_interface_type(self, port, interface_type):
+        if self.is_rj45_port:
+            print("Setting RJ45 ports' type is not supported")
+            exit(1)
         if self.verbose:
             print("Setting interface_type %s on port %s" % (interface_type, 
port))
         if interface_type not in VALID_INTERFACE_TYPE_SET:
@@ -164,6 +171,9 @@
         self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_INTERFACE_TYPE_CONFIG_FIELD_NAME: interface_type})
 
     def set_adv_interface_types(self, port, adv_interface_types):
+        if self.is_rj45_port:
+            print("Setting RJ45 ports' advertised types is not supported")
+            exit(1)
         if self.verbose:
             print("Setting adv_interface_types %s on port %s" % (adv_interface_types, port))
diff --git a/tests/config_an_test.py b/tests/config_an_test.py
index 3f9864ec71..f16099e40c 100644
--- a/tests/config_an_test.py
+++ b/tests/config_an_test.py
@@ -62,6 +62,8 @@ def test_config_type(self, ctx):
         result = self.basic_check("type", ["Ethernet0", "Invalid"], ctx, operator.ne)
         assert 'Invalid interface type specified' in result.output
         assert 'Valid interface types:' in result.output
+        result = self.basic_check("type", ["Ethernet16", "Invalid"], ctx, operator.ne)
+        assert "Setting RJ45 ports' type is not supported" in result.output
 
     def test_config_adv_types(self, ctx):
         self.basic_check("advertised-types", ["Ethernet0", "CR4,KR4"], ctx)
@@ -74,6 +76,8 @@ def test_config_adv_types(self, ctx):
         assert 'Invalid interface type specified' in result.output
         assert 'duplicate' in result.output
         self.basic_check("advertised-types", ["Ethernet0", ""], ctx, operator.ne)
+        result = self.basic_check("advertised-types", ["Ethernet16", "Invalid"], ctx, operator.ne)
+        assert "Setting RJ45 ports' advertised types is not supported" in result.output
 
     def basic_check(self, command_name, para_list, ctx, op=operator.eq, expect_result=0):
         runner = CliRunner()

From cb3a0477d5ba67696791a5fe852057ffded32ade Mon Sep 17 00:00:00 2001
From: Stephen Sun <5379172+stephenxs@users.noreply.github.com>
Date: Wed, 20 Apr 2022 12:40:53 +0800
Subject: [PATCH 06/20] Support option --ports of config qos reload for
 reloading ports' QoS and buffer configuration to default (#2125)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

What I did

CLI: config qos reload --ports <port_list>

--ports <port_list>: a set of interfaces with empty QoS and buffer configurations (typically they have just been created via DPB).
Format: <port>{,port}, like “Ethernet0” or “Ethernet4,Ethernet5,Ethernet6,Ethernet7”
Each port in the list should exist in the CONFIG_DB.PORT table

The flow:
Render the template qos_config.j2 and buffer_config.j2, generating a temporary json file. (This is one step in “config qos reload”).
Parse the json file, extracting all the items on the ports in the port_list Apply all the extracted items into the CONFIG_DB Signed-off-by: Stephen Sun --- config/main.py | 167 ++++++++++++++++++++++- doc/Command-Reference.md | 30 ++++ tests/config_test.py | 52 +++++++ tests/qos_config_input/0/update_qos.json | 57 ++++++++ tests/qos_config_input/1/update_qos.json | 57 ++++++++ tests/qos_config_input/update_qos.json | 57 ++++++++ 6 files changed, 419 insertions(+), 1 deletion(-) create mode 100644 tests/qos_config_input/0/update_qos.json create mode 100644 tests/qos_config_input/1/update_qos.json create mode 100644 tests/qos_config_input/update_qos.json diff --git a/config/main.py b/config/main.py index 197fb33662..3d549b991c 100644 --- a/config/main.py +++ b/config/main.py @@ -2258,6 +2258,7 @@ def _update_buffer_calculation_model(config_db, model): @qos.command('reload') @click.pass_context +@click.option('--ports', is_flag=False, required=False, help="List of ports that needs to be updated") @click.option('--no-dynamic-buffer', is_flag=True, help="Disable dynamic buffer calculation") @click.option( '--json-data', type=click.STRING, @@ -2267,8 +2268,13 @@ def _update_buffer_calculation_model(config_db, model): '--dry_run', type=click.STRING, help="Dry run, writes config to the given file" ) -def reload(ctx, no_dynamic_buffer, dry_run, json_data): +def reload(ctx, no_dynamic_buffer, dry_run, json_data, ports): """Reload QoS configuration""" + if ports: + log.log_info("'qos reload --ports {}' executing...".format(ports)) + _qos_update_ports(ctx, ports, dry_run, json_data) + return + log.log_info("'qos reload' executing...") _clear_qos() @@ -2340,6 +2346,165 @@ def reload(ctx, no_dynamic_buffer, dry_run, json_data): if buffer_model_updated: print("Buffer calculation model updated, restarting swss is required to take effect") +def _qos_update_ports(ctx, ports, dry_run, json_data): + """Reload QoS configuration""" + _, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs() + sonic_version_file = device_info.get_sonic_version_file() + + portlist = ports.split(',') + portset_to_handle = set(portlist) + portset_handled = set() + + namespace_list = [DEFAULT_NAMESPACE] + if multi_asic.get_num_asics() > 1: + namespace_list = multi_asic.get_namespaces_from_linux() + + # Tables whose key is port only + tables_single_index = [ + 'PORT_QOS_MAP', + 'BUFFER_PORT_INGRESS_PROFILE_LIST', + 'BUFFER_PORT_EGRESS_PROFILE_LIST'] + # Tables whose key is port followed by other element + tables_multi_index = [ + 'QUEUE', + 'BUFFER_PG', + 'BUFFER_QUEUE'] + + if json_data: + from_db = "--additional-data \'{}\'".format(json_data) if json_data else "" + else: + from_db = "-d" + + items_to_update = {} + config_dbs = {} + + for ns in namespace_list: + if ns is DEFAULT_NAMESPACE: + asic_id_suffix = "" + config_db = ConfigDBConnector() + else: + asic_id = multi_asic.get_asic_id_from_name(ns) + if asic_id is None: + click.secho("Command 'qos update' failed with invalid namespace '{}'".format(ns), fg="yellow") + raise click.Abort() + asic_id_suffix = str(asic_id) + + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=ns) + + config_db.connect() + config_dbs[ns] = config_db + if is_dynamic_buffer_enabled(config_db): + buffer_template_file = os.path.join(hwsku_path, asic_id_suffix, "buffers_dynamic.json.j2") + else: + buffer_template_file = os.path.join(hwsku_path, asic_id_suffix, "buffers.json.j2") + + if not os.path.isfile(buffer_template_file): + click.secho("Buffer definition template not found at 
{}".format(buffer_template_file), fg="yellow") + ctx.abort() + + qos_template_file = os.path.join(hwsku_path, asic_id_suffix, "qos.json.j2") + + if not os.path.isfile(qos_template_file): + click.secho("QoS definition template not found at {}".format(qos_template_file), fg="yellow") + ctx.abort() + + # Remove multi indexed entries first + for table_name in tables_multi_index: + entries = config_db.get_keys(table_name) + for key in entries: + port, _ = key + if not port in portset_to_handle: + continue + config_db.set_entry(table_name, '|'.join(key), None) + + cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns) + command = "{} {} {} -t {},config-db -t {},config-db -y {} --print-data".format( + SONIC_CFGGEN_PATH, cmd_ns, from_db, buffer_template_file, qos_template_file, sonic_version_file + ) + jsonstr = clicommon.run_command(command, display_cmd=False, return_cmd=True) + + jsondict = json.loads(jsonstr) + port_table = jsondict.get('PORT') + if port_table: + ports_to_update = set(port_table.keys()).intersection(portset_to_handle) + if not ports_to_update: + continue + else: + continue + + portset_handled.update(ports_to_update) + + items_to_apply = {} + + for table_name in tables_single_index: + table_items_rendered = jsondict.get(table_name) + if table_items_rendered: + for key, data in table_items_rendered.items(): + port = key + if not port in ports_to_update: + continue + # Push the rendered data to config-db + if not items_to_apply.get(table_name): + items_to_apply[table_name] = {} + items_to_apply[table_name][key] = data + + for table_name in tables_multi_index: + table_items_rendered = jsondict.get(table_name) + if table_items_rendered: + for key, data in table_items_rendered.items(): + port = key.split('|')[0] + if not port in ports_to_update: + continue + # Push the result to config-db + if not items_to_apply.get(table_name): + items_to_apply[table_name] = {} + items_to_apply[table_name][key] = data + + # Handle CABLE_LENGTH + # This table needs to be specially handled because the port is not the index but the field name + # The idea is for all the entries in template, the same entries in CONFIG_DB will be merged together + # Eg. 
there is entry AZURE rendered from template for ports Ethernet0, Ethernet4 with cable length "5m":
+        #     and entry AZURE in CONFIG_DB for ports Ethernet8, Ethernet12, Ethernet16 with cable length "40m"
+        # The entry that will eventually be pushed into CONFIG_DB is
+        # {"AZURE": {"Ethernet0": "5m", "Ethernet4": "5m", "Ethernet8": "40m", "Ethernet12": "40m", "Ethernet16": "40m"}}
+        table_name = 'CABLE_LENGTH'
+        cable_length_table = jsondict.get(table_name)
+        if cable_length_table:
+            for key, item in cable_length_table.items():
+                cable_length_from_db = config_db.get_entry(table_name, key)
+                cable_length_from_template = {}
+                for port in ports_to_update:
+                    cable_len = item.get(port)
+                    if cable_len:
+                        cable_length_from_template[port] = cable_len
+                # Reaching this point,
+                # - cable_length_from_template contains cable lengths rendered from the template, eg Ethernet0 and Ethernet4 in the above example
+                # - cable_length_from_db contains cable lengths existing in the CONFIG_DB, eg Ethernet8, Ethernet12, and Ethernet16 in the above example
+
+                if not items_to_apply.get(table_name):
+                    items_to_apply[table_name] = {}
+
+                if cable_length_from_db:
+                    cable_length_from_db.update(cable_length_from_template)
+                    items_to_apply[table_name][key] = cable_length_from_db
+                else:
+                    items_to_apply[table_name][key] = cable_length_from_template
+
+        if items_to_apply:
+            items_to_update[ns] = items_to_apply
+
+        if dry_run:
+            with open(dry_run + ns, "w+") as f:
+                json.dump(items_to_apply, f, sort_keys=True, indent=4)
+        else:
+            jsonstr = json.dumps(items_to_apply)
+            cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns)
+            command = "{} {} --additional-data '{}' --write-to-db".format(SONIC_CFGGEN_PATH, cmd_ns, jsonstr)
+            clicommon.run_command(command, display_cmd=False)
+
+    if portset_to_handle != portset_handled:
+        click.echo("The port(s) {} are not updated because they do not exist".format(portset_to_handle - portset_handled))
+
 def is_dynamic_buffer_enabled(config_db):
     """Return whether the current system supports dynamic buffer calculation"""
     device_metadata = config_db.get_entry('DEVICE_METADATA', 'localhost')
diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md
index 2843bd0ca6..dc750d2bbc 100644
--- a/doc/Command-Reference.md
+++ b/doc/Command-Reference.md
@@ -7586,6 +7586,36 @@ Some of the example QOS configurations that users can modify are given below.
 When there are no changes in the platform specific configutation files, they internally use the file "/usr/share/sonic/templates/buffers_config.j2" and "/usr/share/sonic/templates/qos_config.j2" to generate the configuration.
 ```
 
+**config qos reload --ports port_list**
+
+This command is used to reload the default QoS configuration on a group of ports.
+Typically, the default QoS configuration is in the following tables.
+1) PORT_QOS_MAP
+2) QUEUE
+3) BUFFER_PG
+4) BUFFER_QUEUE
+5) BUFFER_PORT_INGRESS_PROFILE_LIST
+6) BUFFER_PORT_EGRESS_PROFILE_LIST
+7) CABLE_LENGTH
+
+If there was QoS configuration in the above tables for the ports:
+
+ - if `--force` option is provided, the existing QoS configuration will be replaced by the default QoS configuration,
+ - otherwise, the command will exit with nothing updated.
+
+- Usage:
+  ```
+  config qos reload --ports <port>[,port]
+  ```
+
+- Example:
+  ```
+  admin@sonic:~$ sudo config qos reload --ports Ethernet0,Ethernet4
+
+  In this example, it updates the QoS configuration on ports Ethernet0 and Ethernet4 to default.
+ If there was QoS configuration on the ports, the command will clear the existing QoS configuration on the port and reload to default. + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#qos) ## sFlow diff --git a/tests/config_test.py b/tests/config_test.py index 72110f805e..88f6015ee5 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -442,6 +442,24 @@ def test_qos_reload_single( ) assert filecmp.cmp(output_file, expected_result, shallow=False) + def test_qos_update_single( + self, get_cmd_module, setup_qos_mock_apis + ): + (config, show) = get_cmd_module + json_data = '{"DEVICE_METADATA": {"localhost": {}}, "PORT": {"Ethernet0": {}}}' + runner = CliRunner() + output_file = os.path.join(os.sep, "tmp", "qos_config_update.json") + cmd_vector = ["reload", "--ports", "Ethernet0", "--json-data", json_data, "--dry_run", output_file] + result = runner.invoke(config.config.commands["qos"], cmd_vector) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + cwd = os.path.dirname(os.path.realpath(__file__)) + expected_result = os.path.join( + cwd, "qos_config_input", "update_qos.json" + ) + assert filecmp.cmp(output_file, expected_result, shallow=False) + @classmethod def teardown_class(cls): print("TEARDOWN") @@ -495,6 +513,40 @@ def test_qos_reload_masic( file = "{}{}".format(output_file, asic) assert filecmp.cmp(file, expected_result, shallow=False) + def test_qos_update_masic( + self, get_cmd_module, setup_qos_mock_apis, + setup_multi_broadcom_masic + ): + (config, show) = get_cmd_module + runner = CliRunner() + + output_file = os.path.join(os.sep, "tmp", "qos_update_output") + print("Saving output in {}<0,1,2..>".format(output_file)) + num_asic = device_info.get_num_npus() + for asic in range(num_asic): + try: + file = "{}{}".format(output_file, asic) + os.remove(file) + except OSError: + pass + json_data = '{"DEVICE_METADATA": {"localhost": {}}, "PORT": {"Ethernet0": {}}}' + result = runner.invoke( + config.config.commands["qos"], + ["reload", "--ports", "Ethernet0,Ethernet4", "--json-data", json_data, "--dry_run", output_file] + ) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + cwd = os.path.dirname(os.path.realpath(__file__)) + + for asic in range(num_asic): + expected_result = os.path.join( + cwd, "qos_config_input", str(asic), "update_qos.json" + ) + + assert filecmp.cmp(output_file + "asic{}".format(asic), expected_result, shallow=False) + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/qos_config_input/0/update_qos.json b/tests/qos_config_input/0/update_qos.json new file mode 100644 index 0000000000..09749a5123 --- /dev/null +++ b/tests/qos_config_input/0/update_qos.json @@ -0,0 +1,57 @@ +{ + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { + "AZURE": { + "Ethernet0": "300m" + } + }, + "PORT_QOS_MAP": { + "Ethernet0": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", + "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + } + }, + "QUEUE": { + "Ethernet0|0": { + "scheduler": "scheduler.0" + }, + "Ethernet0|1": { + "scheduler": "scheduler.0" + }, + "Ethernet0|2": { + "scheduler": "scheduler.0" + }, + "Ethernet0|3": { + "scheduler": "scheduler.1", + 
"wred_profile": "AZURE_LOSSLESS" + }, + "Ethernet0|4": { + "scheduler": "scheduler.1", + "wred_profile": "AZURE_LOSSLESS" + }, + "Ethernet0|5": { + "scheduler": "scheduler.0" + }, + "Ethernet0|6": { + "scheduler": "scheduler.0" + } + } +} \ No newline at end of file diff --git a/tests/qos_config_input/1/update_qos.json b/tests/qos_config_input/1/update_qos.json new file mode 100644 index 0000000000..09749a5123 --- /dev/null +++ b/tests/qos_config_input/1/update_qos.json @@ -0,0 +1,57 @@ +{ + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { + "AZURE": { + "Ethernet0": "300m" + } + }, + "PORT_QOS_MAP": { + "Ethernet0": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", + "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + } + }, + "QUEUE": { + "Ethernet0|0": { + "scheduler": "scheduler.0" + }, + "Ethernet0|1": { + "scheduler": "scheduler.0" + }, + "Ethernet0|2": { + "scheduler": "scheduler.0" + }, + "Ethernet0|3": { + "scheduler": "scheduler.1", + "wred_profile": "AZURE_LOSSLESS" + }, + "Ethernet0|4": { + "scheduler": "scheduler.1", + "wred_profile": "AZURE_LOSSLESS" + }, + "Ethernet0|5": { + "scheduler": "scheduler.0" + }, + "Ethernet0|6": { + "scheduler": "scheduler.0" + } + } +} \ No newline at end of file diff --git a/tests/qos_config_input/update_qos.json b/tests/qos_config_input/update_qos.json new file mode 100644 index 0000000000..09749a5123 --- /dev/null +++ b/tests/qos_config_input/update_qos.json @@ -0,0 +1,57 @@ +{ + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { + "AZURE": { + "Ethernet0": "300m" + } + }, + "PORT_QOS_MAP": { + "Ethernet0": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", + "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + } + }, + "QUEUE": { + "Ethernet0|0": { + "scheduler": "scheduler.0" + }, + "Ethernet0|1": { + "scheduler": "scheduler.0" + }, + "Ethernet0|2": { + "scheduler": "scheduler.0" + }, + "Ethernet0|3": { + "scheduler": "scheduler.1", + "wred_profile": "AZURE_LOSSLESS" + }, + "Ethernet0|4": { + "scheduler": "scheduler.1", + "wred_profile": "AZURE_LOSSLESS" + }, + "Ethernet0|5": { + "scheduler": "scheduler.0" + }, + "Ethernet0|6": { + "scheduler": "scheduler.0" + } + } +} \ No newline at end of file From 22a388b695effb53fef7724bb9a906f346e7e508 Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Fri, 22 Apr 2022 11:23:09 +0300 Subject: [PATCH 07/20] [show] fix get routing stack routine (#2137) - What I did Fixed error message: "Failed to get routing stack: a bytes-like object is required, not 'str'" introduced by #2117 - How I did it Add missing 'text' parameter - How to verify it Run on switch Signed-off-by: Stepan Blyschak --- show/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/show/main.py b/show/main.py index 01fd592c7a..1821f6c9d8 100755 --- a/show/main.py +++ b/show/main.py @@ -78,7 +78,7 @@ def get_routing_stack(): command = "sudo docker ps | grep bgp | awk '{print$2}' 
| cut -d'-' -f3 | cut -d':' -f1 | head -n 1"
     try:
-        stdout = subprocess.check_output(command, shell=True, timeout=COMMAND_TIMEOUT)
+        stdout = subprocess.check_output(command, shell=True, text=True, timeout=COMMAND_TIMEOUT)
         result = stdout.rstrip('\n')
     except Exception as err:
         click.echo('Failed to get routing stack: {}'.format(err), err=True)

From 697aae36c18c533140020575b7d5835b56a24f24 Mon Sep 17 00:00:00 2001
From: Kebo Liu
Date: Mon, 25 Apr 2022 18:22:39 +0800
Subject: [PATCH 08/20] Fix speed parsing when speed is NOT fetched from
 APPL_DB (#2138)

- What I did
PR #2110 introduced an enhancement to display port speed with a different unit for RJ45 ports; however, it only covered the case where the speed is fetched from APPL_DB. Recently a change was made to fetch the speed from STATE_DB in some cases, so the same logic also needs to be applied there.

- How I did it
Rename appl_db_port_speed_parse to the more generic name port_speed_parse. Call port_speed_parse when the speed is fetched from STATE_DB. In the unit test, add port status entries to the mocked STATE_DB to cover the logic that gets the port speed from STATE_DB.

- How to verify it
Run the unit tests and test on a platform that supports RJ45 ports.

Signed-off-by: Kebo Liu
---
 scripts/intfutil                | 17 +++++++++--------
 tests/mock_tables/state_db.json | 21 +++++++++++++++++++++
 2 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/scripts/intfutil b/scripts/intfutil
index 784140aced..c25b896b59 100755
--- a/scripts/intfutil
+++ b/scripts/intfutil
@@ -132,9 +132,9 @@ def appl_db_sub_intf_keys_get(appl_db, sub_intf_list, sub_intf_name):
 
     return appl_db_sub_intf_keys
 
-def appl_db_port_speed_parse(in_speed, optics_type):
+def port_speed_parse(in_speed, optics_type):
     """
-    Parse the speed received from application DB
+    Parse the speed received from DB
     """
     # fetched speed is in megabits per second
     speed = int(in_speed)
@@ -155,13 +155,13 @@ def appl_db_port_status_get(appl_db, intf_name, status_type):
         return "N/A"
     if status_type == PORT_SPEED and status != "N/A":
         optics_type = state_db_port_optics_get(appl_db, intf_name, PORT_OPTICS_TYPE)
-        status = appl_db_port_speed_parse(status, optics_type)
+        status = port_speed_parse(status, optics_type)
     elif status_type == PORT_ADV_SPEEDS and status != "N/A" and status != "all":
         optics_type = state_db_port_optics_get(appl_db, intf_name, PORT_OPTICS_TYPE)
         speed_list = status.split(',')
         new_speed_list = []
         for s in natsorted(speed_list):
-            new_speed_list.append(appl_db_port_speed_parse(s, optics_type))
+            new_speed_list.append(port_speed_parse(s, optics_type))
         status = ','.join(new_speed_list)
     return status
 
@@ -173,8 +173,9 @@ def port_oper_speed_get(db, intf_name):
     oper_status = db.get(db.APPL_DB, PORT_STATUS_TABLE_PREFIX + intf_name, PORT_OPER_STATUS)
     if oper_speed is None or oper_speed == "N/A" or oper_status != "up":
         return appl_db_port_status_get(db, intf_name, PORT_SPEED)
-
-    return '{}G'.format(oper_speed[:-3])
+    else:
+        optics_type = state_db_port_optics_get(db, intf_name, PORT_OPTICS_TYPE)
+        return port_speed_parse(oper_speed, optics_type)
 
 def port_oper_speed_get_raw(db, intf_name):
     """
@@ -301,7 +302,7 @@ def po_speed_dict(po_int_dict, appl_db):
             po_list.append(None)
         else:
             optics_type = state_db_port_optics_get(appl_db, value[0], PORT_OPTICS_TYPE)
-            interface_speed = appl_db_port_speed_parse(interface_speed, optics_type)
+            interface_speed = port_speed_parse(interface_speed, optics_type)
             po_list.append(interface_speed)
     elif len(value) > 1:
         for intf in value:
@@ -311,7 +312,7 @@ def po_speed_dict(po_int_dict, 
appl_db): agg_speed_list.append(temp_speed) interface_speed = sum(agg_speed_list) interface_speed = str(interface_speed) - interface_speed = appl_db_port_speed_parse(interface_speed, optics_type) + interface_speed = port_speed_parse(interface_speed, optics_type) po_list.append(interface_speed) po_speed_dict = dict(po_list[i:i+2] for i in range(0, len(po_list), 2)) return po_speed_dict diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index f9e4c54e2e..fc0fb8bb0a 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -801,5 +801,26 @@ "pck_expected_count": "840", "link_prober_unknown_start": "2022-Jan-26 03:13:05.366900", "link_prober_unknown_end": "2022-Jan-26 03:17:35.446580" + }, + "PORT_TABLE|Ethernet16": { + "state": "ok", + "netdev_oper_status": "up", + "admin_status": "up", + "mtu": "9100", + "speed": "100" + }, + "PORT_TABLE|Ethernet36": { + "state": "ok", + "netdev_oper_status": "up", + "admin_status": "up", + "mtu": "9100", + "speed": "10" + }, + "PORT_TABLE|Ethernet24": { + "state": "ok", + "netdev_oper_status": "up", + "admin_status": "up", + "mtu": "9100", + "speed": "1000" } } From 8c07d5997a28f180ab5becdb407ecf49ac45b0e5 Mon Sep 17 00:00:00 2001 From: Yakiv Huryk <62013282+Yakiv-Huryk@users.noreply.github.com> Date: Mon, 25 Apr 2022 13:25:29 +0300 Subject: [PATCH 09/20] [Mellanox] [reboot] [asan] stop asan-enabled containers on reboot (#2107) During a cold reboot on an image built with ENABLE_ASAN=y, swss and syncd (only on Mellanox platform) are explicitly stopped. This will make the relevant processes receive SIGTERM and generate the ASAN reports. The behavior for fast/warm reboots is not affected. The behavior of regular (ENABLE_ASAN=n) images is not affected. - What I did Modified the reboot script so the asan logs are generated on reboot. - How I did it Added stop_services_asan() to reboot script. - How to verify it Added "asan: 'yes'" to the /etc/sonic/sonic_version.yml Checked that relevant containers are stopped and logs are generated Signed-off-by: Yakiv Huryk --- scripts/reboot | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/scripts/reboot b/scripts/reboot index 7746ee7d69..fa7315b3b9 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -25,6 +25,7 @@ REBOOT_USER=$(logname) PLATFORM=$(sonic-cfggen -H -v DEVICE_METADATA.localhost.platform) ASIC_TYPE=$(sonic-cfggen -y /etc/sonic/sonic_version.yml -v asic_type) SUBTYPE=$(sonic-cfggen -d -v DEVICE_METADATA.localhost.subtype) +ASAN=$(sonic-cfggen -y /etc/sonic/sonic_version.yml -v asan) VERBOSE=no EXIT_NEXT_IMAGE_NOT_EXISTS=4 EXIT_SONIC_INSTALLER_VERIFY_REBOOT=21 @@ -78,6 +79,12 @@ function stop_sonic_services() stop_pmon_service } +function stop_services_asan() +{ + debug "Stopping swss for ASAN" + systemctl stop swss +} + function clear_warm_boot() { # If reboot is requested, make sure the outstanding warm-boot is cleared @@ -187,6 +194,11 @@ tag_images # Stop SONiC services gracefully. 
stop_sonic_services +# Stop ASAN-enabled services so the report can be generated +if [[ x"$ASAN" == x"yes" ]]; then + stop_services_asan +fi + clear_warm_boot # Update the reboot cause file to reflect that user issued 'reboot' command From d012be9e2d1696898a1c0eb4be0db40b63fb3ccd Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Mon, 25 Apr 2022 18:26:31 +0800 Subject: [PATCH 10/20] [Command-Reference] Add CLI docs for route flow counter (#2069) - What I did Add CLIs document for route flow counter feature, CLI PR: #2031 - How I did it Add CLIs document for route flow counter feature - How to verify it Run build --- doc/Command-Reference.md | 112 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 111 insertions(+), 1 deletion(-) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index dc750d2bbc..748a273d85 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -57,6 +57,7 @@ * [Flow Counters](#flow-counters) * [Flow Counters show commands](#flow-counters-show-commands) * [Flow Counters clear commands](#flow-counters-clear-commands) + * [Flow Counters config commands](#flow-counters-config-commands) * [Gearbox](#gearbox) * [Gearbox show commands](#gearbox-show-commands) * [Interfaces](#interfaces) @@ -3137,9 +3138,10 @@ Go Back To [Beginning of the document](#) or [Beginning of this section](#featur ## Flow Counters -This section explains all the Flow Counters show commands and clear commands that are supported in SONiC. Flow counters are usually used for debugging, troubleshooting and performance enhancement processes. Flow counters supports case like: +This section explains all the Flow Counters show commands, clear commands and config commands that are supported in SONiC. Flow counters are usually used for debugging, troubleshooting and performance enhancement processes. Flow counters supports case like: - Host interface traps (number of received traps per Trap ID) + - Routes matching the configured prefix pattern (number of hits and number of bytes) ### Flow Counters show commands @@ -3169,6 +3171,50 @@ Because clear (see below) is handled on a per-user basis different users may see asic1 dhcp 200 3,000 45.25/s ``` +**show flowcnt-route stats** + +This command is used to show the current statistics for route flow patterns. + +Because clear (see below) is handled on a per-user basis different users may see different counts. + +- Usage: + ``` + show flowcnt-route stats + show flowcnt-route stats pattern [--vrf ] + show flowcnt-route stats route [--vrf ] + ``` + +- Example: + ``` + admin@sonic:~$ show flowcnt-route stats + Route pattern VRF Matched routes Packets Bytes + -------------------------------------------------------------------------------------- + 3.3.0.0/16 default 3.3.1.0/24 100 4543 + 3.3.2.3/32 3443 929229 + 3.3.0.0/16 0 0 + 2000::/64 default 2000::1/128 100 4543 + ``` + +The "pattern" subcommand is used to display the route flow counter statistics by route pattern. + +- Example: + ``` + admin@sonic:~$ show flowcnt-route stats pattern 3.3.0.0/16 + Route pattern VRF Matched routes Packets Bytes + -------------------------------------------------------------------------------------- + 3.3.0.0/16 default 3.3.1.0/24 100 4543 + 3.3.2.3/32 3443 929229 + 3.3.0.0/16 0 0 + ``` + +The "route" subcommand is used to display the route flow counter statistics by route prefix. 
+ ``` + admin@sonic:~$ show flowcnt-route stats route 3.3.3.2/32 --vrf Vrf_1 + Route VRF Route Pattern Packets Bytes + ----------------------------------------------------------------------------------------- + 3.3.3.2/32 Vrf_1 3.3.0.0/16 100 4543 + ``` + ### Flow Counters clear commands **sonic-clear flowcnt-trap** @@ -3186,6 +3232,70 @@ This command is used to clear the current statistics for the registered host int Trap Flow Counters were successfully cleared ``` +**sonic-clear flowcnt-route** + +This command is used to clear the current statistics for the route flow counter. This is done on a per-user basis. + +- Usage: + ``` + sonic-clear flowcnt-route + sonic-clear flowcnt-route pattern [--vrf ] + sonic-clear flowcnt-route route [--vrf ] + ``` + +- Example: + ``` + admin@sonic:~$ sonic-clear flowcnt-route + Route Flow Counters were successfully cleared + ``` + +The "pattern" subcommand is used to clear the route flow counter statistics by route pattern. + +- Example: + ``` + admin@sonic:~$ sonic-clear flowcnt-route pattern 3.3.0.0/16 --vrf Vrf_1 + Flow Counters of all routes matching the configured route pattern were successfully cleared + ``` + +The "route" subcommand is used to clear the route flow counter statistics by route prefix. + +- Example: + ``` + admin@sonic:~$ sonic-clear flowcnt-route route 3.3.3.2/32 --vrf Vrf_1 + Flow Counters of the specified route were successfully cleared + ``` + +### Flow Counters config commands + +**config flowcnt-route pattern add** + +This command is used to add or update the route pattern which is used by route flow counter to match route entries. + +- Usage: + ``` + config flowcnt-route pattern add [--vrf ] [--max ] + ``` + +- Example: + ``` + admin@sonic:~$ config flowcnt-route pattern add 2.2.0.0/16 --vrf Vrf_1 --max 50 + ``` + +**config flowcnt-route pattern remove** + +This command is used to remove the route pattern which is used by route flow counter to match route entries. + +- Usage: + ``` + config flowcnt-route pattern remove [--vrf ] + ``` + +- Example: + ``` + admin@sonic:~$ config flowcnt-route pattern remove 2.2.0.0/16 --vrf Vrf_1 + ``` + + Go Back To [Beginning of the document](#) or [Beginning of this section](#flow-counters) ## Gearbox From fbfa8bcb3a1458f50fa96a5a51512ad2a3ebbdfa Mon Sep 17 00:00:00 2001 From: Mohamed Ghoneim Date: Thu, 28 Apr 2022 17:03:08 -0700 Subject: [PATCH 11/20] [GCU] Enabling AddRack and adding RemoveRack tests (#2143) #### What I did Fixes #2051 RemoveRack/AddRack is confirmed to be working correctly by Renuka. Check https://github.com/Azure/sonic-mgmt/pull/5254 #### How I did it - Enabled ADD_RACK test - Created REMOVE_RACK test using opposite data to ADD_RACK, verified the generated steps manually. #### How to verify it UnitTest and the KVM test by Renuka. 
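For readers who want to replay a case by hand: each entry in patch_sorter_test_success.json pairs a `current_config`, a JSON `patch`, and the `expected_changes` the sorter should emit. A minimal sketch (not part of the change itself; it assumes the repo layout shown in the diff below and uses the `jsonpatch` library GCU already depends on):

```python
import json
import jsonpatch

# Load the REMOVE_RACK case added by this change (path as in the diffstat below).
with open("tests/generic_config_updater/files/patch_sorter_test_success.json") as f:
    case = json.load(f)["REMOVE_RACK"]

# Applying the whole patch at once yields the target config...
target = jsonpatch.apply_patch(case["current_config"], case["patch"])

# ...and replaying the sorted changes one at a time must reach the same
# target, which is what run_single_success_case() in patch_sorter_test.py asserts.
simulated = case["current_config"]
for change in case["expected_changes"]:
    simulated = jsonpatch.apply_patch(simulated, change)

assert simulated == target
```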
#### Previous command output (if the output of a command-line utility has changed) #### New command output (if the output of a command-line utility has changed) --- .../files/patch_sorter_test_success.json | 1514 +++++++++++++++++ .../patch_sorter_test.py | 4 +- 2 files changed, 1515 insertions(+), 3 deletions(-) diff --git a/tests/generic_config_updater/files/patch_sorter_test_success.json b/tests/generic_config_updater/files/patch_sorter_test_success.json index c134b0b5e2..74cb4ed728 100644 --- a/tests/generic_config_updater/files/patch_sorter_test_success.json +++ b/tests/generic_config_updater/files/patch_sorter_test_success.json @@ -4599,5 +4599,1519 @@ } ] ] + }, + "REMOVE_RACK": { + "desc": "Remove a rack and all its related settings", + "current_config": { + "ACL_TABLE": { + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet64", + "Ethernet68", + "Ethernet72" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet64", + "Ethernet68", + "Ethernet72" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "BGP_NEIGHBOR": { + "10.0.0.1": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.0", + "name": "ARISTA01T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.5": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.4", + "name": "ARISTA03T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.9": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.8", + "name": "ARISTA05T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.13": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.12", + "name": "ARISTA07T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.17": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.16", + "name": "ARISTA09T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.21": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.20", + "name": "ARISTA11T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.25": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.24", + "name": "ARISTA13T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.29": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.28", + "name": "ARISTA15T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.35": { + "asn": "64002", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.34", + "name": "ARISTA02T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.37": { + "asn": "64003", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.36", + "name": "ARISTA03T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.39": { + "asn": "64004", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.38", + "name": "ARISTA04T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.41": { + "asn": "64005", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.40", + "name": "ARISTA05T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.43": { + "asn": "64006", + "holdtime": "10", + "keepalive": "3", + "local_addr": 
"10.0.0.42", + "name": "ARISTA06T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.45": { + "asn": "64007", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.44", + "name": "ARISTA07T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.47": { + "asn": "64008", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.46", + "name": "ARISTA08T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.49": { + "asn": "64009", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.48", + "name": "ARISTA09T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.51": { + "asn": "64010", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.50", + "name": "ARISTA10T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.53": { + "asn": "64011", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.52", + "name": "ARISTA11T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.55": { + "asn": "64012", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.54", + "name": "ARISTA12T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.57": { + "asn": "64013", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.56", + "name": "ARISTA13T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.59": { + "asn": "64014", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.58", + "name": "ARISTA14T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.61": { + "asn": "64015", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.60", + "name": "ARISTA15T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.63": { + "asn": "64016", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.62", + "name": "ARISTA16T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::1a": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::19", + "name": "ARISTA07T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::2": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::1", + "name": "ARISTA01T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::2a": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::29", + "name": "ARISTA11T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::3a": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::39", + "name": "ARISTA15T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::4a": { + "asn": "64003", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::49", + "name": "ARISTA03T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::4e": { + "asn": "64004", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::4d", + "name": "ARISTA04T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::5a": { + "asn": "64007", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::59", + "name": "ARISTA07T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::5e": { + "asn": "64008", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::5d", + "name": "ARISTA08T0", + "nhopself": "0", + "rrclient": "0", + 
"admin_status": "up" + }, + "fc00::6a": { + "asn": "64011", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::69", + "name": "ARISTA11T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::6e": { + "asn": "64012", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::6d", + "name": "ARISTA12T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::7a": { + "asn": "64015", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::79", + "name": "ARISTA15T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::7e": { + "asn": "64016", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::7d", + "name": "ARISTA16T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::12": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::11", + "name": "ARISTA05T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::22": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::21", + "name": "ARISTA09T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::32": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::31", + "name": "ARISTA13T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::46": { + "asn": "64002", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::45", + "name": "ARISTA02T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::52": { + "asn": "64005", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::51", + "name": "ARISTA05T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::56": { + "asn": "64006", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::55", + "name": "ARISTA06T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::62": { + "asn": "64009", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::61", + "name": "ARISTA09T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::66": { + "asn": "64010", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::65", + "name": "ARISTA10T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::72": { + "asn": "64013", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::71", + "name": "ARISTA13T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::76": { + "asn": "64014", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::75", + "name": "ARISTA14T0", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "fc00::a": { + "asn": "65200", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::9", + "name": "ARISTA03T2", + "nhopself": "0", + "rrclient": "0", + "admin_status": "up" + }, + "10.0.0.33": { + "admin_status": "up", + "asn": "64001", + "holdtime": "10", + "keepalive": "3", + "local_addr": "10.0.0.32", + "name": "ARISTA01T0", + "nhopself": "0", + "rrclient": "0" + }, + "fc00::42": { + "admin_status": "up", + "asn": "64001", + "holdtime": "10", + "keepalive": "3", + "local_addr": "fc00::41", + "name": "ARISTA01T0", + "nhopself": "0", + "rrclient": "0" + } + }, + "BUFFER_PG": { + "Ethernet68|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet68|3-4": { + "profile": "pg_lossless_40000_40m_profile" + }, + "Ethernet72|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet72|3-4": { + "profile": 
"pg_lossless_40000_40m_profile" + }, + "Ethernet64|3-4": { + "profile": "pg_lossless_40000_40m_profile" + }, + "Ethernet64|0": { + "profile": "ingress_lossy_profile" + } + }, + "BUFFER_POOL": { + "egress_lossless_pool": { + "mode": "static", + "size": "12766208", + "type": "egress" + }, + "egress_lossy_pool": { + "mode": "dynamic", + "size": "7326924", + "type": "egress" + }, + "ingress_lossless_pool": { + "mode": "dynamic", + "size": "12766208", + "type": "ingress" + } + }, + "BUFFER_PROFILE": { + "egress_lossless_profile": { + "pool": "egress_lossless_pool", + "size": "0", + "static_th": "12766208" + }, + "egress_lossy_profile": { + "dynamic_th": "3", + "pool": "egress_lossy_pool", + "size": "1518" + }, + "ingress_lossy_profile": { + "dynamic_th": "3", + "pool": "ingress_lossless_pool", + "size": "0" + }, + "pg_lossless_40000_40m_profile": { + "dynamic_th": "-3", + "pool": "ingress_lossless_pool", + "size": "56368", + "xoff": "55120", + "xon": "18432", + "xon_offset": "2496" + }, + "pg_lossless_40000_300m_profile": { + "dynamic_th": "-3", + "pool": "ingress_lossless_pool", + "size": "56368", + "xoff": "55120", + "xon": "18432", + "xon_offset": "2496" + } + }, + "BUFFER_QUEUE": { + "Ethernet52|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet52|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet56|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet60|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet68|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet72|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet64|5-6": { + "profile": "egress_lossy_profile" + } + }, + "DEVICE_NEIGHBOR": { + "Ethernet52": { + "name": "ARISTA13T2", + "port": "Ethernet2" + }, + "Ethernet56": { + "name": "ARISTA15T2", + "port": "Ethernet1" + }, + "Ethernet60": { + "name": "ARISTA15T2", + "port": "Ethernet2" + }, + "Ethernet68": { + "name": "ARISTA02T0", + "port": "Ethernet1" + }, + "Ethernet72": { + "name": "ARISTA03T0", + "port": "Ethernet1" + }, + "Ethernet64": { + "name": "ARISTA01T0", + "port": "Ethernet1" + } + }, + "DSCP_TO_TC_MAP": { + "AZURE": { + "0": "1", + "1": "1", + "10": "1", + "11": "1", + "12": "1", + "13": "1", + "14": "1", + "15": "1", + "16": "1", + "17": "1", + "18": "1", + "19": "1", + "2": "1", + "20": "1", + "21": "1", + "22": "1", + "23": "1", + "24": "1", + "25": "1", + "26": "1", + "27": "1", + "28": "1", + "29": "1", + "3": "3", + "30": "1", + "31": "1", + "32": "1", + "33": "1", + "34": "1", + "35": "1", + "36": "1", + "37": "1", + "38": "1", + "39": "1", + "4": "4", + "40": "1", + "41": "1", + "42": "1", + "43": "1", + "44": "1", + "45": "1", + "46": "5", + "47": "1", + "48": "6", + "49": "1", + "5": "2", + "50": "1", + "51": "1", + "52": "1", + "53": "1", + "54": "1", + "55": "1", + "56": "1", + "57": "1", + "58": "1", + "59": "1", 
+ "6": "1", + "60": "1", + "61": "1", + "62": "1", + "63": "1", + "7": "1", + "8": "0", + "9": "1" + } + }, + "INTERFACE": { + "Ethernet68": {}, + "Ethernet68|10.0.0.34/31": {}, + "Ethernet68|FC00::45/126": {}, + "Ethernet72": {}, + "Ethernet72|10.0.0.36/31": {}, + "Ethernet72|FC00::49/126": {}, + "Ethernet64": {}, + "Ethernet64|10.0.0.32/31": {}, + "Ethernet64|FC00::41/126": {} + }, + "MAP_PFC_PRIORITY_TO_QUEUE": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" + } + }, + "PORT": { + "Ethernet52": { + "admin_status": "up", + "alias": "fortyGigE0/52", + "description": "ARISTA13T2:Ethernet2", + "index": "13", + "lanes": "49,50,51,52", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet56": { + "admin_status": "up", + "alias": "fortyGigE0/56", + "description": "ARISTA15T2:Ethernet1", + "index": "14", + "lanes": "57,58,59,60", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet60": { + "admin_status": "up", + "alias": "fortyGigE0/60", + "description": "ARISTA15T2:Ethernet2", + "index": "15", + "lanes": "61,62,63,64", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet64": { + "alias": "fortyGigE0/64", + "description": "ARISTA01T0:Ethernet1", + "index": "16", + "lanes": "69,70,71,72", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100", + "admin_status": "up" + }, + "Ethernet68": { + "admin_status": "up", + "alias": "fortyGigE0/68", + "description": "ARISTA02T0:Ethernet1", + "index": "17", + "lanes": "65,66,67,68", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet72": { + "admin_status": "up", + "alias": "fortyGigE0/72", + "description": "ARISTA03T0:Ethernet1", + "index": "18", + "lanes": "73,74,75,76", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } + }, + "PORT_QOS_MAP": { + "Ethernet52": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", + "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + }, + "Ethernet56": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", + "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + }, + "Ethernet60": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", + "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + }, + "Ethernet68": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", + "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + }, + "Ethernet72": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", + "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + }, + "Ethernet64": { + "dscp_to_tc_map": "AZURE", + "pfc_enable": "3,4", + "pfc_to_queue_map": "AZURE", + "tc_to_pg_map": "AZURE", + "tc_to_queue_map": "AZURE" + } + }, + "TC_TO_PRIORITY_GROUP_MAP": { + "AZURE": { + "0": "0", + "1": "0", + "2": "0", + "3": "3", + "4": "4", + "5": "0", + "6": "0", + "7": "7" + } + }, + "TC_TO_QUEUE_MAP": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" + } + } + }, + "patch": [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::42" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.33" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.17/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.35/admin_status" + }, + { + 
"op": "remove", + "path": "/BGP_NEIGHBOR/fc00::6e/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::66/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::22/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::72/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::56/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.47/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.51/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::12/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::52/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::5e/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.37/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.41/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.45/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::a/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::4e/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.1/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.13/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::62/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::2a/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.59/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.21/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::32/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.9/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::2/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::1a/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.49/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.25/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::46/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::5a/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.55/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.29/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.61/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.39/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.43/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.63/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::3a/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.57/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::4a/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.53/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::76/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::7a/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.5/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::6a/admin_status" + }, + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::7e/admin_status" + }, + { + "op": "remove", + "path": "/INTERFACE/Ethernet64" + }, + { + "op": "remove", + "path": "/INTERFACE/Ethernet64|FC00::41~1126" + }, + { + "op": "remove", + "path": "/INTERFACE/Ethernet64|10.0.0.32~131" + }, + { + "op": "remove", + "path": 
"/PORT/Ethernet64/admin_status" + }, + { + "op": "replace", + "path": "/PORT/Ethernet64/description", + "value": "fortyGigE0/64" + }, + { + "op": "remove", + "path": "/DEVICE_NEIGHBOR/Ethernet64" + }, + { + "op": "remove", + "path": "/PORT_QOS_MAP/Ethernet64" + }, + { + "op": "remove", + "path": "/BUFFER_PG/Ethernet64|3-4" + }, + { + "op": "remove", + "path": "/BUFFER_PG/Ethernet64|0" + }, + { + "op": "remove", + "path": "/BUFFER_QUEUE/Ethernet64|3-4" + }, + { + "op": "remove", + "path": "/BUFFER_QUEUE/Ethernet64|5-6" + }, + { + "op": "remove", + "path": "/BUFFER_QUEUE/Ethernet64|0-2" + }, + { + "op": "remove", + "path": "/ACL_TABLE/EVERFLOWV6/ports/0" + }, + { + "op": "remove", + "path": "/ACL_TABLE/EVERFLOW/ports/0" + } + ], + "expected_changes": [ + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.33" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::42" + } + ], + [ + { + "op": "remove", + "path": "/DEVICE_NEIGHBOR/Ethernet64" + } + ], + [ + { + "op": "remove", + "path": "/INTERFACE/Ethernet64|10.0.0.32~131" + } + ], + [ + { + "op": "remove", + "path": "/INTERFACE/Ethernet64|FC00::41~1126" + } + ], + [ + { + "op": "remove", + "path": "/INTERFACE/Ethernet64" + } + ], + [ + { + "op": "remove", + "path": "/ACL_TABLE/EVERFLOW/ports/0" + } + ], + [ + { + "op": "remove", + "path": "/ACL_TABLE/EVERFLOWV6/ports/0" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.1/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.5/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.9/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.13/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.17/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.21/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.25/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.29/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.35/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.37/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.39/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.41/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.43/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.45/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.47/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.49/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.51/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.53/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.55/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.57/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.59/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.61/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/10.0.0.63/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::1a/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::2/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::2a/admin_status" + } + ], + [ + { + 
"op": "remove", + "path": "/BGP_NEIGHBOR/fc00::3a/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::4a/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::4e/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::5a/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::5e/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::6a/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::6e/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::7a/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::7e/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::12/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::22/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::32/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::46/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::52/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::56/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::62/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::66/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::72/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::76/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BGP_NEIGHBOR/fc00::a/admin_status" + } + ], + [ + { + "op": "replace", + "path": "/PORT/Ethernet64/description", + "value": "fortyGigE0/64" + } + ], + [ + { + "op": "remove", + "path": "/PORT/Ethernet64/admin_status" + } + ], + [ + { + "op": "remove", + "path": "/BUFFER_PG/Ethernet64|3-4" + } + ], + [ + { + "op": "remove", + "path": "/BUFFER_PG/Ethernet64|0" + } + ], + [ + { + "op": "remove", + "path": "/BUFFER_QUEUE/Ethernet64|0-2" + } + ], + [ + { + "op": "remove", + "path": "/BUFFER_QUEUE/Ethernet64|3-4" + } + ], + [ + { + "op": "remove", + "path": "/BUFFER_QUEUE/Ethernet64|5-6" + } + ], + [ + { + "op": "remove", + "path": "/PORT_QOS_MAP/Ethernet64" + } + ] + ] } } \ No newline at end of file diff --git a/tests/generic_config_updater/patch_sorter_test.py b/tests/generic_config_updater/patch_sorter_test.py index 5530de7cef..79f2b5a76b 100644 --- a/tests/generic_config_updater/patch_sorter_test.py +++ b/tests/generic_config_updater/patch_sorter_test.py @@ -3078,9 +3078,7 @@ def test_patch_sorter_success(self): data = Files.PATCH_SORTER_TEST_SUCCESS skip_exact_change_list_match = False for test_case_name in data: - # Skipping ADD RACK case until fixing issue https://github.com/Azure/sonic-utilities/issues/2034 - if test_case_name == "ADD_RACK": - continue + # TODO: Add CABLE_LENGTH to ADD_RACK and REMOVE_RACK tests https://github.com/Azure/sonic-utilities/issues/2034 with self.subTest(name=test_case_name): self.run_single_success_case(data[test_case_name], skip_exact_change_list_match) From a54a091a0f41c6aed69b21b9c25ca631c89233be Mon Sep 17 00:00:00 2001 From: Mohamed Ghoneim Date: Thu, 28 Apr 2022 17:17:38 -0700 Subject: [PATCH 12/20] [GCU] Supressing YANG errors from libyang while sorting (#1991) #### What I did Fixes #2025 Stopping SonicYang from printing errors during config validation or moves generation. If callers uses the `-v` option, they will see the SonicYang logs printed. 
Also made sure that SonicYang returned errors are properly propagated, so the user can see the exact errors.

#### How I did it
Used the option `print_log_enabled` while creating `SonicYang`
```python
sy = sonic_yang.SonicYang(self.yang_dir, print_log_enabled=sonic_yang_print_log_enabled)
```

#### How to verify it
Manually verified the changed output, and added unit tests for the gu_common functions that were modified.

#### Previous command output (if the output of a command-line utility has changed)
Given an invalid patch that removes the PORT table while the PORT table is still in use:
```
admin@vlab-01:~$ sudo config apply-patch remove_port.json-patch -i /FEATURE
Patch Applier: Patch application starting.
Patch Applier: Patch: [{"op": "remove", "path": "/PORT"}]
Patch Applier: Getting current config db.
Patch Applier: Simulating the target full config after applying the patch.
Patch Applier: Validating target config does not have empty tables, since they do not show up in ConfigDb.
Patch Applier: Sorting patch updates.
sonic_yang(6):Note: Below table(s) have no YANG models: CONSOLE_SWITCH, DEVICE_NEIGHBOR_METADATA, DHCP_SERVER, KDUMP, RESTAPI, SNMP, SNMP_COMMUNITY, TELEMETRY
libyang[0]: Leafref "/sonic-port:sonic-port/sonic-port:PORT/sonic-port:PORT_LIST/sonic-port:name" of value "Ethernet112" points to a non-existing leaf. (path: /sonic-buffer-pg:sonic-buffer-pg/BUFFER_PG/BUFFER_PG_LIST[port='Ethernet112'][pg_num='0']/port)
libyang[0]: Leafref "/sonic-port:sonic-port/sonic-port:PORT/sonic-port:PORT_LIST/sonic-port:name" of value "Ethernet112" points to a non-existing leaf. (path: /sonic-buffer-pg:sonic-buffer-pg/BUFFER_PG/BUFFER_PG_LIST[port='Ethernet112'][pg_num='3-4']/port)
libyang[0]: Leafref "/sonic-port:sonic-port/sonic-port:PORT/sonic-port:PORT_LIST/sonic-port:name" of value "Ethernet116" points to a non-existing leaf. (path: /sonic-buffer-pg:sonic-buffer-pg/BUFFER_PG/BUFFER_PG_LIST[port='Ethernet116'][pg_num='0']/port)
>>>> 10s of log lines
libyang[0]: Leafref "/sonic-port:sonic-port/sonic-port:PORT/sonic-port:PORT_LIST/sonic-port:name" of value "Ethernet96" points to a non-existing leaf. (path: /sonic-port-qos-map:sonic-port-qos-map/PORT_QOS_MAP/PORT_QOS_MAP_LIST[ifname='Ethernet96']/ifname)
sonic_yang(3):Data Loading Failed:Leafref "/sonic-port:sonic-port/sonic-port:PORT/sonic-port:PORT_LIST/sonic-port:name" of value "Ethernet96" points to a non-existing leaf.
Failed to apply patch
Usage: config apply-patch [OPTIONS] PATCH_FILE_PATH
Try "config apply-patch -h" for help.

Error: Given patch is not valid because it will result in an invalid config
admin@vlab-01:~$
```

#### New command output (if the output of a command-line utility has changed)
```
admin@vlab-01:~$ sudo config apply-patch remove_port.json-patch -i /FEATURE
Patch Applier: Patch application starting.
Patch Applier: Patch: [{"op": "remove", "path": "/PORT"}]
Patch Applier: Getting current config db.
Patch Applier: Simulating the target full config after applying the patch.
Patch Applier: Validating target config does not have empty tables, since they do not show up in ConfigDb.
Patch Applier: Sorting patch updates.
Failed to apply patch
Usage: config apply-patch [OPTIONS] PATCH_FILE_PATH
Try "config apply-patch -h" for help.

Error: Given patch will produce invalid config. Error: Data Loading Failed
Leafref "/sonic-port:sonic-port/sonic-port:PORT/sonic-port:PORT_LIST/sonic-port:name" of value "Ethernet96" points to a non-existing leaf.
admin@vlab-01:~$ ``` --- generic_config_updater/gu_common.py | 14 +++++++++----- generic_config_updater/patch_sorter.py | 13 ++++++++----- tests/generic_config_updater/gu_common_test.py | 12 ++++++++---- tests/generic_config_updater/patch_sorter_test.py | 11 ++++++----- 4 files changed, 31 insertions(+), 19 deletions(-) diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index 0fa1e66f92..356bff3435 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -105,9 +105,9 @@ def validate_sonic_yang_config(self, sonic_yang_as_json): sy.loadData(config_db_as_json) sy.validate_data_tree() - return True + return True, None except sonic_yang.SonicYangException as ex: - return False + return False, ex def validate_config_db_config(self, config_db_as_json): sy = self.create_sonic_yang_with_loaded_models() @@ -118,9 +118,9 @@ def validate_config_db_config(self, config_db_as_json): sy.loadData(tmp_config_db_as_json) sy.validate_data_tree() - return True + return True, None except sonic_yang.SonicYangException as ex: - return False + return False, ex def crop_tables_without_yang(self, config_db_as_json): sy = self.create_sonic_yang_with_loaded_models() @@ -151,7 +151,8 @@ def remove_empty_tables(self, config): def create_sonic_yang_with_loaded_models(self): # sonic_yang_with_loaded_models will only be initialized once the first time this method is called if self.sonic_yang_with_loaded_models is None: - loaded_models_sy = sonic_yang.SonicYang(self.yang_dir) + sonic_yang_print_log_enabled = genericUpdaterLogging.get_verbose() + loaded_models_sy = sonic_yang.SonicYang(self.yang_dir, print_log_enabled=sonic_yang_print_log_enabled) loaded_models_sy.loadYangModel() # This call takes a long time (100s of ms) because it reads files from disk self.sonic_yang_with_loaded_models = loaded_models_sy @@ -829,6 +830,9 @@ def __init__(self): def set_verbose(self, verbose): self._verbose = verbose + def get_verbose(self): + return self._verbose + def get_logger(self, title, print_all_to_console=False): return TitledLogger(SYSLOG_IDENTIFIER, title, self._verbose, print_all_to_console) diff --git a/generic_config_updater/patch_sorter.py b/generic_config_updater/patch_sorter.py index 97db21b24e..f23b347bde 100644 --- a/generic_config_updater/patch_sorter.py +++ b/generic_config_updater/patch_sorter.py @@ -562,7 +562,8 @@ def __init__(self, config_wrapper): def validate(self, move, diff): simulated_config = move.apply(diff.current_config) - return self.config_wrapper.validate_config_db_config(simulated_config) + is_valid, error = self.config_wrapper.validate_config_db_config(simulated_config) + return is_valid # TODO: Add this validation to YANG models instead class UniqueLanesMoveValidator: @@ -1543,8 +1544,9 @@ def sort(self, patch, algorithm=Algorithm.DFS): # Validate target config self.logger.log_info("Validating target config according to YANG models.") - if not(self.config_wrapper.validate_config_db_config(target_config)): - raise ValueError(f"Given patch is not valid because it will result in an invalid config") + is_valid, error = self.config_wrapper.validate_config_db_config(target_config) + if not is_valid: + raise ValueError(f"Given patch will produce invalid config. 
Error: {error}") # Generate list of changes to apply self.logger.log_info("Sorting patch updates.") @@ -1731,8 +1733,9 @@ def sort(self, patch, algorithm=Algorithm.DFS): # Validate YANG covered target config self.logger.log_info("Validating YANG covered target config according to YANG models.") - if not(self.config_wrapper.validate_config_db_config(target_config_yang)): - raise ValueError(f"Given patch is not valid because it will result in an invalid config") + is_valid, error = self.config_wrapper.validate_config_db_config(target_config_yang) + if not is_valid: + raise ValueError(f"Given patch will produce invalid config. Error: {error}") # Generating changes associated with non-YANG covered configs self.logger.log_info("Sorting non-YANG covered configs patch updates.") diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index 2b95bdecc7..e3a8a3bde0 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -144,10 +144,11 @@ def test_validate_sonic_yang_config__valid_config__returns_true(self): expected = True # Act - actual = config_wrapper.validate_sonic_yang_config(Files.SONIC_YANG_AS_JSON) + actual, error = config_wrapper.validate_sonic_yang_config(Files.SONIC_YANG_AS_JSON) # Assert self.assertEqual(expected, actual) + self.assertIsNone(error) def test_validate_sonic_yang_config__invvalid_config__returns_false(self): # Arrange @@ -155,10 +156,11 @@ def test_validate_sonic_yang_config__invvalid_config__returns_false(self): expected = False # Act - actual = config_wrapper.validate_sonic_yang_config(Files.SONIC_YANG_AS_JSON_INVALID) + actual, error = config_wrapper.validate_sonic_yang_config(Files.SONIC_YANG_AS_JSON_INVALID) # Assert self.assertEqual(expected, actual) + self.assertIsNotNone(error) def test_validate_config_db_config__valid_config__returns_true(self): # Arrange @@ -166,10 +168,11 @@ def test_validate_config_db_config__valid_config__returns_true(self): expected = True # Act - actual = config_wrapper.validate_config_db_config(Files.CONFIG_DB_AS_JSON) + actual, error = config_wrapper.validate_config_db_config(Files.CONFIG_DB_AS_JSON) # Assert self.assertEqual(expected, actual) + self.assertIsNone(error) def test_validate_config_db_config__invalid_config__returns_false(self): # Arrange @@ -177,10 +180,11 @@ def test_validate_config_db_config__invalid_config__returns_false(self): expected = False # Act - actual = config_wrapper.validate_config_db_config(Files.CONFIG_DB_AS_JSON_INVALID) + actual, error = config_wrapper.validate_config_db_config(Files.CONFIG_DB_AS_JSON_INVALID) # Assert self.assertEqual(expected, actual) + self.assertIsNotNone(error) def test_crop_tables_without_yang__returns_cropped_config_db_as_json(self): # Arrange diff --git a/tests/generic_config_updater/patch_sorter_test.py b/tests/generic_config_updater/patch_sorter_test.py index 79f2b5a76b..4cb8fa7019 100644 --- a/tests/generic_config_updater/patch_sorter_test.py +++ b/tests/generic_config_updater/patch_sorter_test.py @@ -925,7 +925,7 @@ def test_validate__invalid_config_db_after_applying_move__failure(self): # Arrange config_wrapper = Mock() config_wrapper.validate_config_db_config.side_effect = \ - create_side_effect_dict({(str(self.any_simulated_config),): False}) + create_side_effect_dict({(str(self.any_simulated_config),): (False, None)}) validator = ps.FullConfigMoveValidator(config_wrapper) # Act and assert @@ -935,7 +935,7 @@ def 
test_validate__valid_config_db_after_applying_move__success(self):
         # Arrange
         config_wrapper = Mock()
         config_wrapper.validate_config_db_config.side_effect = \
-            create_side_effect_dict({(str(self.any_simulated_config),): True})
+            create_side_effect_dict({(str(self.any_simulated_config),): (True, None)})
         validator = ps.FullConfigMoveValidator(config_wrapper)
 
         # Act and assert
@@ -3100,7 +3100,8 @@ def run_single_success_case(self, data, skip_exact_change_list_match):
         simulated_config = current_config
         for change in actual_changes:
             simulated_config = change.apply(simulated_config)
-        self.assertTrue(self.config_wrapper.validate_config_db_config(simulated_config))
+        is_valid, error = self.config_wrapper.validate_config_db_config(simulated_config)
+        self.assertTrue(is_valid, f"Change will produce invalid config. Error: {error}")
         self.assertEqual(target_config, simulated_config)
 
     def test_patch_sorter_failure(self):
@@ -3424,7 +3425,7 @@ def __create_patch_sorter(self,
                 (str(any_target_config),): (any_target_config_yang, any_target_config_non_yang)})
 
         config_wrapper.validate_config_db_config.side_effect = \
-            create_side_effect_dict({(str(any_target_config_yang),): valid_yang_covered_config})
+            create_side_effect_dict({(str(any_target_config_yang),): (valid_yang_covered_config, None)})
 
         patch_wrapper.generate_patch.side_effect = \
             create_side_effect_dict(
@@ -3517,7 +3518,7 @@ def __create_patch_sorter(self,
         config_wrapper.validate_config_db_config.side_effect = \
             create_side_effect_dict(
-                {(str(any_target_config),): valid_config_db})
+                {(str(any_target_config),): (valid_config_db, None)})
 
         inner_patch_sorter.sort.side_effect = \

From fdb79b8d65b8ca22232f4e6b140f593dd01613d5 Mon Sep 17 00:00:00 2001
From: Sujin Kang
Date: Fri, 29 Apr 2022 15:51:54 -0700
Subject: [PATCH 13/20] Allow fw update for other boot types after a previous
 "none" boot fw update (#2040)

https://github.com/Azure/sonic-buildimage/issues/8928
https://github.com/Azure/sonic-buildimage/issues/8926
https://github.com/Azure/sonic-buildimage/issues/8925
https://github.com/Azure/sonic-buildimage/issues/8924

#### What I did
Allow `fwutil update all` for other boot types if a previous fw update was done with the "none" boot type.

#### How I did it
Renamed is_first_auto_update to is_capable_auto_update and let the update proceed when the requested boot type is "none" or already appears in the recorded auto-update status (a simplified sketch follows the verification steps below).

#### How to verify it
1. Run fwutil update all for boot_type="none"
2. Run fwutil update all for any other boot_type
3. Verify that the 2nd update proceeds.
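Before the diff, a simplified sketch of the new gating rule (a hypothetical standalone helper, not the patch itself; the real logic lives in is_capable_auto_update() in fwutil/lib.py and reads the status file written by update_au_status_file(), whose exact path is an assumption here):

```python
import json
import os

# Assumption: mirrors FW_AU_STATUS_FILE_PATH as defined in fwutil/lib.py.
FW_AU_STATUS_FILE_PATH = "/tmp/firmwareupdate/fw_au_status"

def can_auto_update_again(boot):
    """A previously recorded auto-update no longer blocks a new run when
    the requested boot type is "none" or already has an entry in the
    recorded status data; any other combination is still refused."""
    if not os.path.exists(FW_AU_STATUS_FILE_PATH):
        return True  # nothing recorded yet, so the auto-update is allowed
    with open(FW_AU_STATUS_FILE_PATH) as f:
        data = json.load(f)
    # The diff spells the first test as `boot is "none"`; `==` is the
    # conventional string comparison and what is intended here.
    return boot == "none" or boot in data
```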
---
 fwutil/lib.py  | 13 ++++++++++---
 fwutil/main.py | 26 ++++++++------------------
 2 files changed, 18 insertions(+), 21 deletions(-)

diff --git a/fwutil/lib.py b/fwutil/lib.py
index 8e994d3514..5f59445c94 100755
--- a/fwutil/lib.py
+++ b/fwutil/lib.py
@@ -790,7 +790,7 @@ def update_firmware(self, chassis_name, module_name, component_name):
 
     def update_au_status_file(self, au_info_data, filename=FW_AU_STATUS_FILE_PATH):
         with open(filename, 'w') as f:
-            json.dump(au_info_data, f)
+            json.dump(au_info_data, f, indent=4, sort_keys=True)
 
     def read_au_status_file_if_exists(self, filename=FW_AU_STATUS_FILE_PATH):
         data = None
@@ -836,6 +836,8 @@ def set_firmware_auto_update_status(self, component_path, fw_version, boot, rt_c
             comp_au_status['version'] = fw_version
             comp_au_status['info'] = info
 
+            click.echo("{} firmware auto-update status from {} to {} : {}".format(component_path, fw_version.split('/')[0], fw_version.split('/')[1], info))
+
             au_status.append(comp_au_status)
 
             self.update_au_status_file(data, FW_AU_STATUS_FILE_PATH)
@@ -883,7 +885,6 @@ def auto_update_firmware(self, component_au_info, boot):
                 rt_code = int(rt_code.strip())
             else:
                 rt_code = component.auto_update_firmware(firmware_path, boot)
-            click.echo("{} firmware auto-update status return_code: {}".format(component_path, int(rt_code)))
             (status, info) = self.set_firmware_auto_update_status(component_path, fw_version, boot, rt_code)
             log_helper.log_fw_auto_update_end(component_path, firmware_path, boot, status, info)
         except KeyboardInterrupt:
@@ -894,7 +895,7 @@ def auto_update_firmware(self, component_au_info, boot):
             raise
 
-    def is_first_auto_update(self, boot):
+    def is_capable_auto_update(self, boot):
         task_file = None
         status_file = None
         for task_file in glob.glob(os.path.join(FIRMWARE_AU_STATUS_DIR, FW_AU_TASK_FILE_REGEX)):
@@ -903,6 +904,12 @@ def is_first_auto_update(self, boot):
                 return False
         for status_file in glob.glob(os.path.join(FIRMWARE_AU_STATUS_DIR, FW_AU_STATUS_FILE)):
             if status_file is not None:
+                data = self.read_au_status_file_if_exists(FW_AU_STATUS_FILE_PATH)
+                if data is not None:
+                    if boot is "none" or boot in data:
+                        click.echo("Allow firmware auto-update with boot_type {} again".format(boot))
+                        return True
+
                 click.echo("{} firmware auto-update is already performed, {} firmware auto update is not allowed any more".format(status_file, boot))
                 return False
 
         click.echo("Firmware auto-update for boot_type {} is allowed".format(boot))

diff --git a/fwutil/main.py b/fwutil/main.py
index 2f378da306..0dac25b5c6 100755
--- a/fwutil/main.py
+++ b/fwutil/main.py
@@ -89,10 +89,10 @@ def update(ctx):
     ctx.obj[COMPONENT_PATH_CTX_KEY] = [ ]
 
-# 'auto_update' group
+# 'all_update' group
 @click.group()
 @click.pass_context
-def auto_update(ctx):
+def all_update(ctx):
     """Auto-update platform firmware"""
     pass
 
@@ -343,7 +343,7 @@ def fw_update(ctx, yes, force, image):
 
 # 'fw' subcommand
-@auto_update.command(name='fw')
+@all_update.command(name='fw')
 @click.option('-i', '--image', 'image', type=click.Choice(["current", "next"]), default="current", show_default=True, help="Update firmware using current/next SONiC image")
 @click.option('-f', '--fw_image', 'fw_image', help="Custom FW package path")
 @click.option('-b', '--boot', 'boot', type=click.Choice(["any", "cold", "fast", "warm", "none"]), default="none", show_default=True, help="Necessary boot option after the firmware update")
@@ -380,7 +380,7 @@ def fw_auto_update(ctx, boot, image=None, fw_image=None):
     if cup is not None:
         au_component_list = cup.get_update_available_components()
         if au_component_list:
-            if cup.is_first_auto_update(boot):
+            if cup.is_capable_auto_update(boot):
                 for au_component in au_component_list:
                     cup.auto_update_firmware(au_component, boot)
     log_helper.print_warning("All firmware auto-update has been performed")
@@ -462,18 +462,10 @@ def status(ctx):
 
 
 # 'updates' subcommand
-@click.group()
+@show.command(name='update-all-status')
 @click.pass_context
-def show_update(ctx):
-    """status : Show platform components auto_update status"""
-    pass
-
-
-# 'status' subcommand
-@show_update.command(name='status')
-@click.pass_context
-def update_status(ctx):
-    """Show platform components auto_update status"""
+def update_all_status(ctx):
+    """Show platform components update all status"""
     try:
         csp = ComponentStatusProvider()
         click.echo(csp.get_au_status())
@@ -487,14 +479,12 @@ def version():
     """Show utility version"""
     click.echo("fwutil version {0}".format(VERSION))
 
-show.add_command(show_update, name='update')
-
 install.add_command(chassis_install, name='chassis')
 install.add_command(module_install, name='module')
 
 update.add_command(chassis_update, name='chassis')
 update.add_command(module_update, name='module')
-update.add_command(auto_update, name='all')
+update.add_command(all_update, name='all')
 
 chassis_install.add_command(component_install, name='component')
 module_install.add_command(component_install, name='component')

From 114386991df2f8ee5688644be17e21b36ac54677 Mon Sep 17 00:00:00 2001
From: Nathan Cohen <66022536+nathcohe@users.noreply.github.com>
Date: Mon, 2 May 2022 15:40:06 -0400
Subject: [PATCH 14/20] Ordering fix for sfpshow eeprom (#2113)

Fixed ordering for sfpshow eeprom
---
 scripts/sfpshow | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/scripts/sfpshow b/scripts/sfpshow
index 7eb9edaca2..9e06333277 100755
--- a/scripts/sfpshow
+++ b/scripts/sfpshow
@@ -10,6 +10,7 @@ import ast
 import os
 import re
 import sys
+from typing import Dict
 
 import click
 from natsort import natsorted
@@ -235,7 +236,7 @@ class SFPShow(object):
         self.intf_name = intf_name
         self.dump_dom = dump_dom
         self.table = []
-        self.output = ''
+        self.intf_eeprom: Dict[str, str] = {}
         self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace_option)
 
     # Convert dict values to cli output string
@@ -391,7 +392,7 @@ class SFPShow(object):
         output = ''
 
         sfp_info_dict = state_db.get_all(state_db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(interface_name))
-        output = interface_name + ': ' + 'SFP EEPROM detected' + '\n'
+        output = 'SFP EEPROM detected\n'
         sfp_info_output = self.convert_sfp_info_to_output_string(sfp_info_dict)
         output += sfp_info_output
 
@@ -408,24 +409,22 @@ class SFPShow(object):
         if self.intf_name is not None:
             presence = self.db.exists(self.db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(self.intf_name))
             if presence:
-                self.output = self.convert_interface_sfp_info_to_cli_output_string(
+                self.intf_eeprom[self.intf_name] = self.convert_interface_sfp_info_to_cli_output_string(
                     self.db, self.intf_name, self.dump_dom)
             else:
-                self.output += (self.intf_name + ': ' + 'SFP EEPROM Not detected' + '\n')
+                self.intf_eeprom[self.intf_name] = "SFP EEPROM Not detected\n"
         else:
             port_table_keys = self.db.keys(self.db.APPL_DB, "PORT_TABLE:*")
-            sorted_table_keys = natsorted(port_table_keys)
-            for i in sorted_table_keys:
+            for i in port_table_keys:
                 interface = re.split(':', i, maxsplit=1)[-1].strip()
                 if interface and interface.startswith(front_panel_prefix()) and not interface.startswith((backplane_prefix(), inband_prefix(), recirc_prefix())):
                     presence = self.db.exists(self.db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(interface))
                     if presence:
-                        self.output += self.convert_interface_sfp_info_to_cli_output_string(
+                        self.intf_eeprom[interface] = self.convert_interface_sfp_info_to_cli_output_string(
                             self.db, interface, self.dump_dom)
                     else:
-                        self.output += (interface + ': ' + 'SFP EEPROM Not detected' + '\n')
+                        self.intf_eeprom[interface] = "SFP EEPROM Not detected\n"
 
-        self.output += '\n'
 
     @multi_asic_util.run_on_multi_asic
     def get_presence(self):
@@ -451,7 +450,7 @@ class SFPShow(object):
         self.table += port_table
 
     def display_eeprom(self):
-        click.echo(self.output)
+        click.echo("\n".join([f"{k}: {v}" for k, v in natsorted(self.intf_eeprom.items())]))
 
     def display_presence(self):
         header = ['Port', 'Presence']

From c37a95788d6e48d86a4f282e9927d8c7061a6e2f Mon Sep 17 00:00:00 2001
From: yozhao101 <56170650+yozhao101@users.noreply.github.com>
Date: Sun, 8 May 2022 15:25:56 -0700
Subject: [PATCH 15/20] [Kdump] Remove the duplicate logic if Kdump was disabled (#2128)

Signed-off-by: Yong Zhao yozhao@microsoft.com

What I did

Recently, the following error messages were observed on testbed str2-7050cx3-acs-01 during nightly testing. Specifically, this issue occurs when the hostcfgd daemon is started.

Apr 5 21:12:15.794016 str2-7050cx3-acs-01 ERR /hostcfgd: sonic-kdump-config --disable - failed: return code - 1, output:#012None
Apr 5 21:12:15.772139 str2-7050cx3-acs-01 INFO hostcfgd[457569]: Error! Exception[[Errno 2] No such file or directory: '/host/grub/grub.cfg'] occured while processing the command sonic-kdump-config --disable.

The root cause is that the path of the kernel boot loader configuration file used in the kdump_disable(...) function was not correct. We have two different boot loaders, GRUB and Aboot, on different platforms, and the paths of the kernel boot loader configuration files for these two boot loaders are different. So the kdump_disable(...) function should not explicitly use grub_cfg, since the function parameter cmdline_file already represents the path of the kernel boot loader configuration file.

How I did it

I removed the duplicate logic in the function kdump_disable(...) and used cmdline_file to uniquely represent the path of the kernel boot loader configuration file.

How to verify it

I tested this change on testbed str2-7050cx3-acs-01.

Previous command output (if the output of a command-line utility has changed)

admin@str2-7050cx3-acs-02:~$ ls -al /host/machine.conf
-rw-r--r-- 1 root 88 167 Apr 19 12:47 /host/machine.conf
admin@str2-7050cx3-acs-02:~$ /usr/local/bin/sonic-kdump-config --disable
Unable to locate current SONiC image
admin@str2-7050cx3-acs-02:~$ sudo /usr/local/bin/sonic-kdump-config --disable
kdump is already disabled
Error! Exception[[Errno 2] No such file or directory: '/host/grub/grub.cfg'] occured while processing the command sonic-kdump-config --disable.
admin@str-s6000-on-6:~$ sudo /usr/local/bin/sonic-kdump-config --disable
kdump is already disabled
Kdump is already disabled
admin@str-s6000-on-6:~$ ls -al /host/grub/grub.cfg
-rw------- 1 root root 2529 Mar 17 21:48 /host/grub/grub.cfg
admin@str-s6000-on-6:~$

New command output (if the output of a command-line utility has changed)

admin@str2-7050cx3-acs-02:~$ ls -al /host/machine.conf
-rw-r--r-- 1 root 88 167 Apr 19 12:47 /host/machine.conf
admin@str2-7050cx3-acs-02:~$ /usr/local/bin/sonic-kdump-config --disable
Root privileges required for this operation
admin@str2-7050cx3-acs-02:~$ sudo /usr/local/bin/sonic-kdump-config --disable
kdump is already disabled
admin@str-s6000-on-6:~$ ls -al /host/grub/grub.cfg
-rw------- 1 root root 2529 Mar 17 21:48 /host/grub/grub.cfg
admin@str-s6000-on-6:~$ sudo /usr/local/bin/sonic-kdump-config --disable
kdump is already disabled
admin@str-s6000-on-6:~$
---
 scripts/sonic-kdump-config       |  81 +++++----
 tests/sonic_kdump_config_test.py | 297 +++++++++++++++++++++++++++++++
 2 files changed, 340 insertions(+), 38 deletions(-)
 create mode 100644 tests/sonic_kdump_config_test.py

diff --git a/scripts/sonic-kdump-config b/scripts/sonic-kdump-config
index 2ffdd2e8db..f86aa3a7d4 100755
--- a/scripts/sonic-kdump-config
+++ b/scripts/sonic-kdump-config
@@ -17,12 +17,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
-import sys
 import argparse
-import shlex
+import json
 import os
+import shlex
+import sys
+import syslog
 import subprocess
-import json
+
 from swsscommon.swsscommon import ConfigDBConnector
 
 aboot_cfg_template ="/host/image-%s/kernel-cmdline"
@@ -437,7 +439,7 @@ def get_kdump_config_json(config_param):
 # @param verbose If True, the function will display a few additinal information
 # @param image The image on which kdump settings are changed
 # @return True if the grub/cmdline cfg has changed, and False if it has not
-def cmd_kdump_enable(verbose, image=get_current_image()):
+def cmd_kdump_enable(verbose, image):
 
     kdump_enabled = get_kdump_administrative_mode()
     memory = get_kdump_memory()
@@ -460,21 +462,39 @@
 # @param image The image on which kdump settings are changed
 # @return True if the grub/cmdline cfg has changed, and False if it has not
 def cmd_kdump_config_next(verbose):
-    return cmd_kdump_enable(verbose, image=get_next_image())
+    image = get_next_image()
+    return cmd_kdump_enable(verbose, image)
 
-## Disable kdump
-#
-# @param verbose If True, the function will display a few additional information
-# @return True if the grub/cmdline cfg has changed, and False if it has not
-def kdump_disable(verbose, kdump_enabled, memory, num_dumps, image, cmdline_file):
+
+def kdump_disable(verbose, image, booter_config_file_path):
+    """Unloads Kdump kernel and remove parameter `crashkernel=*` from configuration file of
+    kernel boot loader.
+
+    Args:
+        image: A string represents SONiC image version.
+        booter_config_file_path: A string represents path of kernel boot loader configuration file.
+
+    Returns:
+        changes: If kernel booter configuration file was changed, returns True; Otherwise, returns
+        False.
+ """ write_use_kdump(0) if verbose: print("Disabling kdump for image=[%s]\n" % image) - lines = [line.rstrip('\n') for line in open(cmdline_file)] + + try: + with open(booter_config_file_path) as config_file_handler: + lines = [line.rstrip('\n') for line in config_file_handler] + except OSError as sys_err: + syslog.syslog(syslog.LOG_ERR, "Failed to open kernel booter configuration file: '{}', Error is: '{}'!" + .format(booter_config_file_path, sys_err)) + return False + img_index = locate_image(lines, "loop=image-"+image) changed = False + crash_kernel_mem = search_for_crash_kernel(lines[img_index]) if crash_kernel_mem is None: print("kdump is already disabled") @@ -482,53 +502,36 @@ def kdump_disable(verbose, kdump_enabled, memory, num_dumps, image, cmdline_file lines[img_index] = lines[img_index].replace("crashkernel="+crash_kernel_mem, "") changed = True if verbose: - print("Removed [%s] in %s" % ("crashkernel="+crash_kernel_mem, cmdline_file)) + print("Removed [%s] in %s" % ("crashkernel="+crash_kernel_mem, booter_config_file_path)) if changed: - rewrite_cfg(lines, cmdline_file) + rewrite_cfg(lines, booter_config_file_path) if not os.path.exists('/etc/sonic/config_db.json'): print_err("Startup configuration not found, Kdump configuration is not saved") return False - current_img = get_current_image(); - if verbose: - print("Current image=[%s]\n" % current_img) - lines = [line.rstrip('\n') for line in open(grub_cfg)] - current_img_index = locate_image(lines, "loop=image-"+current_img) - - changed = False - curr_crash_kernel_mem = search_for_crash_kernel(lines[current_img_index]) - if curr_crash_kernel_mem is None: - print("Kdump is already disabled") - else: - lines[current_img_index] = lines[current_img_index].replace("crashkernel="+curr_crash_kernel_mem, "") - changed = True - if verbose: - print("Removed [%s] in grub.cfg" % ("crashkernel="+curr_crash_kernel_mem)) - - if changed: - rewrite_grub_cfg(lines, grub_cfg) - return changed + ## Command: Disable kdump # # @param verbose If True, the function will display a few additional information -# @param image The image on which kdump settings are changed -def cmd_kdump_disable(verbose, image=get_current_image()): +def cmd_kdump_disable(verbose): + image = get_current_image() kdump_enabled = get_kdump_administrative_mode() memory = get_kdump_memory() num_dumps = get_kdump_num_dumps() + if verbose: print("configDB: kdump_enabled=%d memory=[%s] num_nums=%d" % (kdump_enabled, memory, num_dumps)) if os.path.exists(grub_cfg): - return kdump_disable(verbose, kdump_enabled, memory, num_dumps, image, grub_cfg) + return kdump_disable(verbose, image, grub_cfg) elif open(machine_cfg, 'r').read().find('aboot_platform') >= 0: aboot_cfg = aboot_cfg_template % image - return kdump_disable(verbose, kdump_enabled, memory, num_dumps, image, aboot_cfg) + return kdump_disable(verbose, image, aboot_cfg) else: print("Feature not supported on this platform") return False @@ -549,7 +552,8 @@ def cmd_kdump_memory(verbose, memory): memory_in_db = get_kdump_memory() memory_in_json = get_kdump_config_json("memory") if memory != crash_kernel_in_cmdline or memory != memory_in_db or memory != memory_in_json: - cmd_kdump_enable(verbose) + image = get_current_image() + cmd_kdump_enable(verbose, image) print("Kdump updated memory will be only operational after the system reboots") else: num_dumps = get_kdump_num_dumps() @@ -635,7 +639,8 @@ def main(): changed = False try: if options.enable: - changed = cmd_kdump_enable(options.verbose) + image = get_current_image() + 
changed = cmd_kdump_enable(options.verbose, image) elif options.config_next: changed = cmd_kdump_config_next(options.verbose) elif options.disable: diff --git a/tests/sonic_kdump_config_test.py b/tests/sonic_kdump_config_test.py new file mode 100644 index 0000000000..ca687a03d3 --- /dev/null +++ b/tests/sonic_kdump_config_test.py @@ -0,0 +1,297 @@ +import logging +import os +import sys +import unittest +from unittest.mock import patch, mock_open + +from utilities_common.general import load_module_from_source + +TESTS_DIR_PATH = os.path.dirname(os.path.abspath(__file__)) +UTILITY_DIR_PATH = os.path.dirname(TESTS_DIR_PATH) +SCRIPTS_DIR_PATH = os.path.join(UTILITY_DIR_PATH, "scripts") +sys.path.append(SCRIPTS_DIR_PATH) + +ABOOT_MACHINE_CFG_PLATFORM = "aboot_platform=x86_64-arista_7050cx3_32s" +ABOOT_MACHINE_CFG_ARCH = "aboot_arch=x86_64" +KERNEL_BOOTING_CFG_KDUMP_DISABLED = "loop=image-20201231.63/fs.squashfs loopfstype=squashfs" +KERNEL_BOOTING_CFG_KDUMP_ENABLED = "loop=image-20201231.63/fs.squashfs loopfstype=squashfs crashkernel=0M-2G:256MB" + +logger = logging.getLogger(__name__) +# Load `sonic-kdump-config` module from source since `sonic-kdump-config` does not have .py extension. +sonic_kdump_config_path = os.path.join(SCRIPTS_DIR_PATH, "sonic-kdump-config") +sonic_kdump_config = load_module_from_source("sonic_kdump_config", sonic_kdump_config_path) + + +class TestSonicKdumpConfig(unittest.TestCase): + @classmethod + def setup_class(cls): + print("SETUP") + + @patch("sonic_kdump_config.run_command") + def test_read_num_kdumps(self, mock_run_cmd): + """Tests the function `read_num_kdumps(...)` in script `sonic-kdump-config`. + """ + mock_run_cmd.return_value = (0, ["0"], None) + num_dumps = sonic_kdump_config.read_num_dumps() + assert num_dumps == 0 + + logger.info("Value of 'num_dumps' is: '{}'.".format(num_dumps)) + logger.info("Expected value of 'num_dumps' is: '0'.") + + mock_run_cmd.return_value = (0, ["NotInteger"], None) + with self.assertRaises(SystemExit) as sys_exit: + num_dumps = sonic_kdump_config.read_num_dumps() + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.return_value = (0, (), None) + with self.assertRaises(SystemExit) as sys_exit: + num_dumps = sonic_kdump_config.read_num_dumps() + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.return_value = (0, [], None) + with self.assertRaises(SystemExit) as sys_exit: + num_dumps = sonic_kdump_config.read_num_dumps() + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.return_value = (1, [], None) + with self.assertRaises(SystemExit) as sys_exit: + num_dumps = sonic_kdump_config.read_num_dumps() + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.return_value = (1, ["3"], None) + with self.assertRaises(SystemExit) as sys_exit: + num_dumps = sonic_kdump_config.read_num_dumps() + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.return_value = (1, (), None) + with self.assertRaises(SystemExit) as sys_exit: + num_dumps = sonic_kdump_config.read_num_dumps() + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.return_value = (1, ["NotInteger"], None) + with self.assertRaises(SystemExit) as sys_exit: + num_dumps = sonic_kdump_config.read_num_dumps() + self.assertEqual(sys_exit.exception.code, 1) + + @patch("sonic_kdump_config.run_command") + def test_read_use_kdump(self, mock_run_cmd): + """Tests the function `read_use_kdump(...)` in script `sonic-kdump-config`. 
+ """ + mock_run_cmd.return_value = (0, ["0"], None) + is_kdump_enabled = sonic_kdump_config.read_use_kdump() + assert is_kdump_enabled == 0 + + mock_run_cmd.return_value = (0, (), None) + with self.assertRaises(SystemExit) as sys_exit: + is_kdump_enabled = sonic_kdump_config.read_use_kdump() + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.return_value = (0, [], None) + with self.assertRaises(SystemExit) as sys_exit: + is_kdump_enabled = sonic_kdump_config.read_use_kdump() + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.return_value = (0, ["NotInteger"], None) + with self.assertRaises(SystemExit) as sys_exit: + is_kdump_enabled = sonic_kdump_config.read_use_kdump() + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.return_value = (1, ["0"], None) + with self.assertRaises(SystemExit) as sys_exit: + is_kdump_enabled = sonic_kdump_config.read_use_kdump() + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.return_value = (1, ["NotInteger"], None) + with self.assertRaises(SystemExit) as sys_exit: + is_kdump_enabled = sonic_kdump_config.read_use_kdump() + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.return_value = (1, (), None) + with self.assertRaises(SystemExit) as sys_exit: + is_kdump_enabled = sonic_kdump_config.read_use_kdump() + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.return_value = (1, [], None) + with self.assertRaises(SystemExit) as sys_exit: + is_kdump_enabled = sonic_kdump_config.read_use_kdump() + self.assertEqual(sys_exit.exception.code, 1) + + @patch("sonic_kdump_config.read_use_kdump") + @patch("sonic_kdump_config.run_command") + def test_write_num_kdump(self, mock_run_cmd, mock_read_kdump): + """Tests the function `write_use_kdump(...)` in script `sonic-kdump-config`. 
+ """ + mock_run_cmd.side_effect = [(0, [], None), (0, ["1"], None)] + mock_read_kdump.return_value = 0 + sonic_kdump_config.write_use_kdump(0) + + mock_run_cmd.side_effect = [(0, (), None), (0, ["1"], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(0) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(0, ["NotInteger"], None), (0, ["1"], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(0) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(2, [], None), (0, ["1"], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(0) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(2, (), None), (0, ["1"], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(0) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(2, ["NotInteger"], None), (0, ["1"], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(0) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(0, (), None), (0, ["1"], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(1) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(0, ["NotInteger"], None), (0, ["1"], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(1) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(2, [], None), (0, ["1"], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(1) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(2, (), None), (0, ["1"], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(1) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(2, ["NotInteger"], None), (0, ["1"], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(1) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(0, [], None), (1, [""], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(1) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(0, [], None), (0, ["1"], None)] + mock_read_kdump.return_value = 1 + sonic_kdump_config.write_use_kdump(1) + + mock_run_cmd.side_effect = [(0, [], None), (1, [""], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(1) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(0, [], None), (0, [""], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(1) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(0, [], None), (0, [""], None)] + mock_read_kdump.return_value = 1 + with self.assertRaises(SystemExit) as 
sys_exit: + sonic_kdump_config.write_use_kdump(0) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(0, [], None), (1, [""], None)] + mock_read_kdump.return_value = 1 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(0) + self.assertEqual(sys_exit.exception.code, 1) + + mock_run_cmd.side_effect = [(0, [], None), (1, [""], None)] + mock_read_kdump.return_value = 0 + with self.assertRaises(SystemExit) as sys_exit: + sonic_kdump_config.write_use_kdump(0) + self.assertEqual(sys_exit.exception.code, 1) + + @patch("sonic_kdump_config.kdump_disable") + @patch("sonic_kdump_config.get_current_image") + @patch("sonic_kdump_config.get_kdump_administrative_mode") + @patch("sonic_kdump_config.get_kdump_memory") + @patch("sonic_kdump_config.get_kdump_num_dumps") + @patch("os.path.exists") + def test_cmd_kdump_disable(self, mock_path_exist, mock_num_dumps, mock_memory, + mock_administrative_mode, mock_image, mock_kdump_disable): + """Tests the function `cmd_kdump_disable(...)` in script `sonic-kdump-config.py`. + """ + mock_path_exist.return_value = True + mock_num_dumps.return_value = 3 + mock_memory.return_value = "0M-2G:256MB" + mock_administrative_mode = "True" + mock_image.return_value = "20201230.63" + mock_kdump_disable.return_value = True + + return_result = sonic_kdump_config.cmd_kdump_disable(True) + assert return_result == True + + mock_path_exist.return_value = False + with patch("sonic_kdump_config.open", mock_open(read_data=ABOOT_MACHINE_CFG_PLATFORM)): + return_result = sonic_kdump_config.cmd_kdump_disable(True) + assert return_result == True + + mock_path_exist.return_value = False + with patch("sonic_kdump_config.open", mock_open(read_data=ABOOT_MACHINE_CFG_ARCH)): + return_result = sonic_kdump_config.cmd_kdump_disable(True) + assert return_result == False + + @patch("sonic_kdump_config.write_use_kdump") + @patch("os.path.exists") + def test_kdump_disable(self, mock_path_exist, mock_write_kdump): + """Tests the function `kdump_disable(...)` in script `sonic-kdump-config.py`. 
+ """ + mock_path_exist.return_value = True + mock_write_kdump.return_value = 0 + + return_result = sonic_kdump_config.kdump_disable(True, "20201230.63", "/host/image-20201231.64/kernel-cmdline") + assert return_result == False + + mock_open_func = mock_open(read_data=KERNEL_BOOTING_CFG_KDUMP_ENABLED) + with patch("sonic_kdump_config.open", mock_open_func): + return_result = sonic_kdump_config.kdump_disable(True, "20201230.63", "/host/grub/grub.cfg") + assert return_result == True + handle = mock_open_func() + handle.writelines.assert_called_once() + + mock_open_func = mock_open(read_data=KERNEL_BOOTING_CFG_KDUMP_DISABLED) + with patch("sonic_kdump_config.open", mock_open_func): + return_result = sonic_kdump_config.kdump_disable(True, "20201230.63", "/host/grub/grub.cfg") + assert return_result == False + + mock_path_exist.return_value = False + mock_open_func = mock_open(read_data=KERNEL_BOOTING_CFG_KDUMP_ENABLED) + with patch("sonic_kdump_config.open", mock_open_func): + return_result = sonic_kdump_config.kdump_disable(True, "20201230.63", "/host/grub/grub.cfg") + assert return_result == False + handle = mock_open_func() + handle.writelines.assert_called_once() + + mock_path_exist.return_value = False + mock_open_func = mock_open(read_data=KERNEL_BOOTING_CFG_KDUMP_DISABLED) + with patch("sonic_kdump_config.open", mock_open_func): + return_result = sonic_kdump_config.kdump_disable(True, "20201230.63", "/host/grub/grub.cfg") + assert return_result == False + + @classmethod + def teardown_class(cls): + print("TEARDOWN") From 6ab1c51c9a088ff6e271fe6628ec99bb07dc6d3d Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Sun, 8 May 2022 19:26:06 -0700 Subject: [PATCH 16/20] [minigraph] Consume golden_config_db.json while loading minigraph (#2140) What I did This PR is for supporting consume golden_config_db.json while loading minigraph. User can put the golden_config_db.json with minigraph file to override configDB after reload minigraph. The golden_config_db.json looks just like a config_db.json and it will be placed on /etc/sonic/golden_config_db.json. How I did it Add code config override-config-table command to let it consume golden_config_db.json How to verify it Add UT tests and run. 
--- config/main.py | 71 ++++++++- tests/config_override_input/empty_input.json | 113 +++++++++++++ .../full_config_override.json | 122 ++++++++++++++ .../new_feature_config.json | 124 +++++++++++++++ .../partial_config_override.json | 130 +++++++++++++++ tests/config_override_test.py | 150 ++++++++++++++++++ tests/config_test.py | 21 +++ 7 files changed, 729 insertions(+), 2 deletions(-) create mode 100644 tests/config_override_input/empty_input.json create mode 100644 tests/config_override_input/full_config_override.json create mode 100644 tests/config_override_input/new_feature_config.json create mode 100644 tests/config_override_input/partial_config_override.json create mode 100644 tests/config_override_test.py diff --git a/config/main.py b/config/main.py index 3d549b991c..c4d8651b77 100644 --- a/config/main.py +++ b/config/main.py @@ -15,7 +15,7 @@ from collections import OrderedDict from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat -from minigraph import parse_device_desc_xml +from minigraph import parse_device_desc_xml, minigraph_encoder from natsort import natsorted from portconfig import get_child_ports from socket import AF_INET, AF_INET6 @@ -27,7 +27,7 @@ from utilities_common.intf_filter import parse_interface_in_filter from utilities_common import bgp_util import utilities_common.cli as clicommon -from utilities_common.general import load_db_config +from utilities_common.general import load_db_config, load_module_from_source from .utils import log @@ -71,6 +71,7 @@ DEFAULT_CONFIG_YANG_FILE = '/etc/sonic/config_yang.json' NAMESPACE_PREFIX = 'asic' INTF_KEY = "interfaces" +DEFAULT_GOLDEN_CONFIG_DB_FILE = '/etc/sonic/golden_config_db.json' INIT_CFG_FILE = '/etc/sonic/init_cfg.json' @@ -99,6 +100,9 @@ QUEUE_RANGE = click.IntRange(min=0, max=255) GRE_TYPE_RANGE = click.IntRange(min=0, max=65535) +# Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. +sonic_cfggen = load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') + # # Helper functions # @@ -1619,6 +1623,10 @@ def load_minigraph(db, no_service_restart): cfggen_namespace_option = " -n {}".format(namespace) clicommon.run_command(db_migrator + ' -o set_version' + cfggen_namespace_option) + # Load golden_config_db.json + if os.path.isfile(DEFAULT_GOLDEN_CONFIG_DB_FILE): + override_config_by(DEFAULT_GOLDEN_CONFIG_DB_FILE) + # We first run "systemctl reset-failed" to remove the "failed" # status from all services before we attempt to restart them if not no_service_restart: @@ -1667,6 +1675,65 @@ def load_port_config(config_db, port_config_path): port_name), display_cmd=True) return + +def override_config_by(golden_config_path): + # Override configDB with golden config + clicommon.run_command('config override-config-table {}'.format( + golden_config_path), display_cmd=True) + return + + +# +# 'override-config-table' command ('config override-config-table ...') +# +@config.command('override-config-table') +@click.argument('input-config-db', required=True) +@click.option( + '--dry-run', is_flag=True, default=False, + help='test out the command without affecting config state' +) +@clicommon.pass_db +def override_config_table(db, input_config_db, dry_run): + """Override current configDB with input config.""" + + try: + # Load golden config json + config_input = read_json_file(input_config_db) + except Exception as e: + click.secho("Bad format: json file broken. 
{}".format(str(e)), + fg='magenta') + sys.exit(1) + + # Validate if the input is dict + if not isinstance(config_input, dict): + click.secho("Bad format: input_config_db is not a dict", + fg='magenta') + sys.exit(1) + + config_db = db.cfgdb + + if dry_run: + # Read config from configDB + current_config = config_db.get_config() + # Serialize to the same format as json input + sonic_cfggen.FormatConverter.to_serialized(current_config) + # Override current config with golden config + for table in config_input: + current_config[table] = config_input[table] + print(json.dumps(current_config, sort_keys=True, + indent=4, cls=minigraph_encoder)) + else: + # Deserialized golden config to DB recognized format + sonic_cfggen.FormatConverter.to_deserialized(config_input) + # Delete table from DB then mod_config to apply golden config + click.echo("Removing configDB overriden table first ...") + for table in config_input: + config_db.delete_table(table) + click.echo("Overriding input config to configDB ...") + data = sonic_cfggen.FormatConverter.output_to_db(config_input) + config_db.mod_config(data) + click.echo("Overriding completed. No service is restarted.") + # # 'hostname' command # diff --git a/tests/config_override_input/empty_input.json b/tests/config_override_input/empty_input.json new file mode 100644 index 0000000000..ced851cfb8 --- /dev/null +++ b/tests/config_override_input/empty_input.json @@ -0,0 +1,113 @@ +{ + "running_config": { + "ACL_TABLE": { + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "NTP_ACL": { + "policy_desc": "NTP_ACL", + "services": [ + "NTP" + ], + "stage": "ingress", + "type": "CTRLPLANE" + } + }, + "AUTO_TECHSUPPORT_FEATURE": { + "bgp": { + "rate_limit_interval": "600", + "state": "enabled" + }, + "database": { + "rate_limit_interval": "600", + "state": "enabled" + } + }, + "PORT": { + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } + } + }, + "golden_config": { + + }, + "expected_config": { + "ACL_TABLE": { + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "NTP_ACL": { + "policy_desc": "NTP_ACL", + "services": [ + "NTP" + ], + "stage": "ingress", + "type": "CTRLPLANE" + } + }, + "AUTO_TECHSUPPORT_FEATURE": { + "bgp": { + "rate_limit_interval": "600", + "state": "enabled" + }, + "database": { + "rate_limit_interval": "600", + "state": "enabled" + } + }, + "PORT": { + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } + } + } +} \ No newline at end of file diff --git a/tests/config_override_input/full_config_override.json b/tests/config_override_input/full_config_override.json new file mode 100644 index 0000000000..1b589acb50 --- /dev/null +++ 
b/tests/config_override_input/full_config_override.json @@ -0,0 +1,122 @@ +{ + "running_config": { + "ACL_TABLE": { + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "NTP_ACL": { + "policy_desc": "NTP_ACL", + "services": [ + "NTP" + ], + "stage": "ingress", + "type": "CTRLPLANE" + } + }, + "AUTO_TECHSUPPORT_FEATURE": { + "bgp": { + "rate_limit_interval": "600", + "state": "enabled" + }, + "database": { + "rate_limit_interval": "600", + "state": "enabled" + } + }, + "PORT": { + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } + } + }, + "golden_config": { + "ACL_TABLE": { + "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet12" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "AUTO_TECHSUPPORT_FEATURE": { + "bgp": { + "state": "disabled" + }, + "database": { + "state": "disabled" + } + }, + "PORT": { + "Ethernet12": { + "admin_status": "up", + "alias": "fortyGigE0/12", + "description": "Servers2:eth0", + "index": "3", + "lanes": "37,38,39,40", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } + } + }, + "expected_config": { + "ACL_TABLE": { + "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet12" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "AUTO_TECHSUPPORT_FEATURE": { + "bgp": { + "state": "disabled" + }, + "database": { + "state": "disabled" + } + }, + "PORT": { + "Ethernet12": { + "admin_status": "up", + "alias": "fortyGigE0/12", + "description": "Servers2:eth0", + "index": "3", + "lanes": "37,38,39,40", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } + } + } +} \ No newline at end of file diff --git a/tests/config_override_input/new_feature_config.json b/tests/config_override_input/new_feature_config.json new file mode 100644 index 0000000000..b8894f078c --- /dev/null +++ b/tests/config_override_input/new_feature_config.json @@ -0,0 +1,124 @@ +{ + "running_config": { + "ACL_TABLE": { + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "NTP_ACL": { + "policy_desc": "NTP_ACL", + "services": [ + "NTP" + ], + "stage": "ingress", + "type": "CTRLPLANE" + } + }, + "AUTO_TECHSUPPORT_FEATURE": { + "bgp": { + "rate_limit_interval": "600", + "state": "enabled" + }, + "database": { + "rate_limit_interval": "600", + "state": "enabled" + } + }, + "PORT": { + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } + } + }, + "golden_config": { + "NEW_FEATURE_TABLE": { + "entry": { + "field": "value", + "state": "disabled" + } + } + }, + "expected_config": { + "ACL_TABLE": { + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": 
"ingress", + "type": "L3" + }, + "NTP_ACL": { + "policy_desc": "NTP_ACL", + "services": [ + "NTP" + ], + "stage": "ingress", + "type": "CTRLPLANE" + } + }, + "AUTO_TECHSUPPORT_FEATURE": { + "bgp": { + "rate_limit_interval": "600", + "state": "enabled" + }, + "database": { + "rate_limit_interval": "600", + "state": "enabled" + } + }, + "PORT": { + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } + }, + "NEW_FEATURE_TABLE": { + "entry": { + "field": "value", + "state": "disabled" + } + } + } +} \ No newline at end of file diff --git a/tests/config_override_input/partial_config_override.json b/tests/config_override_input/partial_config_override.json new file mode 100644 index 0000000000..2021ea282b --- /dev/null +++ b/tests/config_override_input/partial_config_override.json @@ -0,0 +1,130 @@ +{ + "running_config": { + "ACL_TABLE": { + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "NTP_ACL": { + "policy_desc": "NTP_ACL", + "services": [ + "NTP" + ], + "stage": "ingress", + "type": "CTRLPLANE" + } + }, + "AUTO_TECHSUPPORT_FEATURE": { + "bgp": { + "rate_limit_interval": "600", + "state": "enabled" + }, + "database": { + "rate_limit_interval": "600", + "state": "enabled" + } + }, + "PORT": { + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } + } + }, + "golden_config": { + "ACL_TABLE": { + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "NTP_ACL": { + "policy_desc": "NTP_ACL", + "services": [ + "NTP" + ], + "stage": "ingress", + "type": "CTRLPLANE" + } + } + }, + "expected_config": { + "ACL_TABLE": { + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "NTP_ACL": { + "policy_desc": "NTP_ACL", + "services": [ + "NTP" + ], + "stage": "ingress", + "type": "CTRLPLANE" + } + }, + "AUTO_TECHSUPPORT_FEATURE": { + "bgp": { + "rate_limit_interval": "600", + "state": "enabled" + }, + "database": { + "rate_limit_interval": "600", + "state": "enabled" + } + }, + "PORT": { + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } + } + } +} \ No newline at end of file diff --git a/tests/config_override_test.py b/tests/config_override_test.py new file mode 100644 index 0000000000..bee2b44192 --- /dev/null +++ 
b/tests/config_override_test.py @@ -0,0 +1,150 @@ +import os +import json +import filecmp +import config.main as config + +from click.testing import CliRunner +from unittest import mock +from utilities_common.db import Db +from utilities_common.general import load_module_from_source +from minigraph import minigraph_encoder + +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +DATA_DIR = os.path.join(SCRIPT_DIR, "config_override_input") +EMPTY_INPUT = os.path.join(DATA_DIR, "empty_input.json") +PARTIAL_CONFIG_OVERRIDE = os.path.join(DATA_DIR, "partial_config_override.json") +NEW_FEATURE_CONFIG = os.path.join(DATA_DIR, "new_feature_config.json") +FULL_CONFIG_OVERRIDE = os.path.join(DATA_DIR, "full_config_override.json") + +# Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. +sonic_cfggen = load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') + + +def write_init_config_db(cfgdb, config): + tables = cfgdb.get_config() + # delete all tables then write config to configDB + for table in tables: + cfgdb.delete_table(table) + data = dict() + sonic_cfggen.deep_update( + data, sonic_cfggen.FormatConverter.to_deserialized(config)) + cfgdb.mod_config(sonic_cfggen.FormatConverter.output_to_db(data)) + return + + +def read_config_db(cfgdb): + data = dict() + sonic_cfggen.deep_update( + data, sonic_cfggen.FormatConverter.db_to_output(cfgdb.get_config())) + return sonic_cfggen.FormatConverter.to_serialized(data) + + +def write_config_to_file(cfgdb, file): + with open(file, 'w') as f: + json.dump(cfgdb, f, sort_keys=True, indent=4, cls=minigraph_encoder) + return + + +class TestConfigOverride(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["UTILITIES_UNIT_TESTING"] = "1" + return + + def test_broken_json(self): + def read_json_file_side_effect(filename): + return {{"TABLE"}} + with mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + db = Db() + runner = CliRunner() + result = runner.invoke(config.config.commands["override-config-table"], + ['golden_config_db.json'], obj=db) + + assert result.exit_code == 1 + assert "Bad format: json file broken" in result.output + + def test_json_is_not_dict(self): + def read_json_file_side_effect(filename): + return [{}] + with mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + db = Db() + runner = CliRunner() + result = runner.invoke(config.config.commands["override-config-table"], + ['golden_config_db.json'], obj=db) + + assert result.exit_code == 1 + assert "Bad format: input_config_db is not a dict" in result.output + + def test_dry_run(self): + def read_json_file_side_effect(filename): + return {} + db = Db() + current_config = read_config_db(db.cfgdb) + with mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + runner = CliRunner() + result = runner.invoke(config.config.commands["override-config-table"], + ['golden_config_db.json', '--dry-run']) + + assert result.exit_code == 0 + assert json.loads(result.output) == current_config + + def test_golden_config_db_empty(self): + db = Db() + with open(EMPTY_INPUT, "r") as f: + read_data = json.load(f) + self.check_override_config_table( + db, config, read_data['running_config'], read_data['golden_config'], + read_data['expected_config']) + + def test_golden_config_db_partial(self): + """Golden Config only modify ACL_TABLE""" + db = Db() + with open(PARTIAL_CONFIG_OVERRIDE, "r") as 
f: + read_data = json.load(f) + self.check_override_config_table( + db, config, read_data['running_config'], read_data['golden_config'], + read_data['expected_config']) + + def test_golden_config_db_new_feature(self): + """Golden Config append NEW_FEATURE_TABLE""" + db = Db() + with open(NEW_FEATURE_CONFIG, "r") as f: + read_data = json.load(f) + self.check_override_config_table( + db, config, read_data['running_config'], read_data['golden_config'], + read_data['expected_config']) + + def test_golden_config_db_full(self): + """Golden Config makes change to every table in configDB""" + db = Db() + with open(FULL_CONFIG_OVERRIDE, "r") as f: + read_data = json.load(f) + self.check_override_config_table( + db, config, read_data['running_config'], read_data['golden_config'], + read_data['expected_config']) + + def check_override_config_table(self, db, config, running_config, + golden_config, expected_config): + def read_json_file_side_effect(filename): + return golden_config + with mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + write_init_config_db(db.cfgdb, running_config) + + runner = CliRunner() + result = runner.invoke(config.config.commands["override-config-table"], + ['golden_config_db.json'], obj=db) + + current_config = read_config_db(db.cfgdb) + assert result.exit_code == 0 + assert current_config == expected_config + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["UTILITIES_UNIT_TESTING"] = "0" + return diff --git a/tests/config_test.py b/tests/config_test.py index 88f6015ee5..87865334a2 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -224,6 +224,27 @@ def is_file_side_effect(filename): assert result.exit_code == 0 assert expected_output in result.output + def test_load_minigraph_with_golden_config(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, show) = get_cmd_module + db = Db() + golden_config = {} + self.check_golden_config(db, config, golden_config, + "config override-config-table /etc/sonic/golden_config_db.json") + + def check_golden_config(self, db, config, golden_config, expected_output): + def is_file_side_effect(filename): + return True if 'golden_config' in filename else False + with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert expected_output in result.output + @classmethod def teardown_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "0" From 0811214e989200ec2e35b361ef0519fd7008fb92 Mon Sep 17 00:00:00 2001 From: rupesh-k <53595165+rupesh-k@users.noreply.github.com> Date: Tue, 10 May 2022 05:52:55 +0530 Subject: [PATCH 17/20] Validate destination port is not LAG (#2053) * Validate destination port is not LAG * Enhance to support mirroring of source port rx/tx to different destination ports --- config/main.py | 63 +++++++++++++---------- tests/config_mirror_session_test.py | 80 +++++++++++++++++++++++++++++ tests/mock_tables/config_db.json | 5 ++ 3 files changed, 122 insertions(+), 26 deletions(-) diff --git a/config/main.py b/config/main.py index c4d8651b77..f6ad1aa041 100644 --- a/config/main.py +++ b/config/main.py @@ -829,18 +829,36 @@ def interface_is_in_portchannel(portchannel_member_table, 
interface_name): return False -def interface_has_mirror_config(mirror_table, interface_name): - """ Check if port is already configured with mirror config """ - for _, v in mirror_table.items(): - if 'src_port' in v and v['src_port'] == interface_name: +def check_mirror_direction_config(v, direction): + """ Check if port is already configured for mirror in same direction """ + if direction: + direction=direction.upper() + if ('direction' in v and v['direction'] == 'BOTH') or (direction == 'BOTH'): return True - if 'dst_port' in v and v['dst_port'] == interface_name: + if 'direction' in v and v['direction'] == direction: return True + else: + return True + +def interface_has_mirror_config(ctx, mirror_table, dst_port, src_port, direction): + """ Check if dst/src port is already configured with mirroring in same direction """ + for _, v in mirror_table.items(): + if src_port: + for port in src_port.split(","): + if 'dst_port' in v and v['dst_port'] == port: + ctx.fail("Error: Source Interface {} already has mirror config".format(port)) + if 'src_port' in v and re.search(port,v['src_port']): + if check_mirror_direction_config(v, direction): + ctx.fail("Error: Source Interface {} already has mirror config in same direction".format(port)) + if dst_port: + if ('dst_port' in v and v['dst_port'] == dst_port) or ('src_port' in v and re.search(dst_port,v['src_port'])): + ctx.fail("Error: Destination Interface {} already has mirror config".format(dst_port)) return False def validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction): """ Check if SPAN mirror-session config is valid """ + ctx = click.get_current_context() if len(config_db.get_entry('MIRROR_SESSION', session_name)) != 0: click.echo("Error: {} already exists".format(session_name)) return False @@ -851,41 +869,34 @@ def validate_mirror_session_config(config_db, session_name, dst_port, src_port, if dst_port: if not interface_name_is_valid(config_db, dst_port): - click.echo("Error: Destination Interface {} is invalid".format(dst_port)) - return False + ctx.fail("Error: Destination Interface {} is invalid".format(dst_port)) + + if is_portchannel_present_in_db(config_db, dst_port): + ctx.fail("Error: Destination Interface {} is not supported".format(dst_port)) if interface_is_in_vlan(vlan_member_table, dst_port): - click.echo("Error: Destination Interface {} has vlan config".format(dst_port)) - return False + ctx.fail("Error: Destination Interface {} has vlan config".format(dst_port)) - if interface_has_mirror_config(mirror_table, dst_port): - click.echo("Error: Destination Interface {} already has mirror config".format(dst_port)) - return False if interface_is_in_portchannel(portchannel_member_table, dst_port): - click.echo("Error: Destination Interface {} has portchannel config".format(dst_port)) - return False + ctx.fail("Error: Destination Interface {} has portchannel config".format(dst_port)) if clicommon.is_port_router_interface(config_db, dst_port): - click.echo("Error: Destination Interface {} is a L3 interface".format(dst_port)) - return False + ctx.fail("Error: Destination Interface {} is a L3 interface".format(dst_port)) if src_port: for port in src_port.split(","): if not interface_name_is_valid(config_db, port): - click.echo("Error: Source Interface {} is invalid".format(port)) - return False + ctx.fail("Error: Source Interface {} is invalid".format(port)) if dst_port and dst_port == port: - click.echo("Error: Destination Interface cant be same as Source Interface") - return False - if 
interface_has_mirror_config(mirror_table, port): - click.echo("Error: Source Interface {} already has mirror config".format(port)) - return False + ctx.fail("Error: Destination Interface cant be same as Source Interface") + + if interface_has_mirror_config(ctx, mirror_table, dst_port, src_port, direction): + return False if direction: if direction not in ['rx', 'tx', 'both']: - click.echo("Error: Direction {} is invalid".format(direction)) - return False + ctx.fail("Error: Direction {} is invalid".format(direction)) return True @@ -2086,7 +2097,7 @@ def add_span(session_name, dst_port, src_port, direction, queue, policer): dst_port = interface_alias_to_name(None, dst_port) if dst_port is None: click.echo("Error: Destination Interface {} is invalid".format(dst_port)) - return + return False session_info = { "type" : "SPAN", diff --git a/tests/config_mirror_session_test.py b/tests/config_mirror_session_test.py index fe440e799d..986df2e711 100644 --- a/tests/config_mirror_session_test.py +++ b/tests/config_mirror_session_test.py @@ -164,8 +164,88 @@ def test_mirror_session_span_add(): assert result.exit_code != 0 assert ERR_MSG_VALUE_FAILURE in result.stdout + # Verify invalid dst port + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethern", "Ethernet4", "rx", "100"]) + assert result.exit_code != 0 + assert "Error: Destination Interface Ethern is invalid" in result.stdout + + # Verify destination port not have vlan config + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet24", "Ethernet4", "rx", "100"]) + assert result.exit_code != 0 + assert "Error: Destination Interface Ethernet24 has vlan config" in result.stdout + + # Verify destination port is not part of portchannel + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet116", "Ethernet4", "rx", "100"]) + assert result.exit_code != 0 + assert "Error: Destination Interface Ethernet116 has portchannel config" in result.stdout + + # Verify destination port not router interface + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet0", "Ethernet4", "rx", "100"]) + assert result.exit_code != 0 + assert "Error: Destination Interface Ethernet0 is a L3 interface" in result.stdout + + # Verify destination port not Portchannel + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "PortChannel1001"]) + assert result.exit_code != 0 + assert "Error: Destination Interface PortChannel1001 is not supported" in result.output + + # Verify source interface is invalid + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet52", "Ethern", "rx", "100"]) + assert result.exit_code != 0 + assert "Error: Source Interface Ethern is invalid" in result.stdout + + # Verify source interface is not same as destination + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet52", "Ethernet52", "rx", "100"]) + assert result.exit_code != 0 + assert "Error: Destination Interface cant be same as Source Interface" in result.stdout + + # Verify destination port not have mirror config + result = runner.invoke( + 
config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet44", "Ethernet56", "rx", "100"]) + assert result.exit_code != 0 + assert "Error: Destination Interface Ethernet44 already has mirror config" in result.output + + # Verify source port is not configured as dstport in other session + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet52", "Ethernet44", "rx", "100"]) + assert result.exit_code != 0 + assert "Error: Source Interface Ethernet44 already has mirror config" in result.output + + # Verify source port is not configured in same direction + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet52", "Ethernet8,Ethernet40", "rx", "100"]) + assert result.exit_code != 0 + assert "Error: Source Interface Ethernet40 already has mirror config in same direction" in result.output + + # Verify direction is invalid + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet52", "Ethernet56", "px", "100"]) + assert result.exit_code != 0 + assert "Error: Direction px is invalid" in result.stdout + # Positive case with mock.patch('config.main.add_span') as mocked: + result = runner.invoke( + config.config.commands["mirror_session"].commands["span"].commands["add"], + ["test_session", "Ethernet8", "Ethernet4", "tx", "100"]) result = runner.invoke( config.config.commands["mirror_session"].commands["span"].commands["add"], ["test_session", "Ethernet0", "Ethernet4", "rx", "100"]) diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index f416e4a941..9a037bb587 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -2553,5 +2553,10 @@ }, "QUEUE|Ethernet96|6": { "scheduler": "[SCHEDULER|scheduler.0]" + }, + "MIRROR_SESSION|test_session_db1": { + "dst_port": "Ethernet44", + "src_port": "Ethernet40,Ethernet48", + "direction": "RX" } } From 083ebcc3369eb893e77bcdbbcbc162ddf14762ef Mon Sep 17 00:00:00 2001 From: qinchuanares <37220227+qinchuanares@users.noreply.github.com> Date: Mon, 9 May 2022 17:48:06 -0700 Subject: [PATCH 18/20] Add transceiver-info items advertised for cmis-supported moddules (#2135) * include more transceiver-info items advertised for cmis-supported modules * resolving test failure * resolving test failure * include a test case covering cmis-compliant module * resolving test case failure * add units for support laser freq and tx power * add units on laser freq and tx power * correct freq unit * correct a syntax error on key categorization Co-authored-by: Chuan Qin (QINCHUAN) --- sfputil/main.py | 125 +++++++++++++++++++++++++------ tests/sfputil_test.py | 170 ++++++++++++++++++++++++++++++++---------- 2 files changed, 231 insertions(+), 64 deletions(-) diff --git a/sfputil/main.py b/sfputil/main.py index 5eb72195d7..91cb372bac 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -63,6 +63,48 @@ 'application_advertisement': 'Application Advertisement' } +QSFP_DD_DATA_MAP = { + 'model': 'Vendor PN', + 'vendor_oui': 'Vendor OUI', + 'vendor_date': 'Vendor Date Code(YYYY-MM-DD Lot)', + 'manufacturer': 'Vendor Name', + 'vendor_rev': 'Vendor Rev', + 'serial': 'Vendor SN', + 'type': 'Identifier', + 'ext_identifier': 'Extended Identifier', + 'ext_rateselect_compliance': 'Extended RateSelect Compliance', + 'cable_length': 'cable_length', + 'cable_type': 'Length', + 
+    'nominal_bit_rate': 'Nominal Bit Rate(100Mbs)',
+    'specification_compliance': 'Specification compliance',
+    'encoding': 'Encoding',
+    'connector': 'Connector',
+    'application_advertisement': 'Application Advertisement',
+    'active_firmware': 'Active Firmware Version',
+    'inactive_firmware': 'Inactive Firmware Version',
+    'hardware_rev': 'Hardware Revision',
+    'media_interface_code': 'Media Interface Code',
+    'host_electrical_interface': 'Host Electrical Interface',
+    'host_lane_count': 'Host Lane Count',
+    'media_lane_count': 'Media Lane Count',
+    'host_lane_assignment_option': 'Host Lane Assignment Options',
+    'media_lane_assignment_option': 'Media Lane Assignment Options',
+    'active_apsel_hostlane1': 'Active App Selection Host Lane 1',
+    'active_apsel_hostlane2': 'Active App Selection Host Lane 2',
+    'active_apsel_hostlane3': 'Active App Selection Host Lane 3',
+    'active_apsel_hostlane4': 'Active App Selection Host Lane 4',
+    'active_apsel_hostlane5': 'Active App Selection Host Lane 5',
+    'active_apsel_hostlane6': 'Active App Selection Host Lane 6',
+    'active_apsel_hostlane7': 'Active App Selection Host Lane 7',
+    'active_apsel_hostlane8': 'Active App Selection Host Lane 8',
+    'media_interface_technology': 'Media Interface Technology',
+    'cmis_rev': 'CMIS Revision',
+    'supported_max_tx_power': 'Supported Max TX Power',
+    'supported_min_tx_power': 'Supported Min TX Power',
+    'supported_max_laser_freq': 'Supported Max Laser Frequency',
+    'supported_min_laser_freq': 'Supported Min Laser Frequency'
+}
+
 SFP_DOM_CHANNEL_MONITOR_MAP = {
     'rx1power': 'RXPower',
     'tx1bias': 'TXBias',
@@ -273,31 +315,68 @@ def format_dict_value_to_string(sorted_key_table,
 def convert_sfp_info_to_output_string(sfp_info_dict):
     indent = ' ' * 8
     output = ''
-
-    sorted_qsfp_data_map_keys = sorted(QSFP_DATA_MAP, key=QSFP_DATA_MAP.get)
-    for key in sorted_qsfp_data_map_keys:
-        if key == 'cable_type':
-            output += '{}{}: {}\n'.format(indent, sfp_info_dict['cable_type'], sfp_info_dict['cable_length'])
-        elif key == 'cable_length':
-            pass
-        elif key == 'specification_compliance':
-            if sfp_info_dict['type'] == "QSFP-DD Double Density 8X Pluggable Transceiver" or \
-               sfp_info_dict['type'] == "OSFP 8X Pluggable Transceiver" or \
-               sfp_info_dict['type'] == "QSFP+ or later with CMIS":
-                output += '{}{}: {}\n'.format(indent, QSFP_DATA_MAP[key], sfp_info_dict[key])
+    sfp_type = sfp_info_dict['type']
+    # CMIS supported module types include QSFP-DD and OSFP
+    if sfp_type.startswith('QSFP-DD') or sfp_type.startswith('OSFP'):
+        sorted_qsfp_data_map_keys = sorted(QSFP_DD_DATA_MAP, key=QSFP_DD_DATA_MAP.get)
+        for key in sorted_qsfp_data_map_keys:
+            if key == 'cable_type':
+                output += '{}{}: {}\n'.format(indent, sfp_info_dict['cable_type'], sfp_info_dict['cable_length'])
+            elif key == 'cable_length':
+                pass
+            elif key == 'specification_compliance':
+                if sfp_info_dict['type'] == "QSFP-DD Double Density 8X Pluggable Transceiver" or \
+                   sfp_info_dict['type'] == "OSFP 8X Pluggable Transceiver" or \
+                   sfp_info_dict['type'] == "QSFP+ or later with CMIS":
+                    output += '{}{}: {}\n'.format(indent, QSFP_DD_DATA_MAP[key], sfp_info_dict[key])
+                else:
+                    output += '{}{}:\n'.format(indent, QSFP_DD_DATA_MAP['specification_compliance'])
+
+                    spec_compliance_dict = {}
+                    try:
+                        spec_compliance_dict = ast.literal_eval(sfp_info_dict['specification_compliance'])
+                        sorted_compliance_key_table = natsorted(spec_compliance_dict)
+                        for compliance_key in sorted_compliance_key_table:
+                            output += '{}{}: {}\n'.format((indent * 2), compliance_key, spec_compliance_dict[compliance_key])
+                    except ValueError as e:
+                        output += '{}N/A\n'.format((indent * 2))
+            elif key == 'application_advertisement':
+                pass
+            elif key == 'supported_max_tx_power' or key == 'supported_min_tx_power':
+                output += '{}{}: {}dBm\n'.format(indent, QSFP_DD_DATA_MAP[key], sfp_info_dict[key])
+            elif key == 'supported_max_laser_freq' or key == 'supported_min_laser_freq':
+                output += '{}{}: {}GHz\n'.format(indent, QSFP_DD_DATA_MAP[key], sfp_info_dict[key])
             else:
-                output += '{}{}:\n'.format(indent, QSFP_DATA_MAP['specification_compliance'])
-
-                spec_compliance_dict = {}
                 try:
-                    spec_compliance_dict = ast.literal_eval(sfp_info_dict['specification_compliance'])
-                    sorted_compliance_key_table = natsorted(spec_compliance_dict)
-                    for compliance_key in sorted_compliance_key_table:
-                        output += '{}{}: {}\n'.format((indent * 2), compliance_key, spec_compliance_dict[compliance_key])
-                except ValueError as e:
-                    output += '{}N/A\n'.format((indent * 2))
-        else:
-            output += '{}{}: {}\n'.format(indent, QSFP_DATA_MAP[key], sfp_info_dict[key])
+                    output += '{}{}: {}\n'.format(indent, QSFP_DD_DATA_MAP[key], sfp_info_dict[key])
+                except (KeyError, ValueError) as e:
+                    output += '{}{}: N/A\n'.format(indent, QSFP_DD_DATA_MAP[key])
+
+    else:
+        sorted_qsfp_data_map_keys = sorted(QSFP_DATA_MAP, key=QSFP_DATA_MAP.get)
+        for key in sorted_qsfp_data_map_keys:
+            if key == 'cable_type':
+                output += '{}{}: {}\n'.format(indent, sfp_info_dict['cable_type'], sfp_info_dict['cable_length'])
+            elif key == 'cable_length':
+                pass
+            elif key == 'specification_compliance':
+                if sfp_info_dict['type'] == "QSFP-DD Double Density 8X Pluggable Transceiver" or \
+                   sfp_info_dict['type'] == "OSFP 8X Pluggable Transceiver" or \
+                   sfp_info_dict['type'] == "QSFP+ or later with CMIS":
+                    output += '{}{}: {}\n'.format(indent, QSFP_DATA_MAP[key], sfp_info_dict[key])
+                else:
+                    output += '{}{}:\n'.format(indent, QSFP_DATA_MAP['specification_compliance'])
+
+                    spec_compliance_dict = {}
+                    try:
+                        spec_compliance_dict = ast.literal_eval(sfp_info_dict['specification_compliance'])
+                        sorted_compliance_key_table = natsorted(spec_compliance_dict)
+                        for compliance_key in sorted_compliance_key_table:
+                            output += '{}{}: {}\n'.format((indent * 2), compliance_key, spec_compliance_dict[compliance_key])
+                    except ValueError as e:
+                        output += '{}N/A\n'.format((indent * 2))
+            else:
+                output += '{}{}: {}\n'.format(indent, QSFP_DATA_MAP[key], sfp_info_dict[key])
 
     return output
 
diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py
index c3b6b96a9e..83d2f8c4cc 100644
--- a/tests/sfputil_test.py
+++ b/tests/sfputil_test.py
@@ -77,47 +77,135 @@ def test_format_dict_value_to_string(self):
                                                sfputil.QSFP_DOM_CHANNEL_MONITOR_MAP,
                                                sfputil.DOM_VALUE_UNIT_MAP)
         assert output == expected_output
-
-    def test_convert_sfp_info_to_output_string(self):
-        sfp_info_dict = {
-            'type': 'QSFP28 or later',
-            'type_abbrv_name': 'QSFP28',
-            'manufacturer': 'Mellanox',
-            'model': 'MCP1600-C003',
-            'vendor_rev': 'A2',
-            'serial': 'MT1636VS10561',
-            'vendor_oui': '00-02-c9',
-            'vendor_date': '2016-07-18',
-            'connector': 'No separable connector',
-            'encoding': '64B66B',
-            'ext_identifier': 'Power Class 1(1.5W max)',
-            'ext_rateselect_compliance': 'QSFP+ Rate Select Version 1',
-            'cable_type': 'Length Cable Assembly(m)',
-            'cable_length': '3',
-            'application_advertisement': 'N/A',
-            'specification_compliance': "{'10/40G Ethernet Compliance Code': '40GBASE-CR4'}",
-            'dom_capability': "{'Tx_power_support': 'no', 'Rx_power_support': 'no', 'Voltage_support': 'no', 'Temp_support': 'no'}",
-            'nominal_bit_rate': '255'
-        }
-
-        expected_output = '''\
-        Application Advertisement: N/A
-        Connector: No separable connector
-        Encoding: 64B66B
-        Extended Identifier: Power Class 1(1.5W max)
-        Extended RateSelect Compliance: QSFP+ Rate Select Version 1
-        Identifier: QSFP28 or later
-        Length Cable Assembly(m): 3
-        Nominal Bit Rate(100Mbs): 255
-        Specification compliance:
-                10/40G Ethernet Compliance Code: 40GBASE-CR4
-        Vendor Date Code(YYYY-MM-DD Lot): 2016-07-18
-        Vendor Name: Mellanox
-        Vendor OUI: 00-02-c9
-        Vendor PN: MCP1600-C003
-        Vendor Rev: A2
-        Vendor SN: MT1636VS10561
-'''
+    @pytest.mark.parametrize("sfp_info_dict, expected_output",[
+        # Non-CMIS module
+        (
+            # sfp_info_dict
+            {
+                'type': 'QSFP28 or later',
+                'type_abbrv_name': 'QSFP28',
+                'manufacturer': 'Mellanox',
+                'model': 'MCP1600-C003',
+                'vendor_rev': 'A2',
+                'serial': 'MT1636VS10561',
+                'vendor_oui': '00-02-c9',
+                'vendor_date': '2016-07-18',
+                'connector': 'No separable connector',
+                'encoding': '64B66B',
+                'ext_identifier': 'Power Class 1(1.5W max)',
+                'ext_rateselect_compliance': 'QSFP+ Rate Select Version 1',
+                'cable_type': 'Length Cable Assembly(m)',
+                'cable_length': '3',
+                'application_advertisement': 'N/A',
+                'specification_compliance': "{'10/40G Ethernet Compliance Code': '40GBASE-CR4'}",
+                'dom_capability': "{'Tx_power_support': 'no', 'Rx_power_support': 'no', 'Voltage_support': 'no', 'Temp_support': 'no'}",
+                'nominal_bit_rate': '255'
+            },
+            # expected_output
+            "        Application Advertisement: N/A\n"
+            "        Connector: No separable connector\n"
+            "        Encoding: 64B66B\n"
+            "        Extended Identifier: Power Class 1(1.5W max)\n"
+            "        Extended RateSelect Compliance: QSFP+ Rate Select Version 1\n"
+            "        Identifier: QSFP28 or later\n"
+            "        Length Cable Assembly(m): 3\n"
+            "        Nominal Bit Rate(100Mbs): 255\n"
+            "        Specification compliance:\n"
+            "                10/40G Ethernet Compliance Code: 40GBASE-CR4\n"
+            "        Vendor Date Code(YYYY-MM-DD Lot): 2016-07-18\n"
+            "        Vendor Name: Mellanox\n"
+            "        Vendor OUI: 00-02-c9\n"
+            "        Vendor PN: MCP1600-C003\n"
+            "        Vendor Rev: A2\n"
+            "        Vendor SN: MT1636VS10561\n"
+        ),
+        # CMIS compliant module
+        (
+            # sfp_info_dict
+            {
+                'type': 'QSFP-DD Double Density 8X Pluggable Transceiver',
+                'type_abbrv_name': 'QSFP-DD',
+                'manufacturer': 'abc',
+                'model': 'def',
+                'vendor_rev': 'ghi',
+                'serial': 'jkl',
+                'vendor_oui': '00-00-00',
+                'vendor_date': '2000-01-01',
+                'connector': 'LC',
+                'encoding': 'N/A',
+                'ext_identifier': 'Power Class 8 (18.0W Max)',
+                'ext_rateselect_compliance': 'N/A',
+                'cable_type': 'Length Cable Assembly(m)',
+                'cable_length': '0',
+                'application_advertisement': 'N/A',
+                'specification_compliance': "sm_media_interface",
+                'dom_capability': "{'Tx_power_support': 'no', 'Rx_power_support': 'no', 'Voltage_support': 'no', 'Temp_support': 'no'}",
+                'nominal_bit_rate': '0',
+                'active_firmware': '0.1',
+                'inactive_firmware': '0.0',
+                'hardware_rev': '0.0',
+                'media_interface_code': '400ZR, DWDM, amplified',
+                'host_electrical_interface': '400GAUI-8 C2M (Annex 120E)',
+                'host_lane_count': 8,
+                'media_lane_count': 1,
+                'host_lane_assignment_option': 1,
+                'media_lane_assignment_option': 1,
+                'active_apsel_hostlane1': 1,
+                'active_apsel_hostlane2': 1,
+                'active_apsel_hostlane3': 1,
+                'active_apsel_hostlane4': 1,
+                'active_apsel_hostlane5': 1,
+                'active_apsel_hostlane6': 1,
+                'active_apsel_hostlane7': 1,
+                'active_apsel_hostlane8': 1,
+                'media_interface_technology': 'C-band tunable laser',
+                'cmis_rev': '5.0',
+                'supported_max_tx_power': 0,
+                'supported_min_tx_power': -20,
+                'supported_max_laser_freq': 196100,
+                'supported_min_laser_freq': 191300
+            },
+            # expected_output
+            "        Active App Selection Host Lane 1: 1\n"
+            "        Active App Selection Host Lane 2: 1\n"
+            "        Active App Selection Host Lane 3: 1\n"
+            "        Active App Selection Host Lane 4: 1\n"
+            "        Active App Selection Host Lane 5: 1\n"
+            "        Active App Selection Host Lane 6: 1\n"
+            "        Active App Selection Host Lane 7: 1\n"
+            "        Active App Selection Host Lane 8: 1\n"
+            "        Active Firmware Version: 0.1\n"
+            "        CMIS Revision: 5.0\n"
+            "        Connector: LC\n"
+            "        Encoding: N/A\n"
+            "        Extended Identifier: Power Class 8 (18.0W Max)\n"
+            "        Extended RateSelect Compliance: N/A\n"
+            "        Hardware Revision: 0.0\n"
+            "        Host Electrical Interface: 400GAUI-8 C2M (Annex 120E)\n"
+            "        Host Lane Assignment Options: 1\n"
+            "        Host Lane Count: 8\n"
+            "        Identifier: QSFP-DD Double Density 8X Pluggable Transceiver\n"
+            "        Inactive Firmware Version: 0.0\n"
+            "        Length Cable Assembly(m): 0\n"
+            "        Media Interface Code: 400ZR, DWDM, amplified\n"
+            "        Media Interface Technology: C-band tunable laser\n"
+            "        Media Lane Assignment Options: 1\n"
+            "        Media Lane Count: 1\n"
+            "        Nominal Bit Rate(100Mbs): 0\n"
+            "        Specification compliance: sm_media_interface\n"
+            "        Supported Max Laser Frequency: 196100GHz\n"
+            "        Supported Max TX Power: 0dBm\n"
+            "        Supported Min Laser Frequency: 191300GHz\n"
+            "        Supported Min TX Power: -20dBm\n"
+            "        Vendor Date Code(YYYY-MM-DD Lot): 2000-01-01\n"
+            "        Vendor Name: abc\n"
+            "        Vendor OUI: 00-00-00\n"
+            "        Vendor PN: def\n"
+            "        Vendor Rev: ghi\n"
+            "        Vendor SN: jkl\n"
+        ),
+    ])
+    def test_convert_sfp_info_to_output_string(self, sfp_info_dict, expected_output):
         output = sfputil.convert_sfp_info_to_output_string(sfp_info_dict)
         assert output == expected_output

From 7a06457036c62b75903e14289e3b8bc3fc6bc213 Mon Sep 17 00:00:00 2001
From: Vivek R
Date: Tue, 10 May 2022 02:48:32 -0700
Subject: [PATCH 19/20] [auto_ts] Enable register/de-register auto_ts config for APP Extension (#2139)

- What I did
Added the ability to dynamically add entries into AUTO_TECHSUPPORT_FEATURE when a new app extension is installed/updated.
More details can be found here: sonic-net/SONiC#990

- How I did it
Enhanced the FeatureRegistry class to also support the AUTO_TECHSUPPORT_FEATURE table.

- How to verify it
Unit tests; verified on a live switch.

Signed-off-by: Vivek Reddy Karri
---
 .../service_creator/feature.py |  65 ++++++++++-
 .../test_service_creator.py    | 107 ++++++++++++++++++
 2 files changed, 169 insertions(+), 3 deletions(-)

diff --git a/sonic_package_manager/service_creator/feature.py b/sonic_package_manager/service_creator/feature.py
index eb8e1a0710..73e35566dd 100644
--- a/sonic_package_manager/service_creator/feature.py
+++ b/sonic_package_manager/service_creator/feature.py
@@ -1,9 +1,10 @@
 #!/usr/bin/env python
 
 """ This module implements new feature registration/de-registration in SONiC system.
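+It now also records per-feature auto-techsupport policy in the AUTO_TECHSUPPORT_FEATURE table.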
""" - +import copy from typing import Dict, Type +from sonic_package_manager.logger import log from sonic_package_manager.manifest import Manifest from sonic_package_manager.service_creator.sonic_db import SonicDB @@ -15,6 +16,14 @@ 'set_owner': 'local' } +AUTO_TS_GLOBAL = "AUTO_TECHSUPPORT" +AUTO_TS_FEATURE = "AUTO_TECHSUPPORT_FEATURE" +CFG_STATE = "state" +# TODO: Enable available_mem_threshold once the mem_leak_auto_ts feature is available +DEFAULT_AUTO_TS_FEATURE_CONFIG = { + 'state': 'disabled', + 'rate_limit_interval': '600' +} def is_enabled(cfg): return cfg.get('state', 'disabled').lower() == 'enabled' @@ -25,8 +34,11 @@ def is_multi_instance(cfg): class FeatureRegistry: - """ FeatureRegistry class provides an interface to - register/de-register new feature persistently. """ + """ 1) FeatureRegistry class provides an interface to + register/de-register new feature tables persistently. + 2) Writes persistent configuration to FEATURE & + AUTO_TECHSUPPORT_FEATURE tables + """ def __init__(self, sonic_db: Type[SonicDB]): self._sonic_db = sonic_db @@ -60,6 +72,9 @@ def register(self, new_cfg = {**new_cfg, **non_cfg_entries} conn.set_entry(FEATURE, name, new_cfg) + + if self.register_auto_ts(name): + log.info(f'{name} entry is added to {AUTO_TS_FEATURE} table') def deregister(self, name: str): """ Deregister feature by name. @@ -73,6 +88,7 @@ def deregister(self, name: str): db_connetors = self._sonic_db.get_connectors() for conn in db_connetors: conn.set_entry(FEATURE, name, None) + conn.set_entry(AUTO_TS_FEATURE, name, None) def update(self, old_manifest: Manifest, @@ -103,6 +119,9 @@ def update(self, new_cfg = {**new_cfg, **non_cfg_entries} conn.set_entry(FEATURE, new_name, new_cfg) + + if self.register_auto_ts(new_name, old_name): + log.info(f'{new_name} entry is added to {AUTO_TS_FEATURE} table') def is_feature_enabled(self, name: str) -> bool: """ Returns whether the feature is current enabled @@ -123,6 +142,46 @@ def get_multi_instance_features(self): features = conn.get_table(FEATURE) return [feature for feature, cfg in features.items() if is_multi_instance(cfg)] + def infer_auto_ts_capability(self, init_cfg_conn): + """ Determine whether to enable/disable the state for new feature + AUTO_TS provides a compile-time knob to enable/disable this feature + Default State for the new feature follows the decision made at compile time. 
+
+        Args:
+            init_cfg_conn: PersistentConfigDbConnector for init_cfg.json
+        Returns:
+            Capability: Tuple: (bool, ["enabled", "disabled"])
+        """
+        cfg = init_cfg_conn.get_entry(AUTO_TS_GLOBAL, "GLOBAL")
+        default_state = cfg.get(CFG_STATE, "")
+        if not default_state:
+            return (False, "disabled")
+        else:
+            return (True, default_state)
+
+    def register_auto_ts(self, new_name, old_name=None):
+        """ Registers auto_ts feature
+        """
+        # Infer and update default config
+        init_cfg_conn = self._sonic_db.get_initial_db_connector()
+        def_cfg = DEFAULT_AUTO_TS_FEATURE_CONFIG.copy()
+        (auto_ts_add_cfg, auto_ts_state) = self.infer_auto_ts_capability(init_cfg_conn)
+        def_cfg['state'] = auto_ts_state
+
+        if not auto_ts_add_cfg:
+            log.debug("Skip adding AUTO_TECHSUPPORT_FEATURE table because no AUTO_TECHSUPPORT|GLOBAL entry is found")
+            return False
+
+        for conn in self._sonic_db.get_connectors():
+            new_cfg = copy.deepcopy(def_cfg)
+            if old_name:
+                current_cfg = conn.get_entry(AUTO_TS_FEATURE, old_name)
+                conn.set_entry(AUTO_TS_FEATURE, old_name, None)
+                new_cfg.update(current_cfg)
+
+            conn.set_entry(AUTO_TS_FEATURE, new_name, new_cfg)
+        return True
+
     @staticmethod
     def get_default_feature_entries(state=None, owner=None) -> Dict[str, str]:
         """ Get configurable feature table entries:
diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py
index 295e80dc52..c943289362 100644
--- a/tests/sonic_package_manager/test_service_creator.py
+++ b/tests/sonic_package_manager/test_service_creator.py
@@ -205,6 +205,7 @@ def test_feature_registration(mock_sonic_db, manifest):
     mock_connector = Mock()
     mock_connector.get_entry = Mock(return_value={})
     mock_sonic_db.get_connectors = Mock(return_value=[mock_connector])
+    mock_sonic_db.get_initial_db_connector = Mock(return_value=mock_connector)
     feature_registry = FeatureRegistry(mock_sonic_db)
     feature_registry.register(manifest)
     mock_connector.set_entry.assert_called_with('FEATURE', 'test', {
@@ -258,6 +259,7 @@ def test_feature_registration_with_timer(mock_sonic_db, manifest):
     mock_connector = Mock()
     mock_connector.get_entry = Mock(return_value={})
     mock_sonic_db.get_connectors = Mock(return_value=[mock_connector])
+    mock_sonic_db.get_initial_db_connector = Mock(return_value=mock_connector)
     feature_registry = FeatureRegistry(mock_sonic_db)
     feature_registry.register(manifest)
     mock_connector.set_entry.assert_called_with('FEATURE', 'test', {
@@ -275,6 +277,7 @@ def test_feature_registration_with_non_default_owner(mock_sonic_db, manifest):
     mock_connector = Mock()
     mock_connector.get_entry = Mock(return_value={})
     mock_sonic_db.get_connectors = Mock(return_value=[mock_connector])
+    mock_sonic_db.get_initial_db_connector = Mock(return_value=mock_connector)
     feature_registry = FeatureRegistry(mock_sonic_db)
     feature_registry.register(manifest, owner='kube')
     mock_connector.set_entry.assert_called_with('FEATURE', 'test', {
@@ -286,3 +289,107 @@ def test_feature_registration_with_non_default_owner(mock_sonic_db, manifest):
         'has_global_scope': 'True',
         'has_timer': 'False',
     })
+
+
+class AutoTSHelp:
+    """ Helper class for Auto TS Feature Registry Tests
+    """
+    GLOBAL_STATE = {}
+
+    @classmethod
+    def get_entry(cls, table, key):
+        if table == "AUTO_TECHSUPPORT" and key == "GLOBAL":
+            return AutoTSHelp.GLOBAL_STATE
+        elif table == "AUTO_TECHSUPPORT_FEATURE" and key == "test":
+            return {"state" : "enabled", "rate_limit_interval" : "600"}
+        else:
+            return {}
+
+    @classmethod
+    def get_entry_running_cfg(cls, table, key):
+        if table == "AUTO_TECHSUPPORT_FEATURE" and key == "test":
+            return {"state" : "disabled", "rate_limit_interval" : "1000"}
+        else:
+            return {}
+
+
+def test_auto_ts_global_disabled(mock_sonic_db, manifest):
+    mock_init_cfg = Mock()
+    AutoTSHelp.GLOBAL_STATE = {"state" : "disabled"}
+    mock_init_cfg.get_entry = Mock(side_effect=AutoTSHelp.get_entry)
+    mock_sonic_db.get_connectors = Mock(return_value=[mock_init_cfg])
+    mock_sonic_db.get_initial_db_connector = Mock(return_value=mock_init_cfg)
+    feature_registry = FeatureRegistry(mock_sonic_db)
+    feature_registry.register(manifest)
+    mock_init_cfg.set_entry.assert_any_call("AUTO_TECHSUPPORT_FEATURE", "test", {
+            "state" : "disabled",
+            "rate_limit_interval" : "600"
+        }
+    )
+
+
+def test_auto_ts_global_enabled(mock_sonic_db, manifest):
+    mock_init_cfg = Mock()
+    AutoTSHelp.GLOBAL_STATE = {"state" : "enabled"}
+    mock_init_cfg.get_entry = Mock(side_effect=AutoTSHelp.get_entry)
+    mock_sonic_db.get_connectors = Mock(return_value=[mock_init_cfg])
+    mock_sonic_db.get_initial_db_connector = Mock(return_value=mock_init_cfg)
+    feature_registry = FeatureRegistry(mock_sonic_db)
+    feature_registry.register(manifest)
+    mock_init_cfg.set_entry.assert_any_call("AUTO_TECHSUPPORT_FEATURE", "test", {
+            "state" : "enabled",
+            "rate_limit_interval" : "600"
+        }
+    )
+
+
+def test_auto_ts_deregister(mock_sonic_db):
+    mock_connector = Mock()
+    mock_sonic_db.get_connectors = Mock(return_value=[mock_connector])
+    feature_registry = FeatureRegistry(mock_sonic_db)
+    feature_registry.deregister("test")
+    mock_connector.set_entry.assert_any_call("AUTO_TECHSUPPORT_FEATURE", "test", None)
+
+
+def test_auto_ts_feature_update_flow(mock_sonic_db, manifest):
+    new_manifest = copy.deepcopy(manifest)
+    new_manifest['service']['name'] = 'test_new'
+    new_manifest['service']['delayed'] = True
+
+    AutoTSHelp.GLOBAL_STATE = {"state" : "enabled"}
+    # Mock init_cfg connector
+    mock_init_cfg = Mock()
+    mock_init_cfg.get_entry = Mock(side_effect=AutoTSHelp.get_entry)
+
+    # Mock running/persistent cfg connector
+    mock_other_cfg = Mock()
+    mock_other_cfg.get_entry = Mock(side_effect=AutoTSHelp.get_entry_running_cfg)
+
+    # Setup sonic_db class
+    mock_sonic_db.get_connectors = Mock(return_value=[mock_init_cfg, mock_other_cfg])
+    mock_sonic_db.get_initial_db_connector = Mock(return_value=mock_init_cfg)
+
+    feature_registry = FeatureRegistry(mock_sonic_db)
+    feature_registry.update(manifest, new_manifest)
+
+    mock_init_cfg.set_entry.assert_has_calls(
+        [
+            call("AUTO_TECHSUPPORT_FEATURE", "test", None),
+            call("AUTO_TECHSUPPORT_FEATURE", "test_new", {
+                "state" : "enabled",
+                "rate_limit_interval" : "600"
+            })
+        ],
+        any_order = True
+    )
+
+    mock_other_cfg.set_entry.assert_has_calls(
+        [
+            call("AUTO_TECHSUPPORT_FEATURE", "test", None),
+            call("AUTO_TECHSUPPORT_FEATURE", "test_new", {
+                "state" : "disabled",
+                "rate_limit_interval" : "1000"
+            })
+        ],
+        any_order = True
+    )

From a73f156457e580d5870b960056eafb2c29ef6449 Mon Sep 17 00:00:00 2001
From: Sudharsan Dhamal Gopalarathnam
Date: Tue, 10 May 2022 09:50:41 -0700
Subject: [PATCH 20/20] [show][vrf]Fixing show vrf to include vlan subinterface (#2158)

* [show][vrf]Fixing show vrf to include vlan subinterface
---
 doc/Command-Reference.md       |  3 +++
 show/main.py                   |  2 +-
 tests/show_vrf_test.py         | 39 ++++++++++++++++++++++++++++++++
 tests/vrf_input/config_db.json | 35 ++++++++++++++++++++++++++++++
 4 files changed, 78 insertions(+), 1 deletion(-)
 create mode 100644 tests/show_vrf_test.py
 create mode 100644 tests/vrf_input/config_db.json

diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md
index 748a273d85..4beef0cafa 100644
--- a/doc/Command-Reference.md
+++ b/doc/Command-Reference.md
@@ -5281,8 +5281,11 @@ If vrf-name is also provided as part of the command, if the vrf is created it wi
   default    Vlan20
   Vrf-red    Vlan100
              Loopback11
+             Eth0.100
   Vrf-blue   Loopback100
              Loopback102
+             Ethernet0.10
+             PortChannel101
  ````
 
 ### VRF config commands
diff --git a/show/main.py b/show/main.py
index 1821f6c9d8..198940bc3e 100755
--- a/show/main.py
+++ b/show/main.py
@@ -207,7 +207,7 @@ def cli(ctx):
 def get_interface_bind_to_vrf(config_db, vrf_name):
     """Get interfaces belong to vrf
     """
-    tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE']
+    tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE', 'VLAN_SUB_INTERFACE']
     data = []
     for table_name in tables:
         interface_dict = config_db.get_table(table_name)
diff --git a/tests/show_vrf_test.py b/tests/show_vrf_test.py
new file mode 100644
index 0000000000..3c6d1c5b36
--- /dev/null
+++ b/tests/show_vrf_test.py
@@ -0,0 +1,39 @@
+import os
+import sys
+from click.testing import CliRunner
+from swsscommon.swsscommon import SonicV2Connector
+from utilities_common.db import Db
+
+import show.main as show
+
+test_path = os.path.dirname(os.path.abspath(__file__))
+mock_db_path = os.path.join(test_path, "vrf_input")
+
+class TestShowVrf(object):
+    @classmethod
+    def setup_class(cls):
+        print("SETUP")
+        os.environ["UTILITIES_UNIT_TESTING"] = "1"
+
+    def test_vrf_show(self):
+        from .mock_tables import dbconnector
+        jsonfile_config = os.path.join(mock_db_path, "config_db")
+        dbconnector.dedicated_dbs['CONFIG_DB'] = jsonfile_config
+        runner = CliRunner()
+        db = Db()
+        expected_output = """\
+VRF     Interfaces
+------  ---------------
+Vrf1
+Vrf101  Ethernet0.10
+Vrf102  PortChannel0002
+        Vlan40
+        Eth32.10
+Vrf103  Ethernet4
+        Loopback0
+"""
+
+        result = runner.invoke(show.cli.commands['vrf'], [], obj=db)
+        dbconnector.dedicated_dbs = {}
+        assert result.exit_code == 0
+        assert result.output == expected_output
diff --git a/tests/vrf_input/config_db.json b/tests/vrf_input/config_db.json
new file mode 100644
index 0000000000..6d646f2f2b
--- /dev/null
+++ b/tests/vrf_input/config_db.json
@@ -0,0 +1,35 @@
+{
+    "VLAN_SUB_INTERFACE|Ethernet0.10": {
+        "vrf_name": "Vrf101",
+        "admin_status": "up"
+    },
+    "VLAN_SUB_INTERFACE|Eth32.10": {
+        "vrf_name": "Vrf102",
+        "admin_status": "up",
+        "vlan": "100"
+    },
+    "VLAN_INTERFACE|Vlan40": {
+        "vrf_name": "Vrf102"
+    },
+    "PORTCHANNEL_INTERFACE|PortChannel0002": {
+        "vrf_name": "Vrf102"
+    },
+    "INTERFACE|Ethernet4": {
+        "vrf_name": "Vrf103"
+    },
+    "LOOPBACK_INTERFACE|Loopback0": {
+        "vrf_name": "Vrf103"
+    },
+    "VRF|Vrf1": {
+        "fallback": "false"
+    },
+    "VRF|Vrf101": {
+        "NULL": "NULL"
+    },
+    "VRF|Vrf102": {
+        "NULL": "NULL"
+    },
+    "VRF|Vrf103": {
+        "NULL": "NULL"
+    }
+}
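
A note on the show/main.py change in PATCH 20: `get_interface_bind_to_vrf` only gains
'VLAN_SUB_INTERFACE' in its table list; the grouping logic is unchanged. The sketch below
reproduces that grouping against the flat mock tests/vrf_input/config_db.json added above, so
the expected test output can be checked by hand. It is a minimal sketch under stated
assumptions, not the shipped implementation: the real code reads tables through
ConfigDBConnector.get_table(), whereas `interfaces_per_vrf` here is a hypothetical helper
that parses the raw "TABLE|key" JSON keys directly.

    import json

    def interfaces_per_vrf(flat_cfg, vrf_name):
        """Collect interface names bound to vrf_name, in the table order show vrf uses."""
        tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE',
                  'LOOPBACK_INTERFACE', 'VLAN_SUB_INTERFACE']
        data = []
        for table_name in tables:                      # table order drives display order
            for key, attrs in flat_cfg.items():
                table, _, intf = key.partition('|')    # split "TABLE|interface" keys
                if table == table_name and attrs.get('vrf_name') == vrf_name:
                    data.append(intf)
        return data

    with open('tests/vrf_input/config_db.json') as f:
        cfg = json.load(f)

    print(interfaces_per_vrf(cfg, 'Vrf102'))
    # ['PortChannel0002', 'Vlan40', 'Eth32.10'] -- matches the Vrf102 rows in show_vrf_test.py

Iterating the table list first (rather than the JSON keys) is what makes PortChannel0002
print before the sub-interface Eth32.10 in the expected output above.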