From becf5b5ff7dd117b91399ae766999b82afb5a3d7 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan <47282725+renukamanavalan@users.noreply.github.com> Date: Mon, 21 Dec 2020 15:38:31 -0800 Subject: [PATCH] Kubernetes support commands update (#1133) 1) Upgrade Feature commands in both config & show to adopt the kube updates 2) kube join/reset upgraded to call kube_label command to add/remove label, which would trigger join/reset 3) few minor updates in kube code. New/update to commands. `sudo config feature owner ` To update owner `sudo config feature fallback ` To enable/disable fallback `show feature status` Shows the status of the feature. Extended to include kube-support related fields `show feature config` New command to show the current config info `show kube server` Extended to show config & status of server --- config/feature.py | 32 ++++ config/kube.py | 320 ++++++++----------------------- doc/Command-Reference.md | 148 +++++++++++--- scripts/fast-reboot | 38 +++- scripts/reboot | 17 ++ show/feature.py | 126 +++++++++++- show/kube.py | 103 ++++++---- tests/feature_test.py | 213 +++++++++++++++++--- tests/kube_test.py | 187 +++++++++++++++--- tests/mock_tables/config_db.json | 48 +++-- utilities_common/cli.py | 6 - 11 files changed, 834 insertions(+), 404 deletions(-) diff --git a/config/feature.py b/config/feature.py index 6fdc68c02690..950671a3d779 100644 --- a/config/feature.py +++ b/config/feature.py @@ -11,6 +11,38 @@ def feature(): """Configure features""" pass +def _update_field(db, name, fld, val): + tbl = db.cfgdb.get_table('FEATURE') + if name not in tbl: + click.echo("Unable to retrieve {} from FEATURE table".format(name)) + sys.exit(1) + db.cfgdb.mod_entry('FEATURE', name, { fld: val }) + + +# +# 'owner' command ('config feature owner ...') +# +@feature.command('owner', short_help="set owner for a feature") +@click.argument('name', metavar='', required=True) +@click.argument('owner', metavar='', required=True, type=click.Choice(["local", "kube"])) 
+@pass_db +def feature_owner(db, name, owner): + """Set owner for the feature""" + _update_field(db, name, "set_owner", owner) + + +# +# 'fallback' command ('config feature fallback ...') +# +@feature.command('fallback', short_help="set fallback for a feature") +@click.argument('name', metavar='', required=True) +@click.argument('fallback', metavar='', required=True, type=click.Choice(["on", "off"])) +@pass_db +def feature_fallback(db, name, fallback): + """Set fallback for the feature""" + _update_field(db, name, "no_fallback_to_local", "false" if fallback == "on" else "true") + + # # 'state' command ('config feature state ...') # diff --git a/config/kube.py b/config/kube.py index 08cca628d817..ef27c0538e1f 100644 --- a/config/kube.py +++ b/config/kube.py @@ -1,256 +1,74 @@ -import fcntl -import os -import shutil -import tempfile -from urllib.parse import urlparse - import click -import netaddr -import requests -import urllib3 -import utilities_common.cli as clicommon -import yaml -from sonic_py_common import device_info -from swsssdk import ConfigDBConnector -from utilities_common.db import Db +import socket + +from utilities_common.cli import AbbreviationGroup, pass_db from .utils import log -KUBE_ADMIN_CONF = "/etc/sonic/kube_admin.conf" -KUBELET_YAML = "/var/lib/kubelet/config.yaml" -KUBELET_SERVICE = "/etc/systemd/system/multi-user.target.wants/kubelet.service" +# DB Field names +KUBE_SERVER_TABLE_NAME = "KUBERNETES_MASTER" +KUBE_SERVER_TABLE_KEY = "SERVER" +KUBE_SERVER_IP = "ip" +KUBE_SERVER_PORT = "port" +KUBE_SERVER_DISABLE = "disable" +KUBE_SERVER_INSECURE = "insecure" + +KUBE_STATE_SERVER_CONNECTED = "connected" +KUBE_STATE_SERVER_REACHABLE = "server_reachability" +KUBE_STATE_SERVER_IP = "server_ip" +KUBE_STATE_SERVER_TS = "last_update_ts" + +KUBE_LABEL_TABLE = "KUBE_LABELS" +KUBE_LABEL_SET_KEY = "SET" + +def is_valid_ip4_addr(address): + try: + socket.inet_pton(socket.AF_INET, address) + except socket.error: # not a valid address + return False + return 
True -SERVER_ADMIN_URL = "https://{}/admin.conf" -KUBEADM_JOIN_CMD = "kubeadm join --discovery-file {} --node-name {}" -LOCK_FILE = "/var/lock/kube_join.lock" +def is_valid_ip6_addr(address): + try: + socket.inet_pton(socket.AF_INET6, address) + except socket.error: # not a valid address + return False + return True -def _update_kube_server(field, val): - config_db = ConfigDBConnector() - config_db.connect() - table = "KUBERNETES_MASTER" - key = "SERVER" - db_data = Db().get_data(table, key) +def _update_kube_server(db, field, val): + db_data = db.cfgdb.get_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY) def_data = { - "IP": "", - "insecure": "False", - "disable": "False" + KUBE_SERVER_IP: "", + KUBE_SERVER_PORT: "6443", + KUBE_SERVER_INSECURE: "False", + KUBE_SERVER_DISABLE: "False" } for f in def_data: if db_data and f in db_data: if f == field and db_data[f] != val: - config_db.mod_entry(table, key, {field: val}) + db.cfgdb.mod_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY, {field: val}) log.log_info("modify kubernetes server entry {}={}".format(field,val)) else: # Missing field. 
Set to default or given value v = val if f == field else def_data[f] - config_db.mod_entry(table, key, {f: v}) + db.cfgdb.mod_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY, {f: v}) log.log_info("set kubernetes server entry {}={}".format(f,v)) -def _take_lock(): - lock_fd = None - try: - lock_fd = open(LOCK_FILE, "w") - fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) - log.log_info("Lock taken {}".format(LOCK_FILE)) - except IOError as e: - lock_fd = None - log.log_error("Lock {} failed: {}".format(LOCK_FILE, str(e))) - return lock_fd - - -def _download_file(server, insecure): - fname = "" - if insecure: - urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) - - r = requests.get(SERVER_ADMIN_URL.format(server), verify=not insecure) - if r.status_code == 200: - (h, fname) = tempfile.mkstemp(suffix="_kube_join") - os.write(h, r.text) - os.close(h) - else: - clicommon.do_exit("Failed to download {}".format( - SERVER_ADMIN_URL.format(server))) - - # Ensure the admin.conf has given VIP as server-IP. 
- update_file = "{}.upd".format(fname) - cmd = 'sed "s/server:.*:6443/server: https:\/\/{}:6443/" {} > {}'.format( - server, fname, update_file) - clicommon.run_command(cmd) - - shutil.copyfile(update_file, KUBE_ADMIN_CONF) - - clicommon.run_command("rm -f {} {}".format(fname, update_file)) - - -def _is_connected(server=""): - if (os.path.exists(KUBE_ADMIN_CONF) and - os.path.exists(KUBELET_YAML) and - os.path.exists(KUBELET_SERVICE)): - - with open(KUBE_ADMIN_CONF, 'r') as s: - d = yaml.load(s) - d = d['clusters'] if 'clusters' in d else [] - d = d[0] if len(d) > 0 else {} - d = d['cluster'] if 'cluster' in d else {} - d = d['server'] if 'server' in d else "" - if d: - o = urlparse(d) - if o.hostname: - return not server or server == o.hostname - return False - - -def _get_labels(): - labels = [] - - hwsku = device_info.get_hwsku() - version_info = device_info.get_sonic_version_info() - - labels.append("sonic_version={}".format(version_info['build_version'])) - labels.append("hwsku={}".format(hwsku)) - lh = Db().get_data('DEVICE_METADATA', 'localhost') - labels.append("deployment_type={}".format( - lh['type'] if lh and 'type' in lh else "Unknown")) - labels.append("enable_pods=True") - - return labels - - -def _label_node(label): - cmd = "kubectl --kubeconfig {} label nodes {} {}".format( - KUBE_ADMIN_CONF, device_info.get_hostname(), label) - clicommon.run_command(cmd, ignore_error=True) - - -def _troubleshoot_tips(): - msg = """ -if join fails, check the following - -a) Ensure both master & node run same or compatible k8s versions - -b) Check if this node already exists in master - Use 'sudo kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes' to list nodes at master. - - If yes, delete it, as the node is attempting a new join. - 'kubectl --kubeconfig=/etc/kubernetes/admin.conf drain --ignore-daemonsets' - 'kubectl --kubeconfig=/etc/kubernetes/admin.conf delete node ' - -c) In Master check if all system pods are running good. 
- 'kubectl get pods --namespace kube-system' - - If any not running properly, say READY column has 0/1, decribe pod for more detail. - 'kubectl --namespace kube-system describe pod ' +def _label_node(dbconn, name, val=None): + set_key = "{}|{}".format(KUBE_LABEL_TABLE, KUBE_LABEL_SET_KEY) + client = dbconn.get_redis_client(dbconn.STATE_DB) + client.hset(set_key, name, val if val else "false") - For additional details, look into pod's logs. - @ node: /var/log/pods//... - @ master: 'kubectl logs -n kube-system ' - """ - (h, fname) = tempfile.mkstemp(suffix="kube_hints_") - os.write(h, msg) - os.close(h) - - log.log_error("Refer file {} for troubleshooting tips".format(fname)) - - -def _do_join(server, insecure): - try: - _download_file(server, insecure) - - clicommon.run_command("systemctl enable kubelet") - - clicommon.run_command("modprobe br_netfilter") - - clicommon.run_command(KUBEADM_JOIN_CMD.format( - KUBE_ADMIN_CONF, device_info.get_hostname()), ignore_error=True) - - if _is_connected(server): - labels = _get_labels() - for label in labels: - _label_node(label) - - except requests.exceptions.RequestException as e: - clicommon.do_exit("Download failed: {}".format(str(e))) - - except OSError as e: - clicommon.do_exit("Download failed: {}".format(str(e))) - - _troubleshoot_tips() - - -def kube_reset(): - lock_fd = _take_lock() - if not lock_fd: - log.log_error("Lock {} is active; Bail out".format(LOCK_FILE)) - return - - # Remove a key label and drain/delete self from cluster - # If not, the next join would fail - # - if os.path.exists(KUBE_ADMIN_CONF): - _label_node("enable_pods-") - clicommon.run_command( - "kubectl --kubeconfig {} --request-timeout 20s drain {} --ignore-daemonsets".format( - KUBE_ADMIN_CONF, device_info.get_hostname()), - ignore_error=True) - clicommon.run_command( - "kubectl --kubeconfig {} --request-timeout 20s delete node {}".format( - KUBE_ADMIN_CONF, device_info.get_hostname()), - ignore_error=True) - - clicommon.run_command("kubeadm 
reset -f", ignore_error=True) - clicommon.run_command("rm -rf /etc/cni/net.d") - clicommon.run_command("rm -f {}".format(KUBE_ADMIN_CONF)) - clicommon.run_command("systemctl stop kubelet") - clicommon.run_command("systemctl disable kubelet") - - -def kube_join(force=False): - lock_fd = _take_lock() - if not lock_fd: - log.log_error("Lock {} is active; Bail out".format(LOCK_FILE)) - return - - db_data = Db().get_data('KUBERNETES_MASTER', 'SERVER') - if not db_data or 'IP' not in db_data or not db_data['IP']: - log.log_error("Kubernetes server is not configured") - - if db_data['disable'].lower() != "false": - log.log_error("kube join skipped as kubernetes server is marked disabled") - return - - if not force: - if _is_connected(db_data['IP']): - # Already connected. No-Op - return - - kube_reset() - _do_join(db_data['IP'], db_data['insecure']) - - -@click.group(cls=clicommon.AbbreviationGroup) +@click.group(cls=AbbreviationGroup) def kubernetes(): """kubernetes command line""" pass -# cmd kubernetes join [-f/--force] -@kubernetes.command() -@click.option('-f', '--force', help='Force a join', is_flag=True) -def join(force): - kube_join(force=force) - - -# cmd kubernetes reset -@kubernetes.command() -def reset(): - kube_reset() - - # cmd kubernetes server @kubernetes.group() def server(): @@ -260,29 +78,45 @@ def server(): # cmd kubernetes server IP @server.command() -@click.argument('vip') -def ip(vip): +@click.argument('vip', required=True) +@pass_db +def ip(db, vip): """Specify a kubernetes cluster VIP""" - if not netaddr.IPAddress(vip): + if vip and not is_valid_ip4_addr(vip) and not is_valid_ip6_addr(vip): click.echo('Invalid IP address %s' % vip) - return - _update_kube_server('IP', vip) + sys.exit(1) + _update_kube_server(db, KUBE_SERVER_IP, vip) + + +# cmd kubernetes server Port +@server.command() +@click.argument('portval', required=True) +@pass_db +def port(db, portval): + """Specify a kubernetes Service port""" + val = int(portval) + if (val <= 0) or (val 
>= (64 << 10)): + click.echo('Invalid port value %s' % portval) + sys.exit(1) + _update_kube_server(db, KUBE_SERVER_PORT, portval) # cmd kubernetes server insecure @server.command() @click.argument('option', type=click.Choice(["on", "off"])) -def insecure(option): +@pass_db +def insecure(db, option): """Specify a kubernetes cluster VIP access as insecure or not""" - _update_kube_server('insecure', option == "on") + _update_kube_server(db, 'insecure', option == "on") # cmd kubernetes server disable @server.command() @click.argument('option', type=click.Choice(["on", "off"])) -def disable(option): +@pass_db +def disable(db, option): """Specify a kubernetes cluster VIP access is disabled or not""" - _update_kube_server('disable', option == "on") + _update_kube_server(db, 'disable', option == "on") # cmd kubernetes label @@ -296,20 +130,16 @@ def label(): @label.command() @click.argument('key', required=True) @click.argument('val', required=True) -def add(key, val): +@pass_db +def add(db, key, val): """Add a label to this node""" - if not key or not val: - click.echo('Require key & val') - return - _label_node("{}={}".format(key, val)) + _label_node(db.db, key, val) # cmd kubernetes label drop @label.command() @click.argument('key', required=True) -def drop(key): +@pass_db +def drop(db, key): """Drop a label from this node""" - if not key: - click.echo('Require key to drop') - return - _label_node("{}-".format(key)) + _label_node(db.db, key) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 4122c0d865ed..3088b6db6149 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -65,6 +65,9 @@ * [IP / IPv6](#ip--ipv6) * [IP show commands](#ip-show-commands) * [IPv6 show commands](#ipv6-show-commands) +* [Kubernetes](#Kubernetes) + * [Kubernetes show commands](#Kubernetes-show-commands) + * [Kubernetes config commands](#Kubernetes-config-commands) * [LLDP](#lldp) * [LLDP show commands](#lldp-show-commands) * [Loading, Reloading And Saving 
Configuration](#loading-reloading-and-saving-configuration) @@ -292,6 +295,7 @@ This command lists all the possible configuration commands at the top level. hostname Change device hostname without impacting traffic interface Interface-related configuration tasks interface_naming_mode Modify interface naming mode for interacting... + kubernetes Kubernetes server related configuration load Import a previous saved config DB dump file. load_mgmt_config Reconfigure hostname and mgmt interface based... load_minigraph Reconfigure based on minigraph. @@ -343,6 +347,7 @@ This command displays the full list of show commands available in the software; interfaces Show details of the network interfaces ip Show IP (IPv4) commands ipv6 Show IPv6 commands + kubernetes Show kubernetes commands line Show all /dev/ttyUSB lines and their info lldp LLDP (Link Layer Discovery Protocol)... logging Show system log @@ -2725,7 +2730,7 @@ The list of the WRED profile fields that are configurable is listed in the below Go Back To [Beginning of the document](#) or [Beginning of this section](#ecn) -## Feature +## Feature SONiC includes a capability in which Feature state can be enabled/disabled which will make corresponding feature docker container to start/stop. @@ -2738,43 +2743,102 @@ likelihood of entering a healthy state. ### Feature show commands +**show feature config** + +Shows the config of given feature or all if no feature is given. The "fallback" is shown only if configured. The fallback defaults to "true" when not configured. 
+ +- Usage: + ``` + show feature config [] + ``` + +- Example: + ``` + admin@sonic:~$ show feature config + Feature State AutoRestart Owner fallback + -------------- -------- ------------- ------- ---------- + bgp enabled enabled local + database enabled disabled local + dhcp_relay enabled enabled kube + lldp enabled enabled kube true + mgmt-framework enabled enabled local + nat disabled enabled local + pmon enabled enabled kube + radv enabled enabled kube + sflow disabled enabled local + snmp enabled enabled kube + swss enabled enabled local + syncd enabled enabled local + teamd enabled enabled local + telemetry enabled enabled kube + ``` + **show feature status** -This command will display the status of feature state. +Shows the status of given feature or all if no feature is given. The "fallback" defaults to "true" when not configured. +The subset of features are configurable for remote management and only those report additional data. - Usage: ``` - show feature status [] + show feature status [] ``` - Example: ``` admin@sonic:~$ show feature status - Feature State AutoRestart - ---------- -------------- -------------- - bgp enabled enabled - database always_enabled always_enabled - dhcp_relay enabled enabled - lldp enabled enabled - pmon enabled enabled - radv enabled enabled - snmp enabled enabled - swss always_enabled enabled - syncd always_enabled enabled - teamd always_enabled enabled - telemetry enabled enabled + Feature State AutoRestart SystemState UpdateTime ContainerId ContainerVersion SetOwner CurrentOwner RemoteState + -------------- -------- ------------- ------------- ------------------- ------------- ------------------ ---------- -------------- ------------- + bgp enabled enabled up local local none + database enabled disabled local + dhcp_relay enabled enabled up 2020-11-15 18:21:09 249e70102f55 20201230.100 kube local + lldp enabled enabled up 2020-11-15 18:21:09 779c2d55ee12 20201230.100 kube local + mgmt-framework enabled enabled up local 
local none + nat disabled enabled local + pmon enabled enabled up 2020-11-15 18:20:27 a2b9ffa8aba3 20201230.100 kube local + radv enabled enabled up 2020-11-15 18:21:05 d8ff27dcfe46 20201230.100 kube local + sflow disabled enabled local + snmp enabled enabled up 2020-11-15 18:25:51 8b7d5529e306 20201230.111 kube kube running + swss enabled enabled up local local none + syncd enabled enabled up local local none + teamd enabled enabled up local local none + telemetry enabled enabled down 2020-11-15 18:24:59 20201230.100 kube none ``` -**show feature autorestart** -This command will display the status of auto-restart for feature container. +**config feature owner** + +Configures the owner for a feature as "local" or "kube". The "local" implies starting the feature container from local image. The "kube" implies that kubernetes server is made eligible to deploy the feature. The deployment of a feature by kubernetes is conditional based on many factors like, whether the kube server is configured or not, connected-to-kube-server or not and if that master has manifest for this feature for this switch or not and more. At some point in future, the deployment *could* happen and till that point the feature can run from local image, called "fallback". The fallback is allowed by default and it could be toggled to "not allowed". When fallback is not allowed, the feature would run only upon deployment by kubernetes master. - Usage: ``` - show feature autorestart [] + config feature owner [] [local/kube] + ``` + +- Example: + ``` + admin@sonic:~$ sudo config feature owner snmp kube + ``` + +**config feature fallback** + +Features configured for "kube" deployment could be allowed to fallback to using local image, until the point of successful kube deployment. The fallback is allowed by default. 
+ +- Usage: + ``` + config feature fallback [] [on/off] ``` - Example: ``` + admin@sonic:~$ sudo config feature fallback snmp on + ``` + +**show feature autorestart** + +This command will display the status of auto-restart for feature container. + +- Usage: + ``` + show feature autorestart [] admin@sonic:~$ show feature autorestart Feature AutoRestart ---------- -------------- @@ -2803,10 +2867,6 @@ This command will configure the state for a specific feature. - Usage: ``` config feature state (enabled | disabled) - ``` - -- Example: - ``` admin@sonic:~$ sudo config feature state bgp disabled ``` @@ -2817,10 +2877,6 @@ This command will configure the status of auto-restart for a specific feature co - Usage: ``` config feature autorestart (enabled | disabled) - ``` - -- Example: - ``` admin@sonic:~$ sudo config feature autorestart bgp disabled ``` NOTE: If the existing state or auto-restart value for a feature is "always_enabled" then config @@ -3968,6 +4024,44 @@ Refer the routing stack [Quagga Command Reference](https://www.quagga.net/docs/q Go Back To [Beginning of the document](#) or [Beginning of this section](#ip--ipv6) +## Kubernetes + +### Kubernetes show commands + +**show kubernetes server config** + +This command displays the kubernetes server configuration, if any, else would report as not configured. + +- Usage: + ``` + show kubernetes server config + ``` + +- Example: + ``` + admin@sonic:~$ show kubernetes server config + ip port insecure disable + ----------- ------ ---------- --------- + 10.3.157.24 6443 True False + ``` + +**show kubernetes server status** + +This command displays the kubernetes server status. 
+ +- Usage: + ``` + show kubernetes server status + ``` + +- Example: + ``` + admin@sonic:~$ show kubernetes server status + ip port connected update-time + ----------- ------ ----------- ------------------- + 10.3.157.24 6443 true 2020-11-15 18:25:05 + ``` + ## LLDP ### LLDP show commands diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 8918620201b9..25b69a5c8be3 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -20,6 +20,8 @@ PLATFORM=$(sonic-cfggen -H -v DEVICE_METADATA.localhost.platform) PLATFORM_PLUGIN="${REBOOT_TYPE}_plugin" LOG_SSD_HEALTH="/usr/local/bin/log_ssd_health" SSD_FW_UPDATE="ssd-fw-upgrade" +TAG_LATEST=yes + # Require 100M available on the hard drive for warm reboot temp files, # Size is in 1K blocks: MIN_HD_SPACE_NEEDED=100000 @@ -63,6 +65,7 @@ function showHelpAndExit() echo " -c : specify control plane assistant IP list" echo " -s : strict mode: do not proceed without:" echo " - control plane assistant IP list." + echo " -t : Don't tag the current kube images as latest" exit "${EXIT_SUCCESS}" } @@ -98,6 +101,9 @@ function parseOptions() s ) STRICT=yes ;; + t ) + TAG_LATEST=no + ;; esac done } @@ -438,6 +444,13 @@ setup_reboot_variables reboot_pre_check +if test -f /usr/local/bin/ctrmgr_tools.py +then + if [[ x"${TAG_LATEST}" == x"yes" ]]; then + /usr/local/bin/ctrmgr_tools.py tag-all + fi +fi + # Install new FW for mellanox platforms before control plane goes down # So on boot switch will not spend time to upgrade FW increasing the CP downtime if [[ "$sonic_asic_type" == "mellanox" ]]; then @@ -544,11 +557,11 @@ debug "Stopping bgp ..." systemctl stop bgp debug "Stopped bgp ..." -# Kill lldp, otherwise it sends informotion about reboot. +# Kill lldp, otherwise it sends information about reboot. # We call `docker kill lldp` to ensure the container stops as quickly as possible, # then immediately call `systemctl stop lldp` to prevent the service from # restarting the container automatically. 
-docker kill lldp &> /dev/null || debug "Docker lldp is not running ($?) ..." +container kill lldp &> /dev/null || debug "Docker lldp is not running ($?) ..." systemctl stop lldp if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then @@ -605,14 +618,19 @@ debug "Stopped syncd ..." # then immediately call `systemctl stop ...` to prevent the service from # restarting the container automatically. debug "Stopping all remaining containers ..." -for CONTAINER_NAME in $(docker ps --format '{{.Names}}'); do - CONTAINER_STOP_RC=0 - docker kill $CONTAINER_NAME &> /dev/null || CONTAINER_STOP_RC=$? - systemctl stop $CONTAINER_NAME || debug "Ignore stopping $CONTAINER_NAME error $?" - if [[ CONTAINER_STOP_RC -ne 0 ]]; then - debug "Failed killing container $CONTAINER_NAME RC $CONTAINER_STOP_RC ." - fi -done +if test -f /usr/local/bin/ctrmgr_tools.py +then + /usr/local/bin/ctrmgr_tools.py kill-all +else + for CONTAINER_NAME in $(docker ps --format '{{.Names}}'); do + CONTAINER_STOP_RC=0 + docker kill $CONTAINER_NAME &> /dev/null || CONTAINER_STOP_RC=$? + systemctl stop $CONTAINER_NAME || debug "Ignore stopping $CONTAINER_NAME error $?" + if [[ CONTAINER_STOP_RC -ne 0 ]]; then + debug "Failed killing container $CONTAINER_NAME RC $CONTAINER_STOP_RC ." + fi + done +fi debug "Stopped all remaining containers ..." # Stop the docker container engine. 
Otherwise we will have a broken docker storage diff --git a/scripts/reboot b/scripts/reboot index 18d68f918aff..889f4c4bd547 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -29,6 +29,7 @@ EXIT_SONIC_INSTALLER_VERIFY_REBOOT=21 SSD_FW_UPDATE="ssd-fw-upgrade" REBOOT_SCRIPT_NAME=$(basename $0) REBOOT_TYPE="${REBOOT_SCRIPT_NAME}" +TAG_LATEST=yes function debug() { @@ -38,6 +39,16 @@ function debug() logger "$@" } +function tag_images() +{ + if test -f /usr/local/bin/ctrmgr_tools.py + then + if [[ x"${TAG_LATEST}" == x"yes" ]]; then + /usr/local/bin/ctrmgr_tools.py tag-all + fi + fi +} + function stop_sonic_services() { if [[ x"$ASIC_TYPE" != x"mellanox" ]]; then @@ -110,6 +121,9 @@ function parse_options() v ) VERBOSE=yes ;; + t ) + TAG_LATEST=no + ;; esac done } @@ -127,6 +141,9 @@ debug "User requested rebooting device ..." setup_reboot_variables reboot_pre_check +# Tag remotely deployed images as local +tag_images + # Stop SONiC services gracefully. stop_sonic_services diff --git a/show/feature.py b/show/feature.py index e3dfd907a38a..f6aab9cf6bf7 100644 --- a/show/feature.py +++ b/show/feature.py @@ -1,3 +1,4 @@ +import sys import click from natsort import natsorted from tabulate import tabulate @@ -12,6 +13,27 @@ def feature(): """Show feature status""" pass + +def make_header(fields_info, fields): + header = ["Feature"] + + for (h, f, _) in fields_info: + if f in fields: + header.append(h) + return header + +def make_body(names, lst_data, fields, fields_info): + # Make body + body = [] + for name, data in zip(names, lst_data): + entry = [name] + for (_, f, d) in fields_info: + if f in fields: + entry.append(data[f] if f in data else d) + body.append(entry) + return body + + # # 'status' subcommand (show feature status) # @@ -19,20 +41,106 @@ def feature(): @click.argument('feature_name', required=False) @pass_db def feature_status(db, feature_name): - header = ['Feature', 'State', 'AutoRestart'] - body = [] - feature_table = db.cfgdb.get_table('FEATURE') + 
fields_info = [ + ('State', 'state', ""), + ('AutoRestart', 'auto_restart', ""), + ('SystemState', 'system_state', ""), + ('UpdateTime', 'update_time', ""), + ('ContainerId', 'container_id', ""), + ('Version', 'container_version', ""), + ('SetOwner', 'set_owner', ""), + ('CurrentOwner', 'current_owner', ""), + ('RemoteState', "remote_state", "") + ] + + cfg_table = db.cfgdb.get_table('FEATURE') + dbconn = db.db + keys = dbconn.keys(dbconn.STATE_DB, "FEATURE|*") + ordered_data = [] + fields = set() + names = [] if feature_name: - if feature_table and feature_name in feature_table: - body.append([feature_name, feature_table[feature_name]['state'], \ - feature_table[feature_name]['auto_restart']]) + key = "FEATURE|{}".format(feature_name) + if feature_name in cfg_table: + data = cfg_table[feature_name] + if keys and (key in keys): + data.update(dbconn.get_all(dbconn.STATE_DB, key)) + ordered_data.append(data) + fields = set(data.keys()) + names.append(feature_name) else: click.echo("Can not find feature {}".format(feature_name)) sys.exit(1) else: - for key in natsorted(list(feature_table.keys())): - body.append([key, feature_table[key]['state'], feature_table[key]['auto_restart']]) - click.echo(tabulate(body, header)) + for name in natsorted(cfg_table.keys()): + data = cfg_table[name] + key = "FEATURE|{}".format(name) + if keys and (key in keys): + data.update(dbconn.get_all(dbconn.STATE_DB, key)) + + fields = fields | set(data.keys()) + ordered_data.append(data) + names.append(name) + + header = make_header(fields_info, fields) + body = make_body(names, ordered_data, fields, fields_info) + click.echo(tabulate(body, header, disable_numparse=True)) + + +def _negate_bool_str(d): + d = d.lower() + if d == "true": + return "false" + if d == "false": + return "true" + return d + +def _update_data(upd_lst, data): + for f in upd_lst: + if f in data: + data[f] = upd_lst[f](data[f]) + return data + +# +# 'config' subcommand (show feature config) +# +@feature.command('config', 
short_help="Show feature config") +@click.argument('feature_name', required=False) +@pass_db +def feature_config(db, feature_name): + fields_info = [ + ('State', 'state', ""), + ('AutoRestart', 'auto_restart', ""), + ('Owner', 'set_owner', "local"), + ('fallback', 'no_fallback_to_local', "") + ] + + update_list = { "no_fallback_to_local" : _negate_bool_str } + + cfg_table = db.cfgdb.get_table('FEATURE') + ordered_data = [] + names = [] + fields = set() + if feature_name: + if feature_name in cfg_table: + data = _update_data(update_list, cfg_table[feature_name]) + ordered_data.append(data) + names.append(feature_name) + fields = set(data.keys()) + else: + click.echo("Can not find feature {}".format(feature_name)) + sys.exit(1) + else: + for key in natsorted(cfg_table.keys()): + data = _update_data(update_list, cfg_table[key]) + + fields = fields | set(data.keys()) + names.append(key) + ordered_data.append(data) + + header = make_header(fields_info, fields) + body = make_body(names, ordered_data, fields, fields_info) + click.echo(tabulate(body, header, disable_numparse=True)) # # 'autorestart' subcommand (show feature autorestart) diff --git a/show/kube.py b/show/kube.py index fbcfbe3e23ed..5ab8cf3f6fb4 100644 --- a/show/kube.py +++ b/show/kube.py @@ -1,67 +1,92 @@ -import os - import click -from sonic_py_common import device_info -from utilities_common.db import Db -import utilities_common.cli as clicommon +from tabulate import tabulate + +from utilities_common.cli import AbbreviationGroup, pass_db -KUBE_ADMIN_CONF = "/etc/sonic/kube_admin.conf" -KUBECTL_CMD = "kubectl --kubeconfig /etc/sonic/kube_admin.conf {}" REDIS_KUBE_TABLE = 'KUBERNETES_MASTER' REDIS_KUBE_KEY = 'SERVER' +KUBE_LABEL_TABLE = "KUBE_LABELS" +KUBE_LABEL_SET_KEY = "SET" -def _print_entry(d, prefix): - if prefix: - prefix += " " - if isinstance(d, dict): - for k in d: - _print_entry(d[k], prefix + k) - else: - print(prefix + str(d)) +def _print_entry(data, fields): + header = [] + body = [] + for (h, 
f, d) in fields: + header.append(h) + body.append(data[f] if f in data else d) -def run_kube_command(cmd): - if os.path.exists(KUBE_ADMIN_CONF): - clicommon.run_command(KUBECTL_CMD.format(cmd)) - else: - print("System not connected to cluster yet") + click.echo(tabulate([body,], header, disable_numparse=True)) # # kubernetes group ("show kubernetes ...") # -@click.group() +@click.group(cls=AbbreviationGroup, name='kubernetes', invoke_without_command=False) def kubernetes(): pass -@kubernetes.command() -def nodes(): - """List all nodes in this kubernetes cluster""" - run_kube_command("get nodes") +# cmd kubernetes server +@kubernetes.group() +def server(): + """ Server configuration """ + pass -@kubernetes.command() -def pods(): - """List all pods in this kubernetes cluster""" - run_kube_command("get pods --field-selector spec.nodeName={}".format(device_info.get_hostname())) +@server.command() +@pass_db +def config(db): + """Show kube configuration""" + server_cfg_fields = [ + # (
<header>, <field>, <default>)
+        ("ip", "ip", ""),
+        ("port", "port", "6443"),
+        ("insecure", "insecure", "False"),
+        ("disable","disable", "False")
+    ]
 
-@kubernetes.command()
-def status():
-    """Descibe this node"""
-    run_kube_command("describe node {}".format(device_info.get_hostname()))
+    kube_fvs = db.cfgdb.get_entry(REDIS_KUBE_TABLE, REDIS_KUBE_KEY)
+    if kube_fvs:
+        _print_entry(kube_fvs, server_cfg_fields)
+    else:
+        print("Kubernetes server is not configured")
 
 
-@kubernetes.command()
-def server():
+@server.command()
+@pass_db
+def status(db):
     """Show kube configuration"""
-    kube_fvs = Db().get_data(REDIS_KUBE_TABLE, REDIS_KUBE_KEY)
+    server_state_fields = [
+        # (
, , ) + ("ip", "ip" "", False), + ("port", "port", "6443"), + ("connected", "connected", ""), + ("update-time", "update_time", "") + ] + + + kube_fvs = db.db.get_all(db.db.STATE_DB, + "{}|{}".format(REDIS_KUBE_TABLE, REDIS_KUBE_KEY)) if kube_fvs: - _print_entry(kube_fvs, "{} {}".format( - REDIS_KUBE_TABLE, REDIS_KUBE_KEY)) + _print_entry(kube_fvs, server_state_fields) else: - print("Kubernetes server is not configured") + print("Kubernetes server has no status info") + + +@kubernetes.command() +@pass_db +def labels(db): + header = ["name", "value"] + + body = [] + labels = db.db.get_all(db.db.STATE_DB, + "{}|{}".format(KUBE_LABEL_TABLE, KUBE_LABEL_SET_KEY)) + if labels: + for (n,v) in labels.items(): + body.append([n, v]) + click.echo(tabulate(body, header, disable_numparse=True)) diff --git a/tests/feature_test.py b/tests/feature_test.py index 1b5e275a7c34..661dc9584d1e 100644 --- a/tests/feature_test.py +++ b/tests/feature_test.py @@ -5,34 +5,102 @@ from utilities_common.db import Db show_feature_status_output="""\ -Feature State AutoRestart ----------- -------------- -------------- -bgp enabled enabled -database always_enabled always_enabled -dhcp_relay enabled enabled -lldp enabled enabled -nat enabled enabled -pmon enabled enabled -radv enabled enabled -restapi disabled enabled -sflow disabled enabled -snmp enabled enabled -swss enabled enabled -syncd enabled enabled -teamd enabled enabled -telemetry enabled enabled +Feature State AutoRestart SetOwner +---------- -------------- -------------- ---------- +bgp enabled enabled local +database always_enabled always_enabled local +dhcp_relay enabled enabled kube +lldp enabled enabled kube +nat enabled enabled local +pmon enabled enabled kube +radv enabled enabled kube +restapi disabled enabled local +sflow disabled enabled local +snmp enabled enabled kube +swss enabled enabled local +syncd enabled enabled local +teamd enabled enabled local +telemetry enabled enabled kube +""" + 
+show_feature_status_output_with_remote_mgmt="""\ +Feature State AutoRestart SystemState UpdateTime ContainerId Version SetOwner CurrentOwner RemoteState +---------- -------------- -------------- ------------- ------------------- ------------- ------------ ---------- -------------- ------------- +bgp enabled enabled local +database always_enabled always_enabled local +dhcp_relay enabled enabled kube +lldp enabled enabled kube +nat enabled enabled local +pmon enabled enabled kube +radv enabled enabled kube +restapi disabled enabled local +sflow disabled enabled local +snmp enabled enabled up 2020-11-12 23:32:56 aaaabbbbcccc 20201230.100 kube kube kube +swss enabled enabled local +syncd enabled enabled local +teamd enabled enabled local +telemetry enabled enabled kube +""" + +show_feature_config_output="""\ +Feature State AutoRestart +---------- -------- ------------- +bgp enabled enabled +database enabled disabled +dhcp_relay enabled enabled +lldp enabled enabled +nat enabled enabled +pmon enabled enabled +radv enabled enabled +restapi disabled enabled +sflow disabled enabled +snmp enabled enabled +swss enabled enabled +syncd enabled enabled +teamd enabled enabled +telemetry enabled enabled +""" + +show_feature_config_output_with_remote_mgmt="""\ +Feature State AutoRestart Owner +---------- -------------- -------------- ------- +bgp enabled enabled local +database always_enabled always_enabled local +dhcp_relay enabled enabled kube +lldp enabled enabled kube +nat enabled enabled local +pmon enabled enabled kube +radv enabled enabled kube +restapi disabled enabled local +sflow disabled enabled local +snmp enabled enabled kube +swss enabled enabled local +syncd enabled enabled local +teamd enabled enabled local +telemetry enabled enabled kube """ show_feature_bgp_status_output="""\ -Feature State AutoRestart ---------- ------- ------------- -bgp enabled enabled +Feature State AutoRestart SetOwner +--------- ------- ------------- ---------- +bgp enabled enabled local 
""" show_feature_bgp_disabled_status_output="""\ -Feature State AutoRestart ---------- -------- ------------- -bgp disabled enabled +Feature State AutoRestart SetOwner +--------- -------- ------------- ---------- +bgp disabled enabled local +""" +show_feature_snmp_config_owner_output="""\ +Feature State AutoRestart Owner fallback +--------- ------- ------------- ------- ---------- +snmp enabled enabled local true +""" + +show_feature_snmp_config_fallback_output="""\ +Feature State AutoRestart Owner fallback +--------- ------- ------------- ------- ---------- +snmp enabled enabled kube false """ show_feature_autorestart_output="""\ @@ -68,9 +136,9 @@ """ show_feature_database_always_enabled_state_output="""\ -Feature State AutoRestart ---------- -------------- -------------- -database always_enabled always_enabled +Feature State AutoRestart SetOwner +--------- -------------- -------------- ---------- +database always_enabled always_enabled local """ show_feature_database_always_enabled_autorestart_output="""\ @@ -90,7 +158,7 @@ class TestFeature(object): def setup_class(cls): print("SETUP") - def test_show_feature_status(self, get_cmd_module): + def test_show_feature_status_no_kube_status(self, get_cmd_module): (config, show) = get_cmd_module runner = CliRunner() result = runner.invoke(show.cli.commands["feature"].commands["status"], []) @@ -99,6 +167,37 @@ def test_show_feature_status(self, get_cmd_module): assert result.exit_code == 0 assert result.output == show_feature_status_output + def test_show_feature_status(self, get_cmd_module): + (config, show) = get_cmd_module + db = Db() + dbconn = db.db + for (key, val) in [("system_state", "up"), ("current_owner", "kube"), + ("container_id", "aaaabbbbcccc"), ("update_time", "2020-11-12 23:32:56"), + ("container_version", "20201230.100"), ("remote_state", "kube")]: + dbconn.set(dbconn.STATE_DB, "FEATURE|snmp", key, val) + runner = CliRunner() + result = runner.invoke(show.cli.commands["feature"].commands["status"], 
["snmp"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["feature"].commands["status"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_feature_status_output_with_remote_mgmt + + def test_show_feature_config(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + result = runner.invoke(show.cli.commands["feature"].commands["config"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + if "Owner" in result.output: + assert result.output == show_feature_config_output_with_remote_mgmt + else: + assert result.output == show_feature_config_output + def test_show_feature_status_abbrev_cmd(self, get_cmd_module): (config, show) = get_cmd_module runner = CliRunner() @@ -134,6 +233,25 @@ def test_show_feature_autorestart(self, get_cmd_module): assert result.exit_code == 0 assert result.output == show_feature_autorestart_output + def test_fail_autorestart(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + + # Try setting auto restart for non-existing feature + result = runner.invoke(config.config.commands["feature"].commands["autorestart"], ["foo", "disabled"]) + print(result.exit_code) + assert result.exit_code == 1 + + # Delete Feature table + db.cfgdb.delete_table("FEATURE") + + # Try setting auto restart when no FEATURE table + result = runner.invoke(config.config.commands["feature"].commands["autorestart"], ["bgp", "disabled"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + + def test_show_bgp_autorestart_status(self, get_cmd_module): (config, show) = get_cmd_module runner = CliRunner() @@ -164,6 +282,48 @@ def test_config_bgp_feature_state(self, get_cmd_module): assert result.exit_code == 0 assert result.output == show_feature_bgp_disabled_status_output + def test_config_snmp_feature_owner(self, get_cmd_module): + 
(config, show) = get_cmd_module + db = Db() + runner = CliRunner() + result = runner.invoke(config.config.commands["feature"].commands["owner"], ["snmp", "local"], obj=db) + print(result.exit_code) + print(result.output) + result = runner.invoke(config.config.commands["feature"].commands["fallback"], ["snmp", "on"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["feature"].commands["config"], ["foo"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + + result = runner.invoke(show.cli.commands["feature"].commands["config"], ["snmp"], obj=db) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_feature_snmp_config_owner_output + + def test_config_unknown_feature_owner(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + result = runner.invoke(config.config.commands["feature"].commands["owner"], ["foo", "local"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 1 + + def test_config_snmp_feature_fallback(self, get_cmd_module): + (config, show) = get_cmd_module + db = Db() + runner = CliRunner() + result = runner.invoke(config.config.commands["feature"].commands["fallback"], ["snmp", "off"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + result = runner.invoke(show.cli.commands["feature"].commands["config"], ["snmp"], obj=db) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_feature_snmp_config_fallback_output + def test_config_bgp_autorestart(self, get_cmd_module): (config, show) = get_cmd_module db = Db() @@ -220,6 +380,7 @@ def test_config_unknown_feature(self, get_cmd_module): runner = CliRunner() result = runner.invoke(config.config.commands["feature"].commands['state'], ["foo", "enabled"]) print(result.output) + print(result.exit_code) assert result.exit_code == 1 @classmethod diff --git 
a/tests/kube_test.py b/tests/kube_test.py index 2a39c241d6e6..a19402a627ef 100644 --- a/tests/kube_test.py +++ b/tests/kube_test.py @@ -1,32 +1,62 @@ -import os -import sys from click.testing import CliRunner from utilities_common.db import Db -show_server_output_0="""\ +show_no_server_output="""\ Kubernetes server is not configured """ +show_server_output_0="""\ +ip port insecure disable +----------- ------ ---------- --------- +10.3.157.24 6443 True False +""" show_server_output_1="""\ -KUBERNETES_MASTER SERVER IP 10.10.10.11 -KUBERNETES_MASTER SERVER insecure False -KUBERNETES_MASTER SERVER disable False +ip port insecure disable +----------- ------ ---------- --------- +10.10.10.11 6443 True False """ show_server_output_2="""\ -KUBERNETES_MASTER SERVER IP 10.10.10.11 -KUBERNETES_MASTER SERVER insecure True -KUBERNETES_MASTER SERVER disable False +ip port insecure disable +----------- ------ ---------- --------- +10.3.157.24 6443 False False """ show_server_output_3="""\ -KUBERNETES_MASTER SERVER IP 10.10.10.11 -KUBERNETES_MASTER SERVER insecure True -KUBERNETES_MASTER SERVER disable True +ip port insecure disable +----------- ------ ---------- --------- +10.3.157.24 6443 True True +""" + +show_server_output_4="""\ +ip port insecure disable +----------- ------ ---------- --------- +10.3.157.24 7777 True False +""" + +empty_server_status="""\ +Kubernetes server has no status info +""" + +non_empty_server_status="""\ +ip port connected update-time +----------- ------ ----------- ------------------- +10.3.157.24 6443 false 2020-11-13 00:49:05 +""" + +empty_labels="""\ +name value +------ ------- """ +non_empty_labels="""\ +name value +------------- ------------- +hwsku Force10-S6000 +teamd_enabled false +""" -class kube(object): +class TestKube(object): @classmethod def setup_class(cls): print("SETUP") @@ -35,44 +65,145 @@ def setup_class(cls): def __check_res(self, result, info, op): print("Running test: {}".format(info)) print(result.exit_code) - 
print(result.output) assert result.exit_code == 0 - assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == op - + print(result.output) + assert result.output == op def test_kube_server(self, get_cmd_module): (config, show) = get_cmd_module - db = Db() runner = CliRunner() # Check server not configured - result = runner.invoke(show.cli.commands["kubernetes"].commands["server"]) - self.__check_res(result, "empty server test", show_server_output_0) + result = runner.invoke(show.cli.commands["kubernetes"].commands["server"].commands["config"]) + self.__check_res(result, "init server config test", show_server_output_0) + + result = runner.invoke(show.cli.commands["kubernetes"].commands["server"].commands["status"]) + self.__check_res(result, "init server status test", empty_server_status) + + def test_no_kube_server(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + + db.cfgdb.delete_table("KUBERNETES_MASTER") + + # Check server not configured + result = runner.invoke(show.cli.commands["kubernetes"].commands["server"].commands["config"], [], obj=db) + self.__check_res(result, "null server config test", show_no_server_output) + + # Add IP when not configured + result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["ip", "10.10.10.11"], obj=db) + self.__check_res(result, "set server IP when none", "") + + + + def test_kube_server_status(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + dbconn = db.db + + for (k, v) in [ ("ip", "10.3.157.24"), ("port", "6443"), + ("connected", "false"), ("update_time", "2020-11-13 00:49:05")]: + dbconn.set(dbconn.STATE_DB, "KUBERNETES_MASTER|SERVER", k, v) + + result = runner.invoke(show.cli.commands["kubernetes"].commands["server"].commands["status"], [], obj=db) + self.__check_res(result, "init server status test", non_empty_server_status) + + + def test_set_server_ip(self, get_cmd_module): + (config, show) = 
get_cmd_module + db = Db() + runner = CliRunner() # Add IP & test show result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["ip", "10.10.10.11"], obj=db) self.__check_res(result, "set server IP", "") - - result = runner.invoke(show.cli.commands["kubernetes"].commands["server"]) + + result = runner.invoke(show.cli.commands["kubernetes"].commands["server"].commands["config"], [], obj=db) self.__check_res(result, "check server IP", show_server_output_1) + def test_set_server_invalid_ip_port(self, get_cmd_module): + (config, show) = get_cmd_module + db = Db() + runner = CliRunner() + + # test invalid IP + result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["ip", "10101011"], obj=db) + assert result.exit_code == 1 + + # test invalid port + result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["port", "10101011"], obj=db) + assert result.exit_code == 1 + + + + def test_set_insecure(self, get_cmd_module): + (config, show) = get_cmd_module + db = Db() + runner = CliRunner() + # set insecure as True & test show - result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["insecure", "on"], obj=db) + result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["insecure", "off"], obj=db) self.__check_res(result, "set server insecure", "") - - result = runner.invoke(show.cli.commands["kubernetes"].commands["server"]) + + result = runner.invoke(show.cli.commands["kubernetes"].commands["server"].commands["config"], [], obj=db) self.__check_res(result, "check server IP", show_server_output_2) - + + + def test_set_disable(self, get_cmd_module): + (config, show) = get_cmd_module + db = Db() + runner = CliRunner() + # set disable as True & test show result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["disable", "on"], obj=db) self.__check_res(result, "set server disable", "") - - result = 
runner.invoke(show.cli.commands["kubernetes"].commands["server"]) + + result = runner.invoke(show.cli.commands["kubernetes"].commands["server"].commands["config"], [], obj=db) self.__check_res(result, "check server IP", show_server_output_3) - + + def test_set_port(self, get_cmd_module): + (config, show) = get_cmd_module + db = Db() + runner = CliRunner() + + # set port to a different value & test show + result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["port", "7777"], obj=db) + self.__check_res(result, "set server port", "") + + result = runner.invoke(show.cli.commands["kubernetes"].commands["server"].commands["config"], [], obj=db) + self.__check_res(result, "check server IP", show_server_output_4) + + + def test_kube_labels(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + + # Check for no labels + result = runner.invoke(show.cli.commands["kubernetes"].commands["labels"]) + self.__check_res(result, "no labels", empty_labels) + + + def test_set_kube_labels(self, get_cmd_module): + (config, show) = get_cmd_module + db = Db() + runner = CliRunner() + + # Add a label + result = runner.invoke(config.config.commands["kubernetes"].commands["label"].commands["add"], ["hwsku", "Force10-S6000"], obj=db) + self.__check_res(result, "set add label", "") + + # Drop a label + result = runner.invoke(config.config.commands["kubernetes"].commands["label"].commands["drop"], ["teamd_enabled"], obj=db) + self.__check_res(result, "set drop label", "") + + result = runner.invoke(show.cli.commands["kubernetes"].commands["labels"], [], obj=db) + self.__check_res(result, "Test labels", non_empty_labels) + @classmethod def teardown_class(cls): diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 06e1f65f8078..74437b197a9a 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -570,72 +570,86 @@ "FEATURE|bgp": { "state": "enabled", "auto_restart": "enabled", - 
"high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "local" }, "FEATURE|database": { "state": "always_enabled", "auto_restart": "always_enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "local" }, "FEATURE|dhcp_relay": { "state": "enabled", "auto_restart": "enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "kube" }, "FEATURE|lldp": { "state": "enabled", "auto_restart": "enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "kube" }, "FEATURE|nat": { "state": "enabled", "auto_restart": "enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "local" }, "FEATURE|pmon": { "state": "enabled", "auto_restart": "enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "kube" }, "FEATURE|radv": { "state": "enabled", "auto_restart": "enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "kube" }, "FEATURE|restapi": { "state": "disabled", "auto_restart": "enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "local" }, "FEATURE|sflow": { "state": "disabled", "auto_restart": "enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "local" }, "FEATURE|snmp": { "state": "enabled", "auto_restart": "enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "kube" }, "FEATURE|swss": { "state": "enabled", "auto_restart": "enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "local" }, "FEATURE|syncd": { "state": "enabled", "auto_restart": "enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "local" }, "FEATURE|teamd": { "state": "enabled", "auto_restart": "enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "local" }, "FEATURE|telemetry": { "state": 
"enabled", "auto_restart": "enabled", - "high_mem_alert": "disabled" + "high_mem_alert": "disabled", + "set_owner": "kube" }, "DEVICE_METADATA|localhost": { "default_bgp_status": "down", @@ -1404,5 +1418,11 @@ }, "DEFAULT_LOSSLESS_BUFFER_PARAMETER|AZURE": { "default_dynamic_th": "0" + }, + "KUBERNETES_MASTER|SERVER": { + "ip": "10.3.157.24", + "insecure": "True", + "disable": "False", + "port": "6443" } } diff --git a/utilities_common/cli.py b/utilities_common/cli.py index 3644f71b10f2..f521a576b83e 100644 --- a/utilities_common/cli.py +++ b/utilities_common/cli.py @@ -537,12 +537,6 @@ def run_command(command, display_cmd=False, ignore_error=False, return_cmd=False sys.exit(rc) -def do_exit(msg): - m = "FATAL failure: {}. Exiting...".format(msg) - _log_msg(syslog.LOG_ERR, True, inspect.stack()[1][1], inspect.stack()[1][2], m) - raise SystemExit(m) - - def json_dump(data): """ Dump data in JSON format