diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
new file mode 100644
index 0000000000..e6ede4c871
--- /dev/null
+++ b/ansible/ansible.cfg
@@ -0,0 +1,234 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# nearly all parameters can be overridden in ansible-playbook
+# or with command line flags. ansible will read ANSIBLE_CONFIG,
+# ansible.cfg in the current working directory, .ansible.cfg in
+# the home directory or /etc/ansible/ansible.cfg, whichever it
+# finds first
+
+[defaults]
+
+# some basic default values...
+
+inventory = /etc/ansible/hosts
+library = library:library/ixia
+remote_tmp = $HOME/.ansible/tmp
+pattern = *
+forks = 5
+poll_interval = 15
+sudo_user = root
+#ask_sudo_pass = True
+#ask_pass = True
+transport = smart
+#remote_port = 22
+module_lang = C
+
+# plays will gather facts by default, which contain information about
+# the remote system.
+#
+# smart - gather by default, but don't regather if already gathered
+# implicit - gather by default, turn off with gather_facts: False
+# explicit - do not gather by default, must say gather_facts: True
+gathering = smart
+
+# additional paths to search for roles in, colon separated
+#roles_path = /etc/ansible/roles
+
+# uncomment this to disable SSH key host checking
+#host_key_checking = False
+host_key_checking = False
+
+# change this for alternative sudo implementations
+sudo_exe = sudo
+
+# what flags to pass to sudo
+#sudo_flags = -H
+
+# SSH timeout
+timeout = 10
+
+# default user to use for playbooks if user is not specified
+# (/usr/bin/ansible will use current user as default)
+#remote_user = root
+
+# logging is off by default unless this path is defined
+# if so defined, consider logrotate
+#log_path = /var/log/ansible.log
+
+# default module name for /usr/bin/ansible
+#module_name = command
+
+# use this shell for commands executed under sudo
+# you may need to change this to bin/bash in rare instances
+# if sudo is constrained
+#executable = /bin/sh
+
+# if inventory variables overlap, does the higher precedence one win
+# or are hash values merged together? The default is 'replace' but
+# this can also be set to 'merge'.
+#hash_behaviour = replace
+
+# list any Jinja2 extensions to enable here:
+#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
+
+# if set, always use this private key file for authentication, same as
+# if passing --private-key to ansible or ansible-playbook
+#private_key_file = /path/to/file
+
+# format of the string {{ ansible_managed }} available within Jinja2
+# templates; it tells users editing a templated file that their edits
+# will be replaced. {file}, {host}, {uid} and any strftime codes are
+# substituted with proper values.
+ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
+
+# by default, ansible-playbook will display "Skipping [host]" if it determines a task
+# should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
+# messages. NOTE: the task header will still be shown regardless of whether or not the
+# task is skipped.
+#display_skipped_hosts = True
+
+# by default (as of 1.3), Ansible will raise errors when attempting to dereference
+# Jinja2 variables that are not set in templates or action lines. Uncomment this line
+# to revert the behavior to pre-1.3.
+#error_on_undefined_vars = False
+
+# by default (as of 1.6), Ansible may display warnings based on the configuration of the
+# system running ansible itself.
This may include warnings about 3rd party packages or +# other conditions that should be resolved if possible. +# to disable these warnings, set the following value to False: +#system_warnings = True + +# by default (as of 1.4), Ansible may display deprecation warnings for language +# features that should no longer be used and will be removed in future versions. +# to disable these warnings, set the following value to False: +#deprecation_warnings = True + +# (as of 1.8), Ansible can optionally warn when usage of the shell and +# command module appear to be simplified by using a default Ansible module +# instead. These warnings can be silenced by adjusting the following +# setting or adding warn=yes or warn=no to the end of the command line +# parameter string. This will for example suggest using the git module +# instead of shelling out to the git command. +# command_warnings = False + + +# set plugin path directories here, separate with colons +action_plugins = plugins/action +# callback_plugins = /usr/share/ansible_plugins/callback_plugins +connection_plugins = plugins/connection +# lookup_plugins = /usr/share/ansible_plugins/lookup_plugins +# vars_plugins = /usr/share/ansible_plugins/vars_plugins +# filter_plugins = /usr/share/ansible_plugins/filter_plugins +callback_whitelist = profile_tasks + +# by default callbacks are not loaded for /bin/ansible, enable this if you +# want, for example, a notification or logging callback to also apply to +# /bin/ansible runs +#bin_ansible_callbacks = False + + +# don't like cows? that's unfortunate. +# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 +#nocows = 1 + +# don't like colors either? +# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 +#nocolor = 1 + +# the CA certificate path used for validating SSL certs. This path +# should exist on the controlling node, not the target nodes +# common locations: +# RHEL/CentOS: /etc/pki/tls/certs/ca-bundle.crt +# Fedora : /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem +# Ubuntu : /usr/share/ca-certificates/cacert.org/cacert.org.crt +#ca_file_path = + +# the http user-agent string to use when fetching urls. Some web server +# operators block the default urllib user agent as it is frequently used +# by malicious attacks/scripts, so we set it to something unique to +# avoid issues. +#http_user_agent = ansible-agent + +# if set to a persistent type (not 'memory', for example 'redis') fact values +# from previous runs in Ansible will be stored. This may be useful when +# wanting to use, for example, IP information from one group of servers +# without having to talk to them in the same playbook run to get their +# current IP information. +fact_caching = memory + + +# retry files +#retry_files_enabled = False +#retry_files_save_path = ~/.ansible-retry + +[privilege_escalation] +#become=True +become_method='sudo' +#become_user='root' +#become_ask_pass=False + +[paramiko_connection] + +# uncomment this line to cause the paramiko connection plugin to not record new host +# keys encountered. Increases performance on new host additions. Setting works independently of the +# host key checking setting above. +#record_host_keys=False + +# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this +# line to disable this behaviour. 
+#pty=False
+
+[ssh_connection]
+
+# ssh arguments to use
+# Leaving off ControlPersist will result in poor performance, so use
+# paramiko on older platforms rather than removing it
+ssh_args = -o ControlMaster=auto -o ControlPersist=120s -o UserKnownHostsFile=/dev/null
+
+
+# The path to use for the ControlPath sockets. This defaults to
+# "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with
+# very long hostnames or very long path names (caused by long user names or
+# deeply nested home directories) this can exceed the character limit on
+# file socket names (108 characters for most platforms). In that case, you
+# may wish to shorten the string below.
+#
+# Example:
+# control_path = %(directory)s/%%h-%%r
+#control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r
+
+# Enabling pipelining reduces the number of SSH operations required to
+# execute a module on the remote server. This can result in a significant
+# performance improvement when enabled, however when using "sudo:" you must
+# first disable 'requiretty' in /etc/sudoers
+#
+# By default, this option is disabled to preserve compatibility with
+# sudoers configurations that have requiretty (the default on many distros).
+#
+pipelining = True
+
+# if True, make ansible use scp if the connection type is ssh
+# (default is sftp)
+#scp_if_ssh = True
+
+[accelerate]
+accelerate_port = 5099
+accelerate_timeout = 30
+accelerate_connect_timeout = 5.0
+
+# The daemon timeout is measured in minutes. This time is measured
+# from the last activity to the accelerate daemon.
+accelerate_daemon_timeout = 30
+
+# If set to yes, accelerate_multi_key will allow multiple
+# private keys to be uploaded to it, though each user must
+# have access to the system via SSH to add a new key. The default
+# is "no".
+#accelerate_multi_key = yes
+
+[selinux]
+# file systems that require special treatment when dealing with security context
+# the default behaviour that copies the existing context or uses the user default
+# needs to be changed to use the file system dependent context.
+#special_context_filesystems=nfs,vboxsf,fuse
diff --git a/ansible/deploy_sonic.yml b/ansible/deploy_sonic.yml
new file mode 100644
index 0000000000..ccab9a2b73
--- /dev/null
+++ b/ansible/deploy_sonic.yml
@@ -0,0 +1,11 @@
+---
+# This playbook deploys SONiC configuration to all devices in the sonic group.
+
+- hosts: sonic
+  gather_facts: no
+  vars_files:
+    - vars/docker_registry.yml
+  roles:
+    - role: sonic-common
+    - { role: sonicv1 , when: "sonic_version == 'v1'" }
+    - { role: sonicv2 , when: "sonic_version == 'v2'" }
diff --git a/ansible/group_vars/sonic/vars b/ansible/group_vars/sonic/vars
new file mode 100644
index 0000000000..216dbc09b6
--- /dev/null
+++ b/ansible/group_vars/sonic/vars
@@ -0,0 +1,49 @@
+ansible_ssh_user: acsadmin
+ansible_ssh_pass: password
+ansible_become_pass: password
+acsadmin_password: password
+acsadmin_initial_password: 123456
+
+sonic_version: "v2"
+
+broadcom_hwskus: [ 'ACS-S6000', 'Force10-S6000' ]
+
+mellanox_hwskus: [ 'ACS-MSN2700' ]
+
+sensor_hwskus: [ "ACS-S6000", "ACS-MSN2700", "Force10-S6000" ]
+
+## Note:
+## Docker volumes should be a list rather than a dict. However, to keep the
+## code DRY we define dicts so they can be merged, then convert the merged
+## dict back to a list.
+## Ref:
+## 1. http://stackoverflow.com/questions/9254178/is-there-yaml-syntax-for-sharing-part-of-a-list-or-map
+## 2. https://groups.google.com/forum/#!msg/ansible-project/M-FNUK9Wz98/gH63Ka4hDAAJ
+
+syncd_docker_volumes_dict:
+  "/host/machine.conf:/etc/machine.conf":
+
+syncd_docker_volumes: "{{ syncd_docker_volumes_dict.keys() }}"
+
+orchagent_docker_volumes_dict:
+  "/etc/ssw/:/etc/ssw/:ro":
+  "/etc/network/interfaces:/etc/network/interfaces:ro":
+  "/etc/network/interfaces.d/:/etc/network/interfaces.d/:ro":
+  "/host/machine.conf:/host/machine.conf":
+
+orchagent_docker_volumes: "{{ orchagent_docker_volumes_dict.keys() }}"
+
+apt_repo_ip: "10.0.0.5"
+
+# ntp variables
+ntp_servers: ['10.0.0.1', '10.0.0.2']
+
+# syslog variables
+syslog_servers: ['10.0.0.5', '10.0.0.6']
+
+# dns variables
+dns_servers: ['10.0.0.5', '10.0.0.6']
+
+# snmp variables
+snmp_rocommunity: public
+snmp_location: testlab
+
diff --git a/ansible/group_vars/sonic_latest/package_versions.yml b/ansible/group_vars/sonic_latest/package_versions.yml
new file mode 100644
index 0000000000..e843ee8f7b
--- /dev/null
+++ b/ansible/group_vars/sonic_latest/package_versions.yml
@@ -0,0 +1,20 @@
+linux_image: { name: linux-image-3.16.0-4-amd64 , version: 3.16.7-ckt11-2+acs8u2 }
+platform_modules_s6000: { name: platform-modules-s6000 , version: "*" }
+opennslv2: { name: opennsl-modules-3.16.0-4-amd64 , version: "6.4.10*" }
+version_sx_kernel: "*"
+version_mlnxsdk: "*"
+version_iproute2_mlnx: "1.mlnx*"
+version_docker_engine: 1.11.1-0~jessie
+version_sonic_cli: "*"
+
+image_id_database: docker-database:latest
+image_id_snmp: docker-snmp:latest
+image_id_lldp: docker-lldp:latest
+image_id_platform_monitor: docker-platform-monitor:latest
+
+image_id_syncd: docker-syncd:latest
+image_id_syncd_mlnx: docker-syncd-mlnx:latest
+image_id_syncd_mlnx_rpc: docker-syncd-mlnx-rpc:latest
+image_id_orchagent: docker-orchagent:latest
+image_id_orchagent_mlnx: docker-orchagent-mlnx:latest
+image_id_fpm: docker-fpm:latest
diff --git a/ansible/inventory b/ansible/inventory
new file mode 100644
index 0000000000..7c9df6f67b
--- /dev/null
+++ b/ansible/inventory
@@ -0,0 +1,12 @@
+[sonic_stable]
+
+[sonic_latest]
+switch1 ansible_host=10.0.0.100 sonic_version=v2 sonic_hwsku=Force10-S6000
+switch2 ansible_host=10.0.0.101 sonic_version=v2 sonic_hwsku=ACS-MSN2700
+
+[sonic:children]
+sonic_stable
+sonic_latest
+
+[ptf]
+ptf-1 ansible_host=10.0.0.200 ansible_ssh_user=root ansible_ssh_pass=password
diff --git a/ansible/library/docker.py b/ansible/library/docker.py
new file mode 100644
index 0000000000..545bbd592e
--- /dev/null
+++ b/ansible/library/docker.py
@@ -0,0 +1,1775 @@
+#!/usr/bin/python
+
+# (c) 2013, Cove Schneider
+# (c) 2014, Joshua Conner
+# (c) 2014, Pavel Antonov
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+DOCUMENTATION = '''
+---
+module: docker
+version_added: "1.4"
+short_description: manage docker containers
+description:
+  - Manage the life cycle of docker containers.
+options: + count: + description: + - Number of matching containers that should be in the desired state. + default: 1 + image: + description: + - Container image used to match and launch containers. + required: true + pull: + description: + - Control when container images are updated from the C(docker_url) registry. + If "missing," images will be pulled only when missing from the host; + if '"always," the registry will be checked for a newer version of the + image' each time the task executes. + default: missing + choices: [ "missing", "always" ] + version_added: "1.9" + command: + description: + - Command used to match and launch containers. + default: null + name: + description: + - Name used to match and uniquely name launched containers. Explicit names + are used to uniquely identify a single container or to link among + containers. Mutually exclusive with a "count" other than "1". + default: null + version_added: "1.5" + ports: + description: + - "List containing private to public port mapping specification. + Use docker 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)' + where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is - a host interface. + The container ports need to be exposed either in the Dockerfile or via the C(expose) option." + default: null + version_added: "1.5" + expose: + description: + - List of additional container ports to expose for port mappings or links. + If the port is already exposed using EXPOSE in a Dockerfile, you don't + need to expose it again. + default: null + version_added: "1.5" + publish_all_ports: + description: + - Publish all exposed ports to the host interfaces. + default: false + version_added: "1.5" + volumes: + description: + - List of volumes to mount within the container using docker CLI-style + - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw" or "ro".' + default: null + volumes_from: + description: + - List of names of containers to mount volumes from. + default: null + links: + description: + - List of other containers to link within this container with an optional + - 'alias. Use docker CLI-style syntax: C(redis:myredis).' + default: null + version_added: "1.5" + log_driver: + description: + - You can specify a different logging driver for the container than for the daemon. + "json-file" Default logging driver for Docker. Writes JSON messages to file. + docker logs command is available only for this logging driver. + "none" disables any logging for the container. + "syslog" Syslog logging driver for Docker. Writes log messages to syslog. + docker logs command is not available for this logging driver. + "journald" Journald logging driver for Docker. Writes log messages to "journald". + "gelf" Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint likeGraylog or Logstash. + "fluentd" Fluentd logging driver for Docker. Writes log messages to "fluentd" (forward input). + If not defined explicitly, the Docker daemon's default ("json-file") will apply. + Requires docker >= 1.6.0. + required: false + default: json-file + choices: + - json-file + - none + - syslog + - journald + - gelf + - fluentd + version_added: "2.0" + log_opt: + description: + - Additional options to pass to the logging driver selected above. See Docker `log-driver + ` documentation for more information. + Requires docker >=1.7.0. 
+ required: false + default: null + version_added: "2.0" + memory_limit: + description: + - RAM allocated to the container as a number of bytes or as a human-readable + string like "512MB". Leave as "0" to specify no limit. + default: 0 + docker_url: + description: + - URL of the host running the docker daemon. This will default to the env + var DOCKER_HOST if unspecified. + default: ${DOCKER_HOST} or unix://var/run/docker.sock + use_tls: + description: + - Whether to use tls to connect to the docker server. "no" means not to + use tls (and ignore any other tls related parameters). "encrypt" means + to use tls to encrypt the connection to the server. "verify" means to + also verify that the server's certificate is valid for the server + (this both verifies the certificate against the CA and that the + certificate was issued for that host. If this is unspecified, tls will + only be used if one of the other tls options require it. + choices: [ "no", "encrypt", "verify" ] + version_added: "1.9" + tls_client_cert: + description: + - Path to the PEM-encoded certificate used to authenticate docker client. + If specified tls_client_key must be valid + default: ${DOCKER_CERT_PATH}/cert.pem + version_added: "1.9" + tls_client_key: + description: + - Path to the PEM-encoded key used to authenticate docker client. If + specified tls_client_cert must be valid + default: ${DOCKER_CERT_PATH}/key.pem + version_added: "1.9" + tls_ca_cert: + description: + - Path to a PEM-encoded certificate authority to secure the Docker connection. + This has no effect if use_tls is encrypt. + default: ${DOCKER_CERT_PATH}/ca.pem + version_added: "1.9" + tls_hostname: + description: + - A hostname to check matches what's supplied in the docker server's + certificate. If unspecified, the hostname is taken from the docker_url. + default: Taken from docker_url + version_added: "1.9" + docker_api_version: + description: + - Remote API version to use. This defaults to the current default as + specified by docker-py. + default: docker-py default remote API version + version_added: "1.8" + docker_user: + description: + - Username or UID to use within the container + required: false + default: null + version_added: "2.0" + username: + description: + - Remote API username. + default: null + password: + description: + - Remote API password. + default: null + email: + description: + - Remote API email. + default: null + hostname: + description: + - Container hostname. + default: null + domainname: + description: + - Container domain name. + default: null + env: + description: + - Pass a dict of environment variables to the container. + default: null + dns: + description: + - List of custom DNS servers for the container. + required: false + default: null + detach: + description: + - Enable detached mode to leave the container running in background. If + disabled, fail unless the process exits cleanly. + default: true + signal: + version_added: "2.0" + description: + - With the state "killed", you can alter the signal sent to the + container. + required: false + default: KILL + state: + description: + - Assert the container's desired state. "present" only asserts that the + matching containers exist. "started" asserts that the matching + containers both exist and are running, but takes no action if any + configuration has changed. "reloaded" (added in Ansible 1.9) asserts that all matching + containers are running and restarts any that have any images or + configuration out of date. 
"restarted" unconditionally restarts (or + starts) the matching containers. "stopped" and '"killed" stop and kill + all matching containers. "absent" stops and then' removes any matching + containers. + required: false + default: started + choices: + - present + - started + - reloaded + - restarted + - stopped + - killed + - absent + privileged: + description: + - Whether the container should run in privileged mode or not. + default: false + lxc_conf: + description: + - LXC configuration parameters, such as C(lxc.aa_profile:unconfined). + default: null + stdin_open: + description: + - Keep stdin open after a container is launched. + default: false + version_added: "1.6" + tty: + description: + - Allocate a pseudo-tty within the container. + default: false + version_added: "1.6" + net: + description: + - 'Network mode for the launched container: bridge, none, container:' + - or host. Requires docker >= 0.11. + default: false + version_added: "1.8" + pid: + description: + - Set the PID namespace mode for the container (currently only supports 'host'). Requires docker-py >= 1.0.0 and docker >= 1.5.0 + required: false + default: None + aliases: [] + version_added: "1.9" + registry: + description: + - Remote registry URL to pull images from. + default: DockerHub + aliases: [] + version_added: "1.8" + read_only: + description: + - Mount the container's root filesystem as read only + default: null + aliases: [] + version_added: "2.0" + restart_policy: + description: + - Container restart policy. + choices: ["no", "on-failure", "always"] + default: null + version_added: "1.9" + restart_policy_retry: + description: + - Maximum number of times to restart a container. Leave as "0" for unlimited + retries. + default: 0 + version_added: "1.9" + extra_hosts: + version_added: "2.0" + description: + - Dict of custom host-to-IP mappings to be defined in the container + insecure_registry: + description: + - Use insecure private registry by HTTP instead of HTTPS. Needed for + docker-py >= 0.5.0. + default: false + version_added: "1.9" + cpu_set: + description: + - CPUs in which to allow execution. Requires docker-py >= 0.6.0. + required: false + default: null + version_added: "2.0" + cap_add: + description: + - Add capabilities for the container. Requires docker-py >= 0.5.0. + required: false + default: false + version_added: "2.0" + cap_drop: + description: + - Drop capabilities for the container. Requires docker-py >= 0.5.0. + required: false + default: false + aliases: [] + version_added: "2.0" + stop_timeout: + description: + - How many seconds to wait for the container to stop before killing it. + required: false + default: 10 + version_added: "2.0" +author: + - "Cove Schneider (@cove)" + - "Joshua Conner (@joshuaconner)" + - "Pavel Antonov (@softzilla)" + - "Ash Wilson (@smashwilson)" + - "Thomas Steinbach (@ThomasSteinbach)" + - "Philippe Jandot (@zfil)" +requirements: + - "python >= 2.6" + - "docker-py >= 0.3.0" + - "The docker server >= 0.10.0" +''' + +EXAMPLES = ''' +# Containers are matched either by name (if provided) or by an exact match of +# the image they were launched with and the command they're running. The module +# can accept either a name to target a container uniquely, or a count to operate +# on multiple containers at once when it makes sense to do so. + +# Ensure that a data container with the name "mydata" exists. If no container +# by this name exists, it will be created, but not started. 
+ +- name: data container + docker: + name: mydata + image: busybox + state: present + volumes: + - /data + +# Ensure that a Redis server is running, using the volume from the data +# container. Expose the default Redis port. + +- name: redis container + docker: + name: myredis + image: redis + command: redis-server --appendonly yes + state: started + expose: + - 6379 + volumes_from: + - mydata + +# Ensure that a container of your application server is running. This will: +# - pull the latest version of your application image from DockerHub. +# - ensure that a container is running with the specified name and exact image. +# If any configuration options have changed, the existing container will be +# stopped and removed, and a new one will be launched in its place. +# - link this container to the existing redis container launched above with +# an alias. +# - bind TCP port 9000 within the container to port 8080 on all interfaces +# on the host. +# - bind UDP port 9001 within the container to port 8081 on the host, only +# listening on localhost. +# - set the environment variable SECRET_KEY to "ssssh". + +- name: application container + docker: + name: myapplication + image: someuser/appimage + state: reloaded + pull: always + links: + - "myredis:aliasedredis" + ports: + - "8080:9000" + - "127.0.0.1:8081:9001/udp" + env: + SECRET_KEY: ssssh + +# Ensure that exactly five containers of another server are running with this +# exact image and command. If fewer than five are running, more will be launched; +# if more are running, the excess will be stopped. + +- name: load-balanced containers + docker: + state: reloaded + count: 5 + image: someuser/anotherappimage + command: sleep 1d + +# Unconditionally restart a service container. This may be useful within a +# handler, for example. + +- name: application service + docker: + name: myservice + image: someuser/serviceimage + state: restarted + +# Stop all containers running the specified image. + +- name: obsolete container + docker: + image: someuser/oldandbusted + state: stopped + +# Stop and remove a container with the specified name. 
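+# "absent" stops any matching containers that are still running, then removes
+# them. With a name given, matching is by name alone; the image parameter is
+# still required by the module but does not narrow the match here.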
+ +- name: obsolete container + docker: + name: ohno + image: someuser/oldandbusted + state: absent + +# Example Syslogging Output + +- name: myservice container + docker: + name: myservice + image: someservice/someimage + state: reloaded + log_driver: syslog + log_opt: + syslog-address: tcp://my-syslog-server:514 + syslog-facility: daemon + syslog-tag: myservice +''' + +HAS_DOCKER_PY = True +DEFAULT_DOCKER_API_VERSION = None + +import sys +import json +import os +import shlex +from urlparse import urlparse +try: + import docker.client + import docker.utils + import docker.errors + from requests.exceptions import RequestException +except ImportError: + HAS_DOCKER_PY = False + +if HAS_DOCKER_PY: + try: + from docker.errors import APIError as DockerAPIError + except ImportError: + from docker.client import APIError as DockerAPIError + try: + # docker-py 1.2+ + import docker.constants + DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION + except (ImportError, AttributeError): + # docker-py less than 1.2 + DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION + + +def _human_to_bytes(number): + suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] + + if isinstance(number, int): + return number + if number[-1] == suffixes[0] and number[-2].isdigit(): + return number[:-1] + + i = 1 + for each in suffixes[1:]: + if number[-len(each):] == suffixes[i]: + return int(number[:-len(each)]) * (1024 ** i) + i = i + 1 + + raise ValueError('Could not convert %s to integer' % (number,)) + + +def _ansible_facts(container_list): + return {"docker_containers": container_list} + + +def _docker_id_quirk(inspect): + # XXX: some quirk in docker + if 'ID' in inspect: + inspect['Id'] = inspect['ID'] + del inspect['ID'] + return inspect + + +def get_split_image_tag(image): + # If image contains a host or org name, omit that from our check + if '/' in image: + registry, resource = image.rsplit('/', 1) + else: + registry, resource = None, image + + # now we can determine if image has a tag or a digest + tag = "latest" + basename = resource + for s in ['@',':']: + if s in resource: + basename, tag = resource.split(s, 1) + break + + if registry: + fullname = '/'.join((registry, basename)) + else: + fullname = basename + + return fullname, tag + +def normalize_image(image): + """ + Normalize a Docker image name to include the implied :latest tag. + """ + + return ":".join(get_split_image_tag(image)) + + +def is_running(container): + '''Return True if an inspected container is in a state we consider "running."''' + + return container['State']['Running'] == True and not container['State'].get('Ghost', False) + + +def get_docker_py_versioninfo(): + if hasattr(docker, '__version__'): + # a '__version__' attribute was added to the module but not until + # after 0.3.0 was pushed to pypi. If it's there, use it. + version = [] + for part in docker.__version__.split('.'): + try: + version.append(int(part)) + except ValueError: + for idx, char in enumerate(part): + if not char.isdigit(): + nondigit = part[idx:] + digit = part[:idx] + break + if digit: + version.append(int(digit)) + if nondigit: + version.append(nondigit) + elif hasattr(docker.Client, '_get_raw_response_socket'): + # HACK: if '__version__' isn't there, we check for the existence of + # `_get_raw_response_socket` in the docker.Client class, which was + # added in 0.3.0 + version = (0, 3, 0) + else: + # This is untrue but this module does not function with a version less + # than 0.3.0 so it's okay to lie here. 
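+        # A bare (0,) still sorts below the required (0, 3, 0) under tuple
+        # comparison, so check_dependencies() rejects such installs cleanly.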
+ version = (0,) + + return tuple(version) + + +def check_dependencies(module): + """ + Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a + helpful error message if it isn't. + """ + if not HAS_DOCKER_PY: + module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.") + else: + versioninfo = get_docker_py_versioninfo() + if versioninfo < (0, 3, 0): + module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.") + + +class DockerManager(object): + + counters = dict( + created=0, started=0, stopped=0, killed=0, removed=0, restarted=0, pulled=0 + ) + reload_reasons = [] + _capabilities = set() + + # Map optional parameters to minimum (docker-py version, server APIVersion) + # docker-py version is a tuple of ints because we have to compare them + # server APIVersion is passed to a docker-py function that takes strings + _cap_ver_req = { + 'dns': ((0, 3, 0), '1.10'), + 'volumes_from': ((0, 3, 0), '1.10'), + 'restart_policy': ((0, 5, 0), '1.14'), + 'extra_hosts': ((0, 7, 0), '1.3.1'), + 'pid': ((1, 0, 0), '1.17'), + 'log_driver': ((1, 2, 0), '1.18'), + 'log_opt': ((1, 2, 0), '1.18'), + 'host_config': ((0, 7, 0), '1.15'), + 'cpu_set': ((0, 6, 0), '1.14'), + 'cap_add': ((0, 5, 0), '1.14'), + 'cap_drop': ((0, 5, 0), '1.14'), + 'read_only': ((1, 0, 0), '1.17'), + 'stop_timeout': ((0, 5, 0), '1.0'), + # Clientside only + 'insecure_registry': ((0, 5, 0), '0.0') + } + + def __init__(self, module): + self.module = module + + self.binds = None + self.volumes = None + if self.module.params.get('volumes'): + self.binds = {} + self.volumes = [] + vols = self.module.params.get('volumes') + for vol in vols: + parts = vol.split(":") + # regular volume + if len(parts) == 1: + self.volumes.append(parts[0]) + # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) + elif 2 <= len(parts) <= 3: + # default to read-write + ro = False + # with supplied bind mode + if len(parts) == 3: + if parts[2] not in ['ro', 'rw']: + self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') + else: + ro = parts[2] == 'ro' + self.binds[parts[0]] = {'bind': parts[1], 'ro': ro } + else: + self.module.fail_json(msg='volumes support 1 to 3 arguments') + + self.lxc_conf = None + if self.module.params.get('lxc_conf'): + self.lxc_conf = [] + options = self.module.params.get('lxc_conf') + for option in options: + parts = option.split(':', 1) + self.lxc_conf.append({"Key": parts[0], "Value": parts[1]}) + + self.exposed_ports = None + if self.module.params.get('expose'): + self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose')) + + self.port_bindings = None + if self.module.params.get('ports'): + self.port_bindings = self.get_port_bindings(self.module.params.get('ports')) + + self.links = None + if self.module.params.get('links'): + self.links = self.get_links(self.module.params.get('links')) + + self.env = self.module.params.get('env', None) + + # Connect to the docker server using any configured host and TLS settings. 
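+        # Explicit module parameters win; the DOCKER_HOST, DOCKER_TLS_VERIFY,
+        # DOCKER_CERT_PATH and DOCKER_TLS_HOSTNAME environment variables are
+        # consulted only as fallbacks, mirroring the docker CLI's behaviour.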
+ + env_host = os.getenv('DOCKER_HOST') + env_docker_verify = os.getenv('DOCKER_TLS_VERIFY') + env_cert_path = os.getenv('DOCKER_CERT_PATH') + env_docker_hostname = os.getenv('DOCKER_TLS_HOSTNAME') + + docker_url = module.params.get('docker_url') + if not docker_url: + if env_host: + docker_url = env_host + else: + docker_url = 'unix://var/run/docker.sock' + + docker_api_version = module.params.get('docker_api_version') + + tls_client_cert = module.params.get('tls_client_cert', None) + if not tls_client_cert and env_cert_path: + tls_client_cert = os.path.join(env_cert_path, 'cert.pem') + + tls_client_key = module.params.get('tls_client_key', None) + if not tls_client_key and env_cert_path: + tls_client_key = os.path.join(env_cert_path, 'key.pem') + + tls_ca_cert = module.params.get('tls_ca_cert') + if not tls_ca_cert and env_cert_path: + tls_ca_cert = os.path.join(env_cert_path, 'ca.pem') + + tls_hostname = module.params.get('tls_hostname') + if tls_hostname is None: + if env_docker_hostname: + tls_hostname = env_docker_hostname + else: + parsed_url = urlparse(docker_url) + if ':' in parsed_url.netloc: + tls_hostname = parsed_url.netloc[:parsed_url.netloc.rindex(':')] + else: + tls_hostname = parsed_url + if not tls_hostname: + tls_hostname = True + + # use_tls can be one of four values: + # no: Do not use tls + # encrypt: Use tls. We may do client auth. We will not verify the server + # verify: Use tls. We may do client auth. We will verify the server + # None: Only use tls if the parameters for client auth were specified + # or tls_ca_cert (which requests verifying the server with + # a specific ca certificate) + use_tls = module.params.get('use_tls') + if use_tls is None and env_docker_verify is not None: + use_tls = 'verify' + + tls_config = None + if use_tls != 'no': + params = {} + + # Setup client auth + if tls_client_cert and tls_client_key: + params['client_cert'] = (tls_client_cert, tls_client_key) + + # We're allowed to verify the connection to the server + if use_tls == 'verify' or (use_tls is None and tls_ca_cert): + if tls_ca_cert: + params['ca_cert'] = tls_ca_cert + params['verify'] = True + params['assert_hostname'] = tls_hostname + else: + params['verify'] = True + params['assert_hostname'] = tls_hostname + elif use_tls == 'encrypt': + params['verify'] = False + + if params: + # See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296 + docker_url = docker_url.replace('tcp://', 'https://') + tls_config = docker.tls.TLSConfig(**params) + + self.client = docker.Client(base_url=docker_url, + version=docker_api_version, + tls=tls_config) + + self.docker_py_versioninfo = get_docker_py_versioninfo() + + def _check_capabilities(self): + """ + Create a list of available capabilities + """ + api_version = self.client.version()['ApiVersion'] + for cap, req_vers in self._cap_ver_req.items(): + if (self.docker_py_versioninfo >= req_vers[0] and + docker.utils.compare_version(req_vers[1], api_version) >= 0): + self._capabilities.add(cap) + + def ensure_capability(self, capability, fail=True): + """ + Some of the functionality this ansible module implements are only + available in newer versions of docker. Ensure that the capability + is available here. + + If fail is set to False then return True or False depending on whether + we have the capability. Otherwise, simply fail and exit the module if + we lack the capability. 
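+
+        For example, get_start_params() calls ensure_capability('dns') before
+        forwarding a custom DNS list, while get_differing_containers() passes
+        False for fail when probing for 'log_driver' support so it can skip
+        that comparison instead of aborting the run.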
+ """ + if not self._capabilities: + self._check_capabilities() + + if capability in self._capabilities: + return True + + if not fail: + return False + + api_version = self.client.version()['ApiVersion'] + self.module.fail_json(msg='Specifying the `%s` parameter requires' + ' docker-py: %s, docker server apiversion %s; found' + ' docker-py: %s, server: %s' % ( + capability, + '.'.join(map(str, self._cap_ver_req[capability][0])), + self._cap_ver_req[capability][1], + '.'.join(map(str, self.docker_py_versioninfo)), + api_version)) + + def get_links(self, links): + """ + Parse the links passed, if a link is specified without an alias then just create the alias of the same name as the link + """ + processed_links = {} + + for link in links: + parsed_link = link.split(':', 1) + if(len(parsed_link) == 2): + processed_links[parsed_link[0]] = parsed_link[1] + else: + processed_links[parsed_link[0]] = parsed_link[0] + + return processed_links + + def get_exposed_ports(self, expose_list): + """ + Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax. + """ + if expose_list: + exposed = [] + for port in expose_list: + port = str(port).strip() + if port.endswith('/tcp') or port.endswith('/udp'): + port_with_proto = tuple(port.split('/')) + else: + # assume tcp protocol if not specified + port_with_proto = (port, 'tcp') + exposed.append(port_with_proto) + return exposed + else: + return None + + def get_start_params(self): + """ + Create start params + """ + params = { + 'lxc_conf': self.lxc_conf, + 'binds': self.binds, + 'port_bindings': self.port_bindings, + 'publish_all_ports': self.module.params.get('publish_all_ports'), + 'privileged': self.module.params.get('privileged'), + 'links': self.links, + 'network_mode': self.module.params.get('net'), + } + + optionals = {} + for optional_param in ('dns', 'volumes_from', 'restart_policy', + 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', + 'cap_add', 'cap_drop', 'read_only', 'log_opt'): + optionals[optional_param] = self.module.params.get(optional_param) + + if optionals['dns'] is not None: + self.ensure_capability('dns') + params['dns'] = optionals['dns'] + + if optionals['volumes_from'] is not None: + self.ensure_capability('volumes_from') + params['volumes_from'] = optionals['volumes_from'] + + if optionals['restart_policy'] is not None: + self.ensure_capability('restart_policy') + params['restart_policy'] = { 'Name': optionals['restart_policy'] } + if params['restart_policy']['Name'] == 'on-failure': + params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] + + # docker_py only accepts 'host' or None + if 'pid' in optionals and not optionals['pid']: + optionals['pid'] = None + + if optionals['pid'] is not None: + self.ensure_capability('pid') + params['pid_mode'] = optionals['pid'] + + if optionals['extra_hosts'] is not None: + self.ensure_capability('extra_hosts') + params['extra_hosts'] = optionals['extra_hosts'] + + if optionals['log_driver'] is not None: + self.ensure_capability('log_driver') + log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON) + if optionals['log_opt'] is not None: + for k, v in optionals['log_opt'].iteritems(): + log_config.set_config_value(k, v) + log_config.type = optionals['log_driver'] + params['log_config'] = log_config + + if optionals['cap_add'] is not None: + self.ensure_capability('cap_add') + params['cap_add'] = optionals['cap_add'] + + if optionals['cap_drop'] is not None: + 
self.ensure_capability('cap_drop') + params['cap_drop'] = optionals['cap_drop'] + + if optionals['read_only'] is not None: + self.ensure_capability('read_only') + params['read_only'] = optionals['read_only'] + + return params + + def create_host_config(self): + """ + Create HostConfig object + """ + params = self.get_start_params() + return docker.utils.create_host_config(**params) + + def get_port_bindings(self, ports): + """ + Parse the `ports` string into a port bindings dict for the `start_container` call. + """ + binds = {} + for port in ports: + # ports could potentially be an array like [80, 443], so we make sure they're strings + # before splitting + parts = str(port).split(':') + container_port = parts[-1] + if '/' not in container_port: + container_port = int(parts[-1]) + + p_len = len(parts) + if p_len == 1: + # Bind `container_port` of the container to a dynamically + # allocated TCP port on all available interfaces of the host + # machine. + bind = ('0.0.0.0',) + elif p_len == 2: + # Bind `container_port` of the container to port `parts[0]` on + # all available interfaces of the host machine. + bind = ('0.0.0.0', int(parts[0])) + elif p_len == 3: + # Bind `container_port` of the container to port `parts[1]` on + # IP `parts[0]` of the host machine. If `parts[1]` empty bind + # to a dynamically allocated port of IP `parts[0]`. + bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],) + + if container_port in binds: + old_bind = binds[container_port] + if isinstance(old_bind, list): + # append to list if it already exists + old_bind.append(bind) + else: + # otherwise create list that contains the old and new binds + binds[container_port] = [binds[container_port], bind] + else: + binds[container_port] = bind + + return binds + + def get_summary_message(self): + ''' + Generate a message that briefly describes the actions taken by this + task, in English. + ''' + + parts = [] + for k, v in self.counters.iteritems(): + if v == 0: + continue + + if v == 1: + plural = "" + else: + plural = "s" + parts.append("%s %d container%s" % (k, v, plural)) + + if parts: + return ", ".join(parts) + "." + else: + return "No action taken." + + def get_reload_reason_message(self): + ''' + Generate a message describing why any reloaded containers were reloaded. 
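+
+        Each reason is one of the short strings collected by
+        get_differing_containers(), e.g. "tty (False => True)".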
+ ''' + + if self.reload_reasons: + return ", ".join(self.reload_reasons) + else: + return None + + def get_summary_counters_msg(self): + msg = "" + for k, v in self.counters.iteritems(): + msg = msg + "%s %d " % (k, v) + + return msg + + def increment_counter(self, name): + self.counters[name] = self.counters[name] + 1 + + def has_changed(self): + for k, v in self.counters.iteritems(): + if v > 0: + return True + + return False + + def get_inspect_image(self): + try: + return self.client.inspect_image(self.module.params.get('image')) + except DockerAPIError as e: + if e.response.status_code == 404: + return None + else: + raise e + + def get_image_repo_tags(self): + image, tag = get_split_image_tag(self.module.params.get('image')) + if tag is None: + tag = 'latest' + resource = '%s:%s' % (image, tag) + + for image in self.client.images(name=image): + # If image is pulled by digest, RepoTags may be None + repo_tags = image.get('RepoTags', None) + if repo_tags is not None and resource in repo_tags: + return repo_tags + repo_digests = image.get('RepoDigests', None) + if repo_digests is not None and resource in repo_digests: + return repo_digests + return [] + + def get_inspect_containers(self, containers): + inspect = [] + for i in containers: + details = self.client.inspect_container(i['Id']) + details = _docker_id_quirk(details) + inspect.append(details) + + return inspect + + def get_differing_containers(self): + """ + Inspect all matching, running containers, and return those that were + started with parameters that differ from the ones that are provided + during this module run. A list containing the differing + containers will be returned, and a short string describing the specific + difference encountered in each container will be appended to + reload_reasons. + + This generates the set of containers that need to be stopped and + started with new parameters with state=reloaded. + """ + + running = self.get_running_containers() + current = self.get_inspect_containers(running) + + #Get API version + api_version = self.client.version()['ApiVersion'] + + image = self.get_inspect_image() + if image is None: + # The image isn't present. Assume that we're about to pull a new + # tag and *everything* will be restarted. + # + # This will give false positives if you untag an image on the host + # and there's nothing more to pull. + return current + + differing = [] + + for container in current: + + # IMAGE + # Compare the image by ID rather than name, so that containers + # will be restarted when new versions of an existing image are + # pulled. 
+ if container['Image'] != image['Id']: + self.reload_reasons.append('image ({0} => {1})'.format(container['Image'], image['Id'])) + differing.append(container) + continue + + # COMMAND + + expected_command = self.module.params.get('command') + if expected_command: + expected_command = shlex.split(expected_command) + actual_command = container["Config"]["Cmd"] + + if actual_command != expected_command: + self.reload_reasons.append('command ({0} => {1})'.format(actual_command, expected_command)) + differing.append(container) + continue + + # EXPOSED PORTS + expected_exposed_ports = set((image['ContainerConfig'].get('ExposedPorts') or {}).keys()) + for p in (self.exposed_ports or []): + expected_exposed_ports.add("/".join(p)) + + actually_exposed_ports = set((container["Config"].get("ExposedPorts") or {}).keys()) + + if actually_exposed_ports != expected_exposed_ports: + self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports)) + differing.append(container) + continue + + # VOLUMES + + expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys()) + if self.volumes: + expected_volume_keys.update(self.volumes) + + actual_volume_keys = set((container['Config']['Volumes'] or {}).keys()) + + if actual_volume_keys != expected_volume_keys: + self.reload_reasons.append('volumes ({0} => {1})'.format(actual_volume_keys, expected_volume_keys)) + differing.append(container) + continue + + # MEM_LIMIT + + try: + expected_mem = _human_to_bytes(self.module.params.get('memory_limit')) + except ValueError as e: + self.module.fail_json(msg=str(e)) + + #For v1.19 API and above use HostConfig, otherwise use Config + if docker.utils.compare_version('1.19', api_version) >= 0: + actual_mem = container['HostConfig']['Memory'] + else: + actual_mem = container['Config']['Memory'] + + if expected_mem and actual_mem != expected_mem: + self.reload_reasons.append('memory ({0} => {1})'.format(actual_mem, expected_mem)) + differing.append(container) + continue + + # ENVIRONMENT + # actual_env is likely to include environment variables injected by + # the Dockerfile. + + expected_env = {} + + for image_env in image['ContainerConfig']['Env'] or []: + name, value = image_env.split('=', 1) + expected_env[name] = value + + if self.env: + for name, value in self.env.iteritems(): + expected_env[name] = str(value) + + actual_env = {} + for container_env in container['Config']['Env'] or []: + name, value = container_env.split('=', 1) + actual_env[name] = value + + if actual_env != expected_env: + # Don't include the environment difference in the output. + self.reload_reasons.append('environment {0} => {1}'.format(actual_env, expected_env)) + differing.append(container) + continue + + # HOSTNAME + + expected_hostname = self.module.params.get('hostname') + actual_hostname = container['Config']['Hostname'] + if expected_hostname and actual_hostname != expected_hostname: + self.reload_reasons.append('hostname ({0} => {1})'.format(actual_hostname, expected_hostname)) + differing.append(container) + continue + + # DOMAINNAME + + expected_domainname = self.module.params.get('domainname') + actual_domainname = container['Config']['Domainname'] + if expected_domainname and actual_domainname != expected_domainname: + self.reload_reasons.append('domainname ({0} => {1})'.format(actual_domainname, expected_domainname)) + differing.append(container) + continue + + # DETACH + + # We don't have to check for undetached containers. 
If it wasn't + # detached, it would have stopped before the playbook continued! + + # NAME + + # We also don't have to check name, because this is one of the + # criteria that's used to determine which container(s) match in + # the first place. + + # STDIN_OPEN + + expected_stdin_open = self.module.params.get('stdin_open') + actual_stdin_open = container['Config']['OpenStdin'] + if actual_stdin_open != expected_stdin_open: + self.reload_reasons.append('stdin_open ({0} => {1})'.format(actual_stdin_open, expected_stdin_open)) + differing.append(container) + continue + + # TTY + + expected_tty = self.module.params.get('tty') + actual_tty = container['Config']['Tty'] + if actual_tty != expected_tty: + self.reload_reasons.append('tty ({0} => {1})'.format(actual_tty, expected_tty)) + differing.append(container) + continue + + # -- "start" call differences -- + + # LXC_CONF + + if self.lxc_conf: + expected_lxc = set(self.lxc_conf) + actual_lxc = set(container['HostConfig']['LxcConf'] or []) + if actual_lxc != expected_lxc: + self.reload_reasons.append('lxc_conf ({0} => {1})'.format(actual_lxc, expected_lxc)) + differing.append(container) + continue + + # BINDS + + expected_binds = set() + if self.binds: + for host_path, config in self.binds.iteritems(): + if isinstance(config, dict): + container_path = config['bind'] + if config['ro']: + mode = 'ro' + else: + mode = 'rw' + else: + container_path = config + mode = 'rw' + expected_binds.add("{0}:{1}:{2}".format(host_path, container_path, mode)) + + actual_binds = set() + for bind in (container['HostConfig']['Binds'] or []): + if len(bind.split(':')) == 2: + actual_binds.add(bind + ":rw") + else: + actual_binds.add(bind) + + if actual_binds != expected_binds: + self.reload_reasons.append('binds ({0} => {1})'.format(actual_binds, expected_binds)) + differing.append(container) + continue + + # PORT BINDINGS + + expected_bound_ports = {} + if self.port_bindings: + for container_port, config in self.port_bindings.iteritems(): + if isinstance(container_port, int): + container_port = "{0}/tcp".format(container_port) + if len(config) == 1: + expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}] + elif isinstance(config[0], tuple): + expected_bound_ports[container_port] = [] + for hostip, hostport in config: + expected_bound_ports[container_port].append({ 'HostIp': hostip, 'HostPort': str(hostport)}) + else: + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}] + + actual_bound_ports = container['HostConfig']['PortBindings'] or {} + + if actual_bound_ports != expected_bound_ports: + self.reload_reasons.append('port bindings ({0} => {1})'.format(actual_bound_ports, expected_bound_ports)) + differing.append(container) + continue + + # PUBLISHING ALL PORTS + + # What we really care about is the set of ports that is actually + # published. That should be caught above. 
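+            # (A change to publish_all_ports itself matters only insofar as it
+            # alters which ports end up published, and the exposed-port and
+            # port-binding comparisons above already cover those.)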
+ + # PRIVILEGED + + expected_privileged = self.module.params.get('privileged') + actual_privileged = container['HostConfig']['Privileged'] + if actual_privileged != expected_privileged: + self.reload_reasons.append('privileged ({0} => {1})'.format(actual_privileged, expected_privileged)) + differing.append(container) + continue + + # LINKS + + expected_links = set() + for link, alias in (self.links or {}).iteritems(): + expected_links.add("/{0}:{1}/{2}".format(link, container["Name"], alias)) + + actual_links = set(container['HostConfig']['Links'] or []) + if actual_links != expected_links: + self.reload_reasons.append('links ({0} => {1})'.format(actual_links, expected_links)) + differing.append(container) + continue + + # NETWORK MODE + + expected_netmode = self.module.params.get('net') or 'bridge' + actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge' + if actual_netmode != expected_netmode: + self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode)) + differing.append(container) + continue + + # DNS + + expected_dns = set(self.module.params.get('dns') or []) + actual_dns = set(container['HostConfig']['Dns'] or []) + if actual_dns != expected_dns: + self.reload_reasons.append('dns ({0} => {1})'.format(actual_dns, expected_dns)) + differing.append(container) + continue + + # VOLUMES_FROM + + expected_volumes_from = set(self.module.params.get('volumes_from') or []) + actual_volumes_from = set(container['HostConfig']['VolumesFrom'] or []) + if actual_volumes_from != expected_volumes_from: + self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from)) + differing.append(container) + + # LOG_DRIVER + + if self.ensure_capability('log_driver', False): + expected_log_driver = self.module.params.get('log_driver') or 'json-file' + actual_log_driver = container['HostConfig']['LogConfig']['Type'] + if actual_log_driver != expected_log_driver: + self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver)) + differing.append(container) + continue + + if self.ensure_capability('log_opt', False): + expected_logging_opts = self.module.params.get('log_opt') or {} + actual_log_opts = container['HostConfig']['LogConfig']['Config'] + if len(set(expected_logging_opts.items()) - set(actual_log_opts.items())) != 0: + log_opt_reasons = { + 'added': dict(set(expected_logging_opts.items()) - set(actual_log_opts.items())), + 'removed': dict(set(actual_log_opts.items()) - set(expected_logging_opts.items())) + } + self.reload_reasons.append('log_opt ({0})'.format(log_opt_reasons)) + differing.append(container) + + return differing + + def get_deployed_containers(self): + """ + Return any matching containers that are already present. + """ + + command = self.module.params.get('command') + if command is not None: + command = shlex.split(command) + name = self.module.params.get('name') + if name and not name.startswith('/'): + name = '/' + name + deployed = [] + + # "images" will be a collection of equivalent "name:tag" image names + # that map to the same Docker image. 
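+        # When the image is missing locally, fall back to the normalized
+        # name:tag from the task parameters so stopped containers can still
+        # be matched before the first pull happens.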
+ inspected = self.get_inspect_image() + if inspected: + repo_tags = self.get_image_repo_tags() + else: + repo_tags = [normalize_image(self.module.params.get('image'))] + + for container in self.client.containers(all=True): + details = None + + if name: + name_list = container.get('Names') + if name_list is None: + name_list = [] + matches = name in name_list + else: + details = self.client.inspect_container(container['Id']) + details = _docker_id_quirk(details) + + running_image = normalize_image(details['Config']['Image']) + + image_matches = running_image in repo_tags + + command_matches = command == details['Config']['Cmd'] + + matches = image_matches and command_matches + + if matches: + if not details: + details = self.client.inspect_container(container['Id']) + details = _docker_id_quirk(details) + + deployed.append(details) + + return deployed + + def get_running_containers(self): + return [c for c in self.get_deployed_containers() if is_running(c)] + + def pull_image(self): + extra_params = {} + if self.module.params.get('insecure_registry'): + if self.ensure_capability('insecure_registry', fail=False): + extra_params['insecure_registry'] = self.module.params.get('insecure_registry') + + resource = self.module.params.get('image') + image, tag = get_split_image_tag(resource) + if self.module.params.get('username'): + try: + self.client.login( + self.module.params.get('username'), + password=self.module.params.get('password'), + email=self.module.params.get('email'), + registry=self.module.params.get('registry') + ) + except Exception as e: + self.module.fail_json(msg="failed to login to the remote registry, check your username/password.", error=repr(e)) + try: + changes = list(self.client.pull(image, tag=tag, stream=True, **extra_params)) + try: + last = changes[-1] + # seems Docker 1.8 puts an empty dict at the end of the + # stream; catch that and get the previous instead + # https://github.com/ansible/ansible-modules-core/issues/2043 + if last.strip() == '{}': + last = changes[-2] + except IndexError: + last = '{}' + status = json.loads(last).get('status', '') + if status.startswith('Status: Image is up to date for'): + # Image is already up to date. Don't increment the counter. + pass + elif (status.startswith('Status: Downloaded newer image for') or + status.startswith('Download complete')): + # Image was updated. Increment the pull counter. + self.increment_counter('pulled') + else: + # Unrecognized status string. 
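+                # Surface the raw status and stream so a mis-parsed pull shows
+                # up as an explicit failure rather than a silent "no change".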
+ self.module.fail_json(msg="Unrecognized status from pull.", status=status, changes=changes) + except Exception as e: + self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e)) + + def create_containers(self, count=1): + try: + mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) + except ValueError as e: + self.module.fail_json(msg=str(e)) + api_version = self.client.version()['ApiVersion'] + + params = {'image': self.module.params.get('image'), + 'command': self.module.params.get('command'), + 'ports': self.exposed_ports, + 'volumes': self.volumes, + 'environment': self.env, + 'hostname': self.module.params.get('hostname'), + 'domainname': self.module.params.get('domainname'), + 'detach': self.module.params.get('detach'), + 'name': self.module.params.get('name'), + 'stdin_open': self.module.params.get('stdin_open'), + 'tty': self.module.params.get('tty'), + 'cpuset': self.module.params.get('cpu_set'), + 'user': self.module.params.get('docker_user'), + } + if self.ensure_capability('host_config', fail=False): + params['host_config'] = self.create_host_config() + + #For v1.19 API and above use HostConfig, otherwise use Config + if docker.utils.compare_version('1.19', api_version) < 0: + params['mem_limit'] = mem_limit + else: + params['host_config']['Memory'] = mem_limit + + + def do_create(count, params): + results = [] + for _ in range(count): + result = self.client.create_container(**params) + self.increment_counter('created') + results.append(result) + + return results + + try: + containers = do_create(count, params) + except docker.errors.APIError as e: + if e.response.status_code != 404: + raise + + self.pull_image() + containers = do_create(count, params) + + return containers + + def start_containers(self, containers): + params = {} + + if not self.ensure_capability('host_config', fail=False): + params = self.get_start_params() + + for i in containers: + self.client.start(i) + self.increment_counter('started') + + if not self.module.params.get('detach'): + status = self.client.wait(i['Id']) + if status != 0: + output = self.client.logs(i['Id'], stdout=True, stderr=True, + stream=False, timestamps=False) + self.module.fail_json(status=status, msg=output) + + def stop_containers(self, containers): + for i in containers: + self.client.stop(i['Id'], self.module.params.get('stop_timeout')) + self.increment_counter('stopped') + + return [self.client.wait(i['Id']) for i in containers] + + def remove_containers(self, containers): + for i in containers: + self.client.remove_container(i['Id']) + self.increment_counter('removed') + + def kill_containers(self, containers): + for i in containers: + self.client.kill(i['Id'], self.module.params.get('signal')) + self.increment_counter('killed') + + def restart_containers(self, containers): + for i in containers: + self.client.restart(i['Id']) + self.increment_counter('restarted') + + +class ContainerSet: + + def __init__(self, manager): + self.manager = manager + self.running = [] + self.deployed = [] + self.changed = [] + + def refresh(self): + ''' + Update our view of the matching containers from the Docker daemon. + ''' + + + self.deployed = self.manager.get_deployed_containers() + self.running = [c for c in self.deployed if is_running(c)] + + def notice_changed(self, containers): + ''' + Record a collection of containers as "changed". 
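+
+        The recorded inspection results are what _ansible_facts() wraps into
+        the docker_containers fact when the module reports back to the play.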
+        '''
+
+        self.changed.extend(containers)
+
+
+def present(manager, containers, count, name):
+    '''Ensure that exactly `count` matching containers exist in any state.'''
+
+    containers.refresh()
+    delta = count - len(containers.deployed)
+
+    if delta > 0:
+        created = manager.create_containers(delta)
+        containers.notice_changed(manager.get_inspect_containers(created))
+
+    if delta < 0:
+        # If both running and stopped containers exist, remove
+        # stopped containers first.
+        containers.deployed.sort(lambda cx, cy: cmp(is_running(cx), is_running(cy)))
+
+        to_stop = []
+        to_remove = []
+        for c in containers.deployed[0:-delta]:
+            if is_running(c):
+                to_stop.append(c)
+            to_remove.append(c)
+
+        manager.stop_containers(to_stop)
+        containers.notice_changed(manager.get_inspect_containers(to_remove))
+        manager.remove_containers(to_remove)
+
+def started(manager, containers, count, name):
+    '''Ensure that exactly `count` matching containers exist and are running.'''
+
+    containers.refresh()
+    delta = count - len(containers.running)
+
+    if delta > 0:
+        if name and containers.deployed:
+            # A stopped container exists with the requested name.
+            # Clean it up before attempting to start a new one.
+            manager.remove_containers(containers.deployed)
+
+        created = manager.create_containers(delta)
+        manager.start_containers(created)
+        containers.notice_changed(manager.get_inspect_containers(created))
+
+    if delta < 0:
+        excess = containers.running[0:-delta]
+        containers.notice_changed(manager.get_inspect_containers(excess))
+        manager.stop_containers(excess)
+        manager.remove_containers(excess)
+
+def reloaded(manager, containers, count, name):
+    '''
+    Ensure that exactly `count` matching containers exist and are
+    running. If any associated settings have been changed (volumes,
+    ports and so on), restart those containers.
+    '''
+
+    containers.refresh()
+
+    for container in manager.get_differing_containers():
+        manager.stop_containers([container])
+        manager.remove_containers([container])
+
+    started(manager, containers, count, name)
+
+def restarted(manager, containers, count, name):
+    '''
+    Ensure that exactly `count` matching containers exist and are
+    running. Unconditionally restart any that were already running.
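+
+    Containers whose configuration differs from the requested one are
+    stopped and removed outright, so they are recreated fresh by
+    started() rather than merely restarted.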
+ ''' + + containers.refresh() + + for container in manager.get_differing_containers(): + manager.stop_containers([container]) + manager.remove_containers([container]) + + manager.restart_containers(containers.running) + started(manager, containers, count, name) + +def stopped(manager, containers, count, name): + '''Stop any matching containers that are running.''' + + containers.refresh() + + manager.stop_containers(containers.running) + containers.notice_changed(manager.get_inspect_containers(containers.running)) + +def killed(manager, containers, count, name): + '''Kill any matching containers that are running.''' + + containers.refresh() + + manager.kill_containers(containers.running) + containers.notice_changed(manager.get_inspect_containers(containers.running)) + +def absent(manager, containers, count, name): + '''Stop and remove any matching containers.''' + + containers.refresh() + + manager.stop_containers(containers.running) + containers.notice_changed(manager.get_inspect_containers(containers.deployed)) + manager.remove_containers(containers.deployed) + +def main(): + module = AnsibleModule( + argument_spec = dict( + count = dict(default=1), + image = dict(required=True), + pull = dict(required=False, default='missing', choices=['missing', 'always']), + command = dict(required=False, default=None), + expose = dict(required=False, default=None, type='list'), + ports = dict(required=False, default=None, type='list'), + publish_all_ports = dict(default=False, type='bool'), + volumes = dict(default=None, type='list'), + volumes_from = dict(default=None), + links = dict(default=None, type='list'), + memory_limit = dict(default=0), + memory_swap = dict(default=0), + docker_url = dict(), + use_tls = dict(default=None, choices=['no', 'encrypt', 'verify']), + tls_client_cert = dict(required=False, default=None, type='str'), + tls_client_key = dict(required=False, default=None, type='str'), + tls_ca_cert = dict(required=False, default=None, type='str'), + tls_hostname = dict(required=False, type='str', default=None), + docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'), + docker_user = dict(default=None), + username = dict(default=None), + password = dict(), + email = dict(), + registry = dict(), + hostname = dict(default=None), + domainname = dict(default=None), + env = dict(type='dict'), + dns = dict(), + detach = dict(default=True, type='bool'), + state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']), + signal = dict(default=None), + restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']), + restart_policy_retry = dict(default=0, type='int'), + extra_hosts = dict(type='dict'), + debug = dict(default=False, type='bool'), + privileged = dict(default=False, type='bool'), + stdin_open = dict(default=False, type='bool'), + tty = dict(default=False, type='bool'), + lxc_conf = dict(default=None, type='list'), + name = dict(default=None), + net = dict(default=None), + pid = dict(default=None), + insecure_registry = dict(default=False, type='bool'), + log_driver = dict(default=None, choices=['json-file', 'none', 'syslog', 'journald', 'gelf', 'fluentd']), + log_opt = dict(default=None, type='dict'), + cpu_set = dict(default=None), + cap_add = dict(default=None, type='list'), + cap_drop = dict(default=None, type='list'), + read_only = dict(default=None, type='bool'), + stop_timeout = dict(default=10, type='int'), + ), + required_together = ( + ['tls_client_cert', 
'tls_client_key'],
+        ),
+    )
+
+    check_dependencies(module)
+
+    try:
+        manager = DockerManager(module)
+        count = int(module.params.get('count'))
+        name = module.params.get('name')
+        pull = module.params.get('pull')
+
+        state = module.params.get('state')
+        if state == 'running':
+            # Renamed running to started in 1.9
+            state = 'started'
+
+        if count < 0:
+            module.fail_json(msg="Count must not be negative")
+
+        if count > 1 and name:
+            module.fail_json(msg="Count and name must not be used together")
+
+        # Explicitly pull new container images, if requested. Do this before
+        # noticing running and deployed containers so that the image names
+        # will differ if a newer image has been pulled.
+        # Missing images should be pulled first to avoid downtime when the old
+        # container is stopped while the image for the new one is not downloaded yet.
+        # It also prevents removal of a running container before realizing
+        # that the requested image cannot be retrieved.
+        if pull == "always" or (state == 'reloaded' and manager.get_inspect_image() is None):
+            manager.pull_image()
+
+        containers = ContainerSet(manager)
+
+        if state == 'present':
+            present(manager, containers, count, name)
+        elif state == 'started':
+            started(manager, containers, count, name)
+        elif state == 'reloaded':
+            reloaded(manager, containers, count, name)
+        elif state == 'restarted':
+            restarted(manager, containers, count, name)
+        elif state == 'stopped':
+            stopped(manager, containers, count, name)
+        elif state == 'killed':
+            killed(manager, containers, count, name)
+        elif state == 'absent':
+            absent(manager, containers, count, name)
+        else:
+            module.fail_json(msg='Unrecognized state %s. Must be one of: '
+                                 'present; started; reloaded; restarted; '
+                                 'stopped; killed; absent.' % state)
+
+        module.exit_json(changed=manager.has_changed(),
+                         msg=manager.get_summary_message(),
+                         summary=manager.counters,
+                         reload_reasons=manager.get_reload_reason_message(),
+                         ansible_facts=_ansible_facts(containers.changed))
+
+    except DockerAPIError as e:
+        module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation)
+
+    except RequestException as e:
+        module.fail_json(changed=manager.has_changed(), msg=repr(e))
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible/library/minigraph_facts.py b/ansible/library/minigraph_facts.py
new file mode 100644
index 0000000000..6a256d0b24
--- /dev/null
+++ b/ansible/library/minigraph_facts.py
@@ -0,0 +1,391 @@
+#!/usr/bin/env python
+import calendar
+import os
+import socket
+import struct
+import time
+from collections import defaultdict
+
+import ngs_lib
+import ngs_lib.exceptions
+from lxml import etree as ET
+from lxml.etree import QName
+
+DOCUMENTATION = '''
+---
+module: minigraph_facts
+version_added: "1.9"
+author: Guohan Lu (gulv@microsoft.com)
+short_description: Retrieve minigraph facts for a device.
+description:
+    - Retrieve minigraph facts for a device; the facts will be
+      inserted into the ansible_facts key.
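+    - If no filename is supplied, the module looks for minigraph/{host}.xml
+      in the repository, then for a cached copy under ~/.ansible/minigraph
+      that is less than 24 hours old, and finally falls back to downloading
+      the graph from the Network Graph Service.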
+options:
+    host:
+        description:
+            - Set to the target device (normally {{inventory_hostname}})
+        required: true
+'''
+
+EXAMPLES = '''
+# Gather minigraph facts
+- name: Gathering minigraph facts about the device
+  minigraph_facts: host={{ hostname }}
+'''
+
+ns = "Microsoft.Search.Autopilot.Evolution"
+ns1 = "http://schemas.datacontract.org/2004/07/Microsoft.Search.Autopilot.Evolution"
+ns2 = "Microsoft.Search.Autopilot.NetMux"
+ns3 = "http://www.w3.org/2001/XMLSchema-instance"
+
+ANSIBLE_USER_MINIGRAPH_PATH = os.path.expanduser('~/.ansible/minigraph')
+ANSIBLE_LOCAL_MINIGRAPH_PATH = 'minigraph/{}.xml'
+ANSIBLE_USER_MINIGRAPH_MAX_AGE = 86400  # 24 hours (in seconds)
+
+
+def parse_png(png, hname):
+    neighbors = {}
+    devices = {}
+    console_dev = ''
+    console_port = ''
+    for child in png:
+        if child.tag == str(QName(ns, "DeviceInterfaceLinks")):
+            for link in child.findall(str(QName(ns, "DeviceLinkBase"))):
+                linktype = link.find(str(QName(ns, "ElementType"))).text
+                if linktype != "DeviceInterfaceLink" and linktype != "UnderlayInterfaceLink":
+                    continue
+
+                enddevice = link.find(str(QName(ns, "EndDevice"))).text
+                endport = link.find(str(QName(ns, "EndPort"))).text
+                startdevice = link.find(str(QName(ns, "StartDevice"))).text
+                startport = link.find(str(QName(ns, "StartPort"))).text
+
+                if enddevice == hname:
+                    neighbors[endport] = {'name': startdevice, 'port': startport}
+                else:
+                    neighbors[startport] = {'name': enddevice, 'port': endport}
+        if child.tag == str(QName(ns, "Devices")):
+            for device in child.findall(str(QName(ns, "Device"))):
+                lo_addr = None
+                # don't shadow type()
+                d_type = None
+                mgmt_addr = None
+                hwsku = None
+                if str(QName(ns3, "type")) in device.attrib:
+                    d_type = device.attrib[str(QName(ns3, "type"))]
+
+                for node in device:
+                    if node.tag == str(QName(ns, "Address")):
+                        lo_addr = node.find(str(QName(ns2, "IPPrefix"))).text.split('/')[0]
+                    elif node.tag == str(QName(ns, "ManagementAddress")):
+                        mgmt_addr = node.find(str(QName(ns2, "IPPrefix"))).text.split('/')[0]
+                    elif node.tag == str(QName(ns, "Hostname")):
+                        name = node.text
+                    elif node.tag == str(QName(ns, "HwSku")):
+                        hwsku = node.text
+
+                devices[name] = {'lo_addr': lo_addr, 'type': d_type, 'mgmt_addr': mgmt_addr, 'hwsku': hwsku}
+
+        if child.tag == str(QName(ns, "DeviceInterfaceLinks")):
+            for if_link in child.findall(str(QName(ns, 'DeviceLinkBase'))):
+                if str(QName(ns3, "type")) in if_link.attrib:
+                    link_type = if_link.attrib[str(QName(ns3, "type"))]
+                    if link_type == 'DeviceSerialLink':
+                        for node in if_link:
+                            if node.tag == str(QName(ns, "EndPort")):
+                                console_port = node.text.split()[-1]
+                            elif node.tag == str(QName(ns, "EndDevice")):
+                                console_dev = node.text
+
+    return (neighbors, devices, console_dev, console_port)
+
+
+def parse_dpg(dpg, hname):
+    for child in dpg:
+        hostname = child.find(str(QName(ns, "Hostname")))
+        if hostname.text != hname:
+            continue
+
+        ipintfs = child.find(str(QName(ns, "IPInterfaces")))
+        intfs = []
+        vlan_map = {}
+        for ipintf in ipintfs.findall(str(QName(ns, "IPInterface"))):
+            intfname = ipintf.find(str(QName(ns, "AttachTo"))).text
+            ipprefix = ipintf.find(str(QName(ns, "Prefix"))).text
+            (ipaddr, prefix_len) = ipprefix.split('/')
+            mask_bytes = struct.pack("I", socket.htonl((0xffffffff << (32 - int(prefix_len))) & 0xffffffff))
+            ipmask = socket.inet_ntoa(mask_bytes)
+
+            ipaddr_bytes = socket.inet_aton(ipaddr)
+            ipaddr_val = struct.unpack("I", ipaddr_bytes)
+            ipmask_val = struct.unpack("I", mask_bytes)
+            subnet_bytes = struct.pack("I", ipaddr_val[0] & ipmask_val[0])
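+            # Worked example (values for illustration only): a Prefix of
+            # "10.0.0.5/31" packs to mask 255.255.255.254, so the computed
+            # subnet below is "10.0.0.4/31"; "192.168.1.10/30" would give
+            # "192.168.1.8/30".
+            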
subnet = socket.inet_ntoa(subnet_bytes) + "/" + str(prefix_len) + if intfname[0:4] == "Vlan": + vlan_map.update({intfname: {'addr': ipaddr, 'mask': ipmask, 'subnet': subnet}}) + else: + intf = {'name': intfname, 'addr': ipaddr, 'mask': ipmask, 'prefixlen': int(prefix_len), + 'subnet': subnet} + if port_alias_map.has_key(intfname): + intf['alias'] = port_alias_map[intfname] + else: + intf['alias'] = intfname + peer_addr_bytes = None + if int(prefix_len) == 30: + if socket.ntohl(ipaddr_val[0]) & 0x3 == 1: + peer_addr_bytes = struct.pack("I", socket.htonl(socket.ntohl(ipaddr_val[0]) + 1)) + else: + peer_addr_bytes = struct.pack("I", socket.htonl(socket.ntohl(ipaddr_val[0]) - 1)) + if int(prefix_len) == 31: + if socket.ntohl(ipaddr_val[0]) & 0x1 == 0: + peer_addr_bytes = struct.pack("I", socket.htonl(socket.ntohl(ipaddr_val[0]) + 1)) + else: + peer_addr_bytes = struct.pack("I", socket.htonl(socket.ntohl(ipaddr_val[0]) - 1)) + if peer_addr_bytes: + intf['peer_addr'] = socket.inet_ntoa(peer_addr_bytes) + intfs.append(intf) + + pcintfs = child.find(str(QName(ns, "PortChannelInterfaces"))) + pc_intfs = [] + for pcintf in pcintfs.findall(str(QName(ns, "PortChannel"))): + pcintfname = pcintf.find(str(QName(ns, "Name"))).text + pcintfmbr = pcintf.find(str(QName(ns, "AttachTo"))).text + mbr_list = pcintfmbr.split(';', 1) + pc_intfs.append({'name': pcintfname.lower(), 'members': mbr_list}) + + lointfs = child.find(str(QName(ns, "LoopbackIPInterfaces"))) + for lointf in lointfs.findall(str(QName(ns1, "LoopbackIPInterface"))): + ipprefix = lointf.find(str(QName(ns1, "PrefixStr"))).text + (ipaddr, prefix_len) = ipprefix.split('/') + lo_intf = {'addr': ipaddr, 'prefixlen': prefix_len} + + mgmtintfs = child.find(str(QName(ns, "ManagementIPInterfaces"))) + for mgmtintf in mgmtintfs.findall(str(QName(ns1, "ManagementIPInterface"))): + ipprefix = mgmtintf.find(str(QName(ns1, "PrefixStr"))).text + (ipaddr, prefix_len) = ipprefix.split('/') + mask_bytes = struct.pack("I", socket.htonl((0xffffffff << (32 - int(prefix_len))) & 0xffffffff)) + ipmask = socket.inet_ntoa(mask_bytes) + ipaddr_bytes = socket.inet_aton(ipaddr) + ipaddr_val = struct.unpack("I", ipaddr_bytes) + ipmask_val = struct.unpack("I", mask_bytes) + gwaddr_bytes = struct.pack("I", socket.htonl(socket.ntohl(ipaddr_val[0] & ipmask_val[0]) + 1)) + gwaddr = socket.inet_ntoa(gwaddr_bytes) + mgmt_intf = {'addr': ipaddr, 'prefixlen': prefix_len, 'mask': ipmask, 'gwaddr': gwaddr} + + vlanintfs = child.find(str(QName(ns, "VlanInterfaces"))) + vlan_intfs = [] + for vintf in vlanintfs.findall(str(QName(ns, "VlanInterface"))): + vintfname = vintf.find(str(QName(ns, "Name"))).text + vlanid = vintf.find(str(QName(ns, "VlanID"))).text + members = vintf.find(str(QName(ns, "AttachTo"))).text + members = " ".join(members.split(';')) + vlan_attributes = {'name': vintfname, 'members': members, 'vlanid': vlanid} + if vintfname in vlan_map: + vlan_attributes.update(vlan_map[vintfname]) + vlan_intfs.append(vlan_attributes) + + return intfs, lo_intf, mgmt_intf, vlan_intfs, pc_intfs + + +def parse_cpg(cpg, hname): + bgp_sessions = [] + myasn = None + for child in cpg: + tag = child.tag + if tag == str(QName(ns, "PeeringSessions")): + for session in child.findall(str(QName(ns, "BGPSession"))): + start_router = session.find(str(QName(ns, "StartRouter"))).text + start_peer = session.find(str(QName(ns, "StartPeer"))).text + end_router = session.find(str(QName(ns, "EndRouter"))).text + end_peer = session.find(str(QName(ns, "EndPeer"))).text + if end_router == hname: + 
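# This device is the End* side of the session, so the remote
+                    # Start* router and address are recorded as the peer entry
+                    # (the else branch below handles the mirrored case).
+                    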
bgp_sessions.append({'name': start_router, 'addr': start_peer, 'peer_addr': end_peer}) + else: + bgp_sessions.append({'name': end_router, 'addr': end_peer, 'peer_addr': start_peer}) + elif child.tag == str(QName(ns, "Routers")): + for router in child.findall(str(QName(ns1, "BGPRouterDeclaration"))): + asn = router.find(str(QName(ns1, "ASN"))).text + hostname = router.find(str(QName(ns1, "Hostname"))).text + if hostname == hname: + myasn = int(asn) + else: + for bgp_session in bgp_sessions: + if hostname == bgp_session['name']: + bgp_session['asn'] = int(asn) + + return bgp_sessions, myasn + + +def get_console_info(devices, dev, port): + for k, v in devices.items(): + if v['type'] == 'MiniTs': + break + else: + return {} + + ret_val = v + ret_val.update({ + 'ts_port': port, + 'ts_dev': dev + }) + return ret_val + + +def file_age(filename): + """ + :param filename: The filename to carbon date. + :return: The age of the file in seconds. + """ + return calendar.timegm(time.gmtime()) - os.path.getctime(filename) + + +def reconcile_mini_graph_locations(filename, hostname): + """ + Location precedence: + 1. "filename" module parameter + 2. minigraph/ folder + 3. .ansible/minigraph/ folder (<24 hrs old) + 4. Network Graph Service + + post-NGS download, cache to the user folder: + ~/.ansible/minigraph/HOSTNAME_minigraph.xml + + :param filename: the filename to load (may be None) + :param hostname: the hostname to load (required) + :return: tuple(the absolute filepath of the {cached,loaded} mini-graph, the root node of the loaded graph) + """ + if filename is not None: + # literal filename specified. read directly from the file. + root = ET.parse(filename).getroot() + mini_graph_path = filename + else: + # only the hostname was specified, determine the output path + mini_graph_path = os.path.join(ANSIBLE_USER_MINIGRAPH_PATH, hostname + '_minigraph.xml') + if os.path.exists(mini_graph_path) and file_age(mini_graph_path) < ANSIBLE_USER_MINIGRAPH_MAX_AGE: + # found a cached mini-graph, load it. + root = ET.parse(mini_graph_path).getroot() + else: + # No recent cached graph found -- initiate a connection to NGS and download the mini-graph. + mini_graph_string = ngs_lib.NGSConnection().ReadDeviceMiniGraph(hostname=hostname) + root = ET.fromstring(mini_graph_string) + # perform the additional work of writing the mini-graph to cache. 
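+            # The cached copy lands at, e.g.,
+            # ~/.ansible/minigraph/switch1_minigraph.xml for host "switch1".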
+            root.getroottree().write(mini_graph_path, pretty_print=True)
+
+    return mini_graph_path, root
+
+
+def parse_xml(filename, hostname):
+    mini_graph_path, root = reconcile_mini_graph_locations(filename, hostname)
+
+    u_neighbors = None
+    u_devices = None
+    hwsku = None
+    bgp_sessions = None
+    bgp_asn = None
+    intfs = None
+    vlan_intfs = None
+    pc_intfs = None
+    mgmt_intf = None
+    lo_intf = None
+    neighbors = None
+    devices = None
+    console_dev = ''
+    console_port = ''
+
+    hwsku_qn = QName(ns, "HwSku")
+    for child in root:
+        if child.tag == str(hwsku_qn):
+            hwsku = child.text
+
+    # port_alias_map maps ngs port name to sonic port name
+    if hwsku == "Force10-S6000":
+        for i in range(0, 128, 4):
+            port_alias_map["fortyGigE0/%d" % i] = "Ethernet%d" % i
+
+    for child in root:
+        if child.tag == str(QName(ns, "DpgDec")):
+            (intfs, lo_intf, mgmt_intf, vlan_intfs, pc_intfs) = parse_dpg(child, hostname)
+        elif child.tag == str(QName(ns, "CpgDec")):
+            (bgp_sessions, bgp_asn) = parse_cpg(child, hostname)
+        elif child.tag == str(QName(ns, "PngDec")):
+            (neighbors, devices, console_dev, console_port) = parse_png(child, hostname)
+        elif child.tag == str(QName(ns, "UngDec")):
+            (u_neighbors, u_devices, _, _) = parse_png(child, hostname)
+
+    Tree = lambda: defaultdict(Tree)
+
+    results = Tree()
+    results['minigraph_hwsku'] = hwsku
+    # sorting by lambdas is not easily done without custom filters.
+    # TODO: add jinja2 filter to accept a lambda to sort a list of dictionaries by attribute.
+    # TODO: alternatively (preferred), implement class containers for multiple-attribute entries, enabling sort by attr
+    results['minigraph_bgp'] = sorted(bgp_sessions, key=lambda x: x['addr'])
+    results['minigraph_bgp_asn'] = bgp_asn
+    # TODO: sort does not work properly on all interfaces of varying lengths. Need to sort by integer group(s).
+    results['minigraph_interfaces'] = sorted(intfs, key=lambda x: x['name'])
+    results['minigraph_vlan_interfaces'] = vlan_intfs
+    results['minigraph_portchannel_interfaces'] = pc_intfs
+    results['minigraph_mgmt_interface'] = mgmt_intf
+    results['minigraph_lo_interface'] = lo_intf
+    results['minigraph_neighbors'] = neighbors
+    results['minigraph_devices'] = devices
+    results['minigraph_underlay_neighbors'] = u_neighbors
+    results['minigraph_underlay_devices'] = u_devices
+    # note - this may include files under acs/ansible/minigraph, or those under the default cache folder.
+    # (see ANSIBLE_USER_MINIGRAPH_PATH at the top of the module)
+    results['minigraph_as_xml'] = mini_graph_path
+    results['minigraph_console'] = get_console_info(devices, console_dev, console_port)
+
+    return results
+
+
+port_alias_map = {}
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            host=dict(required=True),
+            filename=dict(),
+        ),
+        supports_check_mode=True
+    )
+
+    try:
+        # make the directory (and any missing parents) for caching the mini-graph.
+        os.makedirs(ANSIBLE_USER_MINIGRAPH_PATH)
+    except OSError:
+        if not os.path.isdir(ANSIBLE_USER_MINIGRAPH_PATH):
+            # file conflict, report the error and exit.
+            module.fail_json(msg="'{}' exists but is not a directory".format(ANSIBLE_USER_MINIGRAPH_PATH))
+
+    m_args = module.params
+    local_file_path = ANSIBLE_LOCAL_MINIGRAPH_PATH.format(m_args['host'])
+    if 'filename' in m_args and m_args['filename'] is not None:
+        # literal filename specified
+        filename = "minigraph/%s" % m_args['filename']
+    elif os.path.exists(local_file_path):
+        # local project minigraph found for the hostname, use that file
+        filename = local_file_path
+    else:
+        # no file could be found
+        filename = None
+
+    try:
+        results = parse_xml(filename, m_args['host'])
+        module.exit_json(ansible_facts=results)
+    except ngs_lib.exceptions.NgsConnectionError as e:
+        # all attempts to find a minigraph failed.
+        module.fail_json(msg=e.message)
+
+
+def debug_main():
+    hostname = 'CO4SCH03001BBLF'
+    filename = '../minigraph/' + hostname + '.xml'
+    results = parse_xml(filename, hostname)
+    import json
+    print(json.dumps(results, indent=3))
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == "__main__":
+    main()
+    # debug_main()
diff --git a/ansible/minigraph/switch1.xml b/ansible/minigraph/switch1.xml
new file mode 100644
index 0000000000..b6daa4f034
--- /dev/null
+++ b/ansible/minigraph/switch1.xml
@@ -0,0 +1,1052 @@
+<DeviceMiniGraph xmlns="Microsoft.Search.Autopilot.Evolution" xmlns:a="http://schemas.datacontract.org/2004/07/Microsoft.Search.Autopilot.Evolution" xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+  <CpgDec>
+    <PeeringSessions>
+      <BGPSession><StartRouter>ROUTER01T0</StartRouter><StartPeer>10.0.0.33</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.32</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.0</StartPeer><EndRouter>ROUTER01T2</EndRouter><EndPeer>10.0.0.1</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER02T0</StartRouter><StartPeer>10.0.0.35</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.34</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.2</StartPeer><EndRouter>ROUTER02T2</EndRouter><EndPeer>10.0.0.3</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER03T0</StartRouter><StartPeer>10.0.0.37</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.36</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.4</StartPeer><EndRouter>ROUTER03T2</EndRouter><EndPeer>10.0.0.5</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER04T0</StartRouter><StartPeer>10.0.0.39</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.38</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.6</StartPeer><EndRouter>ROUTER04T2</EndRouter><EndPeer>10.0.0.7</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER05T0</StartRouter><StartPeer>10.0.0.41</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.40</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.8</StartPeer><EndRouter>ROUTER05T2</EndRouter><EndPeer>10.0.0.9</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER06T0</StartRouter><StartPeer>10.0.0.43</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.42</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.10</StartPeer><EndRouter>ROUTER06T2</EndRouter><EndPeer>10.0.0.11</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER07T0</StartRouter><StartPeer>10.0.0.45</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.44</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.12</StartPeer><EndRouter>ROUTER07T2</EndRouter><EndPeer>10.0.0.13</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER08T0</StartRouter><StartPeer>10.0.0.47</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.46</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.14</StartPeer><EndRouter>ROUTER08T2</EndRouter><EndPeer>10.0.0.15</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER09T0</StartRouter><StartPeer>10.0.0.49</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.48</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.16</StartPeer><EndRouter>ROUTER09T2</EndRouter><EndPeer>10.0.0.17</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER10T0</StartRouter><StartPeer>10.0.0.51</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.50</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.18</StartPeer><EndRouter>ROUTER10T2</EndRouter><EndPeer>10.0.0.19</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER11T0</StartRouter><StartPeer>10.0.0.53</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.52</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.20</StartPeer><EndRouter>ROUTER11T2</EndRouter><EndPeer>10.0.0.21</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER12T0</StartRouter><StartPeer>10.0.0.55</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.54</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.22</StartPeer><EndRouter>ROUTER12T2</EndRouter><EndPeer>10.0.0.23</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER13T0</StartRouter><StartPeer>10.0.0.57</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.56</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.24</StartPeer><EndRouter>ROUTER13T2</EndRouter><EndPeer>10.0.0.25</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER14T0</StartRouter><StartPeer>10.0.0.59</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.58</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.26</StartPeer><EndRouter>ROUTER14T2</EndRouter><EndPeer>10.0.0.27</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER15T0</StartRouter><StartPeer>10.0.0.61</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.60</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.28</StartPeer><EndRouter>ROUTER15T2</EndRouter><EndPeer>10.0.0.29</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>ROUTER16T0</StartRouter><StartPeer>10.0.0.63</StartPeer><EndRouter>switch1</EndRouter><EndPeer>10.0.0.62</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+      <BGPSession><StartRouter>switch1</StartRouter><StartPeer>10.0.0.30</StartPeer><EndRouter>ROUTER16T2</EndRouter><EndPeer>10.0.0.31</EndPeer><Multihop>1</Multihop><HoldTime>180</HoldTime><KeepAliveTime>60</KeepAliveTime></BGPSession>
+    </PeeringSessions>
+    <Routers>
+      <a:BGPRouterDeclaration>
+        <a:ASN>65100</a:ASN>
+        <a:Hostname>switch1</a:Hostname>
+        <a:Peers>
+          <BGPPeer><Address>10.0.0.33</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.1</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.35</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.3</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.37</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.5</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.39</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.7</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.41</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.9</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.43</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.11</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.45</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.13</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.47</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.15</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.49</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.17</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.51</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.19</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.53</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.21</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.55</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.23</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.57</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.25</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.59</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.27</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.61</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.29</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.63</Address></BGPPeer>
+          <BGPPeer><Address>10.0.0.31</Address></BGPPeer>
+        </a:Peers>
+      </a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64001</a:ASN><a:Hostname>ROUTER01T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER01T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64002</a:ASN><a:Hostname>ROUTER02T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER02T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64003</a:ASN><a:Hostname>ROUTER03T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER03T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64004</a:ASN><a:Hostname>ROUTER04T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER04T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64005</a:ASN><a:Hostname>ROUTER05T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER05T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64006</a:ASN><a:Hostname>ROUTER06T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER06T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64007</a:ASN><a:Hostname>ROUTER07T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER07T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64008</a:ASN><a:Hostname>ROUTER08T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER08T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64009</a:ASN><a:Hostname>ROUTER09T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER09T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64010</a:ASN><a:Hostname>ROUTER10T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER10T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64011</a:ASN><a:Hostname>ROUTER11T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER11T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64012</a:ASN><a:Hostname>ROUTER12T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER12T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64013</a:ASN><a:Hostname>ROUTER13T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER13T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64014</a:ASN><a:Hostname>ROUTER14T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER14T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64015</a:ASN><a:Hostname>ROUTER15T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER15T2</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>64016</a:ASN><a:Hostname>ROUTER16T0</a:Hostname></a:BGPRouterDeclaration>
+      <a:BGPRouterDeclaration><a:ASN>65200</a:ASN><a:Hostname>ROUTER16T2</a:Hostname></a:BGPRouterDeclaration>
+    </Routers>
+  </CpgDec>
+  <DpgDec>
+    <DeviceDataPlaneInfo>
+      <LoopbackIPInterfaces>
+        <a:LoopbackIPInterface>
+          <Name>HostIP</Name>
+          <AttachTo>Loopback0</AttachTo>
+          <a:PrefixStr>10.1.0.32/32</a:PrefixStr>
+        </a:LoopbackIPInterface>
+      </LoopbackIPInterfaces>
+      <ManagementIPInterfaces>
+        <a:ManagementIPInterface>
+          <Name>HostIP</Name>
+          <AttachTo>eth0</AttachTo>
+          <a:PrefixStr>10.3.147.195/23</a:PrefixStr>
+        </a:ManagementIPInterface>
+      </ManagementIPInterfaces>
+      <Hostname>switch1</Hostname>
+      <PortChannelInterfaces/>
+      <VlanInterfaces/>
+      <IPInterfaces>
+        <IPInterface><AttachTo>Ethernet0</AttachTo><Prefix>10.0.0.0/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet4</AttachTo><Prefix>10.0.0.2/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet8</AttachTo><Prefix>10.0.0.4/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet12</AttachTo><Prefix>10.0.0.6/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet16</AttachTo><Prefix>10.0.0.8/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet20</AttachTo><Prefix>10.0.0.10/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet24</AttachTo><Prefix>10.0.0.12/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet28</AttachTo><Prefix>10.0.0.14/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet32</AttachTo><Prefix>10.0.0.16/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet36</AttachTo><Prefix>10.0.0.18/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet40</AttachTo><Prefix>10.0.0.20/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet44</AttachTo><Prefix>10.0.0.22/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet48</AttachTo><Prefix>10.0.0.24/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet52</AttachTo><Prefix>10.0.0.26/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet56</AttachTo><Prefix>10.0.0.28/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet60</AttachTo><Prefix>10.0.0.30/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet64</AttachTo><Prefix>10.0.0.32/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet68</AttachTo><Prefix>10.0.0.34/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet72</AttachTo><Prefix>10.0.0.36/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet76</AttachTo><Prefix>10.0.0.38/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet80</AttachTo><Prefix>10.0.0.40/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet84</AttachTo><Prefix>10.0.0.42/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet88</AttachTo><Prefix>10.0.0.44/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet92</AttachTo><Prefix>10.0.0.46/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet96</AttachTo><Prefix>10.0.0.48/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet100</AttachTo><Prefix>10.0.0.50/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet104</AttachTo><Prefix>10.0.0.52/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet108</AttachTo><Prefix>10.0.0.54/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet112</AttachTo><Prefix>10.0.0.56/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet116</AttachTo><Prefix>10.0.0.58/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet120</AttachTo><Prefix>10.0.0.60/31</Prefix></IPInterface>
+        <IPInterface><AttachTo>Ethernet124</AttachTo><Prefix>10.0.0.62/31</Prefix></IPInterface>
+      </IPInterfaces>
+    </DeviceDataPlaneInfo>
+  </DpgDec>
+  <PngDec>
+    <DeviceInterfaceLinks>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet0</EndPort><StartDevice>ROUTER01T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet4</EndPort><StartDevice>ROUTER02T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet8</EndPort><StartDevice>ROUTER03T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet12</EndPort><StartDevice>ROUTER04T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet16</EndPort><StartDevice>ROUTER05T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet20</EndPort><StartDevice>ROUTER06T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet24</EndPort><StartDevice>ROUTER07T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet28</EndPort><StartDevice>ROUTER08T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet32</EndPort><StartDevice>ROUTER09T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet36</EndPort><StartDevice>ROUTER10T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet40</EndPort><StartDevice>ROUTER11T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet44</EndPort><StartDevice>ROUTER12T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet48</EndPort><StartDevice>ROUTER13T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet52</EndPort><StartDevice>ROUTER14T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet56</EndPort><StartDevice>ROUTER15T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet60</EndPort><StartDevice>ROUTER16T2</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet64</EndPort><StartDevice>ROUTER01T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet68</EndPort><StartDevice>ROUTER02T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet72</EndPort><StartDevice>ROUTER03T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet76</EndPort><StartDevice>ROUTER04T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet80</EndPort><StartDevice>ROUTER05T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet84</EndPort><StartDevice>ROUTER06T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet88</EndPort><StartDevice>ROUTER07T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet92</EndPort><StartDevice>ROUTER08T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet96</EndPort><StartDevice>ROUTER09T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet100</EndPort><StartDevice>ROUTER10T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet104</EndPort><StartDevice>ROUTER11T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet108</EndPort><StartDevice>ROUTER12T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet112</EndPort><StartDevice>ROUTER13T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet116</EndPort><StartDevice>ROUTER14T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet120</EndPort><StartDevice>ROUTER15T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+      <DeviceLinkBase><ElementType>DeviceInterfaceLink</ElementType><EndDevice>switch1</EndDevice><EndPort>Ethernet124</EndPort><StartDevice>ROUTER16T0</StartDevice><StartPort>Ethernet1</StartPort></DeviceLinkBase>
+    </DeviceInterfaceLinks>
+  </PngDec>
+  <Hostname>switch1</Hostname>
+  <HwSku>ACS-S6000</HwSku>
+</DeviceMiniGraph>
+ diff --git a/ansible/roles/sonic-common/files/cron.logrotate b/ansible/roles/sonic-common/files/cron.logrotate new file mode 100644 index 0000000000..055b4d5fbc --- /dev/null +++ b/ansible/roles/sonic-common/files/cron.logrotate @@ -0,0 +1,5 @@ +#!/bin/sh + +test -x /usr/sbin/logrotate || exit 0 +/usr/sbin/logrotate /etc/logrotate.conf + diff --git a/ansible/roles/sonic-common/files/environment b/ansible/roles/sonic-common/files/environment new file mode 100644 index 0000000000..6c85276371 --- /dev/null +++ b/ansible/roles/sonic-common/files/environment @@ -0,0 +1 @@ +VTYSH_PAGER=more diff --git a/ansible/roles/sonic-common/files/motd b/ansible/roles/sonic-common/files/motd new file mode 100644 index 0000000000..47e32505aa --- /dev/null +++ b/ansible/roles/sonic-common/files/motd @@ -0,0 +1,12 @@ +You are on + ____ ___ _ _ _ ____ + / ___| / _ \| \ | (_)/ ___| + \___ \| | | | \| | | | + ___) | |_| | |\ | | |___ + |____/ \___/|_| \_|_|\____| + +-- Software for Open Networking In the Cloud -- + +Unauthorized access and/or use are prohibited. +All access and/or use are subject to monitoring. + diff --git a/ansible/roles/sonic-common/files/rsyslog.d/00-acs.conf b/ansible/roles/sonic-common/files/rsyslog.d/00-acs.conf new file mode 100644 index 0000000000..ec8ce919f4 --- /dev/null +++ b/ansible/roles/sonic-common/files/rsyslog.d/00-acs.conf @@ -0,0 +1,46 @@ +## Quagga rules + +if $programname == "zebra" then { + /var/log/quagga/zebra.log + stop +} + +if $programname == "bgpd" then { + /var/log/quagga/bgpd.log + stop +} + +if $programname == "quagga" then { + /var/log/quagga/zebra.log + stop +} + +if $programname == "watchquagga" then { + /var/log/quagga/zebra.log + stop +} + +## Platform modules rules +if $programname == "platform-modules" then { + /var/log/syslog + stop +} + +## Sensord rules +if $programname == "sensord" then { + /var/log/syslog + stop +} + +## Sswsyncd rules +if $programname == "sswsyncd" then { + /var/log/syslog + stop +} + +## Ansible rules +if $programname startswith "ansible" then { + /var/log/messages + stop +} + diff --git a/ansible/roles/sonic-common/files/rsyslog.d/99-default.conf b/ansible/roles/sonic-common/files/rsyslog.d/99-default.conf new file mode 100644 index 0000000000..a26ba7baf8 --- /dev/null +++ b/ansible/roles/sonic-common/files/rsyslog.d/99-default.conf @@ -0,0 +1,66 @@ +# +# First some standard log files. Log by facility. +# +auth,authpriv.* /var/log/auth.log +*.*;auth,authpriv.none -/var/log/syslog +cron.* /var/log/cron.log +daemon.* -/var/log/daemon.log +kern.* -/var/log/kern.log +kern.* -/var/persist/log/kern.log +lpr.* -/var/log/lpr.log +mail.* -/var/log/mail.log +user.* -/var/log/user.log + +# +# Logging for the mail system. Split it up so that +# it is easy to write scripts to parse these files. +# +mail.info -/var/log/mail.info +mail.warn -/var/log/mail.warn +mail.err /var/log/mail.err + +# +# Logging for INN news system. +# +news.crit /var/log/news/news.crit +news.err /var/log/news/news.err +news.notice -/var/log/news/news.notice + +# +# Some "catch-all" log files. +# +*.=debug;\ + auth,authpriv.none;\ + news.none;mail.none -/var/log/debug +*.=info;*.=notice;*.=warn;\ + auth,authpriv.none;\ + cron,daemon.none;\ + mail,news.none -/var/log/messages +*.=crit;*.=alert;*.=emerg -/var/persist/log/alarms +# +# Emergencies are sent to everybody logged in. +# +*.emerg :omusrmsg:* + +# +# I like to have messages displayed on the console, but only on a virtual +# console I usually leave idle. 
+# +#daemon,mail.*;\ +# news.=crit;news.=err;news.=notice;\ +# *.=debug;*.=info;\ +# *.=notice;*.=warn /dev/tty8 + +# The named pipe /dev/xconsole is for the `xconsole' utility. To use it, +# you must invoke `xconsole' with the `-file' option: +# +# $ xconsole -file /dev/xconsole [...] +# +# NOTE: adjust the list below, or you'll go crazy if you have a reasonably +# busy site.. +# +daemon.*;mail.*;\ + news.err;\ + *.=debug;*.=info;\ + *.=notice;*.=warn |/dev/xconsole + diff --git a/ansible/roles/sonic-common/files/rsyslog.logrotate b/ansible/roles/sonic-common/files/rsyslog.logrotate new file mode 100644 index 0000000000..a7c7114117 --- /dev/null +++ b/ansible/roles/sonic-common/files/rsyslog.logrotate @@ -0,0 +1,39 @@ +# Ansible managed +/var/log/syslog +{ + rotate 7 + daily + size 100M + missingok + notifempty + compress + delaycompress + postrotate + invoke-rc.d rsyslog rotate > /dev/null + endscript +} +/var/log/mail.info +/var/log/mail.warn +/var/log/mail.err +/var/log/mail.log +/var/log/daemon.log +/var/log/kern.log +/var/log/auth.log +/var/log/user.log +/var/log/lpr.log +/var/log/cron.log +/var/log/debug +/var/log/messages +{ + rotate 4 + weekly + size 100M + missingok + notifempty + compress + delaycompress + sharedscripts + postrotate + invoke-rc.d rsyslog rotate > /dev/null + endscript +} diff --git a/ansible/roles/sonic-common/files/ssw/ACS-MSN2700/etc/sensors.conf b/ansible/roles/sonic-common/files/ssw/ACS-MSN2700/etc/sensors.conf new file mode 100644 index 0000000000..a3e3039186 --- /dev/null +++ b/ansible/roles/sonic-common/files/ssw/ACS-MSN2700/etc/sensors.conf @@ -0,0 +1,21 @@ +bus "i2c-7" "i2c-1-mux (chan_id 5)" +chip "lm75-i2c-7-4a" + label temp1 "Ambient Port Temp" + +bus "i2c-5" "i2c-1-mux (chan_id 3)" +chip "ucd9200-i2c-5-27" + label in1 "UCD1 vin" + label in2 "ASIC 3.3 vout" + label in3 "ASIC 1.2 vout" + label temp1 "UCD1 Temp" + label temp2 "UCD1 Temp2" + +chip "ucd9200-i2c-5-41" + label in1 "UCD2 vin" + label in2 "ASIC Vcore vout" + label temp1 "UCD2 Temp1" + label temp2 "UCD2 Temp2" + +bus "i2c-17" "i2c-1-mux (chan_id 7)" +chip "lm75-i2c-17-49" + label temp1 "Ambient Board Temp" diff --git a/ansible/roles/sonic-common/files/ssw/ACS-S6000/etc/sensors.conf b/ansible/roles/sonic-common/files/ssw/ACS-S6000/etc/sensors.conf new file mode 100644 index 0000000000..2208bb6340 --- /dev/null +++ b/ansible/roles/sonic-common/files/ssw/ACS-S6000/etc/sensors.conf @@ -0,0 +1,57 @@ +# libsensors configuration file +# -------------------------------------------------- +# + +# tmp75-i2c-11-4c has sensors close to Networking ASIC. +# tmp75-i2c-11-4d has sensors close to NIC. +# tmp75-i2c-11-4e is an ambient temperature sensor. 
+ +chip "tmp75-*" + set temp1_max 50 + set temp1_max_hyst 25 + +# emc1403-i2c-10-4d has following temperature sensors: +# temp1: CPU0 external Temp Sensor +# temp2: CPU1 external Temp Sensor +# temp3: CPU Internal DTS (Internal die, max of all die readings) + +chip "emc1403-*" + set temp1_crit 85 + set temp1_max 50 + set temp2_crit 85 + set temp2_max 50 + set temp3_crit 85 + set temp3_max 50 + +chip "max6620-i2c-*-2a" + ignore fan3 + ignore fan4 + +chip "w83627dhg-*" + label in0 "VCore 1" + label in1 "VCore 2" + set in0_min 0 + set in0_max 1.74 + set in1_min 0 + set in1_max 1.74 + ignore fan1 + ignore fan2 + ignore fan3 + ignore fan4 + ignore fan5 + ignore in4 + ignore in5 + ignore in6 + ignore temp1 + ignore temp2 + ignore temp3 + ignore cpu0_vid + ignore intrusion0 + +chip "jc42-*" + set temp1_max 50 + set temp1_crit 85 + +chip "dni_dps460-*" + set temp1_max 50 + set temp2_max 50 diff --git a/ansible/roles/sonic-common/handlers/main.yml b/ansible/roles/sonic-common/handlers/main.yml new file mode 100644 index 0000000000..92af17daca --- /dev/null +++ b/ansible/roles/sonic-common/handlers/main.yml @@ -0,0 +1,62 @@ +--- +# Handlers for acs + +- name: Restart SNMP Daemon + become: true + service: name=snmp + state=restarted + +- name: Restart NTP Daemon + become: true + service: name=ntp + state=restarted + +- name: Restart Syslog Daemon + become: true + service: name=rsyslog + state=restarted + +- name: Restart LLDP Daemon + become: true + service: name=lldp + state=restarted + +- name: Restart Quagga Daemon + become: true + service: name=bgp + state=restarted + +- name: Restart Platform Monitor Container Service + become: true + service: name=platform-monitor + state=restarted + +- name: Update Grub + become: true + shell: /usr/sbin/update-grub + +- name: Restart interface + become: true + shell: ifdown {{ restart_interface }} && ifup {{ restart_interface }}; rc=$?; sleep 3; exit $rc + +- name: Restart smartd + become: true + service: + name=smartd + state=restarted + +- name: Restart vasd + become: true + service: + name=vas + state=restarted + +- name: Restart anything-sync-daemon + become: true + service: + name=asd + state=restarted + +- name: Clean up apt + become: true + shell: apt-get autoremove -y; apt-get autoclean -y; apt-get clean -y diff --git a/ansible/roles/sonic-common/tasks/aptrepo.yml b/ansible/roles/sonic-common/tasks/aptrepo.yml new file mode 100644 index 0000000000..3c13449045 --- /dev/null +++ b/ansible/roles/sonic-common/tasks/aptrepo.yml @@ -0,0 +1,20 @@ +# Apt Sources Setup +- name: Purge apt sources.list File. 
+ become: true + copy: src=apt/sources.list + dest=/etc/apt/sources.list + +- name: Add apt-key for internal Apt Mirror + become: true + apt_key: data="{{ lookup('file', 'apt/sonic.gpg.key') }}" state=present + +# Adding deb mirror so switch can download packages +- name: Add internal Apt Package Mirror + apt_repository: repo='deb http://{{ apt_repo_ip }}/debian/ jessie main contrib non-free' + update_cache=yes + state=present + +# Force apt cache update if the repos are already present but cache is stale +- name: Update Cache if stale + become: true + apt: update_cache=yes diff --git a/ansible/roles/sonic-common/tasks/database.yml b/ansible/roles/sonic-common/tasks/database.yml new file mode 100644 index 0000000000..33aa10e237 --- /dev/null +++ b/ansible/roles/sonic-common/tasks/database.yml @@ -0,0 +1,20 @@ +- name: clean up old container + include: sonicdocker.yml + vars: + docker_container: docker-database + docker_image: "{{ image_id_database }}" + docker_state: absent + + +- name: Start the database docker container (redis-server) + include: sonicdocker.yml + vars: + docker_container: database + docker_image: "{{ image_id_database }}" + docker_state: reloaded + docker_log_driver: syslog + # Prevent color control char to stdout + docker_tty: no +# docker_net: bridge +# docker_host_address: "{{ remote_facts.ansible_facts.ansible_docker0.ipv4.address }}" + diff --git a/ansible/roles/sonic-common/tasks/logrotate.yml b/ansible/roles/sonic-common/tasks/logrotate.yml new file mode 100644 index 0000000000..a2f8d8e578 --- /dev/null +++ b/ansible/roles/sonic-common/tasks/logrotate.yml @@ -0,0 +1,19 @@ +# Setup Logrotate +- name: Install Logrotate package + tags: logrotate + become: true + apt: pkg=logrotate + state=latest + +- name: Setup rsyslog logrotate.d script + tags: logrotate + become: true + copy: src=rsyslog.logrotate + dest=/etc/logrotate.d/rsyslog + +- name: Install cron script to hourly schedule + tags: logrotate + become: true + copy: src=cron.logrotate + dest=/etc/cron.hourly/logrotate + diff --git a/ansible/roles/sonic-common/tasks/main.yml b/ansible/roles/sonic-common/tasks/main.yml new file mode 100644 index 0000000000..76d50d41a1 --- /dev/null +++ b/ansible/roles/sonic-common/tasks/main.yml @@ -0,0 +1,187 @@ +# Gather minigraph facts +- name: Gathering minigraph facts about the device + minigraph_facts: host={{ inventory_hostname }} + connection: local + tags: always + become: no + when: use_minigraph|default(false)|bool + +- name: Set sonic_asic_type fact + set_fact: + sonic_asic_type: broadcom + when: sonic_hwsku in broadcom_hwskus + tags: always + +- name: Set sonic_asic_type fact + set_fact: + sonic_asic_type: mellanox + when: sonic_hwsku in mellanox_hwskus + tags: always + +# Setup apt repo +- include: aptrepo.yml + tags: repo + +# Syslog +- name: Install Syslog daemon + become: true + apt: pkg=rsyslog + state=latest + tags: syslog + +- name: Setup Syslog Daemon Config File + become: true + template: src=rsyslog.conf.j2 + dest=/etc/rsyslog.conf + notify: + - Restart Syslog Daemon + tags: syslog + +- name: Setup Syslog Config Directory + become: true + copy: src=rsyslog.d/ + dest=/etc/rsyslog.d/ + notify: + - Restart Syslog Daemon + tags: syslog + +- name: Ensure Syslog Daemon started and enabled + become: true + service: name=rsyslog + state=started + enabled=yes + tags: syslog + +# NTP +# This needs to be early in the playbook otherwise clock changes can cause problems +- name: Install NTP daemon and ntpstat + become: true + apt: pkg={{ item }} + state=latest + 
  with_items:
+    - ntp
+    - ntpstat
+  tags: ntp
+
+- name: Setup NTP Daemon Config File
+  become: true
+  template: src=ntp.conf.j2
+            dest=/etc/ntp.conf
+  notify:
+    - Restart NTP Daemon
+  tags: ntp
+
+# Update initramfs
+- name: Update initramfs to prevent fsck error 2 at bootup
+  become: yes
+  shell: update-initramfs -u
+  when: bootstrap is defined
+
+# Hosts File
+- name: Setup Hosts File
+  become: true
+  template: src=hosts.j2
+            dest=/etc/hosts
+  tags: system
+
+# Assign hostname
+- name: Assign hostname
+  hostname: name={{ inventory_hostname }}
+  tags: system
+
+# Setup environment file
+- name: Copy /etc/environment
+  become: true
+  copy: src=environment
+        dest=/etc
+        owner=root
+        group=root
+        mode=0644
+  tags: system
+
+# Setup motd file
+- name: Copy Message of the Day
+  become: true
+  copy: directory_mode=0755
+        src=etc/motd
+        dest=/etc/motd
+        owner=root
+        group=root
+        mode=0644
+  tags: system
+
+- name: Check docker base version is correct
+  become: true
+  apt: pkg=docker-engine={{ version_docker_engine }}
+       state=present
+
+# Ensure docker service started and enabled
+# Note: keep it before all dockers
+- name: Ensure docker service started and enabled
+  become: true
+  service: name=docker
+           state=started
+           enabled=yes
+
+## Redis database
+- include: database.yml
+  tags:
+    - swss
+    - database
+    - unsafe
+
+# DHCP exit hooks hostname sync.
+- name: DHCP Client Exit Script Sync.
+  become: true
+  template: src=dhclient-exit-hook-hostname
+            dest=/etc/dhcp/dhclient-exit-hooks.d/hostname
+
+# SSW
+- name: Copy all SSW files.
+  become: true
+  copy: directory_mode=0755
+        src=ssw/{{ sonic_hwsku }}
+        dest=/etc/ssw
+        owner=root
+        group=root
+        mode=0644
+  tags: ssw
+
+# Setup Platform
+- include: platform.yml
+  tags: platform,unsafe
+
+# Install Persistent Iptables Package
+- name: Install iptables-persistent
+  become: true
+  apt: pkg=iptables-persistent
+       state=latest
+  tags: unsafe
+
+# setup sudoers
+- include: sudoers.yml
+
+# Install Logrotate
+- include: logrotate.yml
+
+- command: /bin/true
+  notify: Clean up apt
+
+### Final Actions below this line
+
+- meta: flush_handlers
+
+- name: Reboot if required
+  shell: sleep 2 && shutdown -r now "Ansible updates triggered reboot."
+ async: 1 + poll: 0 + become: true + ignore_errors: true + when: reboot_required is defined + tags: unsafe + +- name: After rebooting, wait for switch to come back + local_action: wait_for host={{ inventory_hostname }} port=22 state=started delay=30 timeout=300 + become: false + when: reboot_required is defined + tags: unsafe diff --git a/ansible/roles/sonic-common/tasks/platform-dell.yml b/ansible/roles/sonic-common/tasks/platform-dell.yml new file mode 100644 index 0000000000..9850c42921 --- /dev/null +++ b/ansible/roles/sonic-common/tasks/platform-dell.yml @@ -0,0 +1,8 @@ +# Ensure Platform Modules for S6000 are installed +- name: Confirm Platform Modules are installed + become: true + apt: pkg={{ platform_modules_s6000.name }}={{ platform_modules_s6000.version }} + state=present + default_release=trusty + force=yes + register: platform_module_install diff --git a/ansible/roles/sonic-common/tasks/platform-mlnx.yml b/ansible/roles/sonic-common/tasks/platform-mlnx.yml new file mode 100644 index 0000000000..bf0ab98042 --- /dev/null +++ b/ansible/roles/sonic-common/tasks/platform-mlnx.yml @@ -0,0 +1,28 @@ +- name: Ensure pciutils is installed + apt: pkg=pciutils + state=present + force=yes + +- name: Ensure MLNX Firmware tools are installed + apt: pkg=mft + state=present + default_release=trusty + force=yes + +- name: Ensure MLNX Firmware kernel modules are installed + apt: pkg=kernel-mft-modules + state=present + default_release=trusty + force=yes + +- name: Ensure Firmware service started and Enabled + become: true + service: name=mst + state=started + enabled=yes + +- name: Ensure MLNX Platform modules are installed + apt: pkg=hw-management + state=present + default_release=trusty + force=yes diff --git a/ansible/roles/sonic-common/tasks/platform-monitor.yml b/ansible/roles/sonic-common/tasks/platform-monitor.yml new file mode 100644 index 0000000000..99a40c083d --- /dev/null +++ b/ansible/roles/sonic-common/tasks/platform-monitor.yml @@ -0,0 +1,42 @@ +# Ensure S.M.A.R.T. monitoring tools are not installed on host +- name: Ensure S.M.A.R.T. monitoring tools are not installed on host + become: true + apt: pkg=smartmontools + state=absent + +- name: Start the platform monitor docker container + include: sonicdocker.yml + vars: + docker_container: platform-monitor + docker_image: "{{ image_id_platform_monitor }}" + docker_privileged: yes + +- block: + # Setup sensord Conf Files + - name: Copy sensord Conf File + copy: src=ssw/{{ sonic_hwsku }}/etc/sensors.conf + dest=/etc/sensors.d/sensors.conf + owner=root + group=root + mode=0644 + when: sonic_hwsku in sensor_hwskus + notify: + - Restart Platform Monitor Container Service + + # Install S.M.A.R.T. 
monitoring tools
+  - include: smartd.yml
+    tags: smart
+
+  # Force handler flush to trigger daemon restarts
+  - meta: flush_handlers
+  vars:
+    ansible_shell_type: docker
+    ansible_python_interpreter: docker exec -i platform-monitor python
+
+- name: Copy sensors helper script
+  become: true
+  copy: src=bin/sensors
+        dest=/usr/bin/sensors
+        owner=root
+        group=root
+        mode=0755
diff --git a/ansible/roles/sonic-common/tasks/platform.yml b/ansible/roles/sonic-common/tasks/platform.yml
new file mode 100644
index 0000000000..e28df102dc
--- /dev/null
+++ b/ansible/roles/sonic-common/tasks/platform.yml
@@ -0,0 +1,32 @@
+# Install Platform Modules for Dell
+- name: Confirm Platform Modules for Dell are installed
+  become: true
+  include: platform-dell.yml
+  when: sonic_hwsku == "Force10-S6000" or sonic_hwsku == "Force10-S6000-Q24S32" or sonic_hwsku == "ACS-S6000" or sonic_hwsku == "ACS-S6000-Q24S32"
+
+# Install Platform Modules for Mellanox
+- name: Confirm Platform Modules for Mellanox are installed
+  become: true
+  include: platform-mlnx.yml
+  when: sonic_hwsku == "ACS-MSN2700"
+
+# Start container with platform monitor services
+- name: Start container with platform monitor services
+  include: platform-monitor.yml
+
+# Setup General Conf Files
+- name: Copy /etc Files
+  become: true
+  copy: directory_mode=0755
+        src=etc
+        dest=/etc
+        owner=root
+        group=root
+        mode=0644
+
+# Install acs utilities
+- name: Install acs utilities
+  become: true
+  apt: pkg=acs-utilities
+       state=latest
+
diff --git a/ansible/roles/sonic-common/tasks/sonicdocker.yml b/ansible/roles/sonic-common/tasks/sonicdocker.yml
new file mode 100644
index 0000000000..d7299318b7
--- /dev/null
+++ b/ansible/roles/sonic-common/tasks/sonicdocker.yml
@@ -0,0 +1,179 @@
+##
+## Encapsulate the docker module with a private docker registry and manage the
+## container service by systemd on the host, so it has full-featured dependency
+## control and restart policies
+##
+## The encapsulated module - sonicdocker
+## docker_state: emulate the behavior of the docker module
+## ref: http://docs.ansible.com/ansible/docker_module.html
+##
+## +-------------+------+--------------+----------+---------------------+----------------+-------------+
+## | sonicdocker | pull | stop service | docker   | post service        | enable service | clean image |
+## +-------------+------+--------------+----------+---------------------+----------------+-------------+
+## | present     |      |              | present  |                     | y              |             |
+## | started     |      |              | present  | started             | y              |             |
+## | reloaded    | y    | pulled?      | reloaded | restarted if pulled | y              | if pulled   |
+## | restarted   |      |              | present  | restarted           | y              |             |
+## | stopped     |      | y            | stopped  |                     | n              |             |
+## | killed      |      | y            | killed   |                     | n              |             |
+## | absent      |      | y            | absent   |                     | n              | y           |
+## +-------------+------+--------------+----------+---------------------+----------------+-------------+
+##
+
+## Set default values for the module variables, emulating local variable definition
+## Note: must be consistent with tail part
+- name: "{{docker_container}} - Set docker variable - docker_net"
+  set_fact:
+    docker_net: host
+  when: docker_net is undefined
+- name: "{{docker_container}} - Set docker variable - docker_state"
+  set_fact:
+    docker_state: reloaded
+  when: docker_state is undefined
+- name: "{{docker_container}} - Set docker variable - docker_volumes"
+  set_fact:
+    docker_volumes: []
+  when: docker_volumes is undefined
+- name: "{{docker_container}} - Set docker variable - docker_privileged"
+  set_fact:
+    docker_privileged: no
+  when: docker_privileged is undefined
+- name: "{{docker_container}} - Set docker variable - docker_log_driver"
+  set_fact:
+    docker_log_driver: json-file
+  when: docker_log_driver is undefined
+- name: "{{docker_container}} - Set docker variable - docker_env"
+  set_fact:
+    docker_env: {}
+  when: docker_env is undefined
+- name: "{{docker_container}} - Set docker variable - docker_tty"
+  set_fact:
+    docker_tty: yes
+  when: docker_tty is undefined
+- name: "{{docker_container}} - Set docker variable - docker_log_opt"
+  set_fact:
+    docker_log_opt: {}
+  when: docker_log_driver != "syslog"
+- name: "{{docker_container}} - Set docker variable - docker_log_opt"
+  set_fact:
+    docker_log_opt:
+      ## TRICK! TRICK! TRICK!
+      ## In ansible 2.0.0.2, referencing a set_fact variable here introduces
+      ## recursive templating, so double-escape with {{'...'}} and {%raw%}...{%endraw%}
+      tag: "{{'{%raw%}{{.ID}}({{.Name}}{%endraw%}'}})"
+  when: docker_log_driver == "syslog"
+
+## Local variables
+- name: "{{docker_container}} - Set docker variable - sonicdocker_container_state"
+  set_fact:
+    sonicdocker_container_state: "{{docker_state}}"
+- name: "{{docker_container}} - Set docker variable - sonicdocker_container_state"
+  set_fact:
+    sonicdocker_container_state: present
+  when: docker_state in ['present', 'started', 'restarted']
+
+## Copy systemd config files for docker container
+- name: "{{docker_container}} - Copy systemd config files for docker container"
+  become: true
+  template:
+    src="etc/systemd/system/{{docker_container}}.j2"
+    dest="/etc/systemd/system/{{docker_container}}.service"
+    owner=root
+    group=root
+    mode=0644
+  register: configfile_result
+  when: "docker_state not in ['absent']"
+
+- name: "{{docker_container}} - Reload systemd"
+  command: systemctl daemon-reload
+  when: configfile_result.changed
+
+- block:
+  ## Clean up images before pulling
+  - name: "{{docker_container}} - Clean up images before pulling"
+    include: ../../sonic-common/tasks/sonicdocker_clean.yml
+
+  ## Pull docker image from registry
+  - name: "{{docker_container}} - Pull docker image from registry"
+    shell: docker login -u {{docker_registry_username}} -p {{docker_registry_password}} -e "@" {{docker_registry_host}}; docker pull {{docker_registry_host}}/{{docker_image}}
+    register: pull_result
+    changed_when: "'Status: Downloaded newer image' in pull_result.stdout"
+  when: docker_state == 'reloaded'
+
+## Stop container service after pulled
+- name: "{{docker_container}} - Stop container service after pulled"
+  become: true
+  service: name="{{docker_container}}"
+           
state=stopped + when: "(docker_state == 'reloaded' and 'Status: Downloaded newer image' in pull_result.stdout) \ + or docker_state in ['stopped', 'killed']" + +## Clean up systemd config files for docker container +- name: "{{docker_container}} - Delete systemd config file for docker container" + become: true + file: + path="/etc/systemd/system/{{docker_container}}.service" + state=absent + when: "docker_state in ['absent']" + register: configfile_remove + +- name: "{{docker_container}} - Reload systemd" + command: systemctl daemon-reload + when: configfile_remove.changed + + +- name: "{{docker_container}} - Control docker container" + docker: + name: "{{docker_container}}" + image: "{{docker_registry_host}}/{{docker_image}}" + state: "{{sonicdocker_container_state}}" + ## Already pulled by upper task + pull: missing + detach: yes + net: "{{docker_net}}" + tty: "{{docker_tty}}" + stdin_open: yes + registry: "https://{{docker_registry_host}}" + username: "{{docker_registry_username}}" + password: "{{docker_registry_password}}" + email: "@" + volumes: "{{docker_volumes}}" + privileged: "{{docker_privileged}}" + env: "{{docker_env}}" + log_driver: "{{docker_log_driver}}" + log_opt: "{{docker_log_opt}}" + +## Container service operation +- name: "{{docker_container}} - Post docker - start container service" + become: true + service: name="{{docker_container}}" + state=started + when: docker_state == 'started' +- name: "{{docker_container}} - Post docker - restart container service" + become: true + service: name="{{docker_container}}" + state=restarted + when: "docker_state == 'restarted' or \ + docker_state == 'reloaded' and 'Status: Downloaded newer image' in pull_result.stdout" +- name: "{{docker_container}} - Post docker - enable container service" + become: true + service: name="{{docker_container}}" + enabled={{docker_state in ['present', 'started', 'reloaded', 'restarted']}} + +## Clean up images after pulled and running +- name: "{{docker_container}} - Clean up images after pulled and running" + include: ../../sonic-common/tasks/sonicdocker_clean.yml + when: "(docker_state == 'reloaded' and 'Status: Downloaded newer image' in pull_result.stdout) or \ + docker_state == 'absent'" + +## Reset the module variables to default values to prevent global side-effect +## Note: must be consistent with header part +- name: "{{docker_container}} - Clean up sonicdocker variables" + set_fact: + docker_image: '' + docker_net: host + docker_state: reloaded + docker_volumes: [] + docker_privileged: no + docker_log_driver: json-file + docker_env: {} + docker_tty: yes diff --git a/ansible/roles/sonic-common/tasks/sonicdocker_clean.yml b/ansible/roles/sonic-common/tasks/sonicdocker_clean.yml new file mode 100644 index 0000000000..05632a38b2 --- /dev/null +++ b/ansible/roles/sonic-common/tasks/sonicdocker_clean.yml @@ -0,0 +1,5 @@ +- name: Clean up unused docker images + script: "files/docker_clean.sh" + register: rmi_result + changed_when: rmi_result.stdout != "" + failed_when: rmi_result.stderr != "" or rmi_result.rc != 0 diff --git a/ansible/roles/sonic-common/tasks/sudoers.yml b/ansible/roles/sonic-common/tasks/sudoers.yml new file mode 100644 index 0000000000..57f12d3ba4 --- /dev/null +++ b/ansible/roles/sonic-common/tasks/sudoers.yml @@ -0,0 +1,12 @@ +# Setup Sudoers file +- name: Copy sudoers file to /etc/sudoers.tmp (and kept here for later change checks) + action: template src=../templates/sudoers.j2 dest=/etc/sudoers.tmp mode=440 owner=root group=root + register: sudoers_copy_result + become: yes 
+ tags: sudoers + +- name: Install /etc/sudoers file (and fail if invalid) + action: shell visudo -q -c -f /etc/sudoers.tmp && cp /etc/sudoers.tmp /etc/sudoers + when: sudoers_copy_result.changed + become: yes + tags: sudoers diff --git a/ansible/roles/sonic-common/templates/dhclient-exit-hook-hostname b/ansible/roles/sonic-common/templates/dhclient-exit-hook-hostname new file mode 100644 index 0000000000..b8e924e4ce --- /dev/null +++ b/ansible/roles/sonic-common/templates/dhclient-exit-hook-hostname @@ -0,0 +1,6 @@ +case $reason in + BOUND|RENEW|REBIND|REBOOT) + current_hostname=`hostname` + echo $current_hostname > /etc/hostname + ;; +esac diff --git a/ansible/roles/sonic-common/templates/hosts.j2 b/ansible/roles/sonic-common/templates/hosts.j2 new file mode 100644 index 0000000000..174f42fd0f --- /dev/null +++ b/ansible/roles/sonic-common/templates/hosts.j2 @@ -0,0 +1,8 @@ +127.0.0.1 localhost +127.0.0.1 acs +127.0.0.1 {{ inventory_hostname }} + +# The following lines are desirable for IPv6 capable hosts +::1 localhost ip6-localhost ip6-loopback +ff02::1 ip6-allnodes +ff02::2 ip6-allrouters diff --git a/ansible/roles/sonic-common/templates/ntp.conf.j2 b/ansible/roles/sonic-common/templates/ntp.conf.j2 new file mode 100644 index 0000000000..3ae0c1804e --- /dev/null +++ b/ansible/roles/sonic-common/templates/ntp.conf.j2 @@ -0,0 +1,63 @@ +############################################################################### +# Managed by Ansible +# file: ansible/roles/acs/templates/ntp.conf.j2 +############################################################################### + +# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help + +driftfile /var/lib/ntp/ntp.drift + + +# Enable this if you want statistics to be logged. +#statsdir /var/log/ntpstats/ + +statistics loopstats peerstats clockstats +filegen loopstats file loopstats type day enable +filegen peerstats file peerstats type day enable +filegen clockstats file clockstats type day enable + + +# You do need to talk to an NTP server or two (or three). +#server ntp.your-provider.example + +# pool.ntp.org maps to about 1000 low-stratum NTP servers. Your server will +# pick a different set every time it starts up. Please consider joining the +# pool: +{% for ntp_server in ntp_servers %} +server {{ ntp_server }} iburst +{% endfor %} + +#only listen on localhost and eth0 ips (default is to listen on all ip addresses) +interface ignore wildcard +interface listen {{ minigraph_mgmt_interface.addr }} +interface listen 127.0.0.1 + +# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for +# details. The web page +# might also be helpful. +# +# Note that "restrict" applies to both servers and clients, so a configuration +# that might be intended to block requests from certain clients could also end +# up blocking replies from your own upstream servers. + +# By default, exchange time with everybody, but don't allow configuration. +restrict -4 default kod notrap nomodify nopeer noquery +restrict -6 default kod notrap nomodify nopeer noquery + +# Local users may interrogate the ntp server more closely. +restrict 127.0.0.1 +restrict ::1 + +# Clients from this (example!) subnet have unlimited access, but only if +# cryptographically authenticated. +#restrict 192.168.123.0 mask 255.255.255.0 notrust + + +# If you want to provide time to your local subnet, change the next line. +# (Again, the address is an example only.) 
diff --git a/ansible/roles/sonic-common/templates/rsyslog.conf.j2 b/ansible/roles/sonic-common/templates/rsyslog.conf.j2
new file mode 100644
index 0000000000..e00565f056
--- /dev/null
+++ b/ansible/roles/sonic-common/templates/rsyslog.conf.j2
@@ -0,0 +1,71 @@
+###############################################################################
+# Managed by Ansible
+# file: ansible/roles/acs/templates/rsyslog.conf.j2
+###############################################################################
+#
+# /etc/rsyslog.conf   Configuration file for rsyslog.
+#
+#                     For more information see
+#                     /usr/share/doc/rsyslog-doc/html/rsyslog_conf.html
+
+
+#################
+#### MODULES ####
+#################
+
+$ModLoad imuxsock # provides support for local system logging
+$ModLoad imklog   # provides kernel logging support
+#$ModLoad immark  # provides --MARK-- message capability
+
+# provides UDP syslog reception
+$ModLoad imudp
+$UDPServerAddress 127.0.0.1 # bind to localhost before the UDP server starts
+$UDPServerRun 514
+
+# provides TCP syslog reception
+#$ModLoad imtcp
+#$InputTCPServerRun 514
+
+
+###########################
+#### GLOBAL DIRECTIVES ####
+###########################
+# Forward all messages to each remote syslog server
+{% for server in syslog_servers %}
+*.* @{{ server }}:514
+{% endfor %}
+
+#
+# Use traditional timestamp format.
+# To enable high precision timestamps, comment out the following line.
+#
+#$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
+
+# Define a custom template
+$template ACSFileFormat,"%TIMESTAMP% %HOSTNAME% %syslogseverity-text:::uppercase% %syslogtag%%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n"
+$ActionFileDefaultTemplate ACSFileFormat
+
+#
+# Set the default permissions for all log files.
+#
+$FileOwner root
+$FileGroup adm
+$FileCreateMode 0640
+$DirCreateMode 0755
+$Umask 0022
+
+#
+# Where to place spool and state files
+#
+$WorkDirectory /var/spool/rsyslog
+
+#
+# Include all config files in /etc/rsyslog.d/
+#
+$IncludeConfig /etc/rsyslog.d/*.conf
+
+
+###############
+#### RULES ####
+###############
+
diff --git a/ansible/roles/sonic-common/templates/sudoers.j2 b/ansible/roles/sonic-common/templates/sudoers.j2
new file mode 100644
index 0000000000..57248ee8c1
--- /dev/null
+++ b/ansible/roles/sonic-common/templates/sudoers.j2
@@ -0,0 +1,33 @@
+#
+# This file MUST be edited with the 'visudo' command as root.
+#
+# Please consider adding local content in /etc/sudoers.d/ instead of
+# directly modifying this file.
+#
+# See the man page for details on how to write a sudoers file.
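+#
+# Illustration only (not part of the original file): the READ_ONLY_CMDS alias
+# defined below could be granted to a read-only netgroup with a rule such as
+# the following, where 'ro_users' is a hypothetical netgroup name:
+#   +ro_users ALL=(root) NOPASSWD: READ_ONLY_CMDS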
+#
+Defaults env_reset
+#Defaults mail_badpass
+Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+Defaults env_keep += "VTYSH_PAGER"
+
+# Host alias specification
+
+# User alias specification
+
+# Cmnd alias specification
+# Note: bcmcmd is dangerous for users in read-only netgroups because it can operate on the ASIC
+Cmnd_Alias READ_ONLY_CMDS = /usr/bin/portstat,\
+                            /usr/bin/vtysh -c "show.*"
+
+# User privilege specification
+root ALL=(ALL:ALL) ALL
+
+# Allow members of group sudo to execute any command
+%sudo ALL=(ALL:ALL) NOPASSWD: ALL
+
+
+# See sudoers(5) for more information on "#include" directives:
+
+#includedir /etc/sudoers.d
+
diff --git a/ansible/roles/sonicv2/files/ssw/Force10-S6000/port_config.ini b/ansible/roles/sonicv2/files/ssw/Force10-S6000/port_config.ini
deleted file mode 100644
index 7161416e4d..0000000000
--- a/ansible/roles/sonicv2/files/ssw/Force10-S6000/port_config.ini
+++ /dev/null
@@ -1,33 +0,0 @@
-# alias lanes
-Ethernet0 29,30,31,32
-Ethernet4 25,26,27,28
-Ethernet8 37,38,39,40
-Ethernet12 33,34,35,36
-Ethernet16 41,42,43,44
-Ethernet20 45,46,47,48
-Ethernet24 5,6,7,8
-Ethernet28 1,2,3,4
-Ethernet32 9,10,11,12
-Ethernet36 13,14,15,16
-Ethernet40 21,22,23,24
-Ethernet44 17,18,19,20
-Ethernet48 49,50,51,52
-Ethernet52 53,54,55,56
-Ethernet56 61,62,63,64
-Ethernet60 57,58,59,60
-Ethernet64 65,66,67,68
-Ethernet68 69,70,71,72
-Ethernet72 77,78,79,80
-Ethernet76 73,74,75,76
-Ethernet80 105,106,107,108
-Ethernet84 109,110,111,112
-Ethernet88 117,118,119,120
-Ethernet92 113,114,115,116
-Ethernet96 121,122,123,124
-Ethernet100 125,126,127,128
-Ethernet104 85,86,87,88
-Ethernet108 81,82,83,84
-Ethernet112 89,90,91,92
-Ethernet116 93,94,95,96
-Ethernet120 97,98,99,100
-Ethernet124 101,102,103,104
diff --git a/ansible/roles/sonicv2/files/ssw/knet.soc b/ansible/roles/sonicv2/files/ssw/knet.soc
deleted file mode 100644
index 60bbfbea61..0000000000
--- a/ansible/roles/sonicv2/files/ssw/knet.soc
+++ /dev/null
@@ -1,65 +0,0 @@
-knet netif create port=xe0 rcpu=no ifname=et0_0
-knet filter create desttype=netif destid=1 ingport=xe0 desc="et0_0"
-knet netif create port=xe1 rcpu=no ifname=et0_4
-knet filter create desttype=netif destid=2 ingport=xe1 desc="et0_0"
-knet netif create port=xe2 rcpu=no ifname=et0_8
-knet filter create desttype=netif destid=3 ingport=xe2 desc="et0_8"
-knet netif create port=xe3 rcpu=no ifname=et0_12
-knet filter create desttype=netif destid=4 ingport=xe3 desc="et0_12"
-knet netif create port=xe4 rcpu=no ifname=et0_16
-knet filter create desttype=netif destid=5 ingport=xe4 desc="et0_16"
-knet netif create port=xe5 rcpu=no ifname=et0_20
-knet filter create desttype=netif destid=6 ingport=xe5 desc="et0_20"
-knet netif create port=xe6 rcpu=no ifname=et0_24
-knet filter create desttype=netif destid=7 ingport=xe6 desc="et0_24"
-knet netif create port=xe7 rcpu=no ifname=et0_28
-knet filter create desttype=netif destid=8 ingport=xe7 desc="et0_28"
-knet netif create port=xe8 rcpu=no ifname=et0_32
-knet filter create desttype=netif destid=9 ingport=xe8 desc="et0_32"
-knet netif create port=xe9 rcpu=no ifname=et0_36
-knet filter create desttype=netif destid=10 ingport=xe9 desc="et0_36"
-knet netif create port=xe10 rcpu=no ifname=et0_40
-knet filter create desttype=netif destid=11 ingport=xe10 desc="et0_40"
-knet netif create port=xe11 rcpu=no ifname=et0_44
-knet filter create desttype=netif destid=12 ingport=xe11 desc="et0_44"
-knet netif create port=xe12 rcpu=no ifname=et0_48
-knet filter create desttype=netif destid=13 ingport=xe12 desc="et0_48"
-knet netif create port=xe13 rcpu=no ifname=et0_52
-knet filter create desttype=netif destid=14 ingport=xe13 desc="et0_52"
-knet netif create port=xe14 rcpu=no ifname=et0_56
-knet filter create desttype=netif destid=15 ingport=xe14 desc="et0_56"
-knet netif create port=xe15 rcpu=no ifname=et0_60
-knet filter create desttype=netif destid=16 ingport=xe15 desc="et0_60"
-knet netif create port=xe16 rcpu=no ifname=et0_64
-knet filter create desttype=netif destid=17 ingport=xe16 desc="et0_64"
-knet netif create port=xe17 rcpu=no ifname=et0_68
-knet filter create desttype=netif destid=18 ingport=xe17 desc="et0_68"
-knet netif create port=xe18 rcpu=no ifname=et0_72
-knet filter create desttype=netif destid=19 ingport=xe18 desc="et0_72"
-knet netif create port=xe19 rcpu=no ifname=et0_76
-knet filter create desttype=netif destid=20 ingport=xe19 desc="et0_76"
-knet netif create port=xe20 rcpu=no ifname=et0_80
-knet filter create desttype=netif destid=21 ingport=xe20 desc="et0_80"
-knet netif create port=xe21 rcpu=no ifname=et0_84
-knet filter create desttype=netif destid=22 ingport=xe21 desc="et0_84"
-knet netif create port=xe22 rcpu=no ifname=et0_88
-knet filter create desttype=netif destid=23 ingport=xe22 desc="et0_88"
-knet netif create port=xe23 rcpu=no ifname=et0_92
-knet filter create desttype=netif destid=24 ingport=xe23 desc="et0_92"
-knet netif create port=xe24 rcpu=no ifname=et0_96
-knet filter create desttype=netif destid=25 ingport=xe24 desc="et0_96"
-knet netif create port=xe25 rcpu=no ifname=et0_100
-knet filter create desttype=netif destid=26 ingport=xe25 desc="et0_100"
-knet netif create port=xe26 rcpu=no ifname=et0_104
-knet filter create desttype=netif destid=27 ingport=xe26 desc="et0_104"
-knet netif create port=xe27 rcpu=no ifname=et0_108
-knet filter create desttype=netif destid=28 ingport=xe27 desc="et0_108"
-knet netif create port=xe28 rcpu=no ifname=et0_112
-knet filter create desttype=netif destid=29 ingport=xe28 desc="et0_112"
-knet netif create port=xe29 rcpu=no ifname=et0_116
-knet filter create desttype=netif destid=30 ingport=xe29 desc="et0_116"
-knet netif create port=xe30 rcpu=no ifname=et0_120
-knet filter create desttype=netif destid=31 ingport=xe30 desc="et0_120"
-knet netif create port=xe31 rcpu=no ifname=et0_124
-knet filter create desttype=netif destid=32 ingport=xe31 desc="et0_124"
-pw start
diff --git a/ansible/roles/sonicv2/templates/etc/systemd/system/syncd.j2 b/ansible/roles/sonicv2/templates/etc/systemd/system/syncd.j2
index d5c1846c72..0d8042991b 100644
--- a/ansible/roles/sonicv2/templates/etc/systemd/system/syncd.j2
+++ b/ansible/roles/sonicv2/templates/etc/systemd/system/syncd.j2
@@ -5,12 +5,12 @@ After=database.service
 
 [Service]
 User=root
-{% if minigraph_hwsku == 'ACS-MSN2700' %}
+{% if sonic_hwsku == 'ACS-MSN2700' %}
 ExecStopPost=/etc/init.d/sxdkernel start
 {% endif %}
 ExecStart=/usr/bin/docker start -a syncd
 ExecStop=/usr/bin/docker stop syncd
-{% if minigraph_hwsku == 'ACS-MSN2700' %}
+{% if sonic_hwsku == 'ACS-MSN2700' %}
 ExecStopPost=/etc/init.d/sxdkernel stop
 {% endif %}
 Restart=always
diff --git a/ansible/shell_plugins/docker.py b/ansible/shell_plugins/docker.py
new file mode 100644
index 0000000000..f3e5443978
--- /dev/null
+++ b/ansible/shell_plugins/docker.py
@@ -0,0 +1,94 @@
+from __future__ import (absolute_import, division)
+__metaclass__ = type
+
+import os
+import re
+import pipes
+import ansible.constants as C
+import time
+import random
+import shlex
+import getopt
+from ansible.compat.six import text_type
+from ansible.plugins.shell.sh import ShellModule as sh
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
+
+class ShellModule(sh):
+
+    def __init__(self, *args, **kwargs):
+        super(ShellModule, self).__init__(*args, **kwargs)
+        self.dtemps = []
+
+    def join_path(self, *args):
+        ## HACK! HACK! HACK!
+        ## We observe the interactions between ShellModule and ActionModule to
+        ## find the temporary directories Ansible created on the remote machine.
+        ## We collect them here and copy them into the docker container in
+        ## build_module_command
+        if len(args) >= 2 and args[0].startswith('/home/') and args[1] == '':
+            self.dtemps.append(args[0])
+
+        return super(ShellModule, self).join_path(*args)
+
+    def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):
+        # assert(self.container_name)
+        argv = shlex.split(shebang.replace("#!", ""))
+        assert(argv[0] == 'docker')
+        assert(argv[1] == 'exec')
+        opts, args = getopt.getopt(argv[2:], 'i')
+        self.container_name = args[0]
+
+        # Inject the environment variables before python in the shebang string
+        assert(args[1].endswith('python'))
+        args[1] = 'env {0} {1}'.format(env_string, args[1])
+        argv_env = argv[0:2] + [o for opt in opts for o in opt] + args
+        shebang_env = ' '.join(argv_env)
+
+        ## Note: docker cp behavior -- when DEST_PATH exists and is a directory,
+        ## and SRC_PATH ends with /., the content of the source directory is
+        ## copied into this directory
+        ## Ref: https://docs.docker.com/engine/reference/commandline/cp/
+        pre = ''.join('docker exec {1} mkdir -p {0}; docker cp {0}/. {1}:{0}; '
+                      .format(dtemp, self.container_name) for dtemp in self.dtemps)
+
+        if rm_tmp:
+            post = ''.join('docker exec {1} rm -rf {0}; '
+                           .format(dtemp, self.container_name) for dtemp in self.dtemps)
+        else:
+            post = ''
+
+        return pre + super(ShellModule, self).build_module_command('', shebang_env, cmd, arg_path, rm_tmp) + '; ' + post
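+
+    ## Illustration only (hypothetical values, not produced verbatim by this
+    ## code): with self.dtemps == ['/home/admin/.ansible/tmp/ansible-tmp-1']
+    ## and a container named 'snmp', the command returned above expands
+    ## roughly to:
+    ##   docker exec snmp mkdir -p /home/admin/.ansible/tmp/ansible-tmp-1; \
+    ##   docker cp /home/admin/.ansible/tmp/ansible-tmp-1/. snmp:/home/admin/.ansible/tmp/ansible-tmp-1; \
+    ##   <module command>; docker exec snmp rm -rf /home/admin/.ansible/tmp/ansible-tmp-1;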
+    def checksum(self, path, python_interp):
+        """
+        Return the command to calculate the checksum for the file on the Ansible-controlled machine
+        Arguments:
+          path:
+            the file path
+          python_interp:
+            the path for the python interpreter
+        Example:
+          path:
+            /zebra.conf
+          python_interp:
+            docker exec -i debian python
+          cmd:
+            rc=flag; [ -r /zebra.conf ] || rc=2; [ -f /zebra.conf ] || rc=1; [ -d /zebra.conf ] && rc=3; python -V 2>/dev/null || rc=4; [ x"$rc" != "xflag" ] && echo "${rc} "/zebra.conf && exit 0; (python -c '...' 2>/dev/null) || (echo '0 '/zebra.conf)
+          returns:
+            docker exec -i debian sh -c "rc=flag; [ -r /zebra.conf ] || rc=2; [ -f /zebra.conf ] || rc=1; [ -d /zebra.conf ] && rc=3; python -V 2>/dev/null || rc=4; [ x\"\$rc\" != \"xflag\" ] && echo \"\${rc} \"/zebra.conf && exit 0; (python -c '...' 2>/dev/null) || (echo '0 '/zebra.conf)"
+        """
+        ## The super class implements this function with sh commands and Python
+        ## scripts. Changing python_interp to 'docker exec CONTAINER python'
+        ## would only influence the Python script part in the super class;
+        ## instead we need to wrap both parts in the docker exec
+        simple_interp = 'python'
+        assert(python_interp.startswith('docker exec '))
+        assert(python_interp.endswith(' ' + simple_interp))
+
+        docker_prefix = re.sub(simple_interp, '', python_interp)
+        cmd = super(ShellModule, self).checksum(path, simple_interp)
+        ## Escape the cmd:
+        ##   " --> \"
+        cmd_escaped = cmd.replace('"', '\\"')
+        ##   $ --> \$
+        cmd_escaped = cmd_escaped.replace('$', '\\$')
+        return '%s sh -c "%s"' % (docker_prefix, cmd_escaped)
diff --git a/ansible/vars/docker_registry.yml b/ansible/vars/docker_registry.yml
new file mode 100644
index 0000000000..644bad6efb
--- /dev/null
+++ b/ansible/vars/docker_registry.yml
@@ -0,0 +1,5 @@
+docker_registry_host: yourhost.domain.com:5000
+
+docker_registry_username: username
+docker_registry_password: password
+
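For reference, a minimal play-level sketch (not part of this diff) of how the
registry variables above might be consumed; the 'sonic' host group name is
hypothetical, and the placeholder credentials in docker_registry.yml are
expected to be overridden per deployment:

- hosts: sonic
  vars_files:
    - vars/docker_registry.yml
  roles:
    - sonic-common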