From d91506c57abc618d4da77380e5501e9edf5c9185 Mon Sep 17 00:00:00 2001 From: wintamute Date: Sun, 18 Mar 2018 17:44:12 +0100 Subject: [PATCH 1/3] added low-level api classes for images and containers added high-level model classes for images and containers --- aiodocker/__init__.py | 6 +- aiodocker/api/__init__.py | 1 + aiodocker/api/client.py | 226 +++++++++ aiodocker/api/container.py | 628 ++++++++++++++++++++++++ aiodocker/api/image.py | 331 +++++++++++++ aiodocker/api/system.py | 20 + aiodocker/client.py | 45 ++ aiodocker/errors.py | 119 +++++ aiodocker/models/__init__.py | 0 aiodocker/models/containers.py | 452 +++++++++++++++++ aiodocker/models/images.py | 341 +++++++++++++ aiodocker/models/resource.py | 92 ++++ aiodocker/types/__init__.py | 4 + aiodocker/types/base.py | 6 + aiodocker/types/containers.py | 513 +++++++++++++++++++ aiodocker/types/healthcheck.py | 86 ++++ aiodocker/types/networks.py | 33 ++ aiodocker/utils/__init__.py | 4 + aiodocker/utils/utils.py | 554 +++++++++++++++++++++ docs/_build/doctrees/environment.pickle | Bin 1291553 -> 0 bytes docs/_build/doctrees/index.doctree | Bin 14389 -> 15778 bytes docs/_build/html/_sources/index.rst.txt | 23 +- docs/_build/html/_static/basic.css | 32 +- docs/_build/html/_static/doctools.js | 56 ++- docs/_build/html/_static/jquery.js | 8 +- docs/_build/html/_static/searchtools.js | 5 +- docs/_build/html/_static/websupport.js | 4 +- docs/_build/html/genindex.html | 434 ++++++++++++++-- docs/_build/html/index.html | 75 ++- docs/_build/html/search.html | 58 +-- docs/api.rst | 38 ++ docs/exceptions.rst | 16 + docs/index.rst | 1 + examples/info_new_api.py | 41 ++ tests/new-api/conftest.py | 161 ++++++ tests/new-api/test_containers_api.py | 88 ++++ tests/new-api/test_images_api.py | 158 ++++++ tests/new-api/test_system_api.py | 8 + tests/test_system.py | 2 +- 39 files changed, 4513 insertions(+), 156 deletions(-) create mode 100644 aiodocker/api/__init__.py create mode 100644 aiodocker/api/client.py create mode 
100644 aiodocker/api/container.py create mode 100644 aiodocker/api/image.py create mode 100644 aiodocker/api/system.py create mode 100644 aiodocker/client.py create mode 100644 aiodocker/errors.py create mode 100644 aiodocker/models/__init__.py create mode 100644 aiodocker/models/containers.py create mode 100644 aiodocker/models/images.py create mode 100644 aiodocker/models/resource.py create mode 100644 aiodocker/types/__init__.py create mode 100644 aiodocker/types/base.py create mode 100644 aiodocker/types/containers.py create mode 100644 aiodocker/types/healthcheck.py create mode 100644 aiodocker/types/networks.py create mode 100644 aiodocker/utils/__init__.py create mode 100644 aiodocker/utils/utils.py delete mode 100644 docs/_build/doctrees/environment.pickle create mode 100644 docs/api.rst create mode 100755 examples/info_new_api.py create mode 100644 tests/new-api/conftest.py create mode 100644 tests/new-api/test_containers_api.py create mode 100644 tests/new-api/test_images_api.py create mode 100644 tests/new-api/test_system_api.py diff --git a/aiodocker/__init__.py b/aiodocker/__init__.py index 1de30c93..8bb46e52 100644 --- a/aiodocker/__init__.py +++ b/aiodocker/__init__.py @@ -1,7 +1,11 @@ from .docker import Docker +from .api.client import APIClient +from .client import DockerClient __version__ = '0.11.0a0' -__all__ = ("Docker", ) +__all__ = ("Docker", + "APIClient", + "DockerClient") diff --git a/aiodocker/api/__init__.py b/aiodocker/api/__init__.py new file mode 100644 index 00000000..7260e953 --- /dev/null +++ b/aiodocker/api/__init__.py @@ -0,0 +1 @@ +from .client import APIClient diff --git a/aiodocker/api/client.py b/aiodocker/api/client.py new file mode 100644 index 00000000..072b302d --- /dev/null +++ b/aiodocker/api/client.py @@ -0,0 +1,226 @@ +import asyncio +import json +import logging +import os +from pathlib import Path +import re +import ssl + +import aiohttp +from yarl import URL + +from ..utils import utils + +# Sub-API classes +from 
.container import DockerContainerAPI +from ..events import DockerEvents +from ..errors import create_api_error_from_response +from .image import DockerImageAPI +from .system import DockerSystemAPI +# from .logs import DockerLog +# from .swarm import DockerSwarm +# from .services import DockerServices +# from .tasks import DockerTasks +# from .volumes import DockerVolumes, DockerVolume +# from .nodes import DockerSwarmNodes +# from .system import DockerSystem + +# __all__ = ( +# 'Docker', +# 'DockerContainers', 'DockerContainer', +# 'DockerEvents', +# 'DockerError', +# 'DockerImages', +# 'DockerLog', +# 'DockerSwarm', +# 'DockerServices', +# 'DockerTasks', +# 'DockerVolumes', 'DockerVolume', +# 'DockerSwarmNodes', +# 'DockerSystem' +# ) + +log = logging.getLogger(__name__) + +_sock_search_paths = [ + Path('/run/docker.sock'), + Path('/var/run/docker.sock'), +] + +_rx_version = re.compile(r'^v\d+\.\d+$') +_rx_tcp_schemes = re.compile(r'^(tcp|http)://') + + +class APIClient: + def __init__(self, + url=None, + connector=None, + session=None, + ssl_context=None, + api_version='v1.30'): + + docker_host = url # rename + if docker_host is None: + docker_host = os.environ.get('DOCKER_HOST', None) + if docker_host is None: + for sockpath in _sock_search_paths: + if sockpath.is_socket(): + docker_host = 'unix://' + str(sockpath) + break + self.docker_host = docker_host + + assert _rx_version.search(api_version) is not None, \ + 'Invalid API version format' + self.api_version = api_version + + if docker_host is None: + raise ValueError( + "Missing valid docker_host." + "Either DOCKER_HOST or local sockets are not available." 
+ ) + + if connector is None: + if _rx_tcp_schemes.search(docker_host): + if os.environ.get('DOCKER_TLS_VERIFY', '0') == '1': + ssl_context = self._docker_machine_ssl_context() + docker_host = _rx_tcp_schemes.sub('https://', docker_host) + else: + ssl_context = None + connector = aiohttp.TCPConnector(ssl_context=ssl_context) + self.docker_host = docker_host + elif docker_host.startswith('unix://'): + connector = aiohttp.UnixConnector(docker_host[7:]) + # dummy hostname for URL composition + self.docker_host = "unix://localhost" + else: + raise ValueError('Missing protocol scheme in docker_host.') + self.connector = connector + if session is None: + session = aiohttp.ClientSession(connector=self.connector) + self.session = session + + self.events = DockerEvents(self) + self._container = DockerContainerAPI(self) + self._image = DockerImageAPI(self) + # self.swarm = DockerSwarm(self) + # self.services = DockerServices(self) + # self.tasks = DockerTasks(self) + # self.volumes = DockerVolumes(self) + # self.nodes = DockerSwarmNodes(self) + self._system = DockerSystemAPI(self) + + async def close(self): + await self.events.stop() + await self.session.close() + + async def auth(self, **credentials): + response = await self._query_json( + "auth", "POST", + data=credentials, + ) + return response + + async def version(self): + data = await self._query_json("version") + return data + + def _canonicalize_url(self, path): + return URL("{self.docker_host}/{self.api_version}/{path}" + .format(self=self, path=path)) + + async def _query(self, path, method='GET', *, + params=None, data=None, headers=None, + timeout=None): + ''' + Get the response object by performing the HTTP request. + The caller is responsible to finalize the response object. 
+ ''' + url = self._canonicalize_url(path) + if headers and 'content-type' not in headers: + headers['content-type'] = 'application/json' + try: + response = await self.session.request( + method, url, + params=utils.httpize(params), + headers=headers, + data=data, + timeout=timeout) + except asyncio.TimeoutError: + raise + if (response.status // 100) in [4, 5]: + await create_api_error_from_response(response) + return response + + async def _query_json(self, path, method='GET', *, + params=None, data=None, headers=None, + timeout=None): + """ + A shorthand of _query() that treats the input as JSON. + """ + if headers is None: + headers = {} + headers['content-type'] = 'application/json' + if not isinstance(data, (str, bytes)): + data = json.dumps(data) + response = await self._query( + path, method, + params=params, data=data, headers=headers, + timeout=timeout) + data = await utils.parse_result(response) + return data + + async def _websocket(self, path, **params): + if not params: + params = { + 'stdin': True, + 'stdout': True, + 'stderr': True, + 'stream': True + } + url = self._canonicalize_url(path) + # ws_connect() does not have params arg. + url = url.with_query(utils.httpize(params)) + ws = await self.session.ws_connect( + url, + protocols=['chat'], + origin='http://localhost', + autoping=True, + autoclose=True) + return ws + + @staticmethod + def _docker_machine_ssl_context(): + """ + Create a SSLContext object using DOCKER_* env vars. 
+ """ + context = ssl.SSLContext(ssl.PROTOCOL_TLS) + context.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS) + certs_path = os.environ.get('DOCKER_CERT_PATH', None) + if certs_path is None: + raise ValueError("Cannot create ssl context, " + "DOCKER_CERT_PATH is not set!") + certs_path = Path(certs_path) + context.load_verify_locations(cafile=certs_path / 'ca.pem') + context.load_cert_chain(certfile=certs_path / 'cert.pem', + keyfile=certs_path / 'key.pem') + return context + + @property + def container(self): + """ + An object for managing containers on the server. See the + :ref:`low-level containers documentation ` for full details. + """ + return self._container + + @property + def image(self): + """ + An object for managing images on the server. See the + :ref:`low-level images documentation ` for full details. + """ + return self._image + + @property + def system(self): + return self._system diff --git a/aiodocker/api/container.py b/aiodocker/api/container.py new file mode 100644 index 00000000..26cdd97d --- /dev/null +++ b/aiodocker/api/container.py @@ -0,0 +1,628 @@ +import json +import tarfile +from typing import Any, List, Mapping + +from ..exceptions import DockerError, DockerContainerError +from ..jsonstream import json_stream_result +from ..multiplexed import multiplexed_result +from ..utils import identical, parse_result, clean_filters +from ..types import ContainerConfig, HostConfig, NetworkingConfig, EndpointConfig + +from ..logs import DockerLog + + +class DockerContainerAPI(object): + def __init__(self, api_client): + self.api_client = api_client + + async def list(self, all=False, limit=-1, size=False, filters: Mapping = None) -> List[Mapping]: + """ + List containers. Similar to the ``docker ps`` command. + + Args: + all (bool): Show all containers. 
Only running containers are shown + by default + limit (int): Show `limit` last created containers, include + non-running ones + size (bool): Display sizes + filters (dict): Filters to be processed on the image list. + Available filters: + + - `exited` (int): Only containers with specified exit code + - `status` (str): One of ``restarting``, ``running``, + ``paused``, ``exited`` + - `label` (str): format either ``"key"`` or ``"key=value"`` + - `id` (str): The id of the container. + - `name` (str): The name of the container. + - `ancestor` (str): Filter by container ancestor. Format of + ``[:tag]``, ````, or + ````. + - `before` (str): Only containers created before a particular + container. Give the container name or id. + - `since` (str): Only containers created after a particular + container. Give container name or id. + + A comprehensive list can be found in the documentation for + `docker ps + `_. + + Returns: + A list of dicts, one per container + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + params = { + 'limit': limit, + 'all': all, + 'size': size + } + if filters: + params['filters'] = clean_filters(filters) + response = await self.api_client._query_json( + "containers/json", + method='GET', + params=params + ) + return response + + async def create(self, image, command=None, hostname=None, user=None, + detach=False, stdin_open=False, tty=False, ports=None, + environment=None, volumes=None, + network_disabled=False, name=None, entrypoint=None, + working_dir=None, domainname=None, host_config=None, + mac_address=None, labels=None, stop_signal=None, + networking_config=None, healthcheck=None, + stop_timeout=None, runtime=None): + """ + Creates a container. Parameters are similar to those for the ``docker + run`` command except it doesn't support the attach options (``-a``). + + The arguments that are passed directly to this function are + host-independent configuration options. 
Host-specific configuration + is passed with the `host_config` argument. You'll normally want to + use this method in combination with the :py:meth:`create_host_config` + method to generate ``host_config``. + + **Port bindings** + + Port binding is done in two parts: first, provide a list of ports to + open inside the container with the ``ports`` parameter, then declare + bindings with the ``host_config`` parameter. For example: + + .. code-block:: python + + container_id = client.container.create( + 'busybox', 'ls', ports=[1111, 2222], + host_config=client.container.create_host_config(port_bindings={ + 1111: 4567, + 2222: None + }) + ) + + + You can limit the host address on which the port will be exposed like + such: + + .. code-block:: python + + client.container.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)}) + + Or without host port assignment: + + .. code-block:: python + + client.container.create_host_config(port_bindings={1111: ('127.0.0.1',)}) + + If you wish to use UDP instead of TCP (default), you need to declare + ports as such in both the config and host config: + + .. code-block:: python + + container_id = client.container.create( + 'busybox', 'ls', ports=[(1111, 'udp'), 2222], + host_config = client.container.create_host_config(port_bindings={ + '1111/udp': 4567, 2222: None + }) + ) + + To bind multiple host ports to a single container port, use the + following syntax: + + .. code-block:: python + + client.container.create_host_config(port_bindings={ + 1111: [1234, 4567] + }) + + You can also bind multiple IPs to a single container port: + + .. code-block:: python + + client.container.create_host_config(port_bindings={ + 1111: [ + ('192.168.0.100', 1234), + ('192.168.0.101', 1234) + ] + }) + + **Using volumes** + + Volume declaration is done in two parts. Provide a list of + paths to use as mountpoints inside the container with the + ``volumes`` parameter, and declare mappings from paths on the host + in the ``host_config`` section. 
+ + .. code-block:: python + + container_id = client.container.create( + 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'], + host_config=cli.create_host_config(binds={ + '/home/user1/': { + 'bind': '/mnt/vol2', + 'mode': 'rw', + }, + '/var/www': { + 'bind': '/mnt/vol1', + 'mode': 'ro', + } + }) + ) + + You can alternatively specify binds as a list. This code is equivalent + to the example above: + + .. code-block:: python + + container_id = api.container.create( + 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'], + host_config=cli.create_host_config(binds=[ + '/home/user1/:/mnt/vol2', + '/var/www:/mnt/vol1:ro', + ]) + ) + + **Networking** + + You can specify networks to connect the container to by using the + ``networking_config`` parameter. At the time of creation, you can + only connect a container to a single networking, but you + can create more connections by using + :py:meth:`~connect_container_to_network`. + + For example: + + .. code-block:: python + + networking_config = api.container.create_networking_config({ + 'network1': docker_client.create_endpoint_config( + ipv4_address='172.28.0.124', + aliases=['foo', 'bar'], + links=['container2'] + ) + }) + + ctnr = api.container.create( + img, command, networking_config=networking_config + ) + + Args: + image (str): The image to run + command (str or list): The command to be run in the container + hostname (str): Optional hostname for the container + user (str or int): Username or UID + detach (bool): Detached mode: run container in the background and + return container ID + stdin_open (bool): Keep STDIN open even if not attached + tty (bool): Allocate a pseudo-TTY + ports (list of ints): A list of port numbers + environment (dict or list): A dictionary or a list of strings in + the following format ``["PASSWORD=xxx"]`` or + ``{"PASSWORD": "xxx"}``. + volumes (str or list): List of paths inside the container to use + as volumes. 
+ network_disabled (bool): Disable networking + name (str): A name for the container + entrypoint (str or list): An entrypoint + working_dir (str): Path to the working directory + domainname (str): The domain name to use for the container + host_config (dict): A dictionary created with + :py:meth:`create_host_config`. + mac_address (str): The Mac Address to assign the container + labels (dict or list): A dictionary of name-value labels (e.g. + ``{"label1": "value1", "label2": "value2"}``) or a list of + names of labels to set with empty values (e.g. + ``["label1", "label2"]``) + stop_signal (str): The stop signal to use to stop the container + (e.g. ``SIGINT``). + stop_timeout (int): Timeout to stop the container, in seconds. + Default: 10 + networking_config (dict): A networking configuration generated + by :py:meth:`create_networking_config`. + runtime (str): Runtime to use with this container. + healthcheck (dict): Specify a test to perform to check that the + container is healthy. + + Returns: + A dictionary with an image 'Id' key and a 'Warnings' key. + + Raises: + :py:class:`aiodocker.errors.ImageNotFound` + If the specified image does not exist. + :py:class:`aiodocker.errors.APIError` + If the server returns an error. 
+ """ + if isinstance(volumes, str): + volumes = [volumes, ] + + config = self.create_container_config( + image, command, hostname, user, detach, stdin_open, tty, + ports, environment, volumes, + network_disabled, entrypoint, working_dir, domainname, + host_config, mac_address, labels, + stop_signal, networking_config, healthcheck, + stop_timeout, runtime + ) + return await self.create_from_config(config, name) + + def create_container_config(self, *args, **kwargs): + return ContainerConfig(self.api_client.version, *args, **kwargs) + + async def create_from_config(self, config, name=None) -> Mapping[str, Any]: + url = "containers/create" + + config = json.dumps(config, sort_keys=True).encode('utf-8') + kwargs = {} + if name: + kwargs['name'] = name + response = await self.api_client._query_json( + url, + method='POST', + data=config, + params=kwargs + ) + return response + + def create_host_config(self, *args, **kwargs): + """ + Create a dictionary for the ``host_config`` argument to + :py:meth:`create`. + + Args: + auto_remove (bool): enable auto-removal of the container on daemon + side when the container's process exits. + binds (dict): Volumes to bind. See :py:meth:`create_container` + for more information. + blkio_weight_device: Block IO weight (relative device weight) in + the form of: ``[{"Path": "device_path", "Weight": weight}]``. + blkio_weight: Block IO weight (relative weight), accepts a weight + value between 10 and 1000. + cap_add (list of str): Add kernel capabilities. For example, + ``["SYS_ADMIN", "MKNOD"]``. + cap_drop (list of str): Drop kernel capabilities. + cpu_period (int): The length of a CPU period in microseconds. + cpu_quota (int): Microseconds of CPU time that the container can + get in a CPU period. + cpu_shares (int): CPU shares (relative weight). + cpuset_cpus (str): CPUs in which to allow execution (``0-3``, + ``0,1``). + cpuset_mems (str): Memory nodes (MEMs) in which to allow execution + (``0-3``, ``0,1``). 
Only effective on NUMA systems. + device_cgroup_rules (:py:class:`list`): A list of cgroup rules to + apply to the container. + device_read_bps: Limit read rate (bytes per second) from a device + in the form of: `[{"Path": "device_path", "Rate": rate}]` + device_read_iops: Limit read rate (IO per second) from a device. + device_write_bps: Limit write rate (bytes per second) from a + device. + device_write_iops: Limit write rate (IO per second) from a device. + devices (:py:class:`list`): Expose host devices to the container, + as a list of strings in the form + ``::``. + + For example, ``/dev/sda:/dev/xvda:rwm`` allows the container + to have read-write access to the host's ``/dev/sda`` via a + node named ``/dev/xvda`` inside the container. + dns (:py:class:`list`): Set custom DNS servers. + dns_opt (:py:class:`list`): Additional options to be added to the + container's ``resolv.conf`` file + dns_search (:py:class:`list`): DNS search domains. + extra_hosts (dict): Addtional hostnames to resolve inside the + container, as a mapping of hostname to IP address. + group_add (:py:class:`list`): List of additional group names and/or + IDs that the container process will run as. + init (bool): Run an init inside the container that forwards + signals and reaps processes + init_path (str): Path to the docker-init binary + ipc_mode (str): Set the IPC mode for the container. + isolation (str): Isolation technology to use. Default: `None`. + links (dict or list of tuples): Either a dictionary mapping name + to alias or as a list of ``(name, alias)`` tuples. + log_config (dict): Logging configuration, as a dictionary with + keys: + + - ``type`` The logging driver name. + - ``config`` A dictionary of configuration for the logging + driver. + + lxc_conf (dict): LXC config. + mem_limit (float or str): Memory limit. 
Accepts float values + (which represent the memory limit of the created container in + bytes) or a string with a units identification char + (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is + specified without a units character, bytes are assumed as an + mem_swappiness (int): Tune a container's memory swappiness + behavior. Accepts number between 0 and 100. + memswap_limit (str or int): Maximum amount of memory + swap a + container is allowed to consume. + mounts (:py:class:`list`): Specification for mounts to be added to + the container. More powerful alternative to ``binds``. Each + item in the list is expected to be a + :py:class:`docker.types.Mount` object. + network_mode (str): One of: + + - ``bridge`` Create a new network stack for the container on + on the bridge network. + - ``none`` No networking for this container. + - ``container:`` Reuse another container's network + stack. + - ``host`` Use the host network stack. + oom_kill_disable (bool): Whether to disable OOM killer. + oom_score_adj (int): An integer value containing the score given + to the container in order to tune OOM killer preferences. + pid_mode (str): If set to ``host``, use the host PID namespace + inside the container. + pids_limit (int): Tune a container's pids limit. Set ``-1`` for + unlimited. + port_bindings (dict): See :py:meth:`create_container` + for more information. + privileged (bool): Give extended privileges to this container. + publish_all_ports (bool): Publish all ports to the host. + read_only (bool): Mount the container's root filesystem as read + only. + restart_policy (dict): Restart the container when it exits. + Configured as a dictionary with keys: + + - ``Name`` One of ``on-failure``, or ``always``. + - ``MaximumRetryCount`` Number of times to restart the + container on failure. + security_opt (:py:class:`list`): A list of string values to + customize labels for MLS systems, such as SELinux. + shm_size (str or int): Size of /dev/shm (e.g. ``1G``). 
+ storage_opt (dict): Storage driver options per container as a + key-value mapping. + sysctls (dict): Kernel parameters to set in the container. + tmpfs (dict): Temporary filesystems to mount, as a dictionary + mapping a path inside the container to options for that path. + + For example: + + .. code-block:: python + + { + '/mnt/vol2': '', + '/mnt/vol1': 'size=3G,uid=1000' + } + + ulimits (:py:class:`list`): Ulimits to set inside the container, + as a list of dicts. + userns_mode (str): Sets the user namespace mode for the container + when user namespace remapping option is enabled. Supported + values are: ``host`` + volumes_from (:py:class:`list`): List of container names or IDs to + get volumes from. + runtime (str): Runtime to use with this container. + + + Returns: + (dict) A dictionary which can be passed to the ``host_config`` + argument to :py:meth:`create`. + + Example: + + .. code-block:: python + + api.container.create_host_config(privileged=True, cap_drop=['MKNOD'], + volumes_from=['nostalgic_newton']) + {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True, + 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False} + + """ + if not kwargs: + kwargs = {} + if 'version' in kwargs: + raise TypeError( + "create_host_config() got an unexpected " + "keyword argument 'version'" + ) + kwargs['version'] = self.api_client.version + return HostConfig(*args, **kwargs) + + def create_networking_config(self, *args, **kwargs): + """ + Create a networking config dictionary to be used as the + ``networking_config`` parameter in :py:meth:`create`. + + Args: + endpoints_config (dict): A dictionary mapping network names to + endpoint configurations generated by + :py:meth:`create_endpoint_config`. + + Returns: + (dict) A networking config. + + Example: + + .. 
code-block:: python + + api.container.create_network('network1') + networking_config = api.container.create_networking_config({ + 'network1': api.container.create_endpoint_config() + }) + container = api.container.create( + img, command, networking_config=networking_config + ) + + """ + return NetworkingConfig(*args, **kwargs) + + def create_endpoint_config(self, *args, **kwargs): + """ + Create an endpoint config dictionary to be used with + :py:meth:`create_networking_config`. + + Args: + aliases (:py:class:`list`): A list of aliases for this endpoint. + Names in that list can be used within the network to reach the + container. Defaults to ``None``. + links (:py:class:`list`): A list of links for this endpoint. + Containers declared in this list will be linked to this + container. Defaults to ``None``. + ipv4_address (str): The IP address of this container on the + network, using the IPv4 protocol. Defaults to ``None``. + ipv6_address (str): The IP address of this container on the + network, using the IPv6 protocol. Defaults to ``None``. + link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) + addresses. + + Returns: + (dict) An endpoint config. + + Example: + + .. code-block:: python + + endpoint_config = api.container.create_endpoint_config( + aliases=['web', 'app'], + links=['app_db'], + ipv4_address='132.65.0.123' + ) + + """ + return EndpointConfig(self.api_client.version, *args, **kwargs) + + async def remove(self, container, v=False, link=False, force=False): + """ + Remove a container. Similar to the ``docker rm`` command. + + Args: + container (str): The container to remove + v (bool): Remove the volumes associated with the container + link (bool): Remove the specified link and not the underlying + container + force (bool): Force the removal of a running container (uses + ``SIGKILL``) + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. 
+ """ + params = { + 'v': v, + 'link': link, + 'force': force + } + + response = await self.api_client._query( + "containers/{id}".format(id=container), + method='DELETE', + params=params + ) + await response.release() + return + + async def inspect(self, container_id, size=False) -> Mapping[str, Any]: + params = { + 'size': size + } + response = await self.api_client._query_json( + "containers/{id}/json".format(id=container_id), + method='GET', + params=params + ) + return response + + async def logs(self, container_id, stdout=False, stderr=False, follow=False, **kwargs): + if stdout is False and stderr is False: + raise TypeError("Need one of stdout or stderr") + + params = { + "stdout": stdout, + "stderr": stderr, + "follow": follow, + } + params.update(kwargs) + + inspect_info = await self.inspect(container_id) + is_tty = inspect_info['Config']['Tty'] + + response = await self.api_client._query( + "containers/{id}/logs".format(id=container_id), + method='GET', + params=params, + ) + return await multiplexed_result(response, follow, is_tty=is_tty) + + async def prune(self, filters: Mapping = None) -> Mapping[str, Any]: + """ + Delete stopped containers + + Args: + filters (dict): Filters to process on the prune list. + + Returns: + (dict): A dict containing a list of deleted container IDs and + the amount of disk space reclaimed in bytes. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. 
+ """ + params = {} + if filters: + params['filters'] = clean_filters(filters) + response = await self.api_client._query_json( + "containers/prune", + method='POST', + params=params + ) + return response + + async def restart(self, container_id, timeout=None): + params = {} + if timeout is not None: + params['t'] = timeout + response = await self.api_client._query( + "containers/{id}/restart".format(id=container_id), + method='POST', + params=params + ) + await response.release() + return + + async def start(self, container_id): + response = await self.api_client._query( + "containers/{}/start".format(container_id), + method='POST' + ) + await response.release() + return + + async def stop(self, container_id, **kwargs): + response = await self.api_client._query( + "containers/{id}/stop".format(id=container_id), + method='POST', + params=kwargs + ) + await response.release() + return + + async def wait(self, container_id, timeout=None, **kwargs): + data = await self.api_client._query_json( + "containers/{id}/wait".format(id=container_id), + method='POST', + params=kwargs, + timeout=timeout, + ) + return data diff --git a/aiodocker/api/image.py b/aiodocker/api/image.py new file mode 100644 index 00000000..b5152c1f --- /dev/null +++ b/aiodocker/api/image.py @@ -0,0 +1,331 @@ +import json +import tarfile +from typing import ( + Optional, Union, Any, + List, MutableMapping, Mapping, + BinaryIO, +) + +from ..jsonstream import json_stream_result +from ..multiplexed import multiplexed_result +from ..utils import identical, parse_result, clean_filters, compose_auth_header, clean_map + + +class DockerImageAPI(object): + def __init__(self, api_client): + self.api_client = api_client + + async def build(self, *, + remote: str=None, + fileobj: BinaryIO=None, + path_dockerfile: str=None, + tag: str=None, + quiet: bool=False, + nocache: bool=False, + buildargs: Mapping=None, + pull: bool=False, + rm: bool=True, + forcerm: bool=False, + labels: Mapping=None, + stream: bool=False, 
+ encoding: str=None) -> Mapping: + """ + Build an image given a remote Dockerfile + or a file object with a Dockerfile inside + + Args: + path_dockerfile: path within the build context to the Dockerfile + remote: a Git repository URI or HTTP/HTTPS context URI + quiet: suppress verbose build output + nocache: do not use the cache when building the image + rm: remove intermediate containers after a successful build + pull: downloads any updates to the FROM image in Dockerfiles + encoding: set `Content-Encoding` for the file object your send + forcerm: always remove intermediate containers, even upon failure + labels: arbitrary key/value labels to set on the image + fileobj: a tar archive compressed or not + tag (str): A tag to add to the final image + buildargs (dict): A dictionary of build arguments + stream: + """ + + local_context = None + + headers = {} + + params = { + 't': tag, + 'rm': rm, + 'q': quiet, + 'pull': pull, + 'remote': remote, + 'nocache': nocache, + 'forcerm': forcerm, + 'dockerfile': path_dockerfile, + } + + if remote is None and fileobj is None: + raise ValueError("You need to specify either remote or fileobj") + + if fileobj and remote: + raise ValueError("You cannot specify both fileobj and remote") + + if fileobj and not encoding: + raise ValueError("You need to specify an encoding") + + if remote is None and fileobj is None: + raise ValueError("Either remote or fileobj needs to be provided.") + + if fileobj: + local_context = fileobj.read() + headers["content-type"] = "application/x-tar" + + if fileobj and encoding: + headers['Content-Encoding'] = encoding + + if buildargs: + params.update({'buildargs': json.dumps(buildargs)}) + + if labels: + params.update({'labels': json.dumps(labels)}) + + response = await self.api_client._query( + "build", + "POST", + params=clean_map(params), + headers=headers, + data=local_context + ) + + return await json_stream_result(response, stream=stream) + + async def history(self, image: str) -> Mapping: + """ 
+ Show the history of an image. + + Args: + image (str): The image to show history for + + Returns: + (str): The history of the image + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + response = await self.api_client._query_json( + "images/{name}/history".format(name=image), + ) + return response + + async def inspect(self, image: str) -> Mapping[str, Any]: + """ + Get detailed information about an image. Similar to the ``docker + image inspect`` command. + + Args: + image (str): The image to inspect + + Returns: + (dict): Similar to the output of ``docker image inspect``, but as a + single dict + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + response = await self.api_client._query_json( + "images/{image}/json".format(image=image), + ) + return response + + async def list(self, name=None, all=False, digests=False, filters: Mapping=None) -> List[Mapping]: + """ + List images. Similar to the ``docker images`` command. + + Args: + name (str): Only show images belonging to the repository ``name`` + all (bool): Show intermediate image layers. By default, these are + filtered out. + digests (bool): Show digest information as a RepoDigests field on each image. + filters (dict): Filters to be processed on the image list. + Available filters: + - ``dangling`` (bool) + - ``label`` (str): format either ``key`` or ``key=value`` + - before=([:], or ) + - reference=([:]) + - since=([:], or ) + + Returns: + A dictionary. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. 
+ """ + params = { + 'all': all, + 'digests': digests + } + if name: + params['filter'] = name + if filters: + params['filters'] = clean_filters(filters) + response = await self.api_client._query_json( + "images/json", "GET", + params=params, + ) + return response + + async def pull(self, name: str, *, + auth_config: Optional[Union[MutableMapping, str, bytes]]=None, + tag: str=None, + repo: str=None, + stream: bool=False, + platform=None) -> Mapping: + """ + Similar to `docker pull`, pull an image locally + + Args: + name: name of the image to pull + repo: repository name given to an image when it is imported + tag: if empty when pulling an image all tags + for the given image to be pulled + auth_config: special {'auth': base64} pull private repo + stream: + platform (str): Platform in the format ``os[/arch[/variant]]`` + """ + params = { + 'fromImage': name, + } + headers = {} + if repo: + params['repo'] = repo + if tag: + params['tag'] = tag + if auth_config is not None: + registry, has_registry_host, _ = name.partition('/') + if not has_registry_host: + raise ValueError('Image should have registry host ' + 'when auth information is provided') + # TODO: assert registry == repo? + headers['X-Registry-Auth'] = compose_auth_header(auth_config, registry) + response = await self.api_client._query( + "images/create", + "POST", + params=clean_map(params), + headers=headers, + ) + return await json_stream_result(response, stream=stream) + + async def push(self, repository: str, *, + auth_config: Union[MutableMapping, str, bytes]=None, + tag: str=None, + stream: bool=False) -> Mapping: + """ + Push an image or a repository to the registry. Similar to the ``docker + push`` command. + + Args: + repository (str): The repository to push to + tag (str): An optional tag to push + stream (bool): Stream the output as a blocking generator + auth_config (dict): Override the credentials that + :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for + this request. 
``auth_config`` should contain the ``username`` + and ``password`` keys to be valid. + decode (bool): Decode the JSON data from the server into dicts. + Only applies with ``stream=True`` + + Returns: + (generator or str): The output from the server. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + + Example: + >>> for line in client.image.push('yourname/app', stream=True): + ... print line + {"status":"Pushing repository yourname/app (1 tags)"} + {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"} + {"status":"Image already pushed, skipping","progressDetail":{}, + "id":"511136ea3c5a"} + ... + + """ + params = {} + headers = { + # Anonymous push requires a dummy auth header. + 'X-Registry-Auth': 'placeholder', + } + if tag: + params['tag'] = tag + if auth_config is not None: + registry, has_registry_host, _ = repository.partition('/') + if not has_registry_host: + raise ValueError('Image should have registry host ' + 'when auth information is provided') + headers['X-Registry-Auth'] = compose_auth_header(auth_config, registry) + response = await self.api_client._query( + "images/{name}/push".format(name=repository), + "POST", + params=params, + headers=headers, + ) + return await json_stream_result(response, stream=stream) + + async def remove(self, name: str, *, force: bool=False, + noprune: bool=False) -> List: + """ + Remove an image along with any untagged parent + images that were referenced by that image + + Args: + name: name/id of the image to delete + force: remove the image even if it is being used + by stopped containers or has other tags + noprune: don't delete untagged parent images + + Returns: + List of deleted images + """ + params = {'force': force, 'noprune': noprune} + response = await self.api_client._query_json( + "images/{name}".format(name=name), + "DELETE", + params=params, + ) + return response + + async def tag(self, image: str, repository: str, *, tag: str=None) -> bool: + """ + Tag an image 
into a repository. Similar to the ``docker tag`` command. + + Args: + image (str): The image to tag + repository (str): The repository to set for the tag + tag (str): The tag name + + Returns: + (bool): ``True`` if successful + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + + Example: + + >>> client.image.tag('ubuntu', 'localhost:5000/ubuntu', 'latest') + """ + params = {"repo": repository} + + if tag: + params["tag"] = tag + + await self.api_client._query_json( + "images/{image}/tag".format(image=image), + "POST", + params=params, + ) + return True diff --git a/aiodocker/api/system.py b/aiodocker/api/system.py new file mode 100644 index 00000000..60239033 --- /dev/null +++ b/aiodocker/api/system.py @@ -0,0 +1,20 @@ + + +class DockerSystemAPI(object): + def __init__(self, api_client): + self.api_client = api_client + + async def info(self): + """ + Display system-wide information. Identical to the ``docker info`` + command. + + Returns: + (dict): The info as a dict + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + data = await self.api_client._query_json("info") + return data diff --git a/aiodocker/client.py b/aiodocker/client.py new file mode 100644 index 00000000..9a213089 --- /dev/null +++ b/aiodocker/client.py @@ -0,0 +1,45 @@ +from .api.client import APIClient +from .models.containers import ContainerCollection +from .models.images import ImageCollection + + +class DockerClient(object): + def __init__(self, *args, **kwargs): + self.api = APIClient(*args, **kwargs) + + @property + def containers(self): + """ + An object for managing containers on the server. See the + :doc:`containers documentation ` for full details. + """ + return ContainerCollection(client=self) + + @property + def images(self): + """ + An object for managing images on the server. See the + :doc:`images documentation ` for full details. 
+ """ + return ImageCollection(client=self) + + async def close(self): + await self.api.close() + + async def version(self): + return await self.api.version() + version.__doc__ = APIClient.version.__doc__ + + async def info(self): + """ + Display system-wide information. Identical to the ``docker info`` + command. + + Returns: + (dict): The info as a dict + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return await self.api.system.info() diff --git a/aiodocker/errors.py b/aiodocker/errors.py new file mode 100644 index 00000000..24e3f8f8 --- /dev/null +++ b/aiodocker/errors.py @@ -0,0 +1,119 @@ +import json + + +class DockerException(Exception): + """ + A base class from which all other exceptions inherit. + + If you want to catch all errors that the Docker SDK might raise, + catch this base exception. + """ + + +async def create_api_error_from_response(response): + """ + Create a suitable APIError from ClientResponse. + """ + what = await response.read() + content_type = response.headers.get('content-type', '') + response.close() + if content_type == 'application/json': + explanation = json.loads(what.decode('utf8'))['message'] + else: + explanation = what.decode('utf8') + cls = APIError + if response.status == 404: + if explanation and ('No such image' in str(explanation) or + 'not found: does not exist or no pull access' + in str(explanation) or + 'repository does not exist' in str(explanation)): + cls = ImageNotFound + else: + cls = NotFound + raise cls(response=response, explanation=explanation) + + +class APIError(DockerException): + """ + An HTTP error from the API. 
+ """ + def __init__(self, response=None, explanation=None): + self.response = response + self.explanation = explanation + + def __str__(self): + message = super(APIError, self).__str__() + + if self.is_client_error(): + message = '{0} Client Error: {1}'.format(self.response.status, self.response.reason) + + elif self.is_server_error(): + message = '{0} Server Error: {1}'.format(self.response.status, self.response.reason) + + if self.explanation: + message = '{0} ("{1}")'.format(message, self.explanation) + + return message + + @property + def status_code(self): + if self.response is not None: + return self.response.status + + def is_client_error(self): + if self.status_code is None: + return False + return 400 <= self.status_code < 500 + + def is_server_error(self): + if self.status_code is None: + return False + return 500 <= self.status_code < 600 + + +class NotFound(APIError): + pass + + +class ImageNotFound(NotFound): + pass + + +class InvalidVersion(DockerException): + pass + + +class ContainerError(DockerException): + """ + Represents a container that has exited with a non-zero exit code. 
+ """ + def __init__(self, container, exit_status, command, image, stderr): + self.container = container + self.exit_status = exit_status + self.command = command + self.image = image + self.stderr = stderr + + err = ": {}".format(stderr) if stderr is not None else "" + msg = ("Command '{}' in image '{}' returned non-zero exit " + "status {}{}").format(command, image, exit_status, err) + + super(ContainerError, self).__init__(msg) + + +class BuildError(DockerException): + def __init__(self, reason, build_log): + super(BuildError, self).__init__(reason) + self.msg = reason + self.build_log = build_log + + +def create_unexpected_kwargs_error(name, kwargs): + quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)] + text = ["{}() ".format(name)] + if len(quoted_kwargs) == 1: + text.append("got an unexpected keyword argument ") + else: + text.append("got unexpected keyword arguments ") + text.append(', '.join(quoted_kwargs)) + return TypeError(''.join(text)) diff --git a/aiodocker/models/__init__.py b/aiodocker/models/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/aiodocker/models/containers.py b/aiodocker/models/containers.py new file mode 100644 index 00000000..de24b0d9 --- /dev/null +++ b/aiodocker/models/containers.py @@ -0,0 +1,452 @@ +import copy +import ntpath + +from .resource import Collection, Model +from ..errors import ImageNotFound, ContainerError, APIError, create_unexpected_kwargs_error +from ..exceptions import DockerContainerError +from ..types import HostConfig +from .images import Image + + +class Container(Model): + + @property + def name(self): + """ + The name of the container. + """ + if self.attrs.get('Name') is not None: + return self.attrs['Name'].lstrip('/') + + @property + def labels(self): + """ + The labels of a container as dictionary. + """ + result = self.attrs['Config'].get('Labels') + return result or {} + + @property + def status(self): + """ + The status of the container. 
For example, ``running``, or ``exited``. + """ + return self.attrs['State']['Status'] + + async def image(self): + """ + The image of the container. + """ + image_id = self.attrs['Image'] + if image_id is None: + return None + return await self.client.api.image.get(image_id.split(':')[1]) + + async def logs(self, **kwargs): + """ + Get logs from this container. Similar to the ``docker logs`` command. + + The ``stream`` parameter makes the ``logs`` function return a blocking + generator you can iterate over to retrieve log output as it happens. + + Args: + stdout (bool): Get ``STDOUT`` + stderr (bool): Get ``STDERR`` + stream (bool): Stream the response + timestamps (bool): Show timestamps + tail (str or int): Output specified number of lines at the end of + logs. Either an integer of number of lines or the string + ``all``. Default ``all`` + since (datetime or int): Show logs since a given datetime or + integer epoch (in seconds) + follow (bool): Follow log output + until (datetime or int): Show logs that occurred before the given + datetime or integer epoch (in seconds) + + Returns: + (generator or str): Logs from the container. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.container.logs(self.id, **kwargs) + + async def remove(self, v=False, link=False, force=False): + """ + Remove this container. Similar to the ``docker rm`` command. + + Args: + v (bool): Remove the volumes associated with the container + link (bool): Remove the specified link and not the underlying + container + force (bool): Force the removal of a running container (uses + ``SIGKILL``) + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.container.remove(self.id, v=v, link=link, force=force) + + async def restart(self, timeout=None): + """ + Restart this container. Similar to the ``docker restart`` command. 
+ + Args: + timeout (int): Number of seconds to try to stop for before killing + the container. Once killed it will then be restarted. Default + is 10 seconds. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.container.restart(self.id, timeout) + + async def start(self, **kwargs): + """ + Start this container. Similar to the ``docker start`` command, but + doesn't support attach options. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.container.start(self.id, **kwargs) + + def stop(self, **kwargs): + """ + Stops a container. Similar to the ``docker stop`` command. + + Args: + timeout (int): Timeout in seconds to wait for the container to + stop before sending a ``SIGKILL``. Default: 10 + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return self.client.api.container.stop(self.id, **kwargs) + + async def wait(self, **kwargs): + """ + Block until the container stops, then return its exit code. Similar to + the ``docker wait`` command. + + Args: + timeout (int): Request timeout + condition (str): Wait until a container state reaches the given + condition, either ``not-running`` (default), ``next-exit``, + or ``removed`` + + Returns: + (dict): The API's response as a Python dictionary, including + the container's exit code under the ``StatusCode`` attribute. + + Raises: + :py:class:`aiohttp.ServerTimeoutError` + If the timeout is exceeded. + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.container.wait(self.id, **kwargs) + + +class ContainerCollection(Collection): + model = Container + + async def get(self, container_id): + """ + Get a container by name or ID. + + Args: + container_id (str): Container name or ID. + + Returns: + A :py:class:`Container` object. 
+ + Raises: + :py:class:`aiodocker.errors.NotFound` + If the container does not exist. + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + resp = await self.client.api.container.inspect(container_id) + return self.prepare_model(resp) + + async def create(self, image, command=None, **kwargs): + """ + Create a container without starting it. Similar to ``docker create``. + + Takes the same arguments as :py:meth:`run`, except for ``stdout``, + ``stderr``, and ``remove``. + + Returns: + A :py:class:`Container` object. + + Raises: + :py:class:`aiodocker.errors.ImageNotFound` + If the specified image does not exist. + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + if isinstance(image, Image): + image = image.id + kwargs['image'] = image + kwargs['command'] = command + kwargs['version'] = self.client.api.api_version + create_kwargs = _create_container_args(kwargs) + resp = await self.client.api.container.create(**create_kwargs) + return await self.get(resp['Id']) + + async def list(self, all=False, limit=-1, filters=None): + """ + List containers. Similar to the ``docker ps`` command. + + Args: + all (bool): Show all containers. Only running containers are shown + by default + limit (int): Show `limit` last created containers, include + non-running ones + filters (dict): Filters to be processed on the image list. + Available filters: + + - `exited` (int): Only containers with specified exit code + - `status` (str): One of ``restarting``, ``running``, + ``paused``, ``exited`` + - `label` (str): format either ``"key"`` or ``"key=value"`` + - `id` (str): The id of the container. + - `name` (str): The name of the container. + - `ancestor` (str): Filter by container ancestor. Format of + ``[:tag]``, ````, or + ````. + - `before` (str): Only containers created before a particular + container. Give the container name or id. + - `since` (str): Only containers created after a particular + container. 
Give container name or id. + + A comprehensive list can be found in the documentation for + `docker ps + `_. + + Returns: + (list of :py:class:`Container`) + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + resp = await self.client.api.container.list(all=all, limit=limit, filters=filters) + return [await self.get(r['Id']) for r in resp] + + async def run(self, image, command=None, stdout=True, stderr=False, + remove=False, **kwargs): + """ + Create and start a container. + + If container.start() will raise an error the exception will contain + a `container_id` attribute with the id of the container. + """ + if isinstance(image, Image): + image = image.id + stream = kwargs.pop('stream', False) + detach = kwargs.pop('detach', False) + platform = kwargs.pop('platform', None) + + if detach and remove: + kwargs["auto_remove"] = True + + if kwargs.get('network') and kwargs.get('network_mode'): + raise RuntimeError( + 'The options "network" and "network_mode" can not be used ' + 'together.' + ) + + try: + container = await self.create(image=image, command=command, + detach=detach, **kwargs) + except ImageNotFound: + await self.client.images.pull(image, platform=platform) + container = await self.create(image=image, command=command, + detach=detach, **kwargs) + + try: + await container.start() + except APIError as err: + raise DockerContainerError( + err.status_code, + { + "message": err.explanation + }, + container.id + ) + + return container + + async def prune(self, filters=None): + """ + Delete stopped containers + + Args: + filters (dict): Filters to process on the prune list. + + Returns: + (dict): A dict containing a list of deleted container IDs and + the amount of disk space reclaimed in bytes. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. 
+ """ + return await self.client.api.container.prune(filters=filters) + + +# kwargs to copy straight from run to create +RUN_CREATE_KWARGS = [ + 'command', + 'detach', + 'domainname', + 'entrypoint', + 'environment', + 'healthcheck', + 'hostname', + 'image', + 'labels', + 'mac_address', + 'name', + 'network_disabled', + 'stdin_open', + 'stop_signal', + 'tty', + 'user', + 'volume_driver', + 'working_dir', +] + +# kwargs to copy straight from run to host_config +RUN_HOST_CONFIG_KWARGS = [ + 'auto_remove', + 'blkio_weight_device', + 'blkio_weight', + 'cap_add', + 'cap_drop', + 'cgroup_parent', + 'cpu_count', + 'cpu_percent', + 'cpu_period', + 'cpu_quota', + 'cpu_shares', + 'cpuset_cpus', + 'cpuset_mems', + 'cpu_rt_period', + 'cpu_rt_runtime', + 'device_cgroup_rules', + 'device_read_bps', + 'device_read_iops', + 'device_write_bps', + 'device_write_iops', + 'devices', + 'dns_opt', + 'dns_search', + 'dns', + 'extra_hosts', + 'group_add', + 'init', + 'init_path', + 'ipc_mode', + 'isolation', + 'kernel_memory', + 'links', + 'log_config', + 'lxc_conf', + 'mem_limit', + 'mem_reservation', + 'mem_swappiness', + 'memswap_limit', + 'mounts', + 'nano_cpus', + 'network_mode', + 'oom_kill_disable', + 'oom_score_adj', + 'pid_mode', + 'pids_limit', + 'privileged', + 'publish_all_ports', + 'read_only', + 'restart_policy', + 'security_opt', + 'shm_size', + 'storage_opt', + 'sysctls', + 'tmpfs', + 'ulimits', + 'userns_mode', + 'version', + 'volumes_from', + 'runtime' +] + + +def _create_container_args(kwargs): + """ + Convert arguments to create() to arguments to create_container(). 
+    """
+    # Copy over kwargs which can be copied directly
+    create_kwargs = {}
+    for key in copy.copy(kwargs):
+        if key in RUN_CREATE_KWARGS:
+            create_kwargs[key] = kwargs.pop(key)
+    host_config_kwargs = {}
+    for key in copy.copy(kwargs):
+        if key in RUN_HOST_CONFIG_KWARGS:
+            host_config_kwargs[key] = kwargs.pop(key)
+
+    # Process kwargs which are split over both create and host_config
+    ports = kwargs.pop('ports', {})
+    if ports:
+        host_config_kwargs['port_bindings'] = ports
+
+    volumes = kwargs.pop('volumes', {})
+    if volumes:
+        host_config_kwargs['binds'] = volumes
+
+    network = kwargs.pop('network', None)
+    if network:
+        create_kwargs['networking_config'] = {
+            network: None
+        }
+        host_config_kwargs['network_mode'] = network
+
+    # All kwargs should have been consumed by this point, so raise
+    # error if any are left
+    if kwargs:
+        raise create_unexpected_kwargs_error('run', kwargs)
+
+    create_kwargs['host_config'] = HostConfig(**host_config_kwargs)
+
+    # Fill in any kwargs which need processing by create_host_config first
+    port_bindings = create_kwargs['host_config'].get('PortBindings')
+    if port_bindings:
+        # sort to make consistent for tests
+        create_kwargs['ports'] = [tuple(p.split('/', 1))
+                                  for p in sorted(port_bindings.keys())]
+    if volumes:
+        if isinstance(volumes, dict):
+            create_kwargs['volumes'] = [
+                v.get('bind') for v in volumes.values()
+            ]
+        else:
+            create_kwargs['volumes'] = [
+                _host_volume_from_bind(v) for v in volumes
+            ]
+    return create_kwargs
+
+
+def _host_volume_from_bind(bind):  # host path from a "host:container[:mode]" bind string
+    drive, rest = ntpath.splitdrive(bind)
+    bits = rest.split(':', 1)
+    if len(bits) == 1 or bits[1] in ('ro', 'rw'):
+        return drive + bits[0]
+    else:
+        return bits[1][:-3] if bits[1].endswith((':ro', ':rw')) else bits[1]  # rstrip() strips a char *set*, not a suffix
diff --git a/aiodocker/models/images.py b/aiodocker/models/images.py
new file mode 100644
index 00000000..d3bb29eb
--- /dev/null
+++ b/aiodocker/models/images.py
@@ -0,0 +1,341 @@
+import re
+
+from .resource import Model, Collection
+from ..errors import BuildError
+from 
..utils.utils import parse_repository_tag, clean_map + + +class Image(Model): + """ + An image on the server. + """ + def __repr__(self): + return "<{}: '{}'>".format(self.__class__.__name__, "', '".join(self.tags)) + + @property + def labels(self): + """ + The labels of an image as dictionary. + """ + result = self.attrs['Config'].get('Labels') + return result or {} + + @property + def short_id(self): + """ + The ID of the image truncated to 10 characters, plus the ``sha256:`` + prefix. + """ + if self.id.startswith('sha256:'): + return self.id[:17] + return self.id[:10] + + @property + def tags(self): + """ + The image's tags. + """ + tags = self.attrs.get('RepoTags') + if tags is None: + tags = [] + return [tag for tag in tags if tag != ':'] + + async def history(self): + """ + Show the history of an image. + + Returns: + (str): The history of the image. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.image.history(self.id) + + # def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE): + # """ + # Get a tarball of an image. Similar to the ``docker save`` command. + # + # Args: + # chunk_size (int): The number of bytes returned by each iteration + # of the generator. If ``None``, data will be streamed as it is + # received. Default: 2 MB + # + # Returns: + # (generator): A stream of raw archive data. + # + # Raises: + # :py:class:`docker.errors.APIError` + # If the server returns an error. + # + # Example: + # + # >>> image = cli.get_image("busybox:latest") + # >>> f = open('/tmp/busybox-latest.tar', 'w') + # >>> for chunk in image: + # >>> f.write(chunk) + # >>> f.close() + # """ + # return self.client.api.get_image(self.id, chunk_size) + + def tag(self, repository, tag=None): + """ + Tag this image into a repository. Similar to the ``docker tag`` + command. 
+ + Args: + repository (str): The repository to set for the tag + tag (str): The tag name + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + + Returns: + (bool): ``True`` if successful + """ + return self.client.api.image.tag(self.id, repository, tag=tag) + + +class ImageCollection(Collection): + model = Image + + async def build(self, **kwargs): + """ + Build an image and return it. Similar to the ``docker build`` + command. Either ``path`` or ``fileobj`` must be set. + + If you have a tar file for the Docker build context (including a + Dockerfile) already, pass a readable file-like object to ``fileobj`` + and also pass ``custom_context=True``. If the stream is compressed + also, set ``encoding`` to the correct value (e.g ``gzip``). + + If you want to get the raw output of the build, use the + :py:meth:`~docker.api.build.BuildApiMixin.build` method in the + low-level API. + + Args: + path (str): Path to the directory containing the Dockerfile + fileobj: A file object to use as the Dockerfile. (Or a file-like + object) + tag (str): A tag to add to the final image + quiet (bool): Whether to return the status + nocache (bool): Don't use the cache when set to ``True`` + rm (bool): Remove intermediate containers. The ``docker build`` + command now defaults to ``--rm=true``, but we have kept the old + default of `False` to preserve backward compatibility + timeout (int): HTTP timeout + custom_context (bool): Optional if using ``fileobj`` + encoding (str): The encoding for a stream. Set to ``gzip`` for + compressing + pull (bool): Downloads any updates to the FROM image in Dockerfiles + forcerm (bool): Always remove intermediate containers, even after + unsuccessful builds + dockerfile (str): path within the build context to the Dockerfile + buildargs (dict): A dictionary of build arguments + container_limits (dict): A dictionary of limits applied to each + container created by the build process. 
Valid keys: + + - memory (int): set memory limit for build + - memswap (int): Total memory (memory + swap), -1 to disable + swap + - cpushares (int): CPU shares (relative weight) + - cpusetcpus (str): CPUs in which to allow execution, e.g., + ``"0-3"``, ``"0,1"`` + shmsize (int): Size of `/dev/shm` in bytes. The size must be + greater than 0. If omitted the system uses 64MB + labels (dict): A dictionary of labels to set on the image + cache_from (list): A list of images used for build cache + resolution + target (str): Name of the build-stage to build in a multi-stage + Dockerfile + network_mode (str): networking mode for the run commands during + build + squash (bool): Squash the resulting images layers into a + single layer. + extra_hosts (dict): Extra hosts to add to /etc/hosts in building + containers, as a mapping of hostname to IP address. + platform (str): Platform in the format ``os[/arch[/variant]]``. + + Returns: + (tuple): The first item is the :py:class:`Image` object for the + image that was build. The second item is a generator of the + build logs as JSON-decoded objects. + + Raises: + :py:class:`docker.errors.BuildError` + If there is an error during the build. + :py:class:`docker.errors.APIError` + If the server returns any other error. + ``TypeError`` + If neither ``path`` nor ``fileobj`` is specified. + """ + json_stream = await self.client.api.image.build(**kwargs) + if isinstance(json_stream, str): + return self.get(json_stream) + last_event = None + image_id = None + for chunk in json_stream: + if 'error' in chunk: + raise BuildError(chunk['error'], json_stream) + if 'stream' in chunk: + match = re.search( + r'(^Successfully built |sha256:)([0-9a-f]+)$', + chunk['stream'] + ) + if match: + image_id = match.group(2) + last_event = chunk + if image_id: + return await self.get(image_id), json_stream + raise BuildError(last_event or 'Unknown', json_stream) + + async def get(self, name): + """ + Gets an image. 
+ + Args: + name (str): The name of the image. + + Returns: + (:py:class:`Image`): The image. + + Raises: + :py:class:`aiodocker.errors.ImageNotFound` + If the image does not exist. + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return self.prepare_model(await self.client.api.image.inspect(name)) + + async def list(self, name=None, all=False, digests=False, filters=None): + """ + List images on the server. + + Args: + name (str): Only show images belonging to the repository ``name`` + all (bool): Show intermediate image layers. By default, these are + filtered out. + digests (bool): Show digest information as a RepoDigests field on each image. + filters (dict): Filters to be processed on the image list. + Available filters: + - ``dangling`` (bool) + - ``label`` (str): format either ``key`` or ``key=value`` + - before=([:], or ) + - reference=([:]) + - since=([:], or ) + + Returns: + (list of :py:class:`Image`): The images. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + resp = await self.client.api.image.list(name=name, all=all, digests=digests, filters=filters) + return [await self.get(r["Id"]) for r in resp] + + async def create(self, attrs=None): + pass + + async def pull(self, repository, tag=None, **kwargs): + """ + Pull an image of the given name and return it. Similar to the + ``docker pull`` command. + If no tag is specified, all tags from that repository will be + pulled. + + If you want to get the raw pull output, use the + :py:meth:`~aiodocker.api.image.pull` method in the + low-level API. + + Args: + repository (str): The repository to pull + tag (str): The tag to pull + auth_config (dict): Override the credentials that + :py:meth:`~aiodocker.client.DockerClient.login` has set for + this request. ``auth_config`` should contain the ``username`` + and ``password`` keys to be valid. + + Returns: + (:py:class:`Image` or list): The image that has been pulled. 
+ If no ``tag`` was specified, the method will return a list + of :py:class:`Image` objects belonging to this repository. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + + Example: + + .. code-block:: python + + >>> # Pull the image tagged `latest` in the busybox repo + >>> image = client.images.pull('busybox:latest') + + >>> # Pull all tags in the busybox repo + >>> images = client.images.pull('busybox') + """ + if not tag: + repository, tag = parse_repository_tag(repository) + + await self.client.api.image.pull(repository, tag=tag, **kwargs) + if tag: + return await self.get('{0}{2}{1}'.format( + repository, tag, '@' if tag.startswith('sha256:') else ':' + )) + return await self.list(repository) + + async def push(self, repository, tag=None, **kwargs): + """ + Push an image or a repository to the registry. Similar to the ``docker + push`` command. + + Args: + repository (str): The repository to push to + tag (str): An optional tag to push + stream (bool): Stream the output as a blocking generator + auth_config (dict): Override the credentials that + :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for + this request. ``auth_config`` should contain the ``username`` + and ``password`` keys to be valid. + decode (bool): Decode the JSON data from the server into dicts. + Only applies with ``stream=True`` + + Returns: + (generator or str): The output from the server. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + + Example: + >>> for line in client.images.push('yourname/app', stream=True): + ... print line + {"status":"Pushing repository yourname/app (1 tags)"} + {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"} + {"status":"Image already pushed, skipping","progressDetail":{}, + "id":"511136ea3c5a"} + ... 
+ + """ + return await self.client.api.image.push(repository, tag=tag, **kwargs) + + async def remove(self, name: str, force: bool = False, noprune: bool = False): + """ + Remove an image along with any untagged parent + images that were referenced by that image + + Args: + name: name/id of the image to delete + force: remove the image even if it is being used + by stopped containers or has other tags + noprune: don't delete untagged parent images + + Returns: + List of deleted images + """ + return await self.client.api.image.remove(name, force=force, noprune=noprune) + + diff --git a/aiodocker/models/resource.py b/aiodocker/models/resource.py new file mode 100644 index 00000000..121b80b5 --- /dev/null +++ b/aiodocker/models/resource.py @@ -0,0 +1,92 @@ + +class Model(object): + """ + A base class for representing a single object on the server. + """ + id_attribute = 'Id' + + def __init__(self, attrs=None, client=None, collection=None): + #: A client pointing at the server that this object is on. + self.client = client + + #: The collection that this model is part of. + self.collection = collection + + #: The raw representation of this object from the API + self.attrs = attrs + if self.attrs is None: + self.attrs = {} + + def __repr__(self): + return "<{}: {}>".format(self.__class__.__name__, self.short_id) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.id == other.id + + def __hash__(self): + return hash("{}:{}".format(self.__class__.__name__, self.id)) + + @property + def id(self): + """ + The ID of the object. + """ + return self.attrs.get(self.id_attribute) + + @property + def short_id(self): + """ + The ID of the object, truncated to 10 characters. + """ + return self.id[:10] + + async def reload(self): + """ + Load this object from the server again and update ``attrs`` with the + new data. 
+ """ + new_model = self.collection.get(self.id) + self.attrs = new_model.attrs + + +class Collection(object): + """ + A base class for representing all objects of a particular type on the + server. + """ + + #: The type of object this collection represents, set by subclasses + model = None + + def __init__(self, client=None): + #: The client pointing at the server that this collection of objects + #: is on. + self.client = client + + def __call__(self, *args, **kwargs): + raise TypeError( + "'{}' object is not callable." + "maybe try aiodocker.APIClient." + .format(self.__class__.__name__)) + + async def list(self): + raise NotImplementedError + + async def get(self, key): + raise NotImplementedError + + async def create(self, attrs=None): + raise NotImplementedError + + def prepare_model(self, attrs): + """ + Create a model from a set of attributes. + """ + if isinstance(attrs, Model): + attrs.client = self.client + attrs.collection = self + return attrs + elif isinstance(attrs, dict): + return self.model(attrs=attrs, client=self.client, collection=self) + else: + raise Exception("Can't create {} from {}".format(self.model.__name__, attrs)) diff --git a/aiodocker/types/__init__.py b/aiodocker/types/__init__.py new file mode 100644 index 00000000..5b26a9a5 --- /dev/null +++ b/aiodocker/types/__init__.py @@ -0,0 +1,4 @@ +# flake8: noqa +from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit +from .healthcheck import Healthcheck +from .networks import NetworkingConfig, EndpointConfig \ No newline at end of file diff --git a/aiodocker/types/base.py b/aiodocker/types/base.py new file mode 100644 index 00000000..42aad7e0 --- /dev/null +++ b/aiodocker/types/base.py @@ -0,0 +1,6 @@ + + +class DictType(dict): + def __init__(self, init): + for k, v in init.items(): + self[k] = v diff --git a/aiodocker/types/containers.py b/aiodocker/types/containers.py new file mode 100644 index 00000000..7f2768a2 --- /dev/null +++ b/aiodocker/types/containers.py @@ -0,0 
+1,513 @@ + +from .. import errors +from ..utils.utils import ( + convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds, + format_environment, format_extra_hosts, normalize_links, parse_bytes, + parse_devices, split_command, +) +from .base import DictType +from .healthcheck import Healthcheck + + +class LogConfigTypesEnum(object): + _values = ( + 'json-file', + 'syslog', + 'journald', + 'gelf', + 'fluentd', + 'none' + ) + JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values + + +class LogConfig(DictType): + types = LogConfigTypesEnum + + def __init__(self, **kwargs): + log_driver_type = kwargs.get('type', kwargs.get('Type')) + config = kwargs.get('config', kwargs.get('Config')) or {} + + if config and not isinstance(config, dict): + raise ValueError("LogConfig.config must be a dictionary") + + super(LogConfig, self).__init__({ + 'Type': log_driver_type, + 'Config': config + }) + + @property + def type(self): + return self['Type'] + + @type.setter + def type(self, value): + self['Type'] = value + + @property + def config(self): + return self['Config'] + + def set_config_value(self, key, value): + self.config[key] = value + + def unset_config(self, key): + if key in self.config: + del self.config[key] + + +class Ulimit(DictType): + def __init__(self, **kwargs): + name = kwargs.get('name', kwargs.get('Name')) + soft = kwargs.get('soft', kwargs.get('Soft')) + hard = kwargs.get('hard', kwargs.get('Hard')) + if not isinstance(name, str): + raise ValueError("Ulimit.name must be a string") + if soft and not isinstance(soft, int): + raise ValueError("Ulimit.soft must be an integer") + if hard and not isinstance(hard, int): + raise ValueError("Ulimit.hard must be an integer") + super(Ulimit, self).__init__({ + 'Name': name, + 'Soft': soft, + 'Hard': hard + }) + + @property + def name(self): + return self['Name'] + + @name.setter + def name(self, value): + self['Name'] = value + + @property + def soft(self): + return self.get('Soft') + + @soft.setter + def 
soft(self, value): + self['Soft'] = value + + @property + def hard(self): + return self.get('Hard') + + @hard.setter + def hard(self, value): + self['Hard'] = value + + +class HostConfig(dict): + def __init__(self, version, binds=None, port_bindings=None, + lxc_conf=None, publish_all_ports=False, links=None, + privileged=False, dns=None, dns_search=None, + volumes_from=None, network_mode=None, restart_policy=None, + cap_add=None, cap_drop=None, devices=None, extra_hosts=None, + read_only=None, pid_mode=None, ipc_mode=None, + security_opt=None, ulimits=None, log_config=None, + mem_limit=None, memswap_limit=None, mem_reservation=None, + kernel_memory=None, mem_swappiness=None, cgroup_parent=None, + group_add=None, cpu_quota=None, cpu_period=None, + blkio_weight=None, blkio_weight_device=None, + device_read_bps=None, device_write_bps=None, + device_read_iops=None, device_write_iops=None, + oom_kill_disable=False, shm_size=None, sysctls=None, + tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None, + cpuset_cpus=None, userns_mode=None, pids_limit=None, + isolation=None, auto_remove=False, storage_opt=None, + init=None, volume_driver=None, + cpu_count=None, cpu_percent=None, nano_cpus=None, + cpuset_mems=None, runtime=None, mounts=None, + cpu_rt_period=None, cpu_rt_runtime=None, + device_cgroup_rules=None): + + if mem_limit is not None: + self['Memory'] = parse_bytes(mem_limit) + + if memswap_limit is not None: + self['MemorySwap'] = parse_bytes(memswap_limit) + + if mem_reservation: + self['MemoryReservation'] = parse_bytes(mem_reservation) + + if kernel_memory: + self['KernelMemory'] = parse_bytes(kernel_memory) + + if mem_swappiness is not None: + if not isinstance(mem_swappiness, int): + raise host_config_type_error( + 'mem_swappiness', mem_swappiness, 'int' + ) + + self['MemorySwappiness'] = mem_swappiness + + if shm_size is not None: + if isinstance(shm_size, bytes): + shm_size = parse_bytes(shm_size) + + self['ShmSize'] = shm_size + + if pid_mode: + 
self['PidMode'] = pid_mode + + if ipc_mode: + self['IpcMode'] = ipc_mode + + if privileged: + self['Privileged'] = privileged + + if oom_kill_disable: + self['OomKillDisable'] = oom_kill_disable + + if oom_score_adj: + if not isinstance(oom_score_adj, int): + raise host_config_type_error( + 'oom_score_adj', oom_score_adj, 'int' + ) + self['OomScoreAdj'] = oom_score_adj + + if publish_all_ports: + self['PublishAllPorts'] = publish_all_ports + + if read_only is not None: + self['ReadonlyRootfs'] = read_only + + if dns_search: + self['DnsSearch'] = dns_search + + if network_mode: + self['NetworkMode'] = network_mode + elif network_mode is None: + self['NetworkMode'] = 'default' + + if restart_policy: + if not isinstance(restart_policy, dict): + raise host_config_type_error( + 'restart_policy', restart_policy, 'dict' + ) + + self['RestartPolicy'] = restart_policy + + if cap_add: + self['CapAdd'] = cap_add + + if cap_drop: + self['CapDrop'] = cap_drop + + if devices: + self['Devices'] = parse_devices(devices) + + if group_add: + self['GroupAdd'] = [str(grp) for grp in group_add] + + if dns is not None: + self['Dns'] = dns + + if dns_opt is not None: + self['DnsOptions'] = dns_opt + + if security_opt is not None: + if not isinstance(security_opt, list): + raise host_config_type_error( + 'security_opt', security_opt, 'list' + ) + + self['SecurityOpt'] = security_opt + + if sysctls: + if not isinstance(sysctls, dict): + raise host_config_type_error('sysctls', sysctls, 'dict') + self['Sysctls'] = {} + for k, v in sysctls.items(): + self['Sysctls'][k] = str(v) + + if volumes_from is not None: + if isinstance(volumes_from, str): + volumes_from = volumes_from.split(',') + + self['VolumesFrom'] = volumes_from + + if binds is not None: + self['Binds'] = convert_volume_binds(binds) + + if port_bindings is not None: + self['PortBindings'] = convert_port_bindings(port_bindings) + + if extra_hosts is not None: + if isinstance(extra_hosts, dict): + extra_hosts = 
format_extra_hosts(extra_hosts) + + self['ExtraHosts'] = extra_hosts + + if links is not None: + self['Links'] = normalize_links(links) + + if isinstance(lxc_conf, dict): + formatted = [] + for k, v in lxc_conf.items(): + formatted.append({'Key': k, 'Value': str(v)}) + lxc_conf = formatted + + if lxc_conf is not None: + self['LxcConf'] = lxc_conf + + if cgroup_parent is not None: + self['CgroupParent'] = cgroup_parent + + if ulimits is not None: + if not isinstance(ulimits, list): + raise host_config_type_error('ulimits', ulimits, 'list') + self['Ulimits'] = [] + for l in ulimits: + if not isinstance(l, Ulimit): + l = Ulimit(**l) + self['Ulimits'].append(l) + + if log_config is not None: + if not isinstance(log_config, LogConfig): + if not isinstance(log_config, dict): + raise host_config_type_error( + 'log_config', log_config, 'LogConfig' + ) + log_config = LogConfig(**log_config) + + self['LogConfig'] = log_config + + if cpu_quota: + if not isinstance(cpu_quota, int): + raise host_config_type_error('cpu_quota', cpu_quota, 'int') + self['CpuQuota'] = cpu_quota + + if cpu_period: + if not isinstance(cpu_period, int): + raise host_config_type_error('cpu_period', cpu_period, 'int') + self['CpuPeriod'] = cpu_period + + if cpu_shares: + if not isinstance(cpu_shares, int): + raise host_config_type_error('cpu_shares', cpu_shares, 'int') + + self['CpuShares'] = cpu_shares + + if cpuset_cpus: + self['CpusetCpus'] = cpuset_cpus + + if cpuset_mems: + if not isinstance(cpuset_mems, str): + raise host_config_type_error( + 'cpuset_mems', cpuset_mems, 'str' + ) + self['CpusetMems'] = cpuset_mems + + if cpu_rt_period: + if not isinstance(cpu_rt_period, int): + raise host_config_type_error( + 'cpu_rt_period', cpu_rt_period, 'int' + ) + self['CPURealtimePeriod'] = cpu_rt_period + + if cpu_rt_runtime: + if not isinstance(cpu_rt_runtime, int): + raise host_config_type_error( + 'cpu_rt_runtime', cpu_rt_runtime, 'int' + ) + self['CPURealtimeRuntime'] = cpu_rt_runtime + + if 
blkio_weight: + if not isinstance(blkio_weight, int): + raise host_config_type_error( + 'blkio_weight', blkio_weight, 'int' + ) + self["BlkioWeight"] = blkio_weight + + if blkio_weight_device: + if not isinstance(blkio_weight_device, list): + raise host_config_type_error( + 'blkio_weight_device', blkio_weight_device, 'list' + ) + self["BlkioWeightDevice"] = blkio_weight_device + + if device_read_bps: + if not isinstance(device_read_bps, list): + raise host_config_type_error( + 'device_read_bps', device_read_bps, 'list' + ) + self["BlkioDeviceReadBps"] = device_read_bps + + if device_write_bps: + if not isinstance(device_write_bps, list): + raise host_config_type_error( + 'device_write_bps', device_write_bps, 'list' + ) + self["BlkioDeviceWriteBps"] = device_write_bps + + if device_read_iops: + if not isinstance(device_read_iops, list): + raise host_config_type_error( + 'device_read_iops', device_read_iops, 'list' + ) + self["BlkioDeviceReadIOps"] = device_read_iops + + if device_write_iops: + if not isinstance(device_write_iops, list): + raise host_config_type_error( + 'device_write_iops', device_write_iops, 'list' + ) + self["BlkioDeviceWriteIOps"] = device_write_iops + + if tmpfs: + self["Tmpfs"] = convert_tmpfs_mounts(tmpfs) + + if userns_mode: + if userns_mode != "host": + raise host_config_value_error("userns_mode", userns_mode) + self['UsernsMode'] = userns_mode + + if pids_limit: + if not isinstance(pids_limit, int): + raise host_config_type_error('pids_limit', pids_limit, 'int') + self["PidsLimit"] = pids_limit + + if isolation: + if not isinstance(isolation, str): + raise host_config_type_error('isolation', isolation, 'string') + self['Isolation'] = isolation + + if auto_remove: + self['AutoRemove'] = auto_remove + + if storage_opt is not None: + self['StorageOpt'] = storage_opt + + if init is not None: + self['Init'] = init + + if volume_driver is not None: + self['VolumeDriver'] = volume_driver + + if cpu_count: + if not isinstance(cpu_count, int): + 
raise host_config_type_error('cpu_count', cpu_count, 'int') + self['CpuCount'] = cpu_count + + if cpu_percent: + if not isinstance(cpu_percent, int): + raise host_config_type_error('cpu_percent', cpu_percent, 'int') + self['CpuPercent'] = cpu_percent + + if nano_cpus: + if not isinstance(nano_cpus, int): + raise host_config_type_error('nano_cpus', nano_cpus, 'int') + self['NanoCpus'] = nano_cpus + + if runtime: + self['Runtime'] = runtime + + if mounts is not None: + self['Mounts'] = mounts + + if device_cgroup_rules is not None: + if not isinstance(device_cgroup_rules, list): + raise host_config_type_error( + 'device_cgroup_rules', device_cgroup_rules, 'list' + ) + self['DeviceCgroupRules'] = device_cgroup_rules + + +def host_config_type_error(param, param_value, expected): + error_msg = 'Invalid type for {0} param: expected {1} but found {2}' + return TypeError(error_msg.format(param, expected, type(param_value))) + + +def host_config_value_error(param, param_value): + error_msg = 'Invalid value for {0} param: {1}' + return ValueError(error_msg.format(param, param_value)) + + +class ContainerConfig(dict): + def __init__( + self, version, image, command, hostname=None, user=None, detach=False, + stdin_open=False, tty=False, ports=None, environment=None, + volumes=None, network_disabled=False, entrypoint=None, + working_dir=None, domainname=None, host_config=None, mac_address=None, + labels=None, stop_signal=None, networking_config=None, + healthcheck=None, stop_timeout=None, runtime=None + ): + + if isinstance(command, str): + command = split_command(command) + + if isinstance(entrypoint, str): + entrypoint = split_command(entrypoint) + + if isinstance(environment, dict): + environment = format_environment(environment) + + if isinstance(labels, list): + labels = dict((lbl, str('')) for lbl in labels) + + if isinstance(ports, list): + exposed_ports = {} + for port_definition in ports: + port = port_definition + proto = 'tcp' + if isinstance(port_definition, tuple): 
+ if len(port_definition) == 2: + proto = port_definition[1] + port = port_definition[0] + exposed_ports['{0}/{1}'.format(port, proto)] = {} + ports = exposed_ports + + if isinstance(volumes, str): + volumes = [volumes, ] + + if isinstance(volumes, list): + volumes_dict = {} + for vol in volumes: + volumes_dict[vol] = {} + volumes = volumes_dict + + if healthcheck and isinstance(healthcheck, dict): + healthcheck = Healthcheck(**healthcheck) + + attach_stdin = False + attach_stdout = False + attach_stderr = False + stdin_once = False + + if not detach: + attach_stdout = True + attach_stderr = True + + if stdin_open: + attach_stdin = True + stdin_once = True + + self.update({ + 'Hostname': hostname, + 'Domainname': domainname, + 'ExposedPorts': ports, + 'User': str(user) if user else None, + 'Tty': tty, + 'OpenStdin': stdin_open, + 'StdinOnce': stdin_once, + 'AttachStdin': attach_stdin, + 'AttachStdout': attach_stdout, + 'AttachStderr': attach_stderr, + 'Env': environment, + 'Cmd': command, + 'Image': image, + 'Volumes': volumes, + 'NetworkDisabled': network_disabled, + 'Entrypoint': entrypoint, + 'WorkingDir': working_dir, + 'HostConfig': host_config, + 'NetworkingConfig': networking_config, + 'MacAddress': mac_address, + 'Labels': labels, + 'StopSignal': stop_signal, + 'Healthcheck': healthcheck, + 'StopTimeout': stop_timeout, + 'Runtime': runtime + }) diff --git a/aiodocker/types/healthcheck.py b/aiodocker/types/healthcheck.py new file mode 100644 index 00000000..b77185f8 --- /dev/null +++ b/aiodocker/types/healthcheck.py @@ -0,0 +1,86 @@ +from .base import DictType + + +class Healthcheck(DictType): + """ + Defines a healthcheck configuration for a container or service. + + Args: + test (:py:class:`list` or str): Test to perform to determine + container health. Possible values: + + - Empty list: Inherit healthcheck from parent image + - ``["NONE"]``: Disable healthcheck + - ``["CMD", args...]``: exec arguments directly. 
+ - ``["CMD-SHELL", command]``: RUn command in the system's + default shell. + + If a string is provided, it will be used as a ``CMD-SHELL`` + command. + interval (int): The time to wait between checks in nanoseconds. It + should be 0 or at least 1000000 (1 ms). + timeout (int): The time to wait before considering the check to + have hung. It should be 0 or at least 1000000 (1 ms). + retries (integer): The number of consecutive failures needed to + consider a container as unhealthy. + start_period (integer): Start period for the container to + initialize before starting health-retries countdown in + nanoseconds. It should be 0 or at least 1000000 (1 ms). + """ + def __init__(self, **kwargs): + test = kwargs.get('test', kwargs.get('Test')) + if isinstance(test, str): + test = ["CMD-SHELL", test] + + interval = kwargs.get('interval', kwargs.get('Interval')) + timeout = kwargs.get('timeout', kwargs.get('Timeout')) + retries = kwargs.get('retries', kwargs.get('Retries')) + start_period = kwargs.get('start_period', kwargs.get('StartPeriod')) + + super(Healthcheck, self).__init__({ + 'Test': test, + 'Interval': interval, + 'Timeout': timeout, + 'Retries': retries, + 'StartPeriod': start_period + }) + + @property + def test(self): + return self['Test'] + + @test.setter + def test(self, value): + self['Test'] = value + + @property + def interval(self): + return self['Interval'] + + @interval.setter + def interval(self, value): + self['Interval'] = value + + @property + def timeout(self): + return self['Timeout'] + + @timeout.setter + def timeout(self, value): + self['Timeout'] = value + + @property + def retries(self): + return self['Retries'] + + @retries.setter + def retries(self, value): + self['Retries'] = value + + @property + def start_period(self): + return self['StartPeriod'] + + @start_period.setter + def start_period(self, value): + self['StartPeriod'] = value diff --git a/aiodocker/types/networks.py b/aiodocker/types/networks.py new file mode 100644 index 
00000000..70279bb4 --- /dev/null +++ b/aiodocker/types/networks.py @@ -0,0 +1,33 @@ +from ..utils.utils import normalize_links + + +class EndpointConfig(dict): + def __init__(self, version, aliases=None, links=None, ipv4_address=None, + ipv6_address=None, link_local_ips=None): + + if aliases: + self["Aliases"] = aliases + + if links: + self["Links"] = normalize_links(links) + + ipam_config = {} + if ipv4_address: + ipam_config['IPv4Address'] = ipv4_address + + if ipv6_address: + ipam_config['IPv6Address'] = ipv6_address + + if link_local_ips is not None: + ipam_config['LinkLocalIPs'] = link_local_ips + + if ipam_config: + self['IPAMConfig'] = ipam_config + + +class NetworkingConfig(dict): + def __init__(self, endpoints_config=None): + if endpoints_config: + self["EndpointsConfig"] = endpoints_config + + diff --git a/aiodocker/utils/__init__.py b/aiodocker/utils/__init__.py new file mode 100644 index 00000000..e7ec2fa4 --- /dev/null +++ b/aiodocker/utils/__init__.py @@ -0,0 +1,4 @@ +from .utils import (httpize, parse_result, _DecodeHelper, identical, human_bool, clean_map, + compose_auth_header, clean_networks, clean_filters, parse_content_type, + format_env, mktar_from_dockerfile) + diff --git a/aiodocker/utils/utils.py b/aiodocker/utils/utils.py new file mode 100644 index 00000000..a62398e5 --- /dev/null +++ b/aiodocker/utils/utils.py @@ -0,0 +1,554 @@ +import asyncio +import base64 +import codecs +from io import BytesIO +import sys +import shlex +from typing import ( + Any, Iterable, Optional, Union, + MutableMapping, Mapping, Tuple, + BinaryIO, IO, +) +import tempfile +import tarfile +import json + +from distutils.version import StrictVersion + +from .. 
import errors + + +BYTE_UNITS = { + 'b': 1, + 'k': 1024, + 'm': 1024 * 1024, + 'g': 1024 * 1024 * 1024 +} + + +def compare_version(v1, v2): + """Compare docker versions + + >>> v1 = '1.9' + >>> v2 = '1.10' + >>> compare_version(v1, v2) + 1 + >>> compare_version(v2, v1) + -1 + >>> compare_version(v2, v2) + 0 + """ + s1 = StrictVersion(v1) + s2 = StrictVersion(v2) + if s1 == s2: + return 0 + elif s1 > s2: + return -1 + else: + return 1 + + +def version_lt(v1, v2): + return compare_version(v1, v2) > 0 + + +def version_gte(v1, v2): + return not version_lt(v1, v2) + + +async def parse_result(response, response_type=None, *, + encoding='utf-8'): + ''' + Convert the response to native objects by the given response type + or the auto-detected HTTP content-type. + It also ensures release of the response object. + ''' + if response_type is None: + ct = response.headers.get('content-type') + if ct is None: + raise TypeError('Cannot auto-detect respone type ' + 'due to missing Content-Type header.') + main_type, sub_type, extras = parse_content_type(ct) + if sub_type == 'json': + response_type = 'json' + elif sub_type == 'x-tar': + response_type = 'tar' + elif (main_type, sub_type) == ('text', 'plain'): + response_type = 'text' + encoding = extras.get('charset', encoding) + else: + raise TypeError("Unrecognized response type: {ct}" + .format(ct=ct)) + if 'tar' == response_type: + what = await response.read() + return tarfile.open(mode='r', fileobj=BytesIO(what)) + if 'json' == response_type: + data = await response.json(encoding=encoding) + elif 'text' == response_type: + data = await response.text(encoding=encoding) + else: + data = await response.read() + return data + + +def parse_content_type(ct: str) -> Tuple[str, str, Mapping[str, str]]: + ''' + Decompose the value of HTTP "Content-Type" header into + the main/sub MIME types and other extra options as a dictionary. + All parsed values are lower-cased automatically. 
+ ''' + pieces = ct.split(';') + try: + main_type, sub_type = pieces[0].split('/') + except ValueError: + msg = 'Invalid mime-type component: "{0}"'.format(pieces[0]) + raise ValueError(msg) + if len(pieces) > 1: + options = {} + for opt in pieces[1:]: + opt = opt.strip() + if not opt: + continue + try: + k, v = opt.split('=', 1) + except ValueError: + msg = 'Invalid option component: "{0}"'.format(opt) + raise ValueError(msg) + else: + options[k.lower()] = v.lower() + else: + options = {} + return main_type.lower(), sub_type.lower(), options + + +def identical(d1, d2): + if type(d1) != type(d2): + return False + + if isinstance(d1, dict): + keys = set(d1.keys()) | set(d2.keys()) + for key in keys: + if not identical(d1.get(key, {}), d2.get(key, {})): + return False + return True + + if isinstance(d1, list): + if len(d1) != len(d2): + return False + + pairs = zip(d1, d2) + return all((identical(x, y) for (x, y) in pairs)) + + return d1 == d2 + + +_true_strs = frozenset(['true', 'yes', 'y', '1']) +_false_strs = frozenset(['false', 'no', 'n', '0']) + + +def human_bool(s) -> bool: + if isinstance(s, str): + if s.lower() in _true_strs: + return True + if s.lower() in _false_strs: + return False + raise ValueError('Cannot interpret {s!r} as boolean.'.format(s=s)) + else: + return bool(s) + + +def httpize(d: Optional[Mapping]) -> Mapping[str, Any]: + if d is None: + return None + converted = {} + for k, v in d.items(): + if isinstance(v, bool): + v = '1' if v else '0' + if not isinstance(v, str): + v = str(v) + converted[k] = v + return converted + + +class _DecodeHelper: + """ + Decode logs from the Docker Engine + """ + + def __init__(self, generator, encoding): + self._gen = generator.__aiter__() + self._decoder = codecs.getincrementaldecoder(encoding)(errors='ignore') + self._flag = False + + def __aiter__(self): + return self + + # to make it compatible with Python 3.5.0 and 3.5.2 + # https://www.python.org/dev/peps/pep-0492/#api-design-and-implementation-revisions 
+ if sys.version_info <= (3, 5, 2): + __aiter__ = asyncio.coroutine(__aiter__) + + async def __anext__(self): + if self._flag: + raise StopAsyncIteration + + # we catch StopAsyncIteration from self._gen + # because we need to close the decoder + # then we raise StopAsyncIteration checking self._flag + try: + stream = await self._gen.__anext__() + except StopAsyncIteration: + self._flag = True + stream_decoded = self._decoder.decode(b'', final=True) + if stream_decoded: + return stream_decoded + raise StopAsyncIteration + else: + return self._decoder.decode(stream) + + +def clean_map(obj: Mapping[Any, Any]) -> Mapping[Any, Any]: + """ + Return a new copied dictionary without the keys with ``None`` values from + the given Mapping object. + """ + return {k: v for k, v in obj.items() if v is not None} + + +def format_env(key, value: Union[None, bytes, str]) -> str: + """ + Formats envs from {key:value} to ['key=value'] + """ + if value is None: + return key + if isinstance(value, bytes): + value = value.decode('utf-8') + + return "{key}={value}".format(key=key, value=value) + + +def clean_networks(networks: Iterable[str]=None) -> Iterable[str]: + """ + Cleans the values inside `networks` + Returns a new list + """ + if not networks: + return networks + if not isinstance(networks, list): + raise TypeError('networks parameter must be a list.') + + result = [] + for n in networks: + if isinstance(n, str): + n = {'Target': n} + result.append(n) + return result + + +def clean_filters(filters: Mapping=None) -> str: + """ + Checks the values inside `filters` + https://docs.docker.com/engine/api/v1.29/#operation/ServiceList + Returns a new dictionary in the format `map[string][]string` jsonized + """ + + if filters and isinstance(filters, dict): + for k, v in filters.items(): + if isinstance(v, bool): + v = 'true' if v else 'false' + if not isinstance(v, list): + v = [v, ] + filters[k] = v + + return json.dumps(filters) + + +def mktar_from_dockerfile(fileobject: BinaryIO) -> 
IO: + """ + Create a zipped tar archive from a Dockerfile + **Remember to close the file object** + Args: + fileobj: a Dockerfile + Returns: + a NamedTemporaryFile() object + """ + + f = tempfile.NamedTemporaryFile() + t = tarfile.open(mode='w:gz', fileobj=f) + + if isinstance(fileobject, BytesIO): + dfinfo = tarfile.TarInfo('Dockerfile') + dfinfo.size = len(fileobject.getvalue()) + fileobject.seek(0) + else: + dfinfo = t.gettarinfo(fileobj=fileobject, arcname='Dockerfile') + + t.addfile(dfinfo, fileobject) + t.close() + f.seek(0) + return f + + +def compose_auth_header(auth: Union[MutableMapping, str, bytes], + registry_addr: str=None) -> str: + """ + Validate and compose base64-encoded authentication header + with an optional support for parsing legacy-style "user:password" + strings. + + Args: + auth: Authentication information + registry_addr: An address of the registry server + + Returns: + A base64-encoded X-Registry-Auth header value + """ + if isinstance(auth, Mapping): + # Validate the JSON format only. + if 'identitytoken' in auth: + pass + elif 'auth' in auth: + return compose_auth_header(auth['auth'], registry_addr) + else: + if registry_addr: + auth['serveraddress'] = registry_addr + auth_json = json.dumps(auth).encode('utf-8') + auth = base64.b64encode(auth_json).decode('ascii') + elif isinstance(auth, (str, bytes)): + # Parse simple "username:password"-formatted strings + # and attach the server address specified. 
+ if isinstance(auth, bytes): + auth = auth.decode('utf-8') + s = base64.b64decode(auth) + username, passwd = s.split(b':', 1) + config = { + "username": username.decode('utf-8'), + "password": passwd.decode('utf-8'), + "email": None, + "serveraddress": registry_addr, + } + auth_json = json.dumps(config).encode('utf-8') + auth = base64.b64encode(auth_json).decode('ascii') + else: + raise TypeError( + "auth must be base64 encoded string/bytes or a dictionary") + return auth + + +def parse_repository_tag(repo_name): + parts = repo_name.rsplit('@', 1) + if len(parts) == 2: + return tuple(parts) + parts = repo_name.rsplit(':', 1) + if len(parts) == 2 and '/' not in parts[1]: + return tuple(parts) + return repo_name, None + + +def parse_bytes(s): + if isinstance(s, (int, float)): + return s + if len(s) == 0: + return 0 + + if s[-2:-1].isalpha() and s[-1].isalpha(): + if s[-1] == "b" or s[-1] == "B": + s = s[:-1] + units = BYTE_UNITS + suffix = s[-1].lower() + + # Check if the variable is a string representation of an int + # without a units part. Assuming that the units are bytes. + if suffix.isdigit(): + digits_part = s + suffix = 'b' + else: + digits_part = s[:-1] + + if suffix in units.keys() or suffix.isdigit(): + try: + digits = int(digits_part) + except ValueError: + raise errors.DockerException( + 'Failed converting the string value for memory ({0}) to' + ' an integer.'.format(digits_part) + ) + + # Reconvert to long for the final result + s = int(digits * units[suffix]) + else: + raise errors.DockerException( + 'The specified value for memory ({0}) should specify the' + ' units. 
The postfix should be one of the `b` `k` `m` `g`' + ' characters'.format(s) + ) + + return s + + +def parse_devices(devices): + device_list = [] + for device in devices: + if isinstance(device, dict): + device_list.append(device) + continue + if not isinstance(device, str): + raise errors.DockerException( + 'Invalid device type {0}'.format(type(device)) + ) + device_mapping = device.split(':') + if device_mapping: + path_on_host = device_mapping[0] + if len(device_mapping) > 1: + path_in_container = device_mapping[1] + else: + path_in_container = path_on_host + if len(device_mapping) > 2: + permissions = device_mapping[2] + else: + permissions = 'rwm' + device_list.append({ + 'PathOnHost': path_on_host, + 'PathInContainer': path_in_container, + 'CgroupPermissions': permissions + }) + return device_list + + +def _convert_port_binding(binding): + result = {'HostIp': '', 'HostPort': ''} + if isinstance(binding, tuple): + if len(binding) == 2: + result['HostPort'] = binding[1] + result['HostIp'] = binding[0] + elif isinstance(binding[0], str): + result['HostIp'] = binding[0] + else: + result['HostPort'] = binding[0] + elif isinstance(binding, dict): + if 'HostPort' in binding: + result['HostPort'] = binding['HostPort'] + if 'HostIp' in binding: + result['HostIp'] = binding['HostIp'] + else: + raise ValueError(binding) + else: + result['HostPort'] = binding + + if result['HostPort'] is None: + result['HostPort'] = '' + else: + result['HostPort'] = str(result['HostPort']) + + return result + + +def convert_port_bindings(port_bindings): + result = {} + for k, v in port_bindings.items(): + key = str(k) + if '/' not in key: + key += '/tcp' + if isinstance(v, list): + result[key] = [_convert_port_binding(binding) for binding in v] + else: + result[key] = [_convert_port_binding(v)] + return result + + +def convert_volume_binds(binds): + if isinstance(binds, list): + return binds + + result = [] + for k, v in binds.items(): + if isinstance(k, bytes): + k = k.decode('utf-8') + 
+ if isinstance(v, dict): + if 'ro' in v and 'mode' in v: + raise ValueError( + 'Binding cannot contain both "ro" and "mode": {}' + .format(repr(v)) + ) + + bind = v['bind'] + if isinstance(bind, bytes): + bind = bind.decode('utf-8') + + if 'ro' in v: + mode = 'ro' if v['ro'] else 'rw' + elif 'mode' in v: + mode = v['mode'] + else: + mode = 'rw' + + result.append('{0}:{1}:{2}').format(k, bind, mode) + else: + if isinstance(v, bytes): + v = v.decode('utf-8') + result.append('{0}:{1}:rw').format(k, v) + return result + + +def convert_tmpfs_mounts(tmpfs): + if isinstance(tmpfs, dict): + return tmpfs + + if not isinstance(tmpfs, list): + raise ValueError( + 'Expected tmpfs value to be either a list or a dict, found: {}' + .format(type(tmpfs).__name__) + ) + + result = {} + for mount in tmpfs: + if isinstance(mount, str): + if ":" in mount: + name, options = mount.split(":", 1) + else: + name = mount + options = "" + + else: + raise ValueError( + "Expected item in tmpfs list to be a string, found: {}" + .format(type(mount).__name__) + ) + + result[name] = options + return result + + +def normalize_links(links): + if isinstance(links, dict): + links = links.items() + + return ['{0}:{1}'.format(k, v) for k, v in sorted(links)] + + +def split_command(command): + return shlex.split(command) + + +def format_environment(environment): + def format_env(key, value): + if value is None: + return key + if isinstance(value, bytes): + value = value.decode('utf-8') + + return u'{key}={value}'.format(key=key, value=value) + return [format_env(*var) for var in environment.items()] + + +def format_extra_hosts(extra_hosts, task=False): + # Use format dictated by Swarm API if container is part of a task + if task: + return [ + '{} {}'.format(v, k) for k, v in sorted(extra_hosts.items()) + ] + + return [ + '{}:{}'.format(k, v) for k, v in sorted(extra_hosts.items) + ] diff --git a/docs/_build/doctrees/environment.pickle b/docs/_build/doctrees/environment.pickle deleted file mode 100644 
index 024c29d8cd6a5d31ada21fc6d054e70ba3775314..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1291553 zcmcG%>5p7Vk|)+v#Y*OVF?rVYx~dO$cLiBI>gbx?8S(657msW*YpQ27gWSl7$c!N4 z>NrSd0V7Bsc6t_YX(ep#X9)|<0{dkJL0AaFepw070j(rR`k=o-fB*sd5A-v0H-B#L zz3_Yy%pRbpm@nS@nYp>Sxw*Ny`M-bapT7NrKl%avpV=Jto2~BaD~;~U)}YtzG`gcB zvwnNrYS-@tkN(Ng&;Em>zdpKb7VEv*XwYcXTZ1EW^J=r#Xx0IttNlUmd80NO zUahuzc-ESQnnH9&Z{5^Na{@}S= z=)G(V2CX`%{PmG}XV@5x`$vz>V&nCw(H*vW-Qm$+p%Le#(XU5WTHR4&VE;ZcXM!JI zwi>T$y?W!wEL7Xoz3LF}9htNC4RA0TwDzu4hlkx-t9R5e^X+Q)U|c{Jiht!f7xHa6C<F2;g$Lx z{(a@{v)yN--k^c@ul5fQNPC96!_i?Im~$f3#pbBf-W@d?oyJk~oY_qO-0k&8+$p44 zOX#)Qpv6y)%xBw&qh_z0++{^dZf!qEYV8*A6ZAx*p5P}Z`vmHjKrGGCsDEUZ4qBt; zcyD(+q~4mx^Gdt52c?rA29192sQHmuaWC$TMs~` z8`O!@(;wfyf9L*}cOcchUi}c-@*3TG`s15lUcd9{ts}EMgjVlW2k8&Lym|A+y(68y4M?zuF&sC zX03}RAK;r_*F9MsHmZYKbFcT>K4=Wh2EE&CwEMf%LES1k+bnx+Wp!SqA@O<_O2>*q zLv+nP!p~#-48yaTm=*uw6?}1I&ilVfA^25h=g7PlSNXY4wL7l1k7#Hu;NOtK0gMAv zX@1oFcmL|B`Ipsi{)m3w0Hs!UzehtaWQJaA{v|XfdD3kp?H2xDps7i3KXIe;3e@FM zYglWvVG0_(@sO#W2@K3`r`qqgx(94YFOb3xzq-ms@=Bl8{Yr0ea1{pRG}>$R`DgN$ zdvaxfZpK&wlfx^m-ql9;D!Oy8-N0vmb2J|BnJqWUL1!0cs@|f0JNidvQ$6qWs~EQb z$jrk=j_@k={$&jQ&R(N#NAR%OXpHd2*;>0-eX-ldPSJgpxf84DdJ#PN^o6pSResA2Z zlfDg){xPKQ%>921v(;|f$A5j){Qa+P{a=6a`=g(Z{=K1Dt~dG(%n6utfN7R$y>=U| zW*bTdbRYJARBwT)e*)u2^Dg=pFTRal#-o3D^c0UW~LjjFP zBiIvZ{^!lVGjpWH@K~DvY`llL6EhYjdwE68;orbzOU29iaXYucM$TIQM`TDq@ z{;^lBy+B{nFJp6hR6Sq|*8I=LdqcC>#vB^7y7|wB=G2IKjbwmkEbWca4^Xof=5T2` zG1&a)+{^{~VYdYjAouHKv*vsb3@soB~+Xzcq7UU_5A%8Z1`3wPp{`X^Q{5 zar0j{|93$BuQ4ck>EGg||8cz6{BNzYh{P^8{|Eh``M+fT{y#E*{|mGHpbI+)%gfd- z&=HXM|6+WUX_8s_7{e%qmj?d7F<@!n|8v|+|2j3kF9m8Fac@j zwPT>AomWb{hwgp;3oDOI50~iRb9Z{3&bZsEv0f*&Ci$W5Y&2+asFeNos?F^ z(-NWV979Ef@|zOxO%_TSwcoro#^PYWp8vyE4NJR5W&nJD3=gFwa^~YI)2U#EfmMX1 zBhq+v3PO%Fz%1JN zuh(rK;@w5e>H_=xpHmB)|N8k6{)<%~kJlq}rOev?Dlc$(>1$4rYkl<12*c$+DIwc` zgZHd#8xyB1>Vs6%;_nfjHCyrbCRpndV|nud{Fz!~$ZE?zdv+ZEZ^yK|q}Gz(1Ujs? 
zj*e*UzdeS6Xzl-0;yovcxVRuzn&RUx5z>Dbo3Dtie?zX=8V1cS^b(G{RsX++`|-`` zAwn+r0V@Ejv*elCM~E8*kJjCo!wP_XVC%vC6};MY9z46F-tM5m@hU=Cp+SOYE^r0! zctgg7dWxVJR!3d-n#u9}qZ+=xY)<#~I3R{U;P3xnY~HT*`~1t&0PBn0;bCX5hn17W z;4I4U3cY3Kp4nf$JNYjW$HGdzJ8JE>5HWa+_PHq2Hr&L{5Korn$*lnvbdPC80M~b5 zeIl$X;Y72G_L%%qcpU#q7<=nEd(6+*-}=E1(C*WnUVTiq#Q)wuB);A6&ws?Pxi(SU zD*eqp9FFR?p;#Lqq){FruuyAJ41(fuBxd;wvWEwbCvyn6(KJU=oIhxE*-RWg{}OcJ z^-CDA-K$k8(zLO=GpKfl*7w^U^j;srrau2AUdEdL&@I>S$h?CQh;YK+UFEL@t9m`7+K$ZZnJCn-yA)^i&vlDV_f`21RNEt{aN&5nhns1aD0${ zue#Mg&FT~ZAtARq91Jl_ZUsUMIRpUjV;Uhf`L^5o-CetuguDHZkM^KIZ9B(2{|BJ# z9&Yv!gKS|*u*-N@+P4UOfpl@#4GFV^o~396Eu87sjW8-wtJazg>g(q7$G?8^!Sg?- zZeN57ePuO=elqiw${wPn(EQ70&ISjuuDyQcizD-Ei6hWTIt)Erzlzx3pgK6b3Kt5C z_MweveO&9*EpkPkdvNnd6@+cf=}HAISFH)1ACQoT_fa;1W44{`ZQwnx66j4wka#(#h+wAMT=J87uqd~+h?9I#Ma zs=yf^jwp~eRMdX;X4Fzpp|nt`bXr4_yP{KOd`8BOKnfMKr`xO0zQjPQ^&7S)1r