summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore2
-rw-r--r--.travis.yml1
-rw-r--r--CONTRIBUTING.md34
-rw-r--r--Dockerfile1
-rw-r--r--Dockerfile-docs5
-rw-r--r--Dockerfile-py31
-rw-r--r--MAINTAINERS6
-rw-r--r--Makefile20
-rw-r--r--README.md79
-rw-r--r--docker/__init__.py5
-rw-r--r--docker/api/__init__.py10
-rw-r--r--docker/api/build.py83
-rw-r--r--docker/api/client.py430
-rw-r--r--docker/api/container.py886
-rw-r--r--docker/api/daemon.py89
-rw-r--r--docker/api/exec_api.py62
-rw-r--r--docker/api/image.py256
-rw-r--r--docker/api/network.py99
-rw-r--r--docker/api/service.py131
-rw-r--r--docker/api/swarm.py207
-rw-r--r--docker/api/volume.py85
-rw-r--r--docker/auth.py (renamed from docker/auth/auth.py)2
-rw-r--r--docker/auth/__init__.py8
-rw-r--r--docker/client.py549
-rw-r--r--docker/constants.py1
-rw-r--r--docker/errors.py93
-rw-r--r--docker/models/__init__.py0
-rw-r--r--docker/models/containers.py883
-rw-r--r--docker/models/images.py269
-rw-r--r--docker/models/networks.py181
-rw-r--r--docker/models/nodes.py88
-rw-r--r--docker/models/resource.py90
-rw-r--r--docker/models/services.py240
-rw-r--r--docker/models/swarm.py146
-rw-r--r--docker/models/volumes.py84
-rw-r--r--docker/ssladapter/__init__.py1
-rw-r--r--docker/tls.py21
-rw-r--r--docker/transport/__init__.py1
-rw-r--r--docker/transport/ssladapter.py (renamed from docker/ssladapter/ssladapter.py)0
-rw-r--r--docker/types/__init__.py4
-rw-r--r--docker/types/containers.py490
-rw-r--r--docker/types/healthcheck.py53
-rw-r--r--docker/types/networks.py104
-rw-r--r--docker/types/services.py126
-rw-r--r--docker/utils/__init__.py10
-rw-r--r--docker/utils/json_stream.py79
-rw-r--r--docker/utils/ports.py (renamed from docker/utils/ports/ports.py)0
-rw-r--r--docker/utils/ports/__init__.py4
-rw-r--r--docker/utils/socket.py10
-rw-r--r--docker/utils/types.py7
-rw-r--r--docker/utils/utils.py599
-rw-r--r--docker/version.py2
-rw-r--r--docs-requirements.txt3
-rw-r--r--docs/_static/custom.css3
-rw-r--r--docs/_templates/page.html2
-rw-r--r--docs/api.md1237
-rw-r--r--docs/api.rst116
-rw-r--r--docs/change-log.md (renamed from docs/change_log.md)2
-rw-r--r--docs/client.rst30
-rw-r--r--docs/conf.py365
-rw-r--r--docs/containers.rst51
-rw-r--r--docs/contributing.md36
-rw-r--r--docs/host-devices.md29
-rw-r--r--docs/hostconfig.md141
-rw-r--r--docs/images.rst39
-rw-r--r--docs/index.md15
-rw-r--r--docs/index.rst93
-rw-r--r--docs/machine.md26
-rw-r--r--docs/networks.md177
-rw-r--r--docs/networks.rst33
-rw-r--r--docs/nodes.rst30
-rw-r--r--docs/port-bindings.md58
-rw-r--r--docs/services.md268
-rw-r--r--docs/services.rst36
-rw-r--r--docs/swarm.md274
-rw-r--r--docs/swarm.rst24
-rw-r--r--docs/tls.md86
-rw-r--r--docs/tls.rst37
-rw-r--r--docs/tmpfs.md33
-rw-r--r--docs/user_guides/swarm_services.md65
-rw-r--r--docs/volumes.md34
-rw-r--r--docs/volumes.rst31
-rw-r--r--mkdocs.yml21
-rw-r--r--setup.py22
-rw-r--r--tests/base.py36
-rw-r--r--tests/helpers.py27
-rw-r--r--tests/integration/api_build_test.py (renamed from tests/integration/build_test.py)8
-rw-r--r--tests/integration/api_client_test.py (renamed from tests/integration/api_test.py)27
-rw-r--r--tests/integration/api_container_test.py (renamed from tests/integration/container_test.py)68
-rw-r--r--tests/integration/api_exec_test.py (renamed from tests/integration/exec_test.py)4
-rw-r--r--tests/integration/api_healthcheck_test.py50
-rw-r--r--tests/integration/api_image_test.py (renamed from tests/integration/image_test.py)12
-rw-r--r--tests/integration/api_network_test.py (renamed from tests/integration/network_test.py)37
-rw-r--r--tests/integration/api_service_test.py (renamed from tests/integration/service_test.py)4
-rw-r--r--tests/integration/api_swarm_test.py (renamed from tests/integration/swarm_test.py)4
-rw-r--r--tests/integration/api_volume_test.py (renamed from tests/integration/volume_test.py)4
-rw-r--r--tests/integration/base.py38
-rw-r--r--tests/integration/client_test.py20
-rw-r--r--tests/integration/conftest.py3
-rw-r--r--tests/integration/errors_test.py4
-rw-r--r--tests/integration/models_containers_test.py204
-rw-r--r--tests/integration/models_images_test.py67
-rw-r--r--tests/integration/models_networks_test.py64
-rw-r--r--tests/integration/models_nodes_test.py34
-rw-r--r--tests/integration/models_resources_test.py16
-rw-r--r--tests/integration/models_services_test.py100
-rw-r--r--tests/integration/models_swarm_test.py22
-rw-r--r--tests/integration/models_volumes_test.py30
-rw-r--r--tests/integration/regression_test.py4
-rw-r--r--tests/unit/api_build_test.py (renamed from tests/unit/build_test.py)9
-rw-r--r--tests/unit/api_container_test.py (renamed from tests/unit/container_test.py)76
-rw-r--r--tests/unit/api_exec_test.py (renamed from tests/unit/exec_test.py)4
-rw-r--r--tests/unit/api_image_test.py (renamed from tests/unit/image_test.py)10
-rw-r--r--tests/unit/api_network_test.py (renamed from tests/unit/network_test.py)24
-rw-r--r--tests/unit/api_test.py93
-rw-r--r--tests/unit/api_volume_test.py (renamed from tests/unit/volume_test.py)4
-rw-r--r--tests/unit/auth_test.py17
-rw-r--r--tests/unit/client_test.py132
-rw-r--r--tests/unit/dockertypes_test.py255
-rw-r--r--tests/unit/errors_test.py22
-rw-r--r--tests/unit/fake_api.py103
-rw-r--r--tests/unit/fake_api_client.py61
-rw-r--r--tests/unit/models_containers_test.py465
-rw-r--r--tests/unit/models_images_test.py102
-rw-r--r--tests/unit/models_networks_test.py64
-rw-r--r--tests/unit/models_resources_test.py14
-rw-r--r--tests/unit/models_services_test.py52
-rw-r--r--tests/unit/ssladapter_test.py12
-rw-r--r--tests/unit/swarm_test.py4
-rw-r--r--tests/unit/utils_json_stream_test.py62
-rw-r--r--tests/unit/utils_test.py348
-rw-r--r--tox.ini2
132 files changed, 8736 insertions, 4140 deletions
diff --git a/.gitignore b/.gitignore
index 34ccd38..e626dc6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,7 +10,7 @@ dist
html/*
# Compiled Documentation
-site/
+_build/
README.rst
env/
diff --git a/.travis.yml b/.travis.yml
index fb62a34..6b48142 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,7 +3,6 @@ language: python
python:
- "3.5"
env:
- - TOX_ENV=py26
- TOX_ENV=py27
- TOX_ENV=py33
- TOX_ENV=py34
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1bd8d42..dbc1c02 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,5 +1,8 @@
# Contributing guidelines
+See the [Docker contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
+The following is specific to docker-py.
+
Thank you for your interest in the project. We look forward to your
contribution. In order to make the process as fast and streamlined as possible,
here is a set of guidelines we recommend you follow.
@@ -100,3 +103,34 @@ here are the steps to get you started.
5. Run `python setup.py develop` to install the dev version of the project
and required dependencies. We recommend you do so inside a
[virtual environment](http://docs.python-guide.org/en/latest/dev/virtualenvs)
+
+## Running the tests & Code Quality
+
+To get the source code and run the unit tests, run:
+```
+$ git clone git://github.com/docker/docker-py.git
+$ cd docker-py
+$ pip install tox
+$ tox
+```
+
+## Building the docs
+
+```
+$ make docs
+$ open _build/index.html
+```
+
+## Release Checklist
+
+Before a new release, please go through the following checklist:
+
+* Bump version in docker/version.py
+* Add a release note in docs/change_log.md
+* Git tag the version
+* Upload to pypi
+
+## Vulnerability Reporting
+For any security issues, please do NOT file an issue or pull request on github!
+Please contact [security@docker.com](mailto:security@docker.com) or read [the
+Docker security page](https://www.docker.com/resources/security/).
diff --git a/Dockerfile b/Dockerfile
index 012a125..993ac01 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,4 @@
FROM python:2.7
-MAINTAINER Joffrey F <joffrey@docker.com>
RUN mkdir /home/docker-py
WORKDIR /home/docker-py
diff --git a/Dockerfile-docs b/Dockerfile-docs
index 1103ffd..705649f 100644
--- a/Dockerfile-docs
+++ b/Dockerfile-docs
@@ -1,8 +1,11 @@
-FROM python:2.7
+FROM python:3.5
RUN mkdir /home/docker-py
WORKDIR /home/docker-py
+COPY requirements.txt /home/docker-py/requirements.txt
+RUN pip install -r requirements.txt
+
COPY docs-requirements.txt /home/docker-py/docs-requirements.txt
RUN pip install -r docs-requirements.txt
diff --git a/Dockerfile-py3 b/Dockerfile-py3
index 21e713b..c746651 100644
--- a/Dockerfile-py3
+++ b/Dockerfile-py3
@@ -1,5 +1,4 @@
FROM python:3.5
-MAINTAINER Joffrey F <joffrey@docker.com>
RUN mkdir /home/docker-py
WORKDIR /home/docker-py
diff --git a/MAINTAINERS b/MAINTAINERS
index ed93c01..1f46236 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12,6 +12,7 @@
[Org."Core maintainers"]
people = [
"aanand",
+ "bfirsh",
"dnephin",
"mnowster",
"mpetazzoni",
@@ -31,6 +32,11 @@
Email = "aanand@docker.com"
GitHub = "aanand"
+ [people.bfirsh]
+ Name = "Ben Firshman"
+ Email = "b@fir.sh"
+ GitHub = "bfirsh"
+
[people.dnephin]
Name = "Daniel Nephin"
Email = "dnephin@gmail.com"
diff --git a/Makefile b/Makefile
index b997722..425fffd 100644
--- a/Makefile
+++ b/Makefile
@@ -27,28 +27,28 @@ test: flake8 unit-test unit-test-py3 integration-dind integration-dind-ssl
.PHONY: unit-test
unit-test: build
- docker run docker-py py.test tests/unit
+ docker run --rm docker-py py.test tests/unit
.PHONY: unit-test-py3
unit-test-py3: build-py3
- docker run docker-py3 py.test tests/unit
+ docker run --rm docker-py3 py.test tests/unit
.PHONY: integration-test
integration-test: build
- docker run -v /var/run/docker.sock:/var/run/docker.sock docker-py py.test tests/integration/${file}
+ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock docker-py py.test tests/integration/${file}
.PHONY: integration-test-py3
integration-test-py3: build-py3
- docker run -v /var/run/docker.sock:/var/run/docker.sock docker-py3 py.test tests/integration/${file}
+ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock docker-py3 py.test tests/integration/${file}
.PHONY: integration-dind
integration-dind: build build-py3
docker rm -vf dpy-dind || :
docker run -d --name dpy-dind --privileged dockerswarm/dind:1.12.0 docker daemon\
-H tcp://0.0.0.0:2375
- docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py\
+ docker run --rm --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py\
py.test tests/integration
- docker run --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py3\
+ docker run --rm --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-py3\
py.test tests/integration
docker rm -vf dpy-dind
@@ -60,21 +60,21 @@ integration-dind-ssl: build-dind-certs build build-py3
-v /tmp --privileged dockerswarm/dind:1.12.0 docker daemon --tlsverify\
--tlscacert=/certs/ca.pem --tlscert=/certs/server-cert.pem\
--tlskey=/certs/server-key.pem -H tcp://0.0.0.0:2375
- docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
+ docker run --rm --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
--env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs"\
--link=dpy-dind-ssl:docker docker-py py.test tests/integration
- docker run --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
+ docker run --rm --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
--env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs"\
--link=dpy-dind-ssl:docker docker-py3 py.test tests/integration
docker rm -vf dpy-dind-ssl dpy-dind-certs
.PHONY: flake8
flake8: build
- docker run docker-py flake8 docker tests
+ docker run --rm docker-py flake8 docker tests
.PHONY: docs
docs: build-docs
- docker run -v `pwd`/docs:/home/docker-py/docs/ -p 8000:8000 docker-py-docs mkdocs serve -a 0.0.0.0:8000
+ docker run --rm -it -v `pwd`:/home/docker-py docker-py-docs sphinx-build docs ./_build
.PHONY: shell
shell: build
diff --git a/README.md b/README.md
index 876ed02..11fcbad 100644
--- a/README.md
+++ b/README.md
@@ -1,26 +1,75 @@
-docker-py
-=========
+# Docker SDK for Python
-[![Build Status](https://travis-ci.org/docker/docker-py.png)](https://travis-ci.org/docker/docker-py)
+[![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
-A Python library for the Docker Remote API. It does everything the `docker` command does, but from within Python – run containers, manage them, pull/push images, etc.
+**Warning:** This readme is for the development version of docker-py, which is significantly different to the stable version. [Documentation for the stable version is here.](https://docker-py.readthedocs.io/)
-Installation
-------------
+A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
-The latest stable version is always available on PyPi.
+## Installation
+
+The latest stable version [is available on PyPi](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
pip install docker-py
-Documentation
--------------
+## Usage
+
+Connect to Docker using the default socket or the configuration in your environment:
+
+```python
+import docker
+client = docker.from_env()
+```
+
+You can run containers:
+
+```python
+>>> client.containers.run("ubuntu", "echo hello world")
+'hello world\n'
+```
+
+You can run containers in the background:
+
+```python
+>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+<Container '45e6d2de7c54'>
+```
+
+You can manage containers:
+
+```python
+>>> client.containers.list()
+[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+>>> container = client.containers.get('45e6d2de7c54')
+
+>>> container.attrs['Config']['Image']
+"bfirsh/reticulate-splines"
+
+>>> container.logs()
+"Reticulating spline 1...\n"
+
+>>> container.stop()
+```
+
+You can stream logs:
+
+```python
+>>> for line in container.logs(stream=True):
+... print line.strip()
+Reticulating spline 2...
+Reticulating spline 3...
+...
+```
-[![Documentation Status](https://readthedocs.org/projects/docker-py/badge/?version=latest)](https://readthedocs.org/projects/docker-py/?badge=latest)
+You can manage images:
-[Read the full documentation here](https://docker-py.readthedocs.io/en/latest/).
-The source is available in the `docs/` directory.
+```python
+>>> client.images.pull('nginx')
+<Image 'nginx'>
+>>> client.images.list()
+[<Image 'ubuntu'>, <Image 'nginx'>, ...]
+```
-License
--------
-Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text
+[Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.
diff --git a/docker/__init__.py b/docker/__init__.py
index 0f4c8ec..96a9ef0 100644
--- a/docker/__init__.py
+++ b/docker/__init__.py
@@ -1,6 +1,7 @@
+# flake8: noqa
+from .api import APIClient
+from .client import DockerClient, from_env
from .version import version, version_info
__version__ = version
__title__ = 'docker-py'
-
-from .client import Client, AutoVersionClient, from_env # flake8: noqa
diff --git a/docker/api/__init__.py b/docker/api/__init__.py
index bc7e93c..ff51844 100644
--- a/docker/api/__init__.py
+++ b/docker/api/__init__.py
@@ -1,10 +1,2 @@
# flake8: noqa
-from .build import BuildApiMixin
-from .container import ContainerApiMixin
-from .daemon import DaemonApiMixin
-from .exec_api import ExecApiMixin
-from .image import ImageApiMixin
-from .network import NetworkApiMixin
-from .service import ServiceApiMixin
-from .swarm import SwarmApiMixin
-from .volume import VolumeApiMixin
+from .client import APIClient
diff --git a/docker/api/build.py b/docker/api/build.py
index 68aa962..7cf4e0f 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -19,6 +19,89 @@ class BuildApiMixin(object):
forcerm=False, dockerfile=None, container_limits=None,
decode=False, buildargs=None, gzip=False, shmsize=None,
labels=None):
+ """
+ Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
+ needs to be set. ``path`` can be a local path (to a directory
+ containing a Dockerfile) or a remote URL. ``fileobj`` must be a
+ readable file-like object to a Dockerfile.
+
+ If you have a tar file for the Docker build context (including a
+ Dockerfile) already, pass a readable file-like object to ``fileobj``
+ and also pass ``custom_context=True``. If the stream is compressed
+    also, set ``encoding`` to the correct value (e.g. ``gzip``).
+
+ Example:
+ >>> from io import BytesIO
+ >>> from docker import APIClient
+ >>> dockerfile = '''
+ ... # Shared Volume
+ ... FROM busybox:buildroot-2014.02
+ ... VOLUME /data
+ ... CMD ["/bin/sh"]
+ ... '''
+ >>> f = BytesIO(dockerfile.encode('utf-8'))
+ >>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
+ >>> response = [line for line in cli.build(
+ ... fileobj=f, rm=True, tag='yourname/volume'
+ ... )]
+ >>> response
+ ['{"stream":" ---\\u003e a9eb17255234\\n"}',
+ '{"stream":"Step 1 : VOLUME /data\\n"}',
+ '{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
+ '{"stream":" ---\\u003e 713bca62012e\\n"}',
+ '{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
+ '{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
+ '{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
+ '{"stream":" ---\\u003e 032b8b2855fc\\n"}',
+ '{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
+ '{"stream":"Successfully built 032b8b2855fc\\n"}']
+
+ Args:
+ path (str): Path to the directory containing the Dockerfile
+ fileobj: A file object to use as the Dockerfile. (Or a file-like
+ object)
+ tag (str): A tag to add to the final image
+ quiet (bool): Whether to return the status
+ nocache (bool): Don't use the cache when set to ``True``
+ rm (bool): Remove intermediate containers. The ``docker build``
+ command now defaults to ``--rm=true``, but we have kept the old
+ default of `False` to preserve backward compatibility
+ stream (bool): *Deprecated for API version > 1.8 (always True)*.
+ Return a blocking generator you can iterate over to retrieve
+ build output as it happens
+ timeout (int): HTTP timeout
+ custom_context (bool): Optional if using ``fileobj``
+ encoding (str): The encoding for a stream. Set to ``gzip`` for
+ compressing
+ pull (bool): Downloads any updates to the FROM image in Dockerfiles
+ forcerm (bool): Always remove intermediate containers, even after
+ unsuccessful builds
+ dockerfile (str): path within the build context to the Dockerfile
+ buildargs (dict): A dictionary of build arguments
+ container_limits (dict): A dictionary of limits applied to each
+ container created by the build process. Valid keys:
+
+ - memory (int): set memory limit for build
+ - memswap (int): Total memory (memory + swap), -1 to disable
+ swap
+ - cpushares (int): CPU shares (relative weight)
+ - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+ ``"0-3"``, ``"0,1"``
+ decode (bool): If set to ``True``, the returned stream will be
+ decoded into dicts on the fly. Default ``False``.
+ shmsize (int): Size of `/dev/shm` in bytes. The size must be
+ greater than 0. If omitted the system uses 64MB.
+ labels (dict): A dictionary of labels to set on the image.
+
+ Returns:
+ A generator for the build output.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ ``TypeError``
+ If neither ``path`` nor ``fileobj`` is specified.
+ """
remote = context = None
headers = {}
container_limits = container_limits or {}
diff --git a/docker/api/client.py b/docker/api/client.py
new file mode 100644
index 0000000..0b4d161
--- /dev/null
+++ b/docker/api/client.py
@@ -0,0 +1,430 @@
+import json
+import struct
+import warnings
+from functools import partial
+
+import requests
+import requests.exceptions
+import six
+import websocket
+
+from .build import BuildApiMixin
+from .container import ContainerApiMixin
+from .daemon import DaemonApiMixin
+from .exec_api import ExecApiMixin
+from .image import ImageApiMixin
+from .network import NetworkApiMixin
+from .service import ServiceApiMixin
+from .swarm import SwarmApiMixin
+from .volume import VolumeApiMixin
+from .. import auth
+from ..constants import (DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT,
+ IS_WINDOWS_PLATFORM, DEFAULT_DOCKER_API_VERSION,
+ STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS,
+ MINIMUM_DOCKER_API_VERSION)
+from ..errors import (DockerException, TLSParameterError,
+ create_api_error_from_http_exception)
+from ..tls import TLSConfig
+from ..transport import SSLAdapter, UnixAdapter
+from ..utils import utils, check_resource, update_headers
+from ..utils.socket import frames_iter
+try:
+ from ..transport import NpipeAdapter
+except ImportError:
+ pass
+
+
+class APIClient(
+ requests.Session,
+ BuildApiMixin,
+ ContainerApiMixin,
+ DaemonApiMixin,
+ ExecApiMixin,
+ ImageApiMixin,
+ NetworkApiMixin,
+ ServiceApiMixin,
+ SwarmApiMixin,
+ VolumeApiMixin):
+ """
+ A low-level client for the Docker Remote API.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
+ >>> client.version()
+ {u'ApiVersion': u'1.24',
+ u'Arch': u'amd64',
+ u'BuildTime': u'2016-09-27T23:38:15.810178467+00:00',
+ u'Experimental': True,
+ u'GitCommit': u'45bed2c',
+ u'GoVersion': u'go1.6.3',
+ u'KernelVersion': u'4.4.22-moby',
+ u'Os': u'linux',
+ u'Version': u'1.12.2-rc1'}
+
+ Args:
+ base_url (str): URL to the Docker server. For example,
+ ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.24``
+ timeout (int): Default timeout for API calls, in seconds.
+ tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+ ``True`` to enable it with default options, or pass a
+ :py:class:`~docker.tls.TLSConfig` object to use custom
+ configuration.
+ user_agent (str): Set a custom user agent for requests to the server.
+ """
+ def __init__(self, base_url=None, version=None,
+ timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
+ user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS):
+ super(APIClient, self).__init__()
+
+ if tls and not base_url:
+ raise TLSParameterError(
+ 'If using TLS, the base_url argument must be provided.'
+ )
+
+ self.base_url = base_url
+ self.timeout = timeout
+ self.headers['User-Agent'] = user_agent
+
+ self._auth_configs = auth.load_config()
+
+ base_url = utils.parse_host(
+ base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
+ )
+ if base_url.startswith('http+unix://'):
+ self._custom_adapter = UnixAdapter(
+ base_url, timeout, pool_connections=num_pools
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ self.base_url = 'http+docker://localunixsocket'
+ elif base_url.startswith('npipe://'):
+ if not IS_WINDOWS_PLATFORM:
+ raise DockerException(
+ 'The npipe:// protocol is only supported on Windows'
+ )
+ try:
+ self._custom_adapter = NpipeAdapter(
+ base_url, timeout, pool_connections=num_pools
+ )
+ except NameError:
+ raise DockerException(
+ 'Install pypiwin32 package to enable npipe:// support'
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self.base_url = 'http+docker://localnpipe'
+ else:
+ # Use SSLAdapter for the ability to specify SSL version
+ if isinstance(tls, TLSConfig):
+ tls.configure_client(self)
+ elif tls:
+ self._custom_adapter = SSLAdapter(pool_connections=num_pools)
+ self.mount('https://', self._custom_adapter)
+ self.base_url = base_url
+
+ # version detection needs to be after unix adapter mounting
+ if version is None:
+ self._version = DEFAULT_DOCKER_API_VERSION
+ elif isinstance(version, six.string_types):
+ if version.lower() == 'auto':
+ self._version = self._retrieve_server_version()
+ else:
+ self._version = version
+ else:
+ raise DockerException(
+ 'Version parameter must be a string or None. Found {0}'.format(
+ type(version).__name__
+ )
+ )
+ if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
+ warnings.warn(
+ 'The minimum API version supported is {}, but you are using '
+ 'version {}. It is recommended you either upgrade Docker '
+ 'Engine or use an older version of docker-py.'.format(
+ MINIMUM_DOCKER_API_VERSION, self._version)
+ )
+
+ def _retrieve_server_version(self):
+ try:
+ return self.version(api_version=False)["ApiVersion"]
+ except KeyError:
+ raise DockerException(
+ 'Invalid response from docker daemon: key "ApiVersion"'
+ ' is missing.'
+ )
+ except Exception as e:
+ raise DockerException(
+ 'Error while fetching server API version: {0}'.format(e)
+ )
+
+ def _set_request_timeout(self, kwargs):
+ """Prepare the kwargs for an HTTP request by inserting the timeout
+ parameter, if not already present."""
+ kwargs.setdefault('timeout', self.timeout)
+ return kwargs
+
+ @update_headers
+ def _post(self, url, **kwargs):
+ return self.post(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _get(self, url, **kwargs):
+ return self.get(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _put(self, url, **kwargs):
+ return self.put(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _delete(self, url, **kwargs):
+ return self.delete(url, **self._set_request_timeout(kwargs))
+
+ def _url(self, pathfmt, *args, **kwargs):
+ for arg in args:
+ if not isinstance(arg, six.string_types):
+ raise ValueError(
+ 'Expected a string but found {0} ({1}) '
+ 'instead'.format(arg, type(arg))
+ )
+
+ quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
+ args = map(quote_f, args)
+
+ if kwargs.get('versioned_api', True):
+ return '{0}/v{1}{2}'.format(
+ self.base_url, self._version, pathfmt.format(*args)
+ )
+ else:
+ return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
+
+ def _raise_for_status(self, response):
+ """Raises stored :class:`APIError`, if one occurred."""
+ try:
+ response.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ raise create_api_error_from_http_exception(e)
+
+ def _result(self, response, json=False, binary=False):
+ assert not (json and binary)
+ self._raise_for_status(response)
+
+ if json:
+ return response.json()
+ if binary:
+ return response.content
+ return response.text
+
+ def _post_json(self, url, data, **kwargs):
+ # Go <1.1 can't unserialize null to a string
+ # so we do this disgusting thing here.
+ data2 = {}
+ if data is not None:
+ for k, v in six.iteritems(data):
+ if v is not None:
+ data2[k] = v
+
+ if 'headers' not in kwargs:
+ kwargs['headers'] = {}
+ kwargs['headers']['Content-Type'] = 'application/json'
+ return self._post(url, data=json.dumps(data2), **kwargs)
+
+ def _attach_params(self, override=None):
+ return override or {
+ 'stdout': 1,
+ 'stderr': 1,
+ 'stream': 1
+ }
+
+ @check_resource
+ def _attach_websocket(self, container, params=None):
+ url = self._url("/containers/{0}/attach/ws", container)
+ req = requests.Request("POST", url, params=self._attach_params(params))
+ full_url = req.prepare().url
+ full_url = full_url.replace("http://", "ws://", 1)
+ full_url = full_url.replace("https://", "wss://", 1)
+ return self._create_websocket_connection(full_url)
+
+ def _create_websocket_connection(self, url):
+ return websocket.create_connection(url)
+
+ def _get_raw_response_socket(self, response):
+ self._raise_for_status(response)
+ if self.base_url == "http+docker://localnpipe":
+ sock = response.raw._fp.fp.raw.sock
+ elif six.PY3:
+ sock = response.raw._fp.fp.raw
+ if self.base_url.startswith("https://"):
+ sock = sock._sock
+ else:
+ sock = response.raw._fp.fp._sock
+ try:
+ # Keep a reference to the response to stop it being garbage
+ # collected. If the response is garbage collected, it will
+ # close TLS sockets.
+ sock._response = response
+ except AttributeError:
+ # UNIX sockets can't have attributes set on them, but that's
+ # fine because we won't be doing TLS over them
+ pass
+
+ return sock
+
+ def _stream_helper(self, response, decode=False):
+ """Generator for data coming from a chunked-encoded HTTP response."""
+ if response.raw._fp.chunked:
+ reader = response.raw
+ while not reader.closed:
+ # this read call will block until we get a chunk
+ data = reader.read(1)
+ if not data:
+ break
+ if reader._fp.chunk_left:
+ data += reader.read(reader._fp.chunk_left)
+ if decode:
+ if six.PY3:
+ data = data.decode('utf-8')
+ # remove the trailing newline
+ data = data.strip()
+ # split the data at any newlines
+ data_list = data.split("\r\n")
+                # load and yield each line separately
+ for data in data_list:
+ data = json.loads(data)
+ yield data
+ else:
+ yield data
+ else:
+ # Response isn't chunked, meaning we probably
+ # encountered an error immediately
+ yield self._result(response, json=decode)
+
+ def _multiplexed_buffer_helper(self, response):
+ """A generator of multiplexed data blocks read from a buffered
+ response."""
+ buf = self._result(response, binary=True)
+ walker = 0
+ while True:
+ if len(buf[walker:]) < 8:
+ break
+ _, length = struct.unpack_from('>BxxxL', buf[walker:])
+ start = walker + STREAM_HEADER_SIZE_BYTES
+ end = start + length
+ walker = end
+ yield buf[start:end]
+
+ def _multiplexed_response_stream_helper(self, response):
+ """A generator of multiplexed data blocks coming from a response
+ stream."""
+
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ while True:
+ header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
+ if not header:
+ break
+ _, length = struct.unpack('>BxxxL', header)
+ if not length:
+ continue
+ data = response.raw.read(length)
+ if not data:
+ break
+ yield data
+
+ def _stream_raw_result_old(self, response):
+ ''' Stream raw output for API versions below 1.6 '''
+ self._raise_for_status(response)
+ for line in response.iter_lines(chunk_size=1,
+ decode_unicode=True):
+ # filter out keep-alive new lines
+ if line:
+ yield line
+
+ def _stream_raw_result(self, response):
+ ''' Stream result for TTY-enabled container above API 1.6 '''
+ self._raise_for_status(response)
+ for out in response.iter_content(chunk_size=1, decode_unicode=True):
+ yield out
+
+ def _read_from_socket(self, response, stream):
+ socket = self._get_raw_response_socket(response)
+
+ if stream:
+ return frames_iter(socket)
+ else:
+ return six.binary_type().join(frames_iter(socket))
+
+ def _disable_socket_timeout(self, socket):
+ """ Depending on the combination of python version and whether we're
+ connecting over http or https, we might need to access _sock, which
+ may or may not exist; or we may need to just settimeout on socket
+ itself, which also may or may not have settimeout on it. To avoid
+ missing the correct one, we try both.
+
+ We also do not want to set the timeout if it is already disabled, as
+ you run the risk of changing a socket that was non-blocking to
+ blocking, for example when using gevent.
+ """
+ sockets = [socket, getattr(socket, '_sock', None)]
+
+ for s in sockets:
+ if not hasattr(s, 'settimeout'):
+ continue
+
+ timeout = -1
+
+ if hasattr(s, 'gettimeout'):
+ timeout = s.gettimeout()
+
+ # Don't change the timeout if it is already disabled.
+ if timeout is None or timeout == 0.0:
+ continue
+
+ s.settimeout(None)
+
+ def _get_result(self, container, stream, res):
+ cont = self.inspect_container(container)
+ return self._get_result_tty(stream, res, cont['Config']['Tty'])
+
+ def _get_result_tty(self, stream, res, is_tty):
+ # Stream multi-plexing was only introduced in API v1.6. Anything
+ # before that needs old-style streaming.
+ if utils.compare_version('1.6', self._version) < 0:
+ return self._stream_raw_result_old(res)
+
+ # We should also use raw streaming (without keep-alives)
+ # if we're dealing with a tty-enabled container.
+ if is_tty:
+ return self._stream_raw_result(res) if stream else \
+ self._result(res, binary=True)
+
+ self._raise_for_status(res)
+ sep = six.binary_type()
+ if stream:
+ return self._multiplexed_response_stream_helper(res)
+ else:
+ return sep.join(
+ [x for x in self._multiplexed_buffer_helper(res)]
+ )
+
+ def _unmount(self, *args):
+ for proto in args:
+ self.adapters.pop(proto)
+
+ def get_adapter(self, url):
+ try:
+ return super(APIClient, self).get_adapter(url)
+ except requests.exceptions.InvalidSchema as e:
+ if self._custom_adapter:
+ return self._custom_adapter
+ else:
+ raise e
+
+ @property
+ def api_version(self):
+ return self._version
diff --git a/docker/api/container.py b/docker/api/container.py
index d71d17a..afe696c 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -4,13 +4,39 @@ from datetime import datetime
from .. import errors
from .. import utils
-from ..utils.utils import create_networking_config, create_endpoint_config
+from ..types import (
+ ContainerConfig, EndpointConfig, HostConfig, NetworkingConfig
+)
class ContainerApiMixin(object):
@utils.check_resource
def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False):
+ """
+ Attach to a container.
+
+ The ``.logs()`` function is a wrapper around this method, which you can
+ use instead if you want to fetch/stream container output without first
+ retrieving the entire backlog.
+
+ Args:
+ container (str): The container to attach to.
+ stdout (bool): Include stdout.
+ stderr (bool): Include stderr.
+ stream (bool): Return container output progressively as an iterator
+ of strings, rather than a single string.
+ logs (bool): Include the container's previous output.
+
+ Returns:
+ By default, the container's output as a single string.
+
+ If ``stream=True``, an iterator of output strings.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'logs': logs and 1 or 0,
'stdout': stdout and 1 or 0,
@@ -30,6 +56,20 @@ class ContainerApiMixin(object):
@utils.check_resource
def attach_socket(self, container, params=None, ws=False):
+ """
+ Like ``attach``, but returns the underlying socket-like object for the
+ HTTP request.
+
+ Args:
+ container (str): The container to attach to.
+ params (dict): Dictionary of request parameters (e.g. ``stdout``,
+ ``stderr``, ``stream``).
+ ws (bool): Use websockets instead of raw HTTP.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if params is None:
params = {
'stdout': 1,
@@ -56,6 +96,26 @@ class ContainerApiMixin(object):
@utils.check_resource
def commit(self, container, repository=None, tag=None, message=None,
author=None, changes=None, conf=None):
+ """
+ Commit a container to an image. Similar to the ``docker commit``
+ command.
+
+ Args:
+ container (str): The image hash of the container
+ repository (str): The repository to push the image to
+ tag (str): The tag to push
+ message (str): A commit message
+ author (str): The name of the author
+ changes (str): Dockerfile instructions to apply while committing
+ conf (dict): The configuration for the container. See the
+ `Remote API documentation
+ <https://docs.docker.com/reference/api/docker_remote_api/>`_
+ for full details.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'container': container,
'repo': repository,
@@ -71,6 +131,50 @@ class ContainerApiMixin(object):
def containers(self, quiet=False, all=False, trunc=False, latest=False,
since=None, before=None, limit=-1, size=False,
filters=None):
+ """
+ List containers. Similar to the ``docker ps`` command.
+
+ Args:
+ quiet (bool): Only display numeric Ids
+            all (bool): Show all containers. Only running containers are shown by default.
+            trunc (bool): Truncate output
+ latest (bool): Show only the latest created container, include
+ non-running ones.
+ since (str): Show only containers created since Id or Name, include
+ non-running ones
+ before (str): Show only container created before Id or Name,
+ include non-running ones
+ limit (int): Show `limit` last created containers, include
+ non-running ones
+ size (bool): Display sizes
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+
+ - `exited` (int): Only containers with specified exit code
+ - `status` (str): One of ``restarting``, ``running``,
+ ``paused``, ``exited``
+ - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `id` (str): The id of the container.
+ - `name` (str): The name of the container.
+ - `ancestor` (str): Filter by container ancestor. Format of
+ ``<image-name>[:tag]``, ``<image-id>``, or
+ ``<image@digest>``.
+ - `before` (str): Only containers created before a particular
+ container. Give the container name or id.
+ - `since` (str): Only containers created after a particular
+ container. Give container name or id.
+
+ A comprehensive list can be found in the documentation for
+ `docker ps
+ <https://docs.docker.com/engine/reference/commandline/ps>`_.
+
+ Returns:
+ A list of dicts, one per container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'limit': 1 if latest else limit,
'all': 1 if all else 0,
@@ -93,14 +197,32 @@ class ContainerApiMixin(object):
@utils.check_resource
def copy(self, container, resource):
+ """
+ Identical to the ``docker cp`` command. Get files/folders from the
+ container.
+
+ **Deprecated for API version >= 1.20.** Use
+ :py:meth:`~ContainerApiMixin.get_archive` instead.
+
+ Args:
+ container (str): The container to copy from
+ resource (str): The path within the container
+
+ Returns:
+ The contents of the file as a string
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if utils.version_gte(self._version, '1.20'):
warnings.warn(
- 'Client.copy() is deprecated for API version >= 1.20, '
+ 'APIClient.copy() is deprecated for API version >= 1.20, '
'please use get_archive() instead',
DeprecationWarning
)
res = self._post_json(
- self._url("/containers/{0}/copy".format(container)),
+ self._url("/containers/{0}/copy", container),
data={"Resource": resource},
stream=True
)
@@ -115,8 +237,192 @@ class ContainerApiMixin(object):
cpu_shares=None, working_dir=None, domainname=None,
memswap_limit=None, cpuset=None, host_config=None,
mac_address=None, labels=None, volume_driver=None,
- stop_signal=None, networking_config=None):
+ stop_signal=None, networking_config=None,
+ healthcheck=None):
+ """
+ Creates a container. Parameters are similar to those for the ``docker
+ run`` command except it doesn't support the attach options (``-a``).
+
+ The arguments that are passed directly to this function are
+ host-independent configuration options. Host-specific configuration
+ is passed with the `host_config` argument. You'll normally want to
+ use this method in combination with the :py:meth:`create_host_config`
+ method to generate ``host_config``.
+
+ **Port bindings**
+
+ Port binding is done in two parts: first, provide a list of ports to
+ open inside the container with the ``ports`` parameter, then declare
+ bindings with the ``host_config`` parameter. For example:
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', ports=[1111, 2222],
+ host_config=cli.create_host_config(port_bindings={
+ 1111: 4567,
+ 2222: None
+ })
+ )
+
+
+ You can limit the host address on which the port will be exposed like
+ such:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
+
+ Or without host port assignment:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
+
+ If you wish to use UDP instead of TCP (default), you need to declare
+ ports as such in both the config and host config:
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', ports=[(1111, 'udp'), 2222],
+ host_config=cli.create_host_config(port_bindings={
+ '1111/udp': 4567, 2222: None
+ })
+ )
+
+ To bind multiple host ports to a single container port, use the
+ following syntax:
+
+ .. code-block:: python
+
+ cli.create_host_config(port_bindings={
+ 1111: [1234, 4567]
+ })
+
+ You can also bind multiple IPs to a single container port:
+
+ .. code-block:: python
+ cli.create_host_config(port_bindings={
+ 1111: [
+ ('192.168.0.100', 1234),
+ ('192.168.0.101', 1234)
+ ]
+ })
+
+ **Using volumes**
+
+ Volume declaration is done in two parts. Provide a list of mountpoints
+        to the container with the ``volumes`` parameter, and declare mappings in the
+ ``host_config`` section.
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
+ host_config=cli.create_host_config(binds={
+ '/home/user1/': {
+ 'bind': '/mnt/vol2',
+ 'mode': 'rw',
+ },
+ '/var/www': {
+ 'bind': '/mnt/vol1',
+ 'mode': 'ro',
+ }
+ })
+ )
+
+ You can alternatively specify binds as a list. This code is equivalent
+ to the example above:
+
+ .. code-block:: python
+
+ container_id = cli.create_container(
+ 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
+ host_config=cli.create_host_config(binds=[
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ ])
+ )
+
+ **Networking**
+
+ You can specify networks to connect the container to by using the
+ ``networking_config`` parameter. At the time of creation, you can
+        only connect a container to a single network, but you
+ can create more connections by using
+ :py:meth:`~connect_container_to_network`.
+
+ For example:
+
+ .. code-block:: python
+
+ networking_config = docker_client.create_networking_config({
+ 'network1': docker_client.create_endpoint_config(
+ ipv4_address='172.28.0.124',
+ aliases=['foo', 'bar'],
+ links=['container2']
+ )
+ })
+
+ ctnr = docker_client.create_container(
+ img, command, networking_config=networking_config
+ )
+
+ Args:
+ image (str): The image to run
+ command (str or list): The command to be run in the container
+ hostname (str): Optional hostname for the container
+ user (str or int): Username or UID
+ detach (bool): Detached mode: run container in the background and
+ return container ID
+ stdin_open (bool): Keep STDIN open even if not attached
+ tty (bool): Allocate a pseudo-TTY
+ mem_limit (float or str): Memory limit. Accepts float values (which
+ represent the memory limit of the created container in bytes)
+ or a string with a units identification char (``100000b``,
+ ``1000k``, ``128m``, ``1g``). If a string is specified without
+ a units character, bytes are assumed as an intended unit.
+ ports (list of ints): A list of port numbers
+ environment (dict or list): A dictionary or a list of strings in
+ the following format ``["PASSWORD=xxx"]`` or
+ ``{"PASSWORD": "xxx"}``.
+ dns (list): DNS name servers. Deprecated since API version 1.10.
+ Use ``host_config`` instead.
+ dns_opt (list): Additional options to be added to the container's
+ ``resolv.conf`` file
+            volumes (str or list): List of paths inside the container to use as volumes.
+ volumes_from (list): List of container names or Ids to get
+ volumes from.
+ network_disabled (bool): Disable networking
+ name (str): A name for the container
+ entrypoint (str or list): An entrypoint
+ working_dir (str): Path to the working directory
+ domainname (str or list): Set custom DNS search domains
+            memswap_limit (int): Maximum amount of memory + swap the container is allowed to consume.
+ host_config (dict): A dictionary created with
+ :py:meth:`create_host_config`.
+ mac_address (str): The Mac Address to assign the container
+ labels (dict or list): A dictionary of name-value labels (e.g.
+ ``{"label1": "value1", "label2": "value2"}``) or a list of
+ names of labels to set with empty values (e.g.
+ ``["label1", "label2"]``)
+ volume_driver (str): The name of a volume driver/plugin.
+ stop_signal (str): The stop signal to use to stop the container
+ (e.g. ``SIGINT``).
+ networking_config (dict): A networking configuration generated
+ by :py:meth:`create_networking_config`.
+
+ Returns:
+ A dictionary with an image 'Id' key and a 'Warnings' key.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if isinstance(volumes, six.string_types):
volumes = [volumes, ]
@@ -126,16 +432,16 @@ class ContainerApiMixin(object):
)
config = self.create_container_config(
- image, command, hostname, user, detach, stdin_open,
- tty, mem_limit, ports, environment, dns, volumes, volumes_from,
+ image, command, hostname, user, detach, stdin_open, tty, mem_limit,
+ ports, dns, environment, volumes, volumes_from,
network_disabled, entrypoint, cpu_shares, working_dir, domainname,
memswap_limit, cpuset, host_config, mac_address, labels,
- volume_driver, stop_signal, networking_config,
+ volume_driver, stop_signal, networking_config, healthcheck,
)
return self.create_container_from_config(config, name)
def create_container_config(self, *args, **kwargs):
- return utils.create_container_config(self._version, *args, **kwargs)
+ return ContainerConfig(self._version, *args, **kwargs)
def create_container_from_config(self, config, name=None):
u = self._url("/containers/create")
@@ -146,6 +452,130 @@ class ContainerApiMixin(object):
return self._result(res, True)
def create_host_config(self, *args, **kwargs):
+ """
+ Create a dictionary for the ``host_config`` argument to
+ :py:meth:`create_container`.
+
+ Args:
+ binds (dict): Volumes to bind. See :py:meth:`create_container`
+ for more information.
+ blkio_weight_device: Block IO weight (relative device weight) in
+ the form of: ``[{"Path": "device_path", "Weight": weight}]``.
+ blkio_weight: Block IO weight (relative weight), accepts a weight
+ value between 10 and 1000.
+ cap_add (list of str): Add kernel capabilities. For example,
+ ``["SYS_ADMIN", "MKNOD"]``.
+ cap_drop (list of str): Drop kernel capabilities.
+ cpu_group (int): The length of a CPU period in microseconds.
+ cpu_period (int): Microseconds of CPU time that the container can
+ get in a CPU period.
+ cpu_shares (int): CPU shares (relative weight).
+ cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
+ ``0,1``).
+ device_read_bps: Limit read rate (bytes per second) from a device
+ in the form of: `[{"Path": "device_path", "Rate": rate}]`
+ device_read_iops: Limit read rate (IO per second) from a device.
+ device_write_bps: Limit write rate (bytes per second) from a
+ device.
+ device_write_iops: Limit write rate (IO per second) from a device.
+ devices (list): Expose host devices to the container, as a list
+ of strings in the form
+ ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
+
+ For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
+ to have read-write access to the host's ``/dev/sda`` via a
+ node named ``/dev/xvda`` inside the container.
+ dns (list): Set custom DNS servers.
+ dns_search (list): DNS search domains.
+            extra_hosts (dict): Additional hostnames to resolve inside the
+ container, as a mapping of hostname to IP address.
+ group_add (list): List of additional group names and/or IDs that
+ the container process will run as.
+ ipc_mode (str): Set the IPC mode for the container.
+ isolation (str): Isolation technology to use. Default: `None`.
+ links (dict or list of tuples): Either a dictionary mapping name
+ to alias or as a list of ``(name, alias)`` tuples.
+ log_config (dict): Logging configuration, as a dictionary with
+ keys:
+
+ - ``type`` The logging driver name.
+ - ``config`` A dictionary of configuration for the logging
+ driver.
+
+ lxc_conf (dict): LXC config.
+ mem_limit (float or str): Memory limit. Accepts float values
+ (which represent the memory limit of the created container in
+ bytes) or a string with a units identification char
+ (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
+                specified without a units character, bytes are assumed as an intended unit.
+ mem_swappiness (int): Tune a container's memory swappiness
+ behavior. Accepts number between 0 and 100.
+ memswap_limit (str or int): Maximum amount of memory + swap a
+ container is allowed to consume.
+ network_mode (str): One of:
+
+                - ``bridge`` Create a new network stack for the container
+                  on the bridge network.
+ - ``none`` No networking for this container.
+ - ``container:<name|id>`` Reuse another container's network
+ stack.
+ - ``host`` Use the host network stack.
+ oom_kill_disable (bool): Whether to disable OOM killer.
+ oom_score_adj (int): An integer value containing the score given
+ to the container in order to tune OOM killer preferences.
+ pid_mode (str): If set to ``host``, use the host PID namespace
+ inside the container.
+ pids_limit (int): Tune a container's pids limit. Set ``-1`` for
+ unlimited.
+ port_bindings (dict): See :py:meth:`create_container`
+ for more information.
+ privileged (bool): Give extended privileges to this container.
+ publish_all_ports (bool): Publish all ports to the host.
+ read_only (bool): Mount the container's root filesystem as read
+ only.
+ restart_policy (dict): Restart the container when it exits.
+ Configured as a dictionary with keys:
+
+ - ``Name`` One of ``on-failure``, or ``always``.
+ - ``MaximumRetryCount`` Number of times to restart the
+ container on failure.
+ security_opt (list): A list of string values to customize labels
+ for MLS systems, such as SELinux.
+ shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+ sysctls (dict): Kernel parameters to set in the container.
+ tmpfs (dict): Temporary filesystems to mount, as a dictionary
+ mapping a path inside the container to options for that path.
+
+ For example:
+
+ .. code-block:: python
+
+ {
+ '/mnt/vol2': '',
+ '/mnt/vol1': 'size=3G,uid=1000'
+ }
+
+ ulimits (list): Ulimits to set inside the container, as a list of
+ dicts.
+ userns_mode (str): Sets the user namespace mode for the container
+ when user namespace remapping option is enabled. Supported
+ values are: ``host``
+ volumes_from (list): List of container names or IDs to get
+ volumes from.
+
+
+ Returns:
+ (dict) A dictionary which can be passed to the ``host_config``
+ argument to :py:meth:`create_container`.
+
+ Example:
+
+ >>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
+ volumes_from=['nostalgic_newton'])
+ {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
+ 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
+
+        """
if not kwargs:
kwargs = {}
if 'version' in kwargs:
@@ -154,22 +584,101 @@ class ContainerApiMixin(object):
"keyword argument 'version'"
)
kwargs['version'] = self._version
- return utils.create_host_config(*args, **kwargs)
+ return HostConfig(*args, **kwargs)
def create_networking_config(self, *args, **kwargs):
- return create_networking_config(*args, **kwargs)
+ """
+ Create a networking config dictionary to be used as the
+ ``networking_config`` parameter in :py:meth:`create_container`.
+
+ Args:
+ endpoints_config (dict): A dictionary mapping network names to
+ endpoint configurations generated by
+ :py:meth:`create_endpoint_config`.
+
+ Returns:
+ (dict) A networking config.
+
+ Example:
+
+ >>> docker_client.create_network('network1')
+ >>> networking_config = docker_client.create_networking_config({
+ 'network1': docker_client.create_endpoint_config()
+ })
+ >>> container = docker_client.create_container(
+ img, command, networking_config=networking_config
+ )
+
+ """
+ return NetworkingConfig(*args, **kwargs)
def create_endpoint_config(self, *args, **kwargs):
- return create_endpoint_config(self._version, *args, **kwargs)
+ """
+ Create an endpoint config dictionary to be used with
+ :py:meth:`create_networking_config`.
+
+ Args:
+ aliases (list): A list of aliases for this endpoint. Names in
+ that list can be used within the network to reach the
+ container. Defaults to ``None``.
+ links (list): A list of links for this endpoint. Containers
+ declared in this list will be linked to this container.
+ Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (list): A list of link-local (IPv4/IPv6)
+ addresses.
+
+ Returns:
+ (dict) An endpoint config.
+
+ Example:
+
+ >>> endpoint_config = client.create_endpoint_config(
+ aliases=['web', 'app'],
+ links=['app_db'],
+ ipv4_address='132.65.0.123'
+ )
+
+ """
+ return EndpointConfig(self._version, *args, **kwargs)
@utils.check_resource
def diff(self, container):
+ """
+ Inspect changes on a container's filesystem.
+
+ Args:
+ container (str): The container to diff
+
+ Returns:
+ (str)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(
self._get(self._url("/containers/{0}/changes", container)), True
)
@utils.check_resource
def export(self, container):
+ """
+ Export the contents of a filesystem as a tar archive.
+
+ Args:
+ container (str): The container to export
+
+ Returns:
+ (str): The filesystem tar archive
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
res = self._get(
self._url("/containers/{0}/export", container), stream=True
)
@@ -179,6 +688,22 @@ class ContainerApiMixin(object):
@utils.check_resource
@utils.minimum_version('1.20')
def get_archive(self, container, path):
+ """
+ Retrieve a file or folder from a container in the form of a tar
+ archive.
+
+ Args:
+ container (str): The container where the file is located
+ path (str): Path to the file or folder to retrieve
+
+ Returns:
+ (tuple): First element is a raw tar data stream. Second element is
+ a dict containing ``stat`` information on the specified ``path``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'path': path
}
@@ -193,12 +718,37 @@ class ContainerApiMixin(object):
@utils.check_resource
def inspect_container(self, container):
+ """
+ Identical to the `docker inspect` command, but only for containers.
+
+ Args:
+ container (str): The container to inspect
+
+ Returns:
+ (dict): Similar to the output of `docker inspect`, but as a
+ single dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(
self._get(self._url("/containers/{0}/json", container)), True
)
@utils.check_resource
def kill(self, container, signal=None):
+ """
+ Kill a container or send a signal to a container.
+
+ Args:
+ container (str): The container to kill
+ signal (str or int): The signal to send. Defaults to ``SIGKILL``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
@@ -212,6 +762,32 @@ class ContainerApiMixin(object):
@utils.check_resource
def logs(self, container, stdout=True, stderr=True, stream=False,
timestamps=False, tail='all', since=None, follow=None):
+ """
+ Get logs from a container. Similar to the ``docker logs`` command.
+
+ The ``stream`` parameter makes the ``logs`` function return a blocking
+ generator you can iterate over to retrieve log output as it happens.
+
+ Args:
+ container (str): The container to get logs from
+ stdout (bool): Get ``STDOUT``
+ stderr (bool): Get ``STDERR``
+ stream (bool): Stream the response
+ timestamps (bool): Show timestamps
+ tail (str or int): Output specified number of lines at the end of
+ logs. Either an integer of number of lines or the string
+ ``all``. Default ``all``
+ since (datetime or int): Show logs since a given datetime or
+ integer epoch (in seconds)
+ follow (bool): Follow log output
+
+ Returns:
+ (generator or str)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if utils.compare_version('1.11', self._version) >= 0:
if follow is None:
follow = stream
@@ -248,12 +824,48 @@ class ContainerApiMixin(object):
@utils.check_resource
def pause(self, container):
+ """
+ Pauses all processes within a container.
+
+ Args:
+ container (str): The container to pause
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/containers/{0}/pause', container)
res = self._post(url)
self._raise_for_status(res)
@utils.check_resource
def port(self, container, private_port):
+ """
+ Lookup the public-facing port that is NAT-ed to ``private_port``.
+ Identical to the ``docker port`` command.
+
+ Args:
+ container (str): The container to look up
+ private_port (int): The private port to inspect
+
+ Returns:
+ (list of dict): The mapping for the host ports
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ .. code-block:: bash
+
+ $ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30
+ 7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b
+
+ .. code-block:: python
+
+ >>> cli.port('7174d6347063', 80)
+ [{'HostIp': '0.0.0.0', 'HostPort': '80'}]
+ """
res = self._get(self._url("/containers/{0}/json", container))
self._raise_for_status(res)
json_ = res.json()
@@ -278,6 +890,26 @@ class ContainerApiMixin(object):
@utils.check_resource
@utils.minimum_version('1.20')
def put_archive(self, container, path, data):
+ """
+ Insert a file or folder in an existing container using a tar archive as
+ source.
+
+ Args:
+ container (str): The container where the file(s) will be extracted
+ path (str): Path inside the container where the file(s) will be
+ extracted. Must exist.
+ data (bytes): tar data to be extracted
+
+ Returns:
+ (bool): True if the call succeeds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+
+
+ """
params = {'path': path}
url = self._url('/containers/{0}/archive', container)
res = self._put(url, params=params, data=data)
@@ -286,6 +918,21 @@ class ContainerApiMixin(object):
@utils.check_resource
def remove_container(self, container, v=False, link=False, force=False):
+ """
+ Remove a container. Similar to the ``docker rm`` command.
+
+ Args:
+ container (str): The container to remove
+ v (bool): Remove the volumes associated with the container
+ link (bool): Remove the specified link and not the underlying
+ container
+ force (bool): Force the removal of a running container (uses
+ ``SIGKILL``)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {'v': v, 'link': link, 'force': force}
res = self._delete(
self._url("/containers/{0}", container), params=params
@@ -295,6 +942,17 @@ class ContainerApiMixin(object):
@utils.minimum_version('1.17')
@utils.check_resource
def rename(self, container, name):
+ """
+ Rename a container. Similar to the ``docker rename`` command.
+
+ Args:
+ container (str): ID of the container to rename
+ name (str): New name for the container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url("/containers/{0}/rename", container)
params = {'name': name}
res = self._post(url, params=params)
@@ -302,6 +960,18 @@ class ContainerApiMixin(object):
@utils.check_resource
def resize(self, container, height, width):
+ """
+ Resize the tty session.
+
+ Args:
+ container (str or dict): The container to resize
+ height (int): Height of tty session
+ width (int): Width of tty session
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {'h': height, 'w': width}
url = self._url("/containers/{0}/resize", container)
res = self._post(url, params=params)
@@ -309,82 +979,82 @@ class ContainerApiMixin(object):
@utils.check_resource
def restart(self, container, timeout=10):
+ """
+ Restart a container. Similar to the ``docker restart`` command.
+
+ Args:
+ container (str or dict): The container to restart. If a dict, the
+ ``Id`` key is used.
+ timeout (int): Number of seconds to try to stop for before killing
+ the container. Once killed it will then be restarted. Default
+ is 10 seconds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {'t': timeout}
url = self._url("/containers/{0}/restart", container)
res = self._post(url, params=params)
self._raise_for_status(res)
@utils.check_resource
- def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
- publish_all_ports=None, links=None, privileged=None,
- dns=None, dns_search=None, volumes_from=None, network_mode=None,
- restart_policy=None, cap_add=None, cap_drop=None, devices=None,
- extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
- security_opt=None, ulimits=None):
-
- if utils.compare_version('1.10', self._version) < 0:
- if dns is not None:
- raise errors.InvalidVersion(
- 'dns is only supported for API version >= 1.10'
- )
- if volumes_from is not None:
- raise errors.InvalidVersion(
- 'volumes_from is only supported for API version >= 1.10'
- )
+ def start(self, container, *args, **kwargs):
+ """
+ Start a container. Similar to the ``docker start`` command, but
+ doesn't support attach options.
- if utils.compare_version('1.15', self._version) < 0:
- if security_opt is not None:
- raise errors.InvalidVersion(
- 'security_opt is only supported for API version >= 1.15'
- )
- if ipc_mode:
- raise errors.InvalidVersion(
- 'ipc_mode is only supported for API version >= 1.15'
- )
+ **Deprecation warning:** Passing configuration options in ``start`` is
+ no longer supported. Users are expected to provide host config options
+ in the ``host_config`` parameter of
+ :py:meth:`~ContainerApiMixin.create_container`.
- if utils.compare_version('1.17', self._version) < 0:
- if read_only is not None:
- raise errors.InvalidVersion(
- 'read_only is only supported for API version >= 1.17'
- )
- if pid_mode is not None:
- raise errors.InvalidVersion(
- 'pid_mode is only supported for API version >= 1.17'
- )
- if utils.compare_version('1.18', self._version) < 0:
- if ulimits is not None:
- raise errors.InvalidVersion(
- 'ulimits is only supported for API version >= 1.18'
- )
+ Args:
+ container (str): The container to start
- start_config_kwargs = dict(
- binds=binds, port_bindings=port_bindings, lxc_conf=lxc_conf,
- publish_all_ports=publish_all_ports, links=links, dns=dns,
- privileged=privileged, dns_search=dns_search, cap_add=cap_add,
- cap_drop=cap_drop, volumes_from=volumes_from, devices=devices,
- network_mode=network_mode, restart_policy=restart_policy,
- extra_hosts=extra_hosts, read_only=read_only, pid_mode=pid_mode,
- ipc_mode=ipc_mode, security_opt=security_opt, ulimits=ulimits
- )
- start_config = None
-
- if any(v is not None for v in start_config_kwargs.values()):
- if utils.compare_version('1.15', self._version) > 0:
- warnings.warn(
- 'Passing host config parameters in start() is deprecated. '
- 'Please use host_config in create_container instead!',
- DeprecationWarning
- )
- start_config = self.create_host_config(**start_config_kwargs)
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ :py:class:`docker.errors.DeprecatedMethod`
+ If any argument besides ``container`` are provided.
+
+ Example:
+ >>> container = cli.create_container(
+ ... image='busybox:latest',
+ ... command='/bin/sleep 30')
+ >>> cli.start(container=container.get('Id'))
+ """
+ if args or kwargs:
+ raise errors.DeprecatedMethod(
+ 'Providing configuration in the start() method is no longer '
+ 'supported. Use the host_config param in create_container '
+ 'instead.'
+ )
url = self._url("/containers/{0}/start", container)
- res = self._post_json(url, data=start_config)
+ res = self._post(url)
self._raise_for_status(res)
@utils.minimum_version('1.17')
@utils.check_resource
def stats(self, container, decode=None, stream=True):
+ """
+ Stream statistics for a specific container. Similar to the
+ ``docker stats`` command.
+
+ Args:
+ container (str): The container to stream statistics from
+ decode (bool): If set to true, stream will be decoded into dicts
+ on the fly. False by default.
+ stream (bool): If set to false, only the current stats will be
+ returned instead of a stream. True by default.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ """
url = self._url("/containers/{0}/stats", container)
if stream:
return self._stream_helper(self._get(url, stream=True),
@@ -395,6 +1065,18 @@ class ContainerApiMixin(object):
@utils.check_resource
def stop(self, container, timeout=10):
+ """
+ Stops a container. Similar to the ``docker stop`` command.
+
+ Args:
+ container (str): The container to stop
+ timeout (int): Timeout in seconds to wait for the container to
+ stop before sending a ``SIGKILL``. Default: 10
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {'t': timeout}
url = self._url("/containers/{0}/stop", container)
@@ -404,6 +1086,20 @@ class ContainerApiMixin(object):
@utils.check_resource
def top(self, container, ps_args=None):
+ """
+ Display the running processes of a container.
+
+ Args:
+ container (str): The container to inspect
+            ps_args (str): Optional arguments passed to ps (e.g. ``aux``)
+
+ Returns:
+ (str): The output of the top
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
u = self._url("/containers/{0}/top", container)
params = {}
if ps_args is not None:
@@ -412,6 +1108,12 @@ class ContainerApiMixin(object):
@utils.check_resource
def unpause(self, container):
+ """
+ Unpause all processes within a container.
+
+ Args:
+ container (str): The container to unpause
+ """
url = self._url('/containers/{0}/unpause', container)
res = self._post(url)
self._raise_for_status(res)
@@ -424,6 +1126,31 @@ class ContainerApiMixin(object):
mem_reservation=None, memswap_limit=None, kernel_memory=None,
restart_policy=None
):
+ """
+ Update resource configs of one or more containers.
+
+ Args:
+            container (str): The container to update
+ blkio_weight (int): Block IO (relative weight), between 10 and 1000
+ cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
+ cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
+ cpu_shares (int): CPU shares (relative weight)
+ cpuset_cpus (str): CPUs in which to allow execution
+ cpuset_mems (str): MEMs in which to allow execution
+ mem_limit (int or str): Memory limit
+ mem_reservation (int or str): Memory soft limit
+ memswap_limit (int or str): Total memory (memory + swap), -1 to
+ disable swap
+ kernel_memory (int or str): Kernel memory limit
+ restart_policy (dict): Restart policy dictionary
+
+ Returns:
+ (dict): Dictionary containing a ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/containers/{0}/update', container)
data = {}
if blkio_weight:
@@ -459,6 +1186,25 @@ class ContainerApiMixin(object):
@utils.check_resource
def wait(self, container, timeout=None):
+ """
+ Block until a container stops, then return its exit code. Similar to
+ the ``docker wait`` command.
+
+ Args:
+ container (str or dict): The container to wait on. If a dict, the
+ ``Id`` key is used.
+ timeout (int): Request timeout
+
+ Returns:
+ (int): The exit code of the container. Returns ``-1`` if the API
+ responds without a ``StatusCode`` attribute.
+
+ Raises:
+ :py:class:`requests.exceptions.ReadTimeout`
+ If the timeout is exceeded.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url("/containers/{0}/wait", container)
res = self._post(url, timeout=timeout)
self._raise_for_status(res)
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
index 9ebe73c..d40631f 100644
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -2,13 +2,42 @@ import os
import warnings
from datetime import datetime
-from ..auth import auth
+from .. import auth, utils
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
-from ..utils import utils
class DaemonApiMixin(object):
def events(self, since=None, until=None, filters=None, decode=None):
+ """
+ Get real-time events from the server. Similar to the ``docker events``
+ command.
+
+ Args:
+ since (UTC datetime or int): Get events from this point
+ until (UTC datetime or int): Get events until this point
+ filters (dict): Filter the events by event time, container or image
+ decode (bool): If set to true, stream will be decoded into dicts on
+ the fly. False by default.
+
+ Returns:
+ (generator): A blocking generator you can iterate over to retrieve
+ events as they happen.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+            >>> for event in client.events():
+            ...     print(event)
+ {u'from': u'image/with:tag',
+ u'id': u'container-id',
+ u'status': u'start',
+ u'time': 1423339459}
+ ...
+ """
+
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
@@ -30,10 +59,42 @@ class DaemonApiMixin(object):
)
def info(self):
+ """
+ Display system-wide information. Identical to the ``docker info``
+ command.
+
+ Returns:
+ (dict): The info as a dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
reauth=False, insecure_registry=False, dockercfg_path=None):
+ """
+ Authenticate with a registry. Similar to the ``docker login`` command.
+
+ Args:
+ username (str): The registry username
+ password (str): The plaintext password
+ email (str): The email for the registry account
+ registry (str): URL to the registry. E.g.
+ ``https://index.docker.io/v1/``
+            reauth (bool): Whether or not to refresh existing authentication
+                on the Docker server.
+ dockercfg_path (str): Use a custom path for the ``.dockercfg`` file
+ (default ``$HOME/.dockercfg``)
+
+ Returns:
+ (dict): The response from the login request
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
@@ -69,8 +130,30 @@ class DaemonApiMixin(object):
return self._result(response, json=True)
def ping(self):
- return self._result(self._get(self._url('/_ping')))
+ """
+ Checks the server is responsive. An exception will be raised if it
+ isn't responding.
+
+ Returns:
+            (bool): The response from the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(self._get(self._url('/_ping'))) == 'OK'
def version(self, api_version=True):
+ """
+ Returns version information from the server. Similar to the ``docker
+ version`` command.
+
+ Returns:
+ (dict): The server version information
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index 6e49996..694b30a 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -9,6 +9,28 @@ class ExecApiMixin(object):
@utils.check_resource
def exec_create(self, container, cmd, stdout=True, stderr=True,
stdin=False, tty=False, privileged=False, user=''):
+ """
+ Sets up an exec instance in a running container.
+
+ Args:
+ container (str): Target container where exec instance will be
+ created
+ cmd (str or list): Command to be executed
+ stdout (bool): Attach to stdout. Default: ``True``
+ stderr (bool): Attach to stderr. Default: ``True``
+ stdin (bool): Attach to stdin. Default: ``False``
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ privileged (bool): Run as privileged.
+ user (str): User to execute command as. Default: root
+
+ Returns:
+ (dict): A dictionary with an exec ``Id`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
if privileged and utils.compare_version('1.19', self._version) < 0:
raise errors.InvalidVersion(
'Privileged exec is not supported in API < 1.19'
@@ -37,6 +59,19 @@ class ExecApiMixin(object):
@utils.minimum_version('1.16')
def exec_inspect(self, exec_id):
+ """
+ Return low-level information about an exec command.
+
+ Args:
+ exec_id (str): ID of the exec instance
+
+ Returns:
+ (dict): Dictionary of values returned by the endpoint.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
res = self._get(self._url("/exec/{0}/json", exec_id))
@@ -44,6 +79,15 @@ class ExecApiMixin(object):
@utils.minimum_version('1.15')
def exec_resize(self, exec_id, height=None, width=None):
+ """
+ Resize the tty session used by the specified exec command.
+
+ Args:
+ exec_id (str): ID of the exec instance
+ height (int): Height of tty session
+ width (int): Width of tty session
+ """
+
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
@@ -55,6 +99,24 @@ class ExecApiMixin(object):
@utils.minimum_version('1.15')
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
socket=False):
+ """
+ Start a previously set up exec instance.
+
+ Args:
+ exec_id (str): ID of the exec instance
+ detach (bool): If true, detach from the exec command.
+ Default: False
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ stream (bool): Stream response data. Default: False
+
+ Returns:
+ (generator or str): If ``stream=True``, a generator yielding
+ response chunks. A string containing response data otherwise.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
# we want opened socket if socket == True
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
diff --git a/docker/api/image.py b/docker/api/image.py
index 262910c..c1ebc69 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -1,12 +1,11 @@
import logging
import os
-import six
import warnings
-from ..auth import auth
+import six
+
+from .. import auth, errors, utils
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
-from .. import utils
-from .. import errors
log = logging.getLogger(__name__)
@@ -15,17 +14,71 @@ class ImageApiMixin(object):
@utils.check_resource
def get_image(self, image):
+ """
+ Get a tarball of an image. Similar to the ``docker save`` command.
+
+ Args:
+ image (str): Image name to get
+
+ Returns:
+ (urllib3.response.HTTPResponse object): The response from the
+ daemon.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> image = cli.get_image("fedora:latest")
+ >>> f = open('/tmp/fedora-latest.tar', 'w')
+ >>> f.write(image.data)
+ >>> f.close()
+ """
res = self._get(self._url("/images/{0}/get", image), stream=True)
self._raise_for_status(res)
return res.raw
@utils.check_resource
def history(self, image):
+ """
+ Show the history of an image.
+
+ Args:
+ image (str): The image to show history for
+
+ Returns:
+ (str): The history of the image
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
def images(self, name=None, quiet=False, all=False, viz=False,
filters=None):
+ """
+ List images. Similar to the ``docker images`` command.
+
+ Args:
+ name (str): Only show images belonging to the repository ``name``
+ quiet (bool): Only return numeric IDs as a list.
+ all (bool): Show intermediate image layers. By default, these are
+ filtered out.
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+ - ``dangling`` (bool)
+ - ``label`` (str): format either ``key`` or ``key=value``
+
+ Returns:
+ (dict or list): A list if ``quiet=True``, otherwise a dict.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if viz:
if utils.compare_version('1.7', self._version) >= 0:
raise Exception('Viz output is not supported in API >= 1.7!')
@@ -45,6 +98,25 @@ class ImageApiMixin(object):
def import_image(self, src=None, repository=None, tag=None, image=None,
changes=None, stream_src=False):
+ """
+ Import an image. Similar to the ``docker import`` command.
+
+ If ``src`` is a string or unicode string, it will first be treated as a
+ path to a tarball on the local system. If there is an error reading
+ from that file, ``src`` will be treated as a URL instead to fetch the
+ image from. You can also pass an open file handle as ``src``, in which
+ case the data will be read from that file.
+
+ If ``src`` is unset but ``image`` is set, the ``image`` parameter will
+ be taken as the name of an existing image to import from.
+
+ Args:
+ src (str or file): Path to tarfile, URL, or file-like object
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ image (str): Use another image like the ``FROM`` Dockerfile
+ parameter
+ """
if not (src or image):
raise errors.DockerException(
'Must specify src or image to import from'
@@ -78,6 +150,16 @@ class ImageApiMixin(object):
def import_image_from_data(self, data, repository=None, tag=None,
changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
+ allows importing in-memory bytes data.
+
+ Args:
+ data (bytes collection): Bytes collection containing valid tar data
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
+
u = self._url('/images/create')
params = _import_image_params(
repository, tag, src='-', changes=changes
@@ -91,6 +173,19 @@ class ImageApiMixin(object):
def import_image_from_file(self, filename, repository=None, tag=None,
changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from a tar file on disk.
+
+ Args:
+ filename (str): Full path to a tar file.
+ repository (str): The repository to create
+ tag (str): The tag to apply
+
+ Raises:
+ IOError: File does not exist.
+ """
+
return self.import_image(
src=filename, repository=repository, tag=tag, changes=changes
)
@@ -104,12 +199,31 @@ class ImageApiMixin(object):
def import_image_from_url(self, url, repository=None, tag=None,
changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from a URL.
+
+ Args:
+ url (str): A URL pointing to a tar file.
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
return self.import_image(
src=url, repository=repository, tag=tag, changes=changes
)
def import_image_from_image(self, image, repository=None, tag=None,
changes=None):
+ """
+ Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
+ supports importing from another image, like the ``FROM`` Dockerfile
+ parameter.
+
+ Args:
+ image (str): Image name to import from
+ repository (str): The repository to create
+ tag (str): The tag to apply
+ """
return self.import_image(
image=image, repository=repository, tag=tag, changes=changes
)
@@ -129,16 +243,75 @@ class ImageApiMixin(object):
@utils.check_resource
def inspect_image(self, image):
+ """
+ Get detailed information about an image. Similar to the ``docker
+        inspect`` command, but only for images.
+
+ Args:
+            image (str): The image to inspect
+
+ Returns:
+ (dict): Similar to the output of ``docker inspect``, but as a
+ single dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(
self._get(self._url("/images/{0}/json", image)), True
)
def load_image(self, data):
+ """
+ Load an image that was previously saved using
+ :py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
+ save``). Similar to ``docker load``.
+
+ Args:
+ data (binary): Image data to be loaded.
+ """
res = self._post(self._url("/images/load"), data=data)
self._raise_for_status(res)
def pull(self, repository, tag=None, stream=False,
insecure_registry=False, auth_config=None, decode=False):
+ """
+ Pulls an image. Similar to the ``docker pull`` command.
+
+ Args:
+ repository (str): The repository to pull
+ tag (str): The tag to pull
+ stream (bool): Stream the output as a generator
+ insecure_registry (bool): Use an insecure registry
+ auth_config (dict): Override the credentials that
+ :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
+ this request. ``auth_config`` should contain the ``username``
+ and ``password`` keys to be valid.
+
+ Returns:
+ (generator or str): The output
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> for line in cli.pull('busybox', stream=True):
+ ... print(json.dumps(json.loads(line), indent=4))
+ {
+ "status": "Pulling image (latest) from busybox",
+ "progressDetail": {},
+ "id": "e72ac664f4f0"
+ }
+ {
+ "status": "Pulling image (latest) from busybox, endpoint: ...",
+ "progressDetail": {},
+ "id": "e72ac664f4f0"
+ }
+
+ """
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
@@ -178,6 +351,38 @@ class ImageApiMixin(object):
def push(self, repository, tag=None, stream=False,
insecure_registry=False, auth_config=None, decode=False):
+ """
+ Push an image or a repository to the registry. Similar to the ``docker
+ push`` command.
+
+ Args:
+ repository (str): The repository to push to
+ tag (str): An optional tag to push
+ stream (bool): Stream the output as a blocking generator
+ insecure_registry (bool): Use ``http://`` to connect to the
+ registry
+ auth_config (dict): Override the credentials that
+ :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
+ this request. ``auth_config`` should contain the ``username``
+ and ``password`` keys to be valid.
+
+ Returns:
+ (generator or str): The output from the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ >>> for line in cli.push('yourname/app', stream=True):
+ ... print line
+ {"status":"Pushing repository yourname/app (1 tags)"}
+ {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}
+ {"status":"Image already pushed, skipping","progressDetail":{},
+ "id":"511136ea3c5a"}
+ ...
+
+ """
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
@@ -215,11 +420,33 @@ class ImageApiMixin(object):
@utils.check_resource
def remove_image(self, image, force=False, noprune=False):
+ """
+ Remove an image. Similar to the ``docker rmi`` command.
+
+ Args:
+ image (str): The image to remove
+ force (bool): Force removal of the image
+ noprune (bool): Do not delete untagged parents
+ """
params = {'force': force, 'noprune': noprune}
res = self._delete(self._url("/images/{0}", image), params=params)
self._raise_for_status(res)
def search(self, term):
+ """
+ Search for images on Docker Hub. Similar to the ``docker search``
+ command.
+
+ Args:
+ term (str): A term to search for.
+
+ Returns:
+ (list of dicts): The response of the search.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
return self._result(
self._get(self._url("/images/search"), params={'term': term}),
True
@@ -227,6 +454,27 @@ class ImageApiMixin(object):
@utils.check_resource
def tag(self, image, repository, tag=None, force=False):
+ """
+ Tag an image into a repository. Similar to the ``docker tag`` command.
+
+ Args:
+ image (str): The image to tag
+ repository (str): The repository to set for the tag
+ tag (str): The tag name
+ force (bool): Force
+
+ Returns:
+ (bool): ``True`` if successful
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
+ force=True)
+ """
params = {
'tag': tag,
'repo': repository,
diff --git a/docker/api/network.py b/docker/api/network.py
index 0ee0dab..33da7ea 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -8,6 +8,21 @@ from ..utils import version_lt
class NetworkApiMixin(object):
@minimum_version('1.21')
def networks(self, names=None, ids=None):
+ """
+        List networks. Similar to the ``docker network ls`` command.
+
+ Args:
+ names (list): List of names to filter by
+ ids (list): List of ids to filter by
+
+ Returns:
+            (list): List of network objects.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
filters = {}
if names:
filters['name'] = names
@@ -24,6 +39,50 @@ class NetworkApiMixin(object):
def create_network(self, name, driver=None, options=None, ipam=None,
check_duplicate=None, internal=False, labels=None,
enable_ipv6=False):
+ """
+        Create a network. Similar to the ``docker network create`` command.
+
+ Args:
+ name (str): Name of the network
+ driver (str): Name of the driver used to create the network
+ options (dict): Driver options as a key-value dictionary
+ ipam (IPAMConfig): Optional custom IP scheme for the network.
+ check_duplicate (bool): Request daemon to check for networks with
+ same name. Default: ``True``.
+ internal (bool): Restrict external access to the network. Default
+ ``False``.
+ labels (dict): Map of labels to set on the network. Default
+ ``None``.
+ enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
+
+ Returns:
+ (dict): The created network reference object
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ A network using the bridge driver:
+
+ >>> client.create_network("network1", driver="bridge")
+
+ You can also create more advanced networks with custom IPAM
+ configurations. For example, setting the subnet to
+ ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
+
+ .. code-block:: python
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='192.168.52.0/24',
+ gateway='192.168.52.254'
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool]
+ )
+ >>> docker_client.create_network("network1", driver="bridge",
+ ipam=ipam_config)
+ """
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
@@ -63,12 +122,24 @@ class NetworkApiMixin(object):
@minimum_version('1.21')
def remove_network(self, net_id):
+ """
+ Remove a network. Similar to the ``docker network rm`` command.
+
+ Args:
+ net_id (str): The network's id
+ """
url = self._url("/networks/{0}", net_id)
res = self._delete(url)
self._raise_for_status(res)
@minimum_version('1.21')
def inspect_network(self, net_id):
+ """
+ Get detailed information about a network.
+
+ Args:
+ net_id (str): ID of network
+ """
url = self._url("/networks/{0}", net_id)
res = self._get(url)
return self._result(res, json=True)
@@ -79,6 +150,24 @@ class NetworkApiMixin(object):
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
link_local_ips=None):
+ """
+ Connect a container to a network.
+
+ Args:
+ container (str): container-id/name to be connected to the network
+ net_id (str): network id
+ aliases (list): A list of aliases for this endpoint. Names in that
+ list can be used within the network to reach the container.
+ Defaults to ``None``.
+            links (list): A list of links for this endpoint. Containers
+                declared in this list will be linked to this container.
+                Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (list): A list of link-local (IPv4/IPv6) addresses.
+ """
data = {
"Container": container,
"EndpointConfig": self.create_endpoint_config(
@@ -95,6 +184,16 @@ class NetworkApiMixin(object):
@minimum_version('1.21')
def disconnect_container_from_network(self, container, net_id,
force=False):
+ """
+ Disconnect a container from a network.
+
+ Args:
+ container (str): container ID or name to be disconnected from the
+ network
+ net_id (str): network ID
+ force (bool): Force the container to disconnect from a network.
+ Default: ``False``
+ """
data = {"Container": container}
if force:
if version_lt(self._version, '1.22'):
diff --git a/docker/api/service.py b/docker/api/service.py
index 2e41b7c..7708b75 100644
--- a/docker/api/service.py
+++ b/docker/api/service.py
@@ -1,8 +1,5 @@
import warnings
-
-from .. import errors
-from .. import utils
-from ..auth import auth
+from .. import auth, errors, utils
class ServiceApiMixin(object):
@@ -12,6 +9,32 @@ class ServiceApiMixin(object):
update_config=None, networks=None, endpoint_config=None,
endpoint_spec=None
):
+ """
+ Create a service.
+
+ Args:
+ task_template (dict): Specification of the task to start as part
+ of the new service.
+ name (string): User-defined name for the service. Optional.
+ labels (dict): A map of labels to associate with the service.
+ Optional.
+ mode (string): Scheduling mode for the service (``replicated`` or
+ ``global``). Defaults to ``replicated``.
+ update_config (dict): Specification for the update strategy of the
+ service. Default: ``None``
+ networks (list): List of network names or IDs to attach the
+ service to. Default: ``None``.
+ endpoint_config (dict): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+
+ Returns:
+ A dictionary containing an ``ID`` key for the newly created
+ service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if endpoint_config is not None:
warnings.warn(
'endpoint_config has been renamed to endpoint_spec.',
@@ -46,18 +69,58 @@ class ServiceApiMixin(object):
@utils.minimum_version('1.24')
@utils.check_resource
def inspect_service(self, service):
+ """
+ Return information about a service.
+
+ Args:
+ service (str): Service name or ID
+
+        Returns:
+            (dict): Information about the service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/services/{0}', service)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
@utils.check_resource
def inspect_task(self, task):
+ """
+ Retrieve information about a task.
+
+ Args:
+ task (str): Task ID
+
+ Returns:
+ (dict): Information about the task.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/tasks/{0}', task)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
@utils.check_resource
def remove_service(self, service):
+ """
+ Stop and remove a service.
+
+ Args:
+ service (str): Service name or ID
+
+ Returns:
+ ``True`` if successful.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
url = self._url('/services/{0}', service)
resp = self._delete(url)
self._raise_for_status(resp)
@@ -65,6 +128,20 @@ class ServiceApiMixin(object):
@utils.minimum_version('1.24')
def services(self, filters=None):
+ """
+ List services.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id`` and ``name``. Default: ``None``.
+
+ Returns:
+ A list of dictionaries containing data about each service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
params = {
'filters': utils.convert_filters(filters) if filters else None
}
@@ -73,6 +150,22 @@ class ServiceApiMixin(object):
@utils.minimum_version('1.24')
def tasks(self, filters=None):
+ """
+ Retrieve a list of tasks.
+
+ Args:
+ filters (dict): A map of filters to process on the tasks list.
+ Valid filters: ``id``, ``name``, ``service``, ``node``,
+ ``label`` and ``desired-state``.
+
+ Returns:
+ (list): List of task dictionaries.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
params = {
'filters': utils.convert_filters(filters) if filters else None
}
@@ -85,7 +178,37 @@ class ServiceApiMixin(object):
labels=None, mode=None, update_config=None,
networks=None, endpoint_config=None,
endpoint_spec=None):
+ """
+ Update a service.
+
+ Args:
+ service (string): A service identifier (either its name or service
+ ID).
+ version (int): The version number of the service object being
+ updated. This is required to avoid conflicting writes.
+ task_template (dict): Specification of the updated task to start
+ as part of the service. See the [TaskTemplate
+ class](#TaskTemplate) for details.
+ name (string): New name for the service. Optional.
+ labels (dict): A map of labels to associate with the service.
+ Optional.
+ mode (string): Scheduling mode for the service (``replicated`` or
+ ``global``). Defaults to ``replicated``.
+ update_config (dict): Specification for the update strategy of the
+ service. See the [UpdateConfig class](#UpdateConfig) for
+ details. Default: ``None``.
+ networks (list): List of network names or IDs to attach the
+ service to. Default: ``None``.
+ endpoint_config (dict): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+
+ Returns:
+            ``True`` if successful.
+
+        Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
if endpoint_config is not None:
warnings.warn(
'endpoint_config has been renamed to endpoint_spec.',
diff --git a/docker/api/swarm.py b/docker/api/swarm.py
index 2fc8774..6a1b752 100644
--- a/docker/api/swarm.py
+++ b/docker/api/swarm.py
@@ -1,5 +1,6 @@
import logging
from six.moves import http_client
+from .. import types
from .. import utils
log = logging.getLogger(__name__)
@@ -7,11 +8,87 @@ log = logging.getLogger(__name__)
class SwarmApiMixin(object):
def create_swarm_spec(self, *args, **kwargs):
- return utils.SwarmSpec(*args, **kwargs)
+ """
+ Create a ``docker.types.SwarmSpec`` instance that can be used as the
+ ``swarm_spec`` argument in
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
+
+ Args:
+ task_history_retention_limit (int): Maximum number of tasks
+ history stored.
+ snapshot_interval (int): Number of logs entries between snapshot.
+ keep_old_snapshots (int): Number of snapshots to keep beyond the
+ current snapshot.
+ log_entries_for_slow_followers (int): Number of log entries to
+ keep around to sync up slow followers after a snapshot is
+ created.
+ heartbeat_tick (int): Amount of ticks (in seconds) between each
+ heartbeat.
+ election_tick (int): Amount of ticks (in seconds) needed without a
+ leader to trigger a new election.
+ dispatcher_heartbeat_period (int): The delay for an agent to send
+ a heartbeat to the dispatcher.
+ node_cert_expiry (int): Automatic expiry for nodes certificates.
+ external_ca (dict): Configuration for forwarding signing requests
+ to an external certificate authority. Use
+ ``docker.types.SwarmExternalCA``.
+ name (string): Swarm's name
+
+ Returns:
+ ``docker.types.SwarmSpec`` instance.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> spec = client.create_swarm_spec(
+ snapshot_interval=5000, log_entries_for_slow_followers=1200
+ )
+ >>> client.init_swarm(
+ advertise_addr='eth0', listen_addr='0.0.0.0:5000',
+ force_new_cluster=False, swarm_spec=spec
+ )
+ """
+ return types.SwarmSpec(*args, **kwargs)
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, swarm_spec=None):
+ """
+ Initialize a new Swarm using the current connected engine as the first
+ node.
+
+ Args:
+ advertise_addr (string): Externally reachable address advertised
+ to other nodes. This can either be an address/port combination
+ in the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used. If
+ ``advertise_addr`` is not specified, it will be automatically
+ detected when possible. Default: None
+ listen_addr (string): Listen address used for inter-manager
+ communication, as well as determining the networking interface
+ used for the VXLAN Tunnel Endpoint (VTEP). This can either be
+ an address/port combination in the form ``192.168.1.1:4567``,
+ or an interface followed by a port number, like ``eth0:4567``.
+ If the port number is omitted, the default swarm listening port
+ is used. Default: '0.0.0.0:2377'
+ force_new_cluster (bool): Force creating a new Swarm, even if
+ already part of one. Default: False
+ swarm_spec (dict): Configuration settings of the new Swarm. Use
+ ``APIClient.create_swarm_spec`` to generate a valid
+ configuration. Default: None
+
+ Returns:
+ ``True`` if successful.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary')
@@ -27,18 +104,67 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def inspect_swarm(self):
+ """
+ Retrieve low-level information about the current swarm.
+
+ Returns:
+ A dictionary containing data about the swarm.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/swarm')
return self._result(self._get(url), True)
@utils.check_resource
@utils.minimum_version('1.24')
def inspect_node(self, node_id):
+ """
+ Retrieve low-level information about a swarm node.
+
+ Args:
+ node_id (string): ID of the node to be inspected.
+
+ Returns:
+ A dictionary containing data about this node.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/nodes/{0}', node_id)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
def join_swarm(self, remote_addrs, join_token, listen_addr=None,
advertise_addr=None):
+ """
+ Make this Engine join a swarm that has already been created.
+
+ Args:
+ remote_addrs (list): Addresses of one or more manager nodes already
+ participating in the Swarm to join.
+ join_token (string): Secret token for joining this Swarm.
+ listen_addr (string): Listen address used for inter-manager
+ communication if the node gets promoted to manager, as well as
+ determining the networking interface used for the VXLAN Tunnel
+ Endpoint (VTEP). Default: ``None``
+ advertise_addr (string): Externally reachable address advertised
+ to other nodes. This can either be an address/port combination
+ in the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used. If
+ ``advertise_addr`` is not specified, it will be automatically
+ detected when possible. Default: ``None``
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
data = {
"RemoteAddrs": remote_addrs,
"ListenAddr": listen_addr,
@@ -52,6 +178,20 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def leave_swarm(self, force=False):
+ """
+ Leave a swarm.
+
+ Args:
+ force (bool): Leave the swarm even if this node is a manager.
+ Default: ``False``
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/swarm/leave')
response = self._post(url, params={'force': force})
# Ignore "this node is not part of a swarm" error
@@ -62,6 +202,21 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def nodes(self, filters=None):
+ """
+ List swarm nodes.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id``, ``name``, ``membership`` and ``role``.
+ Default: ``None``
+
+ Returns:
+ A list of dictionaries containing data about each swarm node.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
url = self._url('/nodes')
params = {}
if filters:
@@ -71,6 +226,34 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def update_node(self, node_id, version, node_spec=None):
+ """
+ Update the Node's configuration
+
+ Args:
+ node_id (string): ID of the node to be updated.
+ version (int): The version number of the node object being
+ updated. This is required to avoid conflicting writes.
+ node_spec (dict): Configuration settings to update. Any values
+ not provided will be removed. Default: ``None``
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> node_spec = {'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ }
+ >>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
+ node_spec=node_spec)
+
+ """
url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
res = self._post_json(url, data=node_spec)
self._raise_for_status(res)
@@ -79,6 +262,28 @@ class SwarmApiMixin(object):
@utils.minimum_version('1.24')
def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
rotate_manager_token=False):
+ """
+ Update the Swarm's configuration
+
+ Args:
+ version (int): The version number of the swarm object being
+ updated. This is required to avoid conflicting writes.
+ swarm_spec (dict): Configuration settings to update. Use
+ :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
+ generate a valid configuration. Default: ``None``.
+ rotate_worker_token (bool): Rotate the worker join token. Default:
+ ``False``.
+ rotate_manager_token (bool): Rotate the manager join token.
+ Default: ``False``.
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
url = self._url('/swarm/update')
response = self._post_json(url, data=swarm_spec, params={
'rotateWorkerToken': rotate_worker_token,
diff --git a/docker/api/volume.py b/docker/api/volume.py
index afc72cb..9c6d5f8 100644
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -5,6 +5,32 @@ from .. import utils
class VolumeApiMixin(object):
@utils.minimum_version('1.21')
def volumes(self, filters=None):
+ """
+ List volumes currently registered by the docker daemon. Similar to the
+ ``docker volume ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (dict): Dictionary with list of volume objects as value of the
+ ``Volumes`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> cli.volumes()
+ {u'Volumes': [{u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar'},
+ {u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/baz/_data',
+ u'Name': u'baz'}]}
+ """
+
params = {
'filters': utils.convert_filters(filters) if filters else None
}
@@ -13,6 +39,34 @@ class VolumeApiMixin(object):
@utils.minimum_version('1.21')
def create_volume(self, name, driver=None, driver_opts=None, labels=None):
+ """
+ Create and register a named volume
+
+ Args:
+ name (str): Name of the volume
+ driver (str): Name of the driver used to create the volume
+ driver_opts (dict): Driver options as a key-value dictionary
+ labels (dict): Labels to set on the volume
+
+ Returns:
+ (dict): The created volume reference object
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> volume = cli.create_volume(name='foobar', driver='local',
+ driver_opts={'foo': 'bar', 'baz': 'false'},
+ labels={"key": "value"})
+ >>> print(volume)
+ {u'Driver': u'local',
+ u'Labels': {u'key': u'value'},
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar'}
+
+ """
url = self._url('/volumes/create')
if driver_opts is not None and not isinstance(driver_opts, dict):
raise TypeError('driver_opts must be a dictionary')
@@ -36,11 +90,42 @@ class VolumeApiMixin(object):
@utils.minimum_version('1.21')
def inspect_volume(self, name):
+ """
+ Retrieve volume info by name.
+
+ Args:
+ name (str): volume name
+
+ Returns:
+ (dict): Volume information dictionary
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> cli.inspect_volume('foobar')
+ {u'Driver': u'local',
+ u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+ u'Name': u'foobar'}
+
+ """
url = self._url('/volumes/{0}', name)
return self._result(self._get(url), True)
@utils.minimum_version('1.21')
def remove_volume(self, name):
+ """
+ Remove a volume. Similar to the ``docker volume rm`` command.
+
+ Args:
+ name (str): The volume's name
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the volume could not be removed.
+ """
url = self._url('/volumes/{0}', name)
resp = self._delete(url)
self._raise_for_status(resp)
diff --git a/docker/auth/auth.py b/docker/auth.py
index dc0baea..0a2eda1 100644
--- a/docker/auth/auth.py
+++ b/docker/auth.py
@@ -6,7 +6,7 @@ import os
import dockerpycreds
import six
-from .. import errors
+from . import errors
INDEX_NAME = 'docker.io'
INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
diff --git a/docker/auth/__init__.py b/docker/auth/__init__.py
deleted file mode 100644
index 50127fa..0000000
--- a/docker/auth/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from .auth import (
- INDEX_NAME,
- INDEX_URL,
- encode_header,
- load_config,
- resolve_authconfig,
- resolve_repository_name,
-) # flake8: noqa
diff --git a/docker/client.py b/docker/client.py
index aec78c8..b271eb7 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -1,408 +1,167 @@
-import json
-import struct
-from functools import partial
-
-import requests
-import requests.exceptions
-import six
-import websocket
-
-
-from . import api
-from . import constants
-from . import errors
-from .auth import auth
-from .ssladapter import ssladapter
-from .tls import TLSConfig
-from .transport import UnixAdapter
-from .utils import utils, check_resource, update_headers, kwargs_from_env
-from .utils.socket import frames_iter
-try:
- from .transport import NpipeAdapter
-except ImportError:
- pass
-
-
-def from_env(**kwargs):
- return Client.from_env(**kwargs)
-
-
-class Client(
- requests.Session,
- api.BuildApiMixin,
- api.ContainerApiMixin,
- api.DaemonApiMixin,
- api.ExecApiMixin,
- api.ImageApiMixin,
- api.NetworkApiMixin,
- api.ServiceApiMixin,
- api.SwarmApiMixin,
- api.VolumeApiMixin):
- def __init__(self, base_url=None, version=None,
- timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False,
- user_agent=constants.DEFAULT_USER_AGENT,
- num_pools=constants.DEFAULT_NUM_POOLS):
- super(Client, self).__init__()
-
- if tls and not base_url:
- raise errors.TLSParameterError(
- 'If using TLS, the base_url argument must be provided.'
- )
-
- self.base_url = base_url
- self.timeout = timeout
- self.headers['User-Agent'] = user_agent
-
- self._auth_configs = auth.load_config()
-
- base_url = utils.parse_host(
- base_url, constants.IS_WINDOWS_PLATFORM, tls=bool(tls)
- )
- if base_url.startswith('http+unix://'):
- self._custom_adapter = UnixAdapter(
- base_url, timeout, pool_connections=num_pools
- )
- self.mount('http+docker://', self._custom_adapter)
- self._unmount('http://', 'https://')
- self.base_url = 'http+docker://localunixsocket'
- elif base_url.startswith('npipe://'):
- if not constants.IS_WINDOWS_PLATFORM:
- raise errors.DockerException(
- 'The npipe:// protocol is only supported on Windows'
- )
- try:
- self._custom_adapter = NpipeAdapter(
- base_url, timeout, pool_connections=num_pools
- )
- except NameError:
- raise errors.DockerException(
- 'Install pypiwin32 package to enable npipe:// support'
- )
- self.mount('http+docker://', self._custom_adapter)
- self.base_url = 'http+docker://localnpipe'
- else:
- # Use SSLAdapter for the ability to specify SSL version
- if isinstance(tls, TLSConfig):
- tls.configure_client(self)
- elif tls:
- self._custom_adapter = ssladapter.SSLAdapter(
- pool_connections=num_pools
- )
- self.mount('https://', self._custom_adapter)
- self.base_url = base_url
-
- # version detection needs to be after unix adapter mounting
- if version is None:
- self._version = constants.DEFAULT_DOCKER_API_VERSION
- elif isinstance(version, six.string_types):
- if version.lower() == 'auto':
- self._version = self._retrieve_server_version()
- else:
- self._version = version
- else:
- raise errors.DockerException(
- 'Version parameter must be a string or None. Found {0}'.format(
- type(version).__name__
- )
- )
+from .api.client import APIClient
+from .models.containers import ContainerCollection
+from .models.images import ImageCollection
+from .models.networks import NetworkCollection
+from .models.nodes import NodeCollection
+from .models.services import ServiceCollection
+from .models.swarm import Swarm
+from .models.volumes import VolumeCollection
+from .utils import kwargs_from_env
+
+
+class DockerClient(object):
+ """
+ A client for communicating with a Docker server.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.DockerClient(base_url='unix://var/run/docker.sock')
+
+ Args:
+ base_url (str): URL to the Docker server. For example,
+ ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.24``
+ timeout (int): Default timeout for API calls, in seconds.
+ tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+ ``True`` to enable it with default options, or pass a
+ :py:class:`~docker.tls.TLSConfig` object to use custom
+ configuration.
+ user_agent (str): Set a custom user agent for requests to the server.
+ """
+ def __init__(self, *args, **kwargs):
+ self.api = APIClient(*args, **kwargs)
@classmethod
def from_env(cls, **kwargs):
+ """
+ Return a client configured from environment variables.
+
+ The environment variables used are the same as those used by the
+ Docker command-line client. They are:
+
+ .. envvar:: DOCKER_HOST
+
+ The URL to the Docker host.
+
+ .. envvar:: DOCKER_TLS_VERIFY
+
+ Verify the host against a CA certificate.
+
+ .. envvar:: DOCKER_CERT_PATH
+
+ A path to a directory containing TLS certificates to use when
+ connecting to the Docker host.
+
+ Args:
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.24``
+ timeout (int): Default timeout for API calls, in seconds.
+ ssl_version (int): A valid `SSL version`_.
+ assert_hostname (bool): Verify the hostname of the server.
+ environment (dict): The environment to read environment variables
+ from. Default: the value of ``os.environ``
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.from_env()
+
+ .. _`SSL version`:
+ https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
+ """
timeout = kwargs.pop('timeout', None)
version = kwargs.pop('version', None)
return cls(timeout=timeout, version=version,
**kwargs_from_env(**kwargs))
- def _retrieve_server_version(self):
- try:
- return self.version(api_version=False)["ApiVersion"]
- except KeyError:
- raise errors.DockerException(
- 'Invalid response from docker daemon: key "ApiVersion"'
- ' is missing.'
- )
- except Exception as e:
- raise errors.DockerException(
- 'Error while fetching server API version: {0}'.format(e)
- )
-
- def _set_request_timeout(self, kwargs):
- """Prepare the kwargs for an HTTP request by inserting the timeout
- parameter, if not already present."""
- kwargs.setdefault('timeout', self.timeout)
- return kwargs
-
- @update_headers
- def _post(self, url, **kwargs):
- return self.post(url, **self._set_request_timeout(kwargs))
-
- @update_headers
- def _get(self, url, **kwargs):
- return self.get(url, **self._set_request_timeout(kwargs))
-
- @update_headers
- def _put(self, url, **kwargs):
- return self.put(url, **self._set_request_timeout(kwargs))
-
- @update_headers
- def _delete(self, url, **kwargs):
- return self.delete(url, **self._set_request_timeout(kwargs))
-
- def _url(self, pathfmt, *args, **kwargs):
- for arg in args:
- if not isinstance(arg, six.string_types):
- raise ValueError(
- 'Expected a string but found {0} ({1}) '
- 'instead'.format(arg, type(arg))
- )
-
- quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
- args = map(quote_f, args)
-
- if kwargs.get('versioned_api', True):
- return '{0}/v{1}{2}'.format(
- self.base_url, self._version, pathfmt.format(*args)
- )
- else:
- return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
-
- def _raise_for_status(self, response, explanation=None):
- """Raises stored :class:`APIError`, if one occurred."""
- try:
- response.raise_for_status()
- except requests.exceptions.HTTPError as e:
- if e.response.status_code == 404:
- raise errors.NotFound(e, response, explanation=explanation)
- raise errors.APIError(e, response, explanation=explanation)
-
- def _result(self, response, json=False, binary=False):
- assert not (json and binary)
- self._raise_for_status(response)
-
- if json:
- return response.json()
- if binary:
- return response.content
- return response.text
-
- def _post_json(self, url, data, **kwargs):
- # Go <1.1 can't unserialize null to a string
- # so we do this disgusting thing here.
- data2 = {}
- if data is not None:
- for k, v in six.iteritems(data):
- if v is not None:
- data2[k] = v
-
- if 'headers' not in kwargs:
- kwargs['headers'] = {}
- kwargs['headers']['Content-Type'] = 'application/json'
- return self._post(url, data=json.dumps(data2), **kwargs)
-
- def _attach_params(self, override=None):
- return override or {
- 'stdout': 1,
- 'stderr': 1,
- 'stream': 1
- }
-
- @check_resource
- def _attach_websocket(self, container, params=None):
- url = self._url("/containers/{0}/attach/ws", container)
- req = requests.Request("POST", url, params=self._attach_params(params))
- full_url = req.prepare().url
- full_url = full_url.replace("http://", "ws://", 1)
- full_url = full_url.replace("https://", "wss://", 1)
- return self._create_websocket_connection(full_url)
-
- def _create_websocket_connection(self, url):
- return websocket.create_connection(url)
-
- def _get_raw_response_socket(self, response):
- self._raise_for_status(response)
- if self.base_url == "http+docker://localnpipe":
- sock = response.raw._fp.fp.raw.sock
- elif six.PY3:
- sock = response.raw._fp.fp.raw
- if self.base_url.startswith("https://"):
- sock = sock._sock
- else:
- sock = response.raw._fp.fp._sock
- try:
- # Keep a reference to the response to stop it being garbage
- # collected. If the response is garbage collected, it will
- # close TLS sockets.
- sock._response = response
- except AttributeError:
- # UNIX sockets can't have attributes set on them, but that's
- # fine because we won't be doing TLS over them
- pass
-
- return sock
-
- def _stream_helper(self, response, decode=False):
- """Generator for data coming from a chunked-encoded HTTP response."""
- if response.raw._fp.chunked:
- reader = response.raw
- while not reader.closed:
- # this read call will block until we get a chunk
- data = reader.read(1)
- if not data:
- break
- if reader._fp.chunk_left:
- data += reader.read(reader._fp.chunk_left)
- if decode:
- if six.PY3:
- data = data.decode('utf-8')
- # remove the trailing newline
- data = data.strip()
- # split the data at any newlines
- data_list = data.split("\r\n")
- # load and yield each line seperately
- for data in data_list:
- data = json.loads(data)
- yield data
- else:
- yield data
- else:
- # Response isn't chunked, meaning we probably
- # encountered an error immediately
- yield self._result(response, json=decode)
-
- def _multiplexed_buffer_helper(self, response):
- """A generator of multiplexed data blocks read from a buffered
- response."""
- buf = self._result(response, binary=True)
- walker = 0
- while True:
- if len(buf[walker:]) < 8:
- break
- _, length = struct.unpack_from('>BxxxL', buf[walker:])
- start = walker + constants.STREAM_HEADER_SIZE_BYTES
- end = start + length
- walker = end
- yield buf[start:end]
-
- def _multiplexed_response_stream_helper(self, response):
- """A generator of multiplexed data blocks coming from a response
- stream."""
-
- # Disable timeout on the underlying socket to prevent
- # Read timed out(s) for long running processes
- socket = self._get_raw_response_socket(response)
- self._disable_socket_timeout(socket)
-
- while True:
- header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)
- if not header:
- break
- _, length = struct.unpack('>BxxxL', header)
- if not length:
- continue
- data = response.raw.read(length)
- if not data:
- break
- yield data
-
- def _stream_raw_result_old(self, response):
- ''' Stream raw output for API versions below 1.6 '''
- self._raise_for_status(response)
- for line in response.iter_lines(chunk_size=1,
- decode_unicode=True):
- # filter out keep-alive new lines
- if line:
- yield line
-
- def _stream_raw_result(self, response):
- ''' Stream result for TTY-enabled container above API 1.6 '''
- self._raise_for_status(response)
- for out in response.iter_content(chunk_size=1, decode_unicode=True):
- yield out
-
- def _read_from_socket(self, response, stream):
- socket = self._get_raw_response_socket(response)
-
- if stream:
- return frames_iter(socket)
- else:
- return six.binary_type().join(frames_iter(socket))
-
- def _disable_socket_timeout(self, socket):
- """ Depending on the combination of python version and whether we're
- connecting over http or https, we might need to access _sock, which
- may or may not exist; or we may need to just settimeout on socket
- itself, which also may or may not have settimeout on it. To avoid
- missing the correct one, we try both.
-
- We also do not want to set the timeout if it is already disabled, as
- you run the risk of changing a socket that was non-blocking to
- blocking, for example when using gevent.
+ # Resources
+ @property
+ def containers(self):
"""
- sockets = [socket, getattr(socket, '_sock', None)]
-
- for s in sockets:
- if not hasattr(s, 'settimeout'):
- continue
-
- timeout = -1
-
- if hasattr(s, 'gettimeout'):
- timeout = s.gettimeout()
-
- # Don't change the timeout if it is already disabled.
- if timeout is None or timeout == 0.0:
- continue
-
- s.settimeout(None)
-
- def _get_result(self, container, stream, res):
- cont = self.inspect_container(container)
- return self._get_result_tty(stream, res, cont['Config']['Tty'])
-
- def _get_result_tty(self, stream, res, is_tty):
- # Stream multi-plexing was only introduced in API v1.6. Anything
- # before that needs old-style streaming.
- if utils.compare_version('1.6', self._version) < 0:
- return self._stream_raw_result_old(res)
-
- # We should also use raw streaming (without keep-alives)
- # if we're dealing with a tty-enabled container.
- if is_tty:
- return self._stream_raw_result(res) if stream else \
- self._result(res, binary=True)
-
- self._raise_for_status(res)
- sep = six.binary_type()
- if stream:
- return self._multiplexed_response_stream_helper(res)
- else:
- return sep.join(
- [x for x in self._multiplexed_buffer_helper(res)]
- )
-
- def _unmount(self, *args):
- for proto in args:
- self.adapters.pop(proto)
-
- def get_adapter(self, url):
- try:
- return super(Client, self).get_adapter(url)
- except requests.exceptions.InvalidSchema as e:
- if self._custom_adapter:
- return self._custom_adapter
- else:
- raise e
+ An object for managing containers on the server. See the
+ :doc:`containers documentation <containers>` for full details.
+ """
+ return ContainerCollection(client=self)
@property
- def api_version(self):
- return self._version
+ def images(self):
+ """
+ An object for managing images on the server. See the
+ :doc:`images documentation <images>` for full details.
+ """
+ return ImageCollection(client=self)
+ @property
+ def networks(self):
+ """
+ An object for managing networks on the server. See the
+ :doc:`networks documentation <networks>` for full details.
+ """
+ return NetworkCollection(client=self)
-class AutoVersionClient(Client):
- def __init__(self, *args, **kwargs):
- if 'version' in kwargs and kwargs['version']:
- raise errors.DockerException(
- 'Can not specify version for AutoVersionClient'
- )
- kwargs['version'] = 'auto'
- super(AutoVersionClient, self).__init__(*args, **kwargs)
+ @property
+ def nodes(self):
+ """
+ An object for managing nodes on the server. See the
+ :doc:`nodes documentation <nodes>` for full details.
+ """
+ return NodeCollection(client=self)
+
+ @property
+ def services(self):
+ """
+ An object for managing services on the server. See the
+ :doc:`services documentation <services>` for full details.
+ """
+ return ServiceCollection(client=self)
+
+ @property
+ def swarm(self):
+ """
+ An object for managing a swarm on the server. See the
+ :doc:`swarm documentation <swarm>` for full details.
+ """
+ return Swarm(client=self)
+
+ @property
+ def volumes(self):
+ """
+ An object for managing volumes on the server. See the
+ :doc:`volumes documentation <volumes>` for full details.
+ """
+ return VolumeCollection(client=self)
+
+ # Top-level methods
+ def events(self, *args, **kwargs):
+ return self.api.events(*args, **kwargs)
+ events.__doc__ = APIClient.events.__doc__
+
+ def info(self, *args, **kwargs):
+ return self.api.info(*args, **kwargs)
+ info.__doc__ = APIClient.info.__doc__
+
+ def login(self, *args, **kwargs):
+ return self.api.login(*args, **kwargs)
+ login.__doc__ = APIClient.login.__doc__
+
+ def ping(self, *args, **kwargs):
+ return self.api.ping(*args, **kwargs)
+ ping.__doc__ = APIClient.ping.__doc__
+
+ def version(self, *args, **kwargs):
+ return self.api.version(*args, **kwargs)
+ version.__doc__ = APIClient.version.__doc__
+
+ def __getattr__(self, name):
+ s = ["'DockerClient' object has no attribute '{}'".format(name)]
+ # If a user calls a method on APIClient, tell them it moved there.
+ if hasattr(APIClient, name):
+ s.append("In docker-py 2.0, this method is now on the object "
+ "APIClient. See the low-level API section of the "
+ "documentation for more details.".format(name))
+ raise AttributeError(' '.join(s))
+
+
+from_env = DockerClient.from_env
diff --git a/docker/constants.py b/docker/constants.py
index 0c9a020..c3048cb 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -2,6 +2,7 @@ import sys
from .version import version
DEFAULT_DOCKER_API_VERSION = '1.24'
+MINIMUM_DOCKER_API_VERSION = '1.24'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
diff --git a/docker/errors.py b/docker/errors.py
index df18d57..8572007 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -1,21 +1,44 @@
import requests
-class APIError(requests.exceptions.HTTPError):
- def __init__(self, message, response, explanation=None):
+class DockerException(Exception):
+ """
+ A base class from which all other exceptions inherit.
+
+ If you want to catch all errors that the Docker SDK might raise,
+ catch this base exception.
+ """
+
+
+def create_api_error_from_http_exception(e):
+ """
+ Create a suitable APIError from requests.exceptions.HTTPError.
+ """
+ response = e.response
+ try:
+ explanation = response.json()['message']
+ except ValueError:
+ explanation = response.content.strip()
+ cls = APIError
+ if response.status_code == 404:
+ if explanation and 'No such image' in str(explanation):
+ cls = ImageNotFound
+ else:
+ cls = NotFound
+ raise cls(e, response=response, explanation=explanation)
+
+
+class APIError(requests.exceptions.HTTPError, DockerException):
+ """
+ An HTTP error from the API.
+ """
+ def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
super(APIError, self).__init__(message)
self.response = response
-
self.explanation = explanation
- if self.explanation is None and response.content:
- try:
- self.explanation = response.json()['message']
- except ValueError:
- self.explanation = response.content.strip()
-
def __str__(self):
message = super(APIError, self).__str__()
@@ -32,18 +55,27 @@ class APIError(requests.exceptions.HTTPError):
return message
+ @property
+ def status_code(self):
+ if self.response:
+ return self.response.status_code
+
def is_client_error(self):
- return 400 <= self.response.status_code < 500
+ if self.status_code is None:
+ return False
+ return 400 <= self.status_code < 500
def is_server_error(self):
- return 500 <= self.response.status_code < 600
+ if self.status_code is None:
+ return False
+ return 500 <= self.status_code < 600
-class DockerException(Exception):
+class NotFound(APIError):
pass
-class NotFound(APIError):
+class ImageNotFound(NotFound):
pass
@@ -76,3 +108,38 @@ class TLSParameterError(DockerException):
class NullResource(DockerException, ValueError):
pass
+
+
+class ContainerError(DockerException):
+ """
+ Represents a container that has exited with a non-zero exit code.
+ """
+ def __init__(self, container, exit_status, command, image, stderr):
+ self.container = container
+ self.exit_status = exit_status
+ self.command = command
+ self.image = image
+ self.stderr = stderr
+ msg = ("Command '{}' in image '{}' returned non-zero exit status {}: "
+ "{}").format(command, image, exit_status, stderr)
+ super(ContainerError, self).__init__(msg)
+
+
+class StreamParseError(RuntimeError):
+ def __init__(self, reason):
+ self.msg = reason
+
+
+class BuildError(Exception):
+ pass
+
+
+def create_unexpected_kwargs_error(name, kwargs):
+ quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)]
+ text = ["{}() ".format(name)]
+ if len(quoted_kwargs) == 1:
+ text.append("got an unexpected keyword argument ")
+ else:
+ text.append("got unexpected keyword arguments ")
+ text.append(', '.join(quoted_kwargs))
+ return TypeError(''.join(text))
diff --git a/docker/models/__init__.py b/docker/models/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/docker/models/__init__.py
diff --git a/docker/models/containers.py b/docker/models/containers.py
new file mode 100644
index 0000000..ad1cb61
--- /dev/null
+++ b/docker/models/containers.py
@@ -0,0 +1,883 @@
+import copy
+
+from ..errors import (ContainerError, ImageNotFound,
+ create_unexpected_kwargs_error)
+from ..types import HostConfig
+from .images import Image
+from .resource import Collection, Model
+
+
+class Container(Model):
+
+ @property
+ def name(self):
+ """
+ The name of the container.
+ """
+ if self.attrs.get('Name') is not None:
+ return self.attrs['Name'].lstrip('/')
+
+ @property
+ def status(self):
+ """
+ The status of the container. For example, ``running``, or ``exited``.
+ """
+ return self.attrs['State']['Status']
+
+ def attach(self, **kwargs):
+ """
+ Attach to this container.
+
+ :py:meth:`logs` is a wrapper around this method, which you can
+ use instead if you want to fetch/stream container output without first
+ retrieving the entire backlog.
+
+ Args:
+ stdout (bool): Include stdout.
+ stderr (bool): Include stderr.
+ stream (bool): Return container output progressively as an iterator
+ of strings, rather than a single string.
+ logs (bool): Include the container's previous output.
+
+ Returns:
+ By default, the container's output as a single string.
+
+ If ``stream=True``, an iterator of output strings.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.attach(self.id, **kwargs)
+
+ def attach_socket(self, **kwargs):
+ """
+ Like :py:meth:`attach`, but returns the underlying socket-like object
+ for the HTTP request.
+
+ Args:
+ params (dict): Dictionary of request parameters (e.g. ``stdout``,
+ ``stderr``, ``stream``).
+ ws (bool): Use websockets instead of raw HTTP.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.attach_socket(self.id, **kwargs)
+
+ def commit(self, repository=None, tag=None, **kwargs):
+ """
+ Commit a container to an image. Similar to the ``docker commit``
+ command.
+
+ Args:
+ repository (str): The repository to push the image to
+ tag (str): The tag to push
+ message (str): A commit message
+ author (str): The name of the author
+ changes (str): Dockerfile instructions to apply while committing
+ conf (dict): The configuration for the container. See the
+ `Remote API documentation
+ <https://docs.docker.com/reference/api/docker_remote_api/>`_
+ for full details.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ resp = self.client.api.commit(self.id, repository=repository, tag=tag,
+ **kwargs)
+ return self.client.images.get(resp['Id'])
+
+ def diff(self):
+ """
+ Inspect changes on a container's filesystem.
+
+ Returns:
+ (str)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.diff(self.id)
+
+ def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
+ privileged=False, user='', detach=False, stream=False,
+ socket=False):
+ """
+ Run a command inside this container. Similar to
+ ``docker exec``.
+
+ Args:
+ cmd (str or list): Command to be executed
+ stdout (bool): Attach to stdout. Default: ``True``
+ stderr (bool): Attach to stderr. Default: ``True``
+ stdin (bool): Attach to stdin. Default: ``False``
+ tty (bool): Allocate a pseudo-TTY. Default: False
+ privileged (bool): Run as privileged.
+ user (str): User to execute command as. Default: root
+ detach (bool): If true, detach from the exec command.
+ Default: False
+            socket (bool): Return the connection socket. Default: False
+ stream (bool): Stream response data. Default: False
+
+ Returns:
+ (generator or str): If ``stream=True``, a generator yielding
+ response chunks. A string containing response data otherwise.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.exec_create(
+ self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
+ privileged=privileged, user=user
+ )
+ return self.client.api.exec_start(
+ resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
+ )
+
+ def export(self):
+ """
+ Export the contents of the container's filesystem as a tar archive.
+
+ Returns:
+ (str): The filesystem tar archive
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.export(self.id)
+
+ def get_archive(self, path):
+ """
+ Retrieve a file or folder from the container in the form of a tar
+ archive.
+
+ Args:
+ path (str): Path to the file or folder to retrieve
+
+ Returns:
+ (tuple): First element is a raw tar data stream. Second element is
+ a dict containing ``stat`` information on the specified ``path``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.get_archive(self.id, path)
+
+ def kill(self, signal=None):
+ """
+ Kill or send a signal to the container.
+
+ Args:
+ signal (str or int): The signal to send. Defaults to ``SIGKILL``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ return self.client.api.kill(self.id, signal=signal)
+
+ def logs(self, **kwargs):
+ """
+ Get logs from this container. Similar to the ``docker logs`` command.
+
+ The ``stream`` parameter makes the ``logs`` function return a blocking
+ generator you can iterate over to retrieve log output as it happens.
+
+ Args:
+ stdout (bool): Get ``STDOUT``
+ stderr (bool): Get ``STDERR``
+ stream (bool): Stream the response
+ timestamps (bool): Show timestamps
+ tail (str or int): Output specified number of lines at the end of
+ logs. Either an integer of number of lines or the string
+ ``all``. Default ``all``
+ since (datetime or int): Show logs since a given datetime or
+ integer epoch (in seconds)
+ follow (bool): Follow log output
+
+ Returns:
+ (generator or str): Logs from the container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.logs(self.id, **kwargs)
+
+ def pause(self):
+ """
+ Pauses all processes within this container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.pause(self.id)
+
+ def put_archive(self, path, data):
+ """
+ Insert a file or folder in this container using a tar archive as
+ source.
+
+ Args:
+ path (str): Path inside the container where the file(s) will be
+ extracted. Must exist.
+ data (bytes): tar data to be extracted
+
+ Returns:
+ (bool): True if the call succeeds.
+
+ Raises:
+ :py:class:`~docker.errors.APIError` If an error occurs.
+ """
+ return self.client.api.put_archive(self.id, path, data)
+
+ def remove(self, **kwargs):
+ """
+ Remove this container. Similar to the ``docker rm`` command.
+
+ Args:
+ v (bool): Remove the volumes associated with the container
+ link (bool): Remove the specified link and not the underlying
+ container
+ force (bool): Force the removal of a running container (uses
+ ``SIGKILL``)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_container(self.id, **kwargs)
+
+ def rename(self, name):
+ """
+ Rename this container. Similar to the ``docker rename`` command.
+
+ Args:
+ name (str): New name for the container
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.rename(self.id, name)
+
+ def resize(self, height, width):
+ """
+ Resize the tty session.
+
+ Args:
+ height (int): Height of tty session
+ width (int): Width of tty session
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.resize(self.id, height, width)
+
+ def restart(self, **kwargs):
+ """
+ Restart this container. Similar to the ``docker restart`` command.
+
+ Args:
+ timeout (int): Number of seconds to try to stop for before killing
+ the container. Once killed it will then be restarted. Default
+ is 10 seconds.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.restart(self.id, **kwargs)
+
+ def start(self, **kwargs):
+ """
+ Start this container. Similar to the ``docker start`` command, but
+ doesn't support attach options.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.start(self.id, **kwargs)
+
+ def stats(self, **kwargs):
+ """
+ Stream statistics for this container. Similar to the
+ ``docker stats`` command.
+
+ Args:
+ decode (bool): If set to true, stream will be decoded into dicts
+ on the fly. False by default.
+ stream (bool): If set to false, only the current stats will be
+ returned instead of a stream. True by default.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.stats(self.id, **kwargs)
+
+ def stop(self, **kwargs):
+ """
+ Stops a container. Similar to the ``docker stop`` command.
+
+ Args:
+ timeout (int): Timeout in seconds to wait for the container to
+ stop before sending a ``SIGKILL``. Default: 10
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.stop(self.id, **kwargs)
+
+ def top(self, **kwargs):
+ """
+ Display the running processes of the container.
+
+ Args:
+ ps_args (str): An optional arguments passed to ps (e.g. ``aux``)
+
+ Returns:
+ (str): The output of the top
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.top(self.id, **kwargs)
+
+ def unpause(self):
+ """
+ Unpause all processes within the container.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.unpause(self.id)
+
+ def update(self, **kwargs):
+ """
+ Update resource configuration of the containers.
+
+ Args:
+ blkio_weight (int): Block IO (relative weight), between 10 and 1000
+ cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
+ cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
+ cpu_shares (int): CPU shares (relative weight)
+ cpuset_cpus (str): CPUs in which to allow execution
+ cpuset_mems (str): MEMs in which to allow execution
+ mem_limit (int or str): Memory limit
+ mem_reservation (int or str): Memory soft limit
+ memswap_limit (int or str): Total memory (memory + swap), -1 to
+ disable swap
+ kernel_memory (int or str): Kernel memory limit
+ restart_policy (dict): Restart policy dictionary
+
+ Returns:
+ (dict): Dictionary containing a ``Warnings`` key.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.update_container(self.id, **kwargs)
+
+ def wait(self, **kwargs):
+ """
+ Block until the container stops, then return its exit code. Similar to
+ the ``docker wait`` command.
+
+ Args:
+ timeout (int): Request timeout
+
+ Returns:
+ (int): The exit code of the container. Returns ``-1`` if the API
+ responds without a ``StatusCode`` attribute.
+
+ Raises:
+ :py:class:`requests.exceptions.ReadTimeout`
+ If the timeout is exceeded.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.wait(self.id, **kwargs)
+
+
+class ContainerCollection(Collection):
+ model = Container
+
+ def run(self, image, command=None, stdout=True, stderr=False,
+ remove=False, **kwargs):
+ """
+ Run a container. By default, it will wait for the container to finish
+ and return its logs, similar to ``docker run``.
+
+ If the ``detach`` argument is ``True``, it will start the container
+ and immediately return a :py:class:`Container` object, similar to
+ ``docker run -d``.
+
+ Example:
+ Run a container and get its output:
+
+ >>> import docker
+ >>> client = docker.from_env()
+ >>> client.containers.run('alpine', 'echo hello world')
+ b'hello world\\n'
+
+ Run a container and detach:
+
+ >>> container = client.containers.run('bfirsh/reticulate-splines',
+ detach=True)
+ >>> container.logs()
+ 'Reticulating spline 1...\\nReticulating spline 2...\\n'
+
+ Args:
+ image (str): The image to run.
+ command (str or list): The command to run in the container.
+ blkio_weight_device: Block IO weight (relative device weight) in
+ the form of: ``[{"Path": "device_path", "Weight": weight}]``.
+ blkio_weight: Block IO weight (relative weight), accepts a weight
+ value between 10 and 1000.
+ cap_add (list of str): Add kernel capabilities. For example,
+ ``["SYS_ADMIN", "MKNOD"]``.
+ cap_drop (list of str): Drop kernel capabilities.
+            cpu_period (int): The length of a CPU period in microseconds.
+            cpu_quota (int): Microseconds of CPU time that the container can
+                get in a CPU period.
+ cpu_shares (int): CPU shares (relative weight).
+ cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
+ ``0,1``).
+ detach (bool): Run container in the background and return a
+ :py:class:`Container` object.
+ device_read_bps: Limit read rate (bytes per second) from a device
+ in the form of: `[{"Path": "device_path", "Rate": rate}]`
+ device_read_iops: Limit read rate (IO per second) from a device.
+ device_write_bps: Limit write rate (bytes per second) from a
+ device.
+ device_write_iops: Limit write rate (IO per second) from a device.
+ devices (list): Expose host devices to the container, as a list
+ of strings in the form
+ ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
+
+ For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
+ to have read-write access to the host's ``/dev/sda`` via a
+ node named ``/dev/xvda`` inside the container.
+ dns (list): Set custom DNS servers.
+ dns_opt (list): Additional options to be added to the container's
+ ``resolv.conf`` file.
+ dns_search (list): DNS search domains.
+            domainname (str): Set custom domain name for the container.
+ entrypoint (str or list): The entrypoint for the container.
+ environment (dict or list): Environment variables to set inside
+ the container, as a dictionary or a list of strings in the
+ format ``["SOMEVARIABLE=xxx"]``.
+            extra_hosts (dict): Additional hostnames to resolve inside the
+ container, as a mapping of hostname to IP address.
+ group_add (list): List of additional group names and/or IDs that
+ the container process will run as.
+ hostname (str): Optional hostname for the container.
+ ipc_mode (str): Set the IPC mode for the container.
+ isolation (str): Isolation technology to use. Default: `None`.
+ labels (dict or list): A dictionary of name-value labels (e.g.
+ ``{"label1": "value1", "label2": "value2"}``) or a list of
+ names of labels to set with empty values (e.g.
+ ``["label1", "label2"]``)
+ links (dict or list of tuples): Either a dictionary mapping name
+ to alias or as a list of ``(name, alias)`` tuples.
+ log_config (dict): Logging configuration, as a dictionary with
+ keys:
+
+ - ``type`` The logging driver name.
+ - ``config`` A dictionary of configuration for the logging
+ driver.
+
+ mac_address (str): MAC address to assign to the container.
+ mem_limit (float or str): Memory limit. Accepts float values
+ (which represent the memory limit of the created container in
+ bytes) or a string with a units identification char
+ (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
+ specified without a units character, bytes are assumed as an
+ intended unit.
+            mem_reservation (str or int): Memory soft limit
+                (e.g. ``512m``).
+ mem_swappiness (int): Tune a container's memory swappiness
+ behavior. Accepts number between 0 and 100.
+ memswap_limit (str or int): Maximum amount of memory + swap a
+ container is allowed to consume.
+ networks (list): A list of network names to connect this
+ container to.
+ name (str): The name for this container.
+ network_disabled (bool): Disable networking.
+ network_mode (str): One of:
+
+                - ``bridge`` Create a new network stack for the container
+ on the bridge network.
+ - ``none`` No networking for this container.
+ - ``container:<name|id>`` Reuse another container's network
+ stack.
+ - ``host`` Use the host network stack.
+ oom_kill_disable (bool): Whether to disable OOM killer.
+ oom_score_adj (int): An integer value containing the score given
+ to the container in order to tune OOM killer preferences.
+ pid_mode (str): If set to ``host``, use the host PID namespace
+ inside the container.
+ pids_limit (int): Tune a container's pids limit. Set ``-1`` for
+ unlimited.
+ ports (dict): Ports to bind inside the container.
+
+ The keys of the dictionary are the ports to bind inside the
+ container, either as an integer or a string in the form
+ ``port/protocol``, where the protocol is either ``tcp`` or
+ ``udp``.
+
+ The values of the dictionary are the corresponding ports to
+ open on the host, which can be either:
+
+ - The port number, as an integer. For example,
+ ``{'2222/tcp': 3333}`` will expose port 2222 inside the
+ container as port 3333 on the host.
+ - ``None``, to assign a random host port. For example,
+ ``{'2222/tcp': None}``.
+ - A tuple of ``(address, port)`` if you want to specify the
+ host interface. For example,
+ ``{'1111/tcp': ('127.0.0.1', 1111)}``.
+ - A list of integers, if you want to bind multiple host ports
+ to a single container port. For example,
+ ``{'1111/tcp': [1234, 4567]}``.
+
+ privileged (bool): Give extended privileges to this container.
+ publish_all_ports (bool): Publish all ports to the host.
+ read_only (bool): Mount the container's root filesystem as read
+ only.
+ remove (bool): Remove the container when it has finished running.
+ Default: ``False``.
+ restart_policy (dict): Restart the container when it exits.
+ Configured as a dictionary with keys:
+
+ - ``Name`` One of ``on-failure``, or ``always``.
+ - ``MaximumRetryCount`` Number of times to restart the
+ container on failure.
+
+ For example:
+ ``{"Name": "on-failure", "MaximumRetryCount": 5}``
+
+ security_opt (list): A list of string values to customize labels
+ for MLS systems, such as SELinux.
+ shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+ stdin_open (bool): Keep ``STDIN`` open even if not attached.
+ stdout (bool): Return logs from ``STDOUT`` when ``detach=False``.
+ Default: ``True``.
+            stderr (bool): Return logs from ``STDERR`` when ``detach=False``.
+ Default: ``False``.
+ stop_signal (str): The stop signal to use to stop the container
+ (e.g. ``SIGINT``).
+ sysctls (dict): Kernel parameters to set in the container.
+ tmpfs (dict): Temporary filesystems to mount, as a dictionary
+ mapping a path inside the container to options for that path.
+
+ For example:
+
+ .. code-block:: python
+
+ {
+ '/mnt/vol2': '',
+ '/mnt/vol1': 'size=3G,uid=1000'
+ }
+
+ tty (bool): Allocate a pseudo-TTY.
+ ulimits (list): Ulimits to set inside the container, as a list of
+ dicts.
+ user (str or int): Username or UID to run commands as inside the
+ container.
+ userns_mode (str): Sets the user namespace mode for the container
+ when user namespace remapping option is enabled. Supported
+ values are: ``host``
+ volume_driver (str): The name of a volume driver/plugin.
+ volumes (dict or list): A dictionary to configure volumes mounted
+ inside the container. The key is either the host path or a
+ volume name, and the value is a dictionary with the keys:
+
+ - ``bind`` The path to mount the volume inside the container
+ - ``mode`` Either ``rw`` to mount the volume read/write, or
+ ``ro`` to mount it read-only.
+
+ For example:
+
+ .. code-block:: python
+
+ {'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
+ '/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
+
+ volumes_from (list): List of container names or IDs to get
+ volumes from.
+ working_dir (str): Path to the working directory.
+
+ Returns:
+ The container logs, either ``STDOUT``, ``STDERR``, or both,
+ depending on the value of the ``stdout`` and ``stderr`` arguments.
+
+ If ``detach`` is ``True``, a :py:class:`Container` object is
+ returned instead.
+
+ Raises:
+ :py:class:`docker.errors.ContainerError`
+ If the container exits with a non-zero exit code and
+ ``detach`` is ``False``.
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(image, Image):
+ image = image.id
+ detach = kwargs.pop("detach", False)
+ if detach and remove:
+ raise RuntimeError("The options 'detach' and 'remove' cannot be "
+ "used together.")
+
+ try:
+ container = self.create(image=image, command=command,
+ detach=detach, **kwargs)
+ except ImageNotFound:
+ self.client.images.pull(image)
+ container = self.create(image=image, command=command,
+ detach=detach, **kwargs)
+
+ container.start()
+
+ if detach:
+ return container
+
+ exit_status = container.wait()
+ if exit_status != 0:
+ stdout = False
+ stderr = True
+ out = container.logs(stdout=stdout, stderr=stderr)
+ if remove:
+ container.remove()
+ if exit_status != 0:
+ raise ContainerError(container, exit_status, command, image, out)
+ return out
+
+ def create(self, image, command=None, **kwargs):
+ """
+ Create a container without starting it. Similar to ``docker create``.
+
+ Takes the same arguments as :py:meth:`run`, except for ``stdout``,
+ ``stderr``, and ``remove``.
+
+ Returns:
+ A :py:class:`Container` object.
+
+ Raises:
+ :py:class:`docker.errors.ImageNotFound`
+ If the specified image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(image, Image):
+ image = image.id
+ kwargs['image'] = image
+ kwargs['command'] = command
+ kwargs['version'] = self.client.api._version
+ create_kwargs = _create_container_args(kwargs)
+ resp = self.client.api.create_container(**create_kwargs)
+ return self.get(resp['Id'])
+
+ def get(self, container_id):
+ """
+ Get a container by name or ID.
+
+ Args:
+ container_id (str): Container name or ID.
+
+ Returns:
+ A :py:class:`Container` object.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the container does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.inspect_container(container_id)
+ return self.prepare_model(resp)
+
+ def list(self, all=False, before=None, filters=None, limit=-1, since=None):
+ """
+ List containers. Similar to the ``docker ps`` command.
+
+ Args:
+ all (bool): Show all containers. Only running containers are shown
+                by default.
+ since (str): Show only containers created since Id or Name, include
+ non-running ones
+            before (str): Show only containers created before Id or Name,
+ include non-running ones
+ limit (int): Show `limit` last created containers, include
+ non-running ones
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+
+ - `exited` (int): Only containers with specified exit code
+ - `status` (str): One of ``restarting``, ``running``,
+ ``paused``, ``exited``
+ - `label` (str): format either ``"key"`` or ``"key=value"``
+ - `id` (str): The id of the container.
+ - `name` (str): The name of the container.
+ - `ancestor` (str): Filter by container ancestor. Format of
+ ``<image-name>[:tag]``, ``<image-id>``, or
+ ``<image@digest>``.
+ - `before` (str): Only containers created before a particular
+ container. Give the container name or id.
+ - `since` (str): Only containers created after a particular
+ container. Give container name or id.
+
+ A comprehensive list can be found in the documentation for
+ `docker ps
+ <https://docs.docker.com/engine/reference/commandline/ps>`_.
+
+ Returns:
+ (list of :py:class:`Container`)
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.containers(all=all, before=before,
+ filters=filters, limit=limit,
+ since=since)
+ return [self.get(r['Id']) for r in resp]
+
+
+# kwargs to copy straight from run to create
+RUN_CREATE_KWARGS = [
+ 'command',
+ 'detach',
+ 'domainname',
+ 'entrypoint',
+ 'environment',
+ 'healthcheck',
+ 'hostname',
+ 'image',
+ 'labels',
+ 'mac_address',
+ 'name',
+ 'network_disabled',
+ 'stdin_open',
+ 'stop_signal',
+ 'tty',
+ 'user',
+ 'volume_driver',
+ 'working_dir',
+]
+
+# kwargs to copy straight from run to host_config
+RUN_HOST_CONFIG_KWARGS = [
+ 'blkio_weight_device',
+ 'blkio_weight',
+ 'cap_add',
+ 'cap_drop',
+ 'cgroup_parent',
+ 'cpu_period',
+ 'cpu_quota',
+ 'cpu_shares',
+ 'cpuset_cpus',
+ 'device_read_bps',
+ 'device_read_iops',
+ 'device_write_bps',
+ 'device_write_iops',
+ 'devices',
+ 'dns_opt',
+ 'dns_search',
+ 'dns',
+ 'extra_hosts',
+ 'group_add',
+ 'ipc_mode',
+ 'isolation',
+ 'kernel_memory',
+ 'links',
+ 'log_config',
+ 'lxc_conf',
+ 'mem_limit',
+ 'mem_reservation',
+ 'mem_swappiness',
+ 'memswap_limit',
+ 'network_mode',
+ 'oom_kill_disable',
+ 'oom_score_adj',
+ 'pid_mode',
+ 'pids_limit',
+ 'privileged',
+ 'publish_all_ports',
+ 'read_only',
+ 'restart_policy',
+ 'security_opt',
+ 'shm_size',
+ 'sysctls',
+ 'tmpfs',
+ 'ulimits',
+ 'userns_mode',
+ 'version',
+ 'volumes_from',
+]
+
+
+def _create_container_args(kwargs):
+ """
+ Convert arguments to create() to arguments to create_container().
+ """
+ # Copy over kwargs which can be copied directly
+ create_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in RUN_CREATE_KWARGS:
+ create_kwargs[key] = kwargs.pop(key)
+ host_config_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in RUN_HOST_CONFIG_KWARGS:
+ host_config_kwargs[key] = kwargs.pop(key)
+
+ # Process kwargs which are split over both create and host_config
+ ports = kwargs.pop('ports', {})
+ if ports:
+ host_config_kwargs['port_bindings'] = ports
+
+ volumes = kwargs.pop('volumes', {})
+ if volumes:
+ host_config_kwargs['binds'] = volumes
+
+ networks = kwargs.pop('networks', [])
+ if networks:
+ create_kwargs['networking_config'] = {network: None
+ for network in networks}
+
+ # All kwargs should have been consumed by this point, so raise
+ # error if any are left
+ if kwargs:
+ raise create_unexpected_kwargs_error('run', kwargs)
+
+ create_kwargs['host_config'] = HostConfig(**host_config_kwargs)
+
+ # Fill in any kwargs which need processing by create_host_config first
+ port_bindings = create_kwargs['host_config'].get('PortBindings')
+ if port_bindings:
+ # sort to make consistent for tests
+ create_kwargs['ports'] = [tuple(p.split('/', 1))
+ for p in sorted(port_bindings.keys())]
+ binds = create_kwargs['host_config'].get('Binds')
+ if binds:
+ create_kwargs['volumes'] = [v.split(':')[0] for v in binds]
+ return create_kwargs
diff --git a/docker/models/images.py b/docker/models/images.py
new file mode 100644
index 0000000..32068e6
--- /dev/null
+++ b/docker/models/images.py
@@ -0,0 +1,269 @@
+import re
+
+import six
+
+from ..api import APIClient
+from ..errors import BuildError
+from ..utils.json_stream import json_stream
+from .resource import Collection, Model
+
+
+class Image(Model):
+ """
+ An image on the server.
+ """
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
+
+ @property
+ def short_id(self):
+ """
+ The ID of the image truncated to 10 characters, plus the ``sha256:``
+ prefix.
+ """
+ if self.id.startswith('sha256:'):
+ return self.id[:17]
+ return self.id[:10]
+
+ @property
+ def tags(self):
+ """
+ The image's tags.
+ """
+ return [
+ tag for tag in self.attrs.get('RepoTags', [])
+ if tag != '<none>:<none>'
+ ]
+
+ def history(self):
+ """
+ Show the history of an image.
+
+ Returns:
+ (str): The history of the image.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.history(self.id)
+
+ def save(self):
+ """
+ Get a tarball of an image. Similar to the ``docker save`` command.
+
+ Returns:
+ (urllib3.response.HTTPResponse object): The response from the
+ daemon.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+            >>> image = cli.images.get("fedora:latest")
+ >>> resp = image.save()
+ >>> f = open('/tmp/fedora-latest.tar', 'w')
+ >>> f.write(resp.data)
+ >>> f.close()
+ """
+ return self.client.api.get_image(self.id)
+
+ def tag(self, repository, tag=None, **kwargs):
+ """
+ Tag this image into a repository. Similar to the ``docker tag``
+ command.
+
+ Args:
+ repository (str): The repository to set for the tag
+ tag (str): The tag name
+ force (bool): Force
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Returns:
+ (bool): ``True`` if successful
+ """
+        return self.client.api.tag(self.id, repository, tag=tag, **kwargs)
+
+
+class ImageCollection(Collection):
+ model = Image
+
+ def build(self, **kwargs):
+ """
+ Build an image and return it. Similar to the ``docker build``
+ command. Either ``path`` or ``fileobj`` must be set.
+
+ If you have a tar file for the Docker build context (including a
+ Dockerfile) already, pass a readable file-like object to ``fileobj``
+ and also pass ``custom_context=True``. If the stream is compressed
+ also, set ``encoding`` to the correct value (e.g ``gzip``).
+
+ If you want to get the raw output of the build, use the
+ :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
+ low-level API.
+
+ Args:
+ path (str): Path to the directory containing the Dockerfile
+ fileobj: A file object to use as the Dockerfile. (Or a file-like
+ object)
+ tag (str): A tag to add to the final image
+ quiet (bool): Whether to return the status
+ nocache (bool): Don't use the cache when set to ``True``
+ rm (bool): Remove intermediate containers. The ``docker build``
+ command now defaults to ``--rm=true``, but we have kept the old
+ default of `False` to preserve backward compatibility
+ stream (bool): *Deprecated for API version > 1.8 (always True)*.
+ Return a blocking generator you can iterate over to retrieve
+ build output as it happens
+ timeout (int): HTTP timeout
+ custom_context (bool): Optional if using ``fileobj``
+ encoding (str): The encoding for a stream. Set to ``gzip`` for
+ compressing
+ pull (bool): Downloads any updates to the FROM image in Dockerfiles
+ forcerm (bool): Always remove intermediate containers, even after
+ unsuccessful builds
+ dockerfile (str): path within the build context to the Dockerfile
+ buildargs (dict): A dictionary of build arguments
+ container_limits (dict): A dictionary of limits applied to each
+ container created by the build process. Valid keys:
+
+ - memory (int): set memory limit for build
+ - memswap (int): Total memory (memory + swap), -1 to disable
+ swap
+ - cpushares (int): CPU shares (relative weight)
+ - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+ ``"0-3"``, ``"0,1"``
+ decode (bool): If set to ``True``, the returned stream will be
+ decoded into dicts on the fly. Default ``False``.
+
+ Returns:
+ (:py:class:`Image`): The built image.
+
+ Raises:
+ :py:class:`docker.errors.BuildError`
+ If there is an error during the build.
+ :py:class:`docker.errors.APIError`
+ If the server returns any other error.
+ ``TypeError``
+ If neither ``path`` nor ``fileobj`` is specified.
+ """
+ resp = self.client.api.build(**kwargs)
+ if isinstance(resp, six.string_types):
+ return self.get(resp)
+ events = list(json_stream(resp))
+ if not events:
+            raise BuildError('Unknown')
+ event = events[-1]
+ if 'stream' in event:
+ match = re.search(r'Successfully built ([0-9a-f]+)',
+ event.get('stream', ''))
+ if match:
+ image_id = match.group(1)
+ return self.get(image_id)
+
+ raise BuildError(event.get('error') or event)
+
+ def get(self, name):
+ """
+ Gets an image.
+
+ Args:
+ name (str): The name of the image.
+
+ Returns:
+ (:py:class:`Image`): The image.
+
+ Raises:
+            :py:class:`docker.errors.ImageNotFound`
+                If the image does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_image(name))
+
+ def list(self, name=None, all=False, filters=None):
+ """
+ List images on the server.
+
+ Args:
+ name (str): Only show images belonging to the repository ``name``
+ all (bool): Show intermediate image layers. By default, these are
+ filtered out.
+ filters (dict): Filters to be processed on the image list.
+ Available filters:
+ - ``dangling`` (bool)
+ - ``label`` (str): format either ``key`` or ``key=value``
+
+ Returns:
+ (list of :py:class:`Image`): The images.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.images(name=name, all=all, filters=filters)
+ return [self.prepare_model(r) for r in resp]
+
+ def load(self, data):
+ """
+ Load an image that was previously saved using
+ :py:meth:`~docker.models.images.Image.save` (or ``docker save``).
+ Similar to ``docker load``.
+
+ Args:
+ data (binary): Image data to be loaded.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.load_image(data)
+
+ def pull(self, name, **kwargs):
+ """
+ Pull an image of the given name and return it. Similar to the
+ ``docker pull`` command.
+
+ If you want to get the raw pull output, use the
+ :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
+ low-level API.
+
+ Args:
+ repository (str): The repository to pull
+ tag (str): The tag to pull
+ insecure_registry (bool): Use an insecure registry
+ auth_config (dict): Override the credentials that
+ :py:meth:`~docker.client.DockerClient.login` has set for
+ this request. ``auth_config`` should contain the ``username``
+ and ``password`` keys to be valid.
+
+ Returns:
+ (:py:class:`Image`): The image that has been pulled.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> image = client.images.pull('busybox')
+ """
+ self.client.api.pull(name, **kwargs)
+ return self.get(name)
+
+ def push(self, repository, tag=None, **kwargs):
+ return self.client.api.push(repository, tag=tag, **kwargs)
+ push.__doc__ = APIClient.push.__doc__
+
+ def remove(self, *args, **kwargs):
+ self.client.api.remove_image(*args, **kwargs)
+ remove.__doc__ = APIClient.remove_image.__doc__
+
+ def search(self, *args, **kwargs):
+ return self.client.api.search(*args, **kwargs)
+ search.__doc__ = APIClient.search.__doc__
diff --git a/docker/models/networks.py b/docker/models/networks.py
new file mode 100644
index 0000000..d5e2097
--- /dev/null
+++ b/docker/models/networks.py
@@ -0,0 +1,181 @@
+from .containers import Container
+from .resource import Model, Collection
+
+
+class Network(Model):
+ """
+ A Docker network.
+ """
+ @property
+ def name(self):
+ """
+ The name of the network.
+ """
+ return self.attrs.get('Name')
+
+ @property
+ def containers(self):
+ """
+ The containers that are connected to the network, as a list of
+ :py:class:`~docker.models.containers.Container` objects.
+ """
+ return [
+ self.client.containers.get(cid) for cid in
+ self.attrs.get('Containers', {}).keys()
+ ]
+
+ def connect(self, container):
+ """
+ Connect a container to this network.
+
+ Args:
+ container (str): Container to connect to this network, as either
+ an ID, name, or :py:class:`~docker.models.containers.Container`
+ object.
+ aliases (list): A list of aliases for this endpoint. Names in that
+ list can be used within the network to reach the container.
+ Defaults to ``None``.
+ links (list): A list of links for this endpoint. Containers
+ declared in this list will be linked to this container.
+ Defaults to ``None``.
+ ipv4_address (str): The IP address of this container on the
+ network, using the IPv4 protocol. Defaults to ``None``.
+ ipv6_address (str): The IP address of this container on the
+ network, using the IPv6 protocol. Defaults to ``None``.
+ link_local_ips (list): A list of link-local (IPv4/IPv6) addresses.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(container, Container):
+ container = container.id
+ return self.client.api.connect_container_to_network(container, self.id)
+
+ def disconnect(self, container):
+ """
+ Disconnect a container from this network.
+
+ Args:
+ container (str): Container to disconnect from this network, as
+ either an ID, name, or
+ :py:class:`~docker.models.containers.Container` object.
+ force (bool): Force the container to disconnect from a network.
+ Default: ``False``
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if isinstance(container, Container):
+ container = container.id
+ return self.client.api.disconnect_container_from_network(container,
+ self.id)
+
+ def remove(self):
+ """
+ Remove this network.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_network(self.id)
+
+
+class NetworkCollection(Collection):
+ """
+ Networks on the Docker server.
+ """
+ model = Network
+
+ def create(self, name, *args, **kwargs):
+ """
+ Create a network. Similar to the ``docker network create`` command.
+
+ Args:
+ name (str): Name of the network
+ driver (str): Name of the driver used to create the network
+ options (dict): Driver options as a key-value dictionary
+ ipam (dict): Optional custom IP scheme for the network.
+ Created with :py:class:`~docker.types.IPAMConfig`.
+ check_duplicate (bool): Request daemon to check for networks with
+ same name. Default: ``True``.
+ internal (bool): Restrict external access to the network. Default
+ ``False``.
+ labels (dict): Map of labels to set on the network. Default
+ ``None``.
+ enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
+
+ Returns:
+ (:py:class:`Network`): The network that was created.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+ A network using the bridge driver:
+
+ >>> client.networks.create("network1", driver="bridge")
+
+ You can also create more advanced networks with custom IPAM
+ configurations. For example, setting the subnet to
+ ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
+
+ .. code-block:: python
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='192.168.52.0/24',
+ gateway='192.168.52.254'
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool]
+ )
+ >>> client.networks.create(
+ "network1",
+ driver="bridge",
+ ipam=ipam_config
+ )
+
+ """
+ resp = self.client.api.create_network(name, *args, **kwargs)
+ return self.get(resp['Id'])
+
+ def get(self, network_id):
+ """
+ Get a network by its ID.
+
+ Args:
+ network_id (str): The ID of the network.
+
+ Returns:
+ (:py:class:`Network`) The network.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the network does not exist.
+
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ """
+ return self.prepare_model(self.client.api.inspect_network(network_id))
+
+ def list(self, *args, **kwargs):
+ """
+ List networks. Similar to the ``docker network ls`` command.
+
+ Args:
+ names (list): List of names to filter by.
+ ids (list): List of ids to filter by.
+
+ Returns:
+ (list of :py:class:`Network`) The networks on the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.networks(*args, **kwargs)
+ return [self.prepare_model(item) for item in resp]
diff --git a/docker/models/nodes.py b/docker/models/nodes.py
new file mode 100644
index 0000000..0887f99
--- /dev/null
+++ b/docker/models/nodes.py
@@ -0,0 +1,88 @@
+from .resource import Model, Collection
+
+
+class Node(Model):
+ """A node in a swarm."""
+ id_attribute = 'ID'
+
+ @property
+ def version(self):
+ """
+ The version number of the node. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def update(self, node_spec):
+ """
+ Update the node's configuration.
+
+ Args:
+ node_spec (dict): Configuration settings to update. Any values
+ not provided will be removed. Default: ``None``
+
+ Returns:
+ `True` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> node_spec = {'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ }
+ >>> node.update(node_spec)
+
+ """
+ return self.client.api.update_node(self.id, self.version, node_spec)
+
+
+class NodeCollection(Collection):
+ """Nodes on the Docker server."""
+ model = Node
+
+ def get(self, node_id):
+ """
+ Get a node.
+
+ Args:
+ node_id (string): ID of the node to be inspected.
+
+ Returns:
+ A :py:class:`Node` object.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_node(node_id))
+
+ def list(self, *args, **kwargs):
+ """
+ List swarm nodes.
+
+ Args:
+ filters (dict): Filters to process on the nodes list. Valid
+ filters: ``id``, ``name``, ``membership`` and ``role``.
+ Default: ``None``
+
+ Returns:
+ A list of :py:class:`Node` objects.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.nodes.list(filters={'role': 'manager'})
+ """
+ return [
+ self.prepare_model(n)
+ for n in self.client.api.nodes(*args, **kwargs)
+ ]
diff --git a/docker/models/resource.py b/docker/models/resource.py
new file mode 100644
index 0000000..95712ae
--- /dev/null
+++ b/docker/models/resource.py
@@ -0,0 +1,90 @@
+
+class Model(object):
+ """
+ A base class for representing a single object on the server.
+ """
+ id_attribute = 'Id'
+
+ def __init__(self, attrs=None, client=None, collection=None):
+ #: A client pointing at the server that this object is on.
+ self.client = client
+
+ #: The collection that this model is part of.
+ self.collection = collection
+
+ #: The raw representation of this object from the API
+ self.attrs = attrs
+ if self.attrs is None:
+ self.attrs = {}
+
+ def __repr__(self):
+ return "<%s: %s>" % (self.__class__.__name__, self.short_id)
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.id == other.id
+
+ @property
+ def id(self):
+ """
+ The ID of the object.
+ """
+ return self.attrs.get(self.id_attribute)
+
+ @property
+ def short_id(self):
+ """
+ The ID of the object, truncated to 10 characters.
+ """
+ return self.id[:10]
+
+ def reload(self):
+ """
+ Load this object from the server again and update ``attrs`` with the
+ new data.
+ """
+ new_model = self.collection.get(self.id)
+ self.attrs = new_model.attrs
+
+
+class Collection(object):
+ """
+ A base class for representing all objects of a particular type on the
+ server.
+ """
+
+ #: The type of object this collection represents, set by subclasses
+ model = None
+
+ def __init__(self, client=None):
+ #: The client pointing at the server that this collection of objects
+ #: is on.
+ self.client = client
+
+ def __call__(self, *args, **kwargs):
+ raise TypeError(
+ "'{}' object is not callable. You might be trying to use the old "
+ "(pre-2.0) API - use docker.APIClient if so."
+ .format(self.__class__.__name__))
+
+ def list(self):
+ raise NotImplementedError
+
+ def get(self, key):
+ raise NotImplementedError
+
+ def create(self, attrs=None):
+ raise NotImplementedError
+
+ def prepare_model(self, attrs):
+ """
+ Create a model from a set of attributes.
+ """
+ if isinstance(attrs, Model):
+ attrs.client = self.client
+ attrs.collection = self
+ return attrs
+ elif isinstance(attrs, dict):
+ return self.model(attrs=attrs, client=self.client, collection=self)
+ else:
+ raise Exception("Can't create %s from %s" %
+ (self.model.__name__, attrs))
diff --git a/docker/models/services.py b/docker/models/services.py
new file mode 100644
index 0000000..d70c9e7
--- /dev/null
+++ b/docker/models/services.py
@@ -0,0 +1,240 @@
+import copy
+from docker.errors import create_unexpected_kwargs_error
+from docker.types import TaskTemplate, ContainerSpec
+from .resource import Model, Collection
+
+
+class Service(Model):
+ """A service."""
+ id_attribute = 'ID'
+
+ @property
+ def name(self):
+ """The service's name."""
+ return self.attrs['Spec']['Name']
+
+ @property
+ def version(self):
+ """
+ The version number of the service. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def remove(self):
+ """
+ Stop and remove the service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_service(self.id)
+
+ def tasks(self, filters=None):
+ """
+ List the tasks in this service.
+
+ Args:
+ filters (dict): A map of filters to process on the tasks list.
+ Valid filters: ``id``, ``name``, ``node``,
+ ``label``, and ``desired-state``.
+
+ Returns:
+ (list): List of task dictionaries.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ if filters is None:
+ filters = {}
+ filters['service'] = self.id
+ return self.client.api.tasks(filters=filters)
+
+ def update(self, **kwargs):
+ """
+ Update a service's configuration. Similar to the ``docker service
+ update`` command.
+
+ Takes the same parameters as :py:meth:`~ServiceCollection.create`.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ # Image is required, so if it hasn't been set, use current image
+ if 'image' not in kwargs:
+ spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ kwargs['image'] = spec['Image']
+
+ create_kwargs = _get_create_service_kwargs('update', kwargs)
+
+ return self.client.api.update_service(
+ self.id,
+ self.version,
+ **create_kwargs
+ )
+
+
+class ServiceCollection(Collection):
+ """Services on the Docker server."""
+ model = Service
+
+ def create(self, image, command=None, **kwargs):
+ """
+ Create a service. Similar to the ``docker service create`` command.
+
+ Args:
+ image (str): The image name to use for the containers.
+ command (list of str or str): Command to run.
+ args (list of str): Arguments to the command.
+ constraints (list of str): Placement constraints.
+ container_labels (dict): Labels to apply to the container.
+ endpoint_spec (dict): Properties that can be configured to
+ access and load balance a service. Default: ``None``.
+ env (list of str): Environment variables, in the form
+ ``KEY=val``.
+ labels (dict): Labels to apply to the service.
+ log_driver (str): Log driver to use for containers.
+ log_driver_options (dict): Log driver options.
+ mode (string): Scheduling mode for the service (``replicated`` or
+ ``global``). Defaults to ``replicated``.
+ mounts (list of str): Mounts for the containers, in the form
+ ``source:target:options``, where options is either
+ ``ro`` or ``rw``.
+ name (str): Name to give to the service.
+ networks (list): List of network names or IDs to attach the
+ service to. Default: ``None``.
+ resources (dict): Resource limits and reservations. For the
+ format, see the Remote API documentation.
+ restart_policy (dict): Restart policy for containers. For the
+ format, see the Remote API documentation.
+ stop_grace_period (int): Amount of time to wait for
+ containers to terminate before forcefully killing them.
+ update_config (dict): Specification for the update strategy of the
+ service. Default: ``None``
+ user (str): User to run commands as.
+ workdir (str): Working directory for commands to run.
+
+ Returns:
+ (:py:class:`Service`) The created service.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ kwargs['image'] = image
+ kwargs['command'] = command
+ create_kwargs = _get_create_service_kwargs('create', kwargs)
+ service_id = self.client.api.create_service(**create_kwargs)
+ return self.get(service_id)
+
+ def get(self, service_id):
+ """
+ Get a service.
+
+ Args:
+ service_id (str): The ID of the service.
+
+ Returns:
+ (:py:class:`Service`): The service.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the service does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_service(service_id))
+
+ def list(self, **kwargs):
+ """
+ List services.
+
+ Args:
+ filters (dict): Filters to process on the services list. Valid
+ filters: ``id`` and ``name``. Default: ``None``.
+
+ Returns:
+ (list of :py:class:`Service`): The services.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return [
+ self.prepare_model(s)
+ for s in self.client.api.services(**kwargs)
+ ]
+
+
+# kwargs to copy straight over to ContainerSpec
+CONTAINER_SPEC_KWARGS = [
+ 'image',
+ 'command',
+ 'args',
+ 'env',
+ 'workdir',
+ 'user',
+ 'labels',
+ 'mounts',
+ 'stop_grace_period',
+]
+
+# kwargs to copy straight over to TaskTemplate
+TASK_TEMPLATE_KWARGS = [
+ 'resources',
+ 'restart_policy',
+]
+
+# kwargs to copy straight over to create_service
+CREATE_SERVICE_KWARGS = [
+ 'name',
+ 'labels',
+ 'mode',
+ 'update_config',
+ 'networks',
+ 'endpoint_spec',
+]
+
+
+def _get_create_service_kwargs(func_name, kwargs):
+ # Copy over things which can be copied directly
+ create_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in CREATE_SERVICE_KWARGS:
+ create_kwargs[key] = kwargs.pop(key)
+ container_spec_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in CONTAINER_SPEC_KWARGS:
+ container_spec_kwargs[key] = kwargs.pop(key)
+ task_template_kwargs = {}
+ for key in copy.copy(kwargs):
+ if key in TASK_TEMPLATE_KWARGS:
+ task_template_kwargs[key] = kwargs.pop(key)
+
+ if 'container_labels' in kwargs:
+ container_spec_kwargs['labels'] = kwargs.pop('container_labels')
+
+ if 'constraints' in kwargs:
+ task_template_kwargs['placement'] = {
+ 'Constraints': kwargs.pop('constraints')
+ }
+
+ if 'log_driver' in kwargs:
+ task_template_kwargs['log_driver'] = {
+ 'Name': kwargs.pop('log_driver'),
+ 'Options': kwargs.pop('log_driver_options', {})
+ }
+
+ # All kwargs should have been consumed by this point, so raise
+ # error if any are left
+ if kwargs:
+ raise create_unexpected_kwargs_error(func_name, kwargs)
+
+ container_spec = ContainerSpec(**container_spec_kwargs)
+ task_template_kwargs['container_spec'] = container_spec
+ create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
+ return create_kwargs
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
new file mode 100644
index 0000000..38c1e9f
--- /dev/null
+++ b/docker/models/swarm.py
@@ -0,0 +1,146 @@
+from docker.api import APIClient
+from docker.errors import APIError
+from docker.types import SwarmSpec
+from .resource import Model
+
+
+class Swarm(Model):
+ """
+ The server's Swarm state. This is a singleton that must be reloaded to get
+ the current state of the Swarm.
+ """
+ def __init__(self, *args, **kwargs):
+ super(Swarm, self).__init__(*args, **kwargs)
+ if self.client:
+ try:
+ self.reload()
+ except APIError as e:
+ if e.response.status_code != 406:
+ raise
+
+ @property
+ def version(self):
+ """
+ The version number of the swarm. If this is not the same as the
+ server, the :py:meth:`update` function will not work and you will
+ need to call :py:meth:`reload` before calling it again.
+ """
+ return self.attrs.get('Version').get('Index')
+
+ def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
+ force_new_cluster=False, swarm_spec=None, **kwargs):
+ """
+ Initialize a new swarm on this Engine.
+
+ Args:
+ advertise_addr (str): Externally reachable address advertised to
+ other nodes. This can either be an address/port combination in
+ the form ``192.168.1.1:4567``, or an interface followed by a
+ port number, like ``eth0:4567``. If the port number is omitted,
+ the port number from the listen address is used.
+
+ If not specified, it will be automatically detected when
+ possible.
+ listen_addr (str): Listen address used for inter-manager
+ communication, as well as determining the networking interface
+ used for the VXLAN Tunnel Endpoint (VTEP). This can either be
+ an address/port combination in the form ``192.168.1.1:4567``,
+ or an interface followed by a port number, like ``eth0:4567``.
+ If the port number is omitted, the default swarm listening port
+ is used. Default: ``0.0.0.0:2377``
+ force_new_cluster (bool): Force creating a new Swarm, even if
+ already part of one. Default: False
+ task_history_retention_limit (int): Maximum number of tasks
+ history stored.
+ snapshot_interval (int): Number of logs entries between snapshot.
+ keep_old_snapshots (int): Number of snapshots to keep beyond the
+ current snapshot.
+ log_entries_for_slow_followers (int): Number of log entries to
+ keep around to sync up slow followers after a snapshot is
+ created.
+ heartbeat_tick (int): Amount of ticks (in seconds) between each
+ heartbeat.
+ election_tick (int): Amount of ticks (in seconds) needed without a
+ leader to trigger a new election.
+ dispatcher_heartbeat_period (int): The delay for an agent to send
+ a heartbeat to the dispatcher.
+ node_cert_expiry (int): Automatic expiry for nodes certificates.
+ external_ca (dict): Configuration for forwarding signing requests
+ to an external certificate authority. Use
+ ``docker.types.SwarmExternalCA``.
+ name (string): Swarm's name
+
+ Returns:
+ ``True`` if the request went through.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> client.swarm.init(
+ advertise_addr='eth0', listen_addr='0.0.0.0:5000',
+ force_new_cluster=False, snapshot_interval=5000,
+ log_entries_for_slow_followers=1200
+ )
+
+ """
+ init_kwargs = {}
+ for arg in ['advertise_addr', 'listen_addr', 'force_new_cluster']:
+ if arg in kwargs:
+ init_kwargs[arg] = kwargs[arg]
+ del kwargs[arg]
+ init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)
+ self.client.api.init_swarm(**init_kwargs)
+ self.reload()
+
+ def join(self, *args, **kwargs):
+ return self.client.api.join_swarm(*args, **kwargs)
+ join.__doc__ = APIClient.join_swarm.__doc__
+
+ def leave(self, *args, **kwargs):
+ return self.client.api.leave_swarm(*args, **kwargs)
+ leave.__doc__ = APIClient.leave_swarm.__doc__
+
+ def reload(self):
+ """
+ Inspect the swarm on the server and store the response in
+ :py:attr:`attrs`.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.attrs = self.client.api.inspect_swarm()
+
+ def update(self, rotate_worker_token=False, rotate_manager_token=False,
+ **kwargs):
+ """
+ Update the swarm's configuration.
+
+ It takes the same arguments as :py:meth:`init`, except
+ ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In
+ addition, it takes these arguments:
+
+ Args:
+ rotate_worker_token (bool): Rotate the worker join token. Default:
+ ``False``.
+ rotate_manager_token (bool): Rotate the manager join token.
+ Default: ``False``.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ """
+ # this seems to have to be set
+ if kwargs.get('node_cert_expiry') is None:
+ kwargs['node_cert_expiry'] = 7776000000000000
+
+ return self.client.api.update_swarm(
+ version=self.version,
+ swarm_spec=SwarmSpec(**kwargs),
+ rotate_worker_token=rotate_worker_token,
+ rotate_manager_token=rotate_manager_token
+ )
diff --git a/docker/models/volumes.py b/docker/models/volumes.py
new file mode 100644
index 0000000..5a31541
--- /dev/null
+++ b/docker/models/volumes.py
@@ -0,0 +1,84 @@
+from .resource import Model, Collection
+
+
+class Volume(Model):
+ """A volume."""
+ id_attribute = 'Name'
+
+ @property
+ def name(self):
+ """The name of the volume."""
+ return self.attrs['Name']
+
+ def remove(self):
+ """Remove this volume."""
+ return self.client.api.remove_volume(self.id)
+
+
+class VolumeCollection(Collection):
+ """Volumes on the Docker server."""
+ model = Volume
+
+ def create(self, name, **kwargs):
+ """
+ Create a volume.
+
+ Args:
+ name (str): Name of the volume
+ driver (str): Name of the driver used to create the volume
+ driver_opts (dict): Driver options as a key-value dictionary
+ labels (dict): Labels to set on the volume
+
+ Returns:
+ (:py:class:`Volume`): The volume created.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> volume = client.volumes.create(name='foobar', driver='local',
+ driver_opts={'foo': 'bar', 'baz': 'false'},
+ labels={"key": "value"})
+
+ """
+ obj = self.client.api.create_volume(name, **kwargs)
+ return self.prepare_model(obj)
+
+ def get(self, volume_id):
+ """
+ Get a volume.
+
+ Args:
+ volume_id (str): Volume name.
+
+ Returns:
+ (:py:class:`Volume`): The volume.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the volume does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_volume(volume_id))
+
+ def list(self, **kwargs):
+ """
+ List volumes. Similar to the ``docker volume ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Volume`): The volumes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.volumes(**kwargs)
+ if not resp.get('Volumes'):
+ return []
+ return [self.prepare_model(obj) for obj in resp['Volumes']]
diff --git a/docker/ssladapter/__init__.py b/docker/ssladapter/__init__.py
deleted file mode 100644
index 31b8966..0000000
--- a/docker/ssladapter/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .ssladapter import SSLAdapter # flake8: noqa
diff --git a/docker/tls.py b/docker/tls.py
index 18c7259..6488bbc 100644
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -2,10 +2,24 @@ import os
import ssl
from . import errors
-from .ssladapter import ssladapter
+from .transport import SSLAdapter
class TLSConfig(object):
+ """
+ TLS configuration.
+
+ Args:
+ client_cert (tuple of str): Path to client cert, path to client key.
+ ca_cert (str): Path to CA cert file.
+ verify (bool or str): This can be ``False`` or a path to a CA cert
+ file.
+ ssl_version (int): A valid `SSL version`_.
+ assert_hostname (bool): Verify the hostname of the server.
+
+ .. _`SSL version`:
+ https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
+ """
cert = None
ca_cert = None
verify = None
@@ -58,6 +72,9 @@ class TLSConfig(object):
)
def configure_client(self, client):
+ """
+ Configure a client with these TLS options.
+ """
client.ssl_version = self.ssl_version
if self.verify and self.ca_cert:
@@ -68,7 +85,7 @@ class TLSConfig(object):
if self.cert:
client.cert = self.cert
- client.mount('https://', ssladapter.SSLAdapter(
+ client.mount('https://', SSLAdapter(
ssl_version=self.ssl_version,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
diff --git a/docker/transport/__init__.py b/docker/transport/__init__.py
index d5560b6..abbee18 100644
--- a/docker/transport/__init__.py
+++ b/docker/transport/__init__.py
@@ -1,5 +1,6 @@
# flake8: noqa
from .unixconn import UnixAdapter
+from .ssladapter import SSLAdapter
try:
from .npipeconn import NpipeAdapter
from .npipesocket import NpipeSocket
diff --git a/docker/ssladapter/ssladapter.py b/docker/transport/ssladapter.py
index 31f45fc..31f45fc 100644
--- a/docker/ssladapter/ssladapter.py
+++ b/docker/transport/ssladapter.py
diff --git a/docker/types/__init__.py b/docker/types/__init__.py
index 71c0c97..7230723 100644
--- a/docker/types/__init__.py
+++ b/docker/types/__init__.py
@@ -1,5 +1,7 @@
# flake8: noqa
-from .containers import LogConfig, Ulimit
+from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
+from .healthcheck import Healthcheck
+from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
from .services import (
ContainerSpec, DriverConfig, EndpointSpec, Mount, Resources, RestartPolicy,
TaskTemplate, UpdateConfig
diff --git a/docker/types/containers.py b/docker/types/containers.py
index 40a44ca..8fdecb3 100644
--- a/docker/types/containers.py
+++ b/docker/types/containers.py
@@ -1,6 +1,14 @@
import six
+import warnings
+from .. import errors
+from ..utils.utils import (
+ convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
+ format_environment, normalize_links, parse_bytes, parse_devices,
+ split_command, version_gte, version_lt,
+)
from .base import DictType
+from .healthcheck import Healthcheck
class LogConfigTypesEnum(object):
@@ -90,3 +98,485 @@ class Ulimit(DictType):
@hard.setter
def hard(self, value):
self['Hard'] = value
+
+
+class HostConfig(dict):
+ def __init__(self, version, binds=None, port_bindings=None,
+ lxc_conf=None, publish_all_ports=False, links=None,
+ privileged=False, dns=None, dns_search=None,
+ volumes_from=None, network_mode=None, restart_policy=None,
+ cap_add=None, cap_drop=None, devices=None, extra_hosts=None,
+ read_only=None, pid_mode=None, ipc_mode=None,
+ security_opt=None, ulimits=None, log_config=None,
+ mem_limit=None, memswap_limit=None, mem_reservation=None,
+ kernel_memory=None, mem_swappiness=None, cgroup_parent=None,
+ group_add=None, cpu_quota=None, cpu_period=None,
+ blkio_weight=None, blkio_weight_device=None,
+ device_read_bps=None, device_write_bps=None,
+ device_read_iops=None, device_write_iops=None,
+ oom_kill_disable=False, shm_size=None, sysctls=None,
+ tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
+ cpuset_cpus=None, userns_mode=None, pids_limit=None,
+ isolation=None):
+
+ if mem_limit is not None:
+ self['Memory'] = parse_bytes(mem_limit)
+
+ if memswap_limit is not None:
+ self['MemorySwap'] = parse_bytes(memswap_limit)
+
+ if mem_reservation:
+ if version_lt(version, '1.21'):
+ raise host_config_version_error('mem_reservation', '1.21')
+
+ self['MemoryReservation'] = parse_bytes(mem_reservation)
+
+ if kernel_memory:
+ if version_lt(version, '1.21'):
+ raise host_config_version_error('kernel_memory', '1.21')
+
+ self['KernelMemory'] = parse_bytes(kernel_memory)
+
+ if mem_swappiness is not None:
+ if version_lt(version, '1.20'):
+ raise host_config_version_error('mem_swappiness', '1.20')
+ if not isinstance(mem_swappiness, int):
+ raise host_config_type_error(
+ 'mem_swappiness', mem_swappiness, 'int'
+ )
+
+ self['MemorySwappiness'] = mem_swappiness
+
+ if shm_size is not None:
+ if isinstance(shm_size, six.string_types):
+ shm_size = parse_bytes(shm_size)
+
+ self['ShmSize'] = shm_size
+
+ if pid_mode:
+ if version_lt(version, '1.24') and pid_mode != 'host':
+ raise host_config_value_error('pid_mode', pid_mode)
+ self['PidMode'] = pid_mode
+
+ if ipc_mode:
+ self['IpcMode'] = ipc_mode
+
+ if privileged:
+ self['Privileged'] = privileged
+
+ if oom_kill_disable:
+ if version_lt(version, '1.20'):
+                raise host_config_version_error('oom_kill_disable', '1.20')
+
+ self['OomKillDisable'] = oom_kill_disable
+
+ if oom_score_adj:
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('oom_score_adj', '1.22')
+ if not isinstance(oom_score_adj, int):
+ raise host_config_type_error(
+ 'oom_score_adj', oom_score_adj, 'int'
+ )
+ self['OomScoreAdj'] = oom_score_adj
+
+ if publish_all_ports:
+ self['PublishAllPorts'] = publish_all_ports
+
+ if read_only is not None:
+ self['ReadonlyRootfs'] = read_only
+
+ if dns_search:
+ self['DnsSearch'] = dns_search
+
+ if network_mode:
+ self['NetworkMode'] = network_mode
+ elif network_mode is None and version_gte(version, '1.20'):
+ self['NetworkMode'] = 'default'
+
+ if restart_policy:
+ if not isinstance(restart_policy, dict):
+ raise host_config_type_error(
+ 'restart_policy', restart_policy, 'dict'
+ )
+
+ self['RestartPolicy'] = restart_policy
+
+ if cap_add:
+ self['CapAdd'] = cap_add
+
+ if cap_drop:
+ self['CapDrop'] = cap_drop
+
+ if devices:
+ self['Devices'] = parse_devices(devices)
+
+ if group_add:
+ if version_lt(version, '1.20'):
+ raise host_config_version_error('group_add', '1.20')
+
+ self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
+
+ if dns is not None:
+ self['Dns'] = dns
+
+ if dns_opt is not None:
+ if version_lt(version, '1.21'):
+ raise host_config_version_error('dns_opt', '1.21')
+
+ self['DnsOptions'] = dns_opt
+
+ if security_opt is not None:
+ if not isinstance(security_opt, list):
+ raise host_config_type_error(
+ 'security_opt', security_opt, 'list'
+ )
+
+ self['SecurityOpt'] = security_opt
+
+ if sysctls:
+ if not isinstance(sysctls, dict):
+ raise host_config_type_error('sysctls', sysctls, 'dict')
+ self['Sysctls'] = {}
+ for k, v in six.iteritems(sysctls):
+ self['Sysctls'][k] = six.text_type(v)
+
+ if volumes_from is not None:
+ if isinstance(volumes_from, six.string_types):
+ volumes_from = volumes_from.split(',')
+
+ self['VolumesFrom'] = volumes_from
+
+ if binds is not None:
+ self['Binds'] = convert_volume_binds(binds)
+
+ if port_bindings is not None:
+ self['PortBindings'] = convert_port_bindings(port_bindings)
+
+ if extra_hosts is not None:
+ if isinstance(extra_hosts, dict):
+ extra_hosts = [
+ '{0}:{1}'.format(k, v)
+ for k, v in sorted(six.iteritems(extra_hosts))
+ ]
+
+ self['ExtraHosts'] = extra_hosts
+
+ if links is not None:
+ self['Links'] = normalize_links(links)
+
+ if isinstance(lxc_conf, dict):
+ formatted = []
+ for k, v in six.iteritems(lxc_conf):
+ formatted.append({'Key': k, 'Value': str(v)})
+ lxc_conf = formatted
+
+ if lxc_conf is not None:
+ self['LxcConf'] = lxc_conf
+
+ if cgroup_parent is not None:
+ self['CgroupParent'] = cgroup_parent
+
+ if ulimits is not None:
+ if not isinstance(ulimits, list):
+ raise host_config_type_error('ulimits', ulimits, 'list')
+ self['Ulimits'] = []
+ for l in ulimits:
+ if not isinstance(l, Ulimit):
+ l = Ulimit(**l)
+ self['Ulimits'].append(l)
+
+ if log_config is not None:
+ if not isinstance(log_config, LogConfig):
+ if not isinstance(log_config, dict):
+ raise host_config_type_error(
+ 'log_config', log_config, 'LogConfig'
+ )
+ log_config = LogConfig(**log_config)
+
+ self['LogConfig'] = log_config
+
+ if cpu_quota:
+ if not isinstance(cpu_quota, int):
+ raise host_config_type_error('cpu_quota', cpu_quota, 'int')
+ if version_lt(version, '1.19'):
+ raise host_config_version_error('cpu_quota', '1.19')
+
+ self['CpuQuota'] = cpu_quota
+
+ if cpu_period:
+ if not isinstance(cpu_period, int):
+ raise host_config_type_error('cpu_period', cpu_period, 'int')
+ if version_lt(version, '1.19'):
+ raise host_config_version_error('cpu_period', '1.19')
+
+ self['CpuPeriod'] = cpu_period
+
+ if cpu_shares:
+ if version_lt(version, '1.18'):
+ raise host_config_version_error('cpu_shares', '1.18')
+
+ if not isinstance(cpu_shares, int):
+ raise host_config_type_error('cpu_shares', cpu_shares, 'int')
+
+ self['CpuShares'] = cpu_shares
+
+ if cpuset_cpus:
+ if version_lt(version, '1.18'):
+ raise host_config_version_error('cpuset_cpus', '1.18')
+
+ self['CpuSetCpus'] = cpuset_cpus
+
+ if blkio_weight:
+ if not isinstance(blkio_weight, int):
+ raise host_config_type_error(
+ 'blkio_weight', blkio_weight, 'int'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('blkio_weight', '1.22')
+ self["BlkioWeight"] = blkio_weight
+
+ if blkio_weight_device:
+ if not isinstance(blkio_weight_device, list):
+ raise host_config_type_error(
+ 'blkio_weight_device', blkio_weight_device, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('blkio_weight_device', '1.22')
+ self["BlkioWeightDevice"] = blkio_weight_device
+
+ if device_read_bps:
+ if not isinstance(device_read_bps, list):
+ raise host_config_type_error(
+ 'device_read_bps', device_read_bps, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_read_bps', '1.22')
+ self["BlkioDeviceReadBps"] = device_read_bps
+
+ if device_write_bps:
+ if not isinstance(device_write_bps, list):
+ raise host_config_type_error(
+ 'device_write_bps', device_write_bps, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_write_bps', '1.22')
+ self["BlkioDeviceWriteBps"] = device_write_bps
+
+ if device_read_iops:
+ if not isinstance(device_read_iops, list):
+ raise host_config_type_error(
+ 'device_read_iops', device_read_iops, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_read_iops', '1.22')
+ self["BlkioDeviceReadIOps"] = device_read_iops
+
+ if device_write_iops:
+ if not isinstance(device_write_iops, list):
+ raise host_config_type_error(
+ 'device_write_iops', device_write_iops, 'list'
+ )
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('device_write_iops', '1.22')
+ self["BlkioDeviceWriteIOps"] = device_write_iops
+
+ if tmpfs:
+ if version_lt(version, '1.22'):
+ raise host_config_version_error('tmpfs', '1.22')
+ self["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
+
+ if userns_mode:
+ if version_lt(version, '1.23'):
+ raise host_config_version_error('userns_mode', '1.23')
+
+ if userns_mode != "host":
+ raise host_config_value_error("userns_mode", userns_mode)
+ self['UsernsMode'] = userns_mode
+
+ if pids_limit:
+ if not isinstance(pids_limit, int):
+ raise host_config_type_error('pids_limit', pids_limit, 'int')
+ if version_lt(version, '1.23'):
+ raise host_config_version_error('pids_limit', '1.23')
+ self["PidsLimit"] = pids_limit
+
+ if isolation:
+ if not isinstance(isolation, six.string_types):
+ raise host_config_type_error('isolation', isolation, 'string')
+ if version_lt(version, '1.24'):
+ raise host_config_version_error('isolation', '1.24')
+ self['Isolation'] = isolation
+
+
+def host_config_type_error(param, param_value, expected):
+ error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
+ return TypeError(error_msg.format(param, expected, type(param_value)))
+
+
+def host_config_version_error(param, version, less_than=True):
+ operator = '<' if less_than else '>'
+ error_msg = '{0} param is not supported in API versions {1} {2}'
+ return errors.InvalidVersion(error_msg.format(param, operator, version))
+
+
+def host_config_value_error(param, param_value):
+ error_msg = 'Invalid value for {0} param: {1}'
+ return ValueError(error_msg.format(param, param_value))
+
+
+class ContainerConfig(dict):
+ def __init__(
+ self, version, image, command, hostname=None, user=None, detach=False,
+ stdin_open=False, tty=False, mem_limit=None, ports=None, dns=None,
+ environment=None, volumes=None, volumes_from=None,
+ network_disabled=False, entrypoint=None, cpu_shares=None,
+ working_dir=None, domainname=None, memswap_limit=None, cpuset=None,
+ host_config=None, mac_address=None, labels=None, volume_driver=None,
+ stop_signal=None, networking_config=None, healthcheck=None,
+ ):
+ if isinstance(command, six.string_types):
+ command = split_command(command)
+
+ if isinstance(entrypoint, six.string_types):
+ entrypoint = split_command(entrypoint)
+
+ if isinstance(environment, dict):
+ environment = format_environment(environment)
+
+ if labels is not None and version_lt(version, '1.18'):
+ raise errors.InvalidVersion(
+ 'labels were only introduced in API version 1.18'
+ )
+
+ if cpuset is not None or cpu_shares is not None:
+ if version_gte(version, '1.18'):
+ warnings.warn(
+ 'The cpuset_cpus and cpu_shares options have been moved to'
+ ' host_config in API version 1.18, and will be removed',
+ DeprecationWarning
+ )
+
+ if stop_signal is not None and version_lt(version, '1.21'):
+ raise errors.InvalidVersion(
+ 'stop_signal was only introduced in API version 1.21'
+ )
+
+ if healthcheck is not None and version_lt(version, '1.24'):
+ raise errors.InvalidVersion(
+ 'Health options were only introduced in API version 1.24'
+ )
+
+ if version_lt(version, '1.19'):
+ if volume_driver is not None:
+ raise errors.InvalidVersion(
+ 'Volume drivers were only introduced in API version 1.19'
+ )
+ mem_limit = mem_limit if mem_limit is not None else 0
+ memswap_limit = memswap_limit if memswap_limit is not None else 0
+ else:
+ if mem_limit is not None:
+ raise errors.InvalidVersion(
+ 'mem_limit has been moved to host_config in API version'
+ ' 1.19'
+ )
+
+ if memswap_limit is not None:
+ raise errors.InvalidVersion(
+ 'memswap_limit has been moved to host_config in API '
+ 'version 1.19'
+ )
+
+ if isinstance(labels, list):
+ labels = dict((lbl, six.text_type('')) for lbl in labels)
+
+ if mem_limit is not None:
+ mem_limit = parse_bytes(mem_limit)
+
+ if memswap_limit is not None:
+ memswap_limit = parse_bytes(memswap_limit)
+
+ if isinstance(ports, list):
+ exposed_ports = {}
+ for port_definition in ports:
+ port = port_definition
+ proto = 'tcp'
+ if isinstance(port_definition, tuple):
+ if len(port_definition) == 2:
+ proto = port_definition[1]
+ port = port_definition[0]
+ exposed_ports['{0}/{1}'.format(port, proto)] = {}
+ ports = exposed_ports
+
+ if isinstance(volumes, six.string_types):
+ volumes = [volumes, ]
+
+ if isinstance(volumes, list):
+ volumes_dict = {}
+ for vol in volumes:
+ volumes_dict[vol] = {}
+ volumes = volumes_dict
+
+ if volumes_from:
+ if not isinstance(volumes_from, six.string_types):
+ volumes_from = ','.join(volumes_from)
+ else:
+ # Force None, an empty list or dict causes client.start to fail
+ volumes_from = None
+
+ if healthcheck and isinstance(healthcheck, dict):
+ healthcheck = Healthcheck(**healthcheck)
+
+ attach_stdin = False
+ attach_stdout = False
+ attach_stderr = False
+ stdin_once = False
+
+ if not detach:
+ attach_stdout = True
+ attach_stderr = True
+
+ if stdin_open:
+ attach_stdin = True
+ stdin_once = True
+
+ if version_gte(version, '1.10'):
+ message = ('{0!r} parameter has no effect on create_container().'
+ ' It has been moved to host_config')
+ if dns is not None:
+ raise errors.InvalidVersion(message.format('dns'))
+ if volumes_from is not None:
+ raise errors.InvalidVersion(message.format('volumes_from'))
+
+ self.update({
+ 'Hostname': hostname,
+ 'Domainname': domainname,
+ 'ExposedPorts': ports,
+ 'User': six.text_type(user) if user else None,
+ 'Tty': tty,
+ 'OpenStdin': stdin_open,
+ 'StdinOnce': stdin_once,
+ 'Memory': mem_limit,
+ 'AttachStdin': attach_stdin,
+ 'AttachStdout': attach_stdout,
+ 'AttachStderr': attach_stderr,
+ 'Env': environment,
+ 'Cmd': command,
+ 'Dns': dns,
+ 'Image': image,
+ 'Volumes': volumes,
+ 'VolumesFrom': volumes_from,
+ 'NetworkDisabled': network_disabled,
+ 'Entrypoint': entrypoint,
+ 'CpuShares': cpu_shares,
+ 'Cpuset': cpuset,
+ 'CpusetCpus': cpuset,
+ 'WorkingDir': working_dir,
+ 'MemorySwap': memswap_limit,
+ 'HostConfig': host_config,
+ 'NetworkingConfig': networking_config,
+ 'MacAddress': mac_address,
+ 'Labels': labels,
+ 'VolumeDriver': volume_driver,
+ 'StopSignal': stop_signal,
+ 'Healthcheck': healthcheck,
+ })
diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py
new file mode 100644
index 0000000..ba63d21
--- /dev/null
+++ b/docker/types/healthcheck.py
@@ -0,0 +1,53 @@
+from .base import DictType
+
+import six
+
+
+class Healthcheck(DictType):
+ def __init__(self, **kwargs):
+ test = kwargs.get('test', kwargs.get('Test'))
+ if isinstance(test, six.string_types):
+ test = ["CMD-SHELL", test]
+
+ interval = kwargs.get('interval', kwargs.get('Interval'))
+ timeout = kwargs.get('timeout', kwargs.get('Timeout'))
+ retries = kwargs.get('retries', kwargs.get('Retries'))
+
+ super(Healthcheck, self).__init__({
+ 'Test': test,
+ 'Interval': interval,
+ 'Timeout': timeout,
+ 'Retries': retries
+ })
+
+ @property
+ def test(self):
+ return self['Test']
+
+ @test.setter
+ def test(self, value):
+ self['Test'] = value
+
+ @property
+ def interval(self):
+ return self['Interval']
+
+ @interval.setter
+ def interval(self, value):
+ self['Interval'] = value
+
+ @property
+ def timeout(self):
+ return self['Timeout']
+
+ @timeout.setter
+ def timeout(self, value):
+ self['Timeout'] = value
+
+ @property
+ def retries(self):
+ return self['Retries']
+
+ @retries.setter
+ def retries(self, value):
+ self['Retries'] = value
diff --git a/docker/types/networks.py b/docker/types/networks.py
new file mode 100644
index 0000000..a539ac0
--- /dev/null
+++ b/docker/types/networks.py
@@ -0,0 +1,104 @@
+from .. import errors
+from ..utils import normalize_links, version_lt
+
+
+class EndpointConfig(dict):
+ def __init__(self, version, aliases=None, links=None, ipv4_address=None,
+ ipv6_address=None, link_local_ips=None):
+ if version_lt(version, '1.22'):
+ raise errors.InvalidVersion(
+ 'Endpoint config is not supported for API version < 1.22'
+ )
+
+ if aliases:
+ self["Aliases"] = aliases
+
+ if links:
+ self["Links"] = normalize_links(links)
+
+ ipam_config = {}
+ if ipv4_address:
+ ipam_config['IPv4Address'] = ipv4_address
+
+ if ipv6_address:
+ ipam_config['IPv6Address'] = ipv6_address
+
+ if link_local_ips is not None:
+ if version_lt(version, '1.24'):
+ raise errors.InvalidVersion(
+ 'link_local_ips is not supported for API version < 1.24'
+ )
+ ipam_config['LinkLocalIPs'] = link_local_ips
+
+ if ipam_config:
+ self['IPAMConfig'] = ipam_config
+
+
+class NetworkingConfig(dict):
+ def __init__(self, endpoints_config=None):
+ if endpoints_config:
+ self["EndpointsConfig"] = endpoints_config
+
+
+class IPAMConfig(dict):
+ """
+ Create an IPAM (IP Address Management) config dictionary to be used with
+ :py:meth:`~docker.api.network.NetworkApiMixin.create_network`.
+
+ Args:
+
+ driver (str): The IPAM driver to use. Defaults to ``default``.
+ pool_configs (list): A list of pool configurations
+ (:py:class:`~docker.types.IPAMPool`). Defaults to empty list.
+
+ Example:
+
+ >>> ipam_config = docker.types.IPAMConfig(driver='default')
+ >>> network = client.create_network('network1', ipam=ipam_config)
+
+ """
+ def __init__(self, driver='default', pool_configs=None):
+ self.update({
+ 'Driver': driver,
+ 'Config': pool_configs or []
+ })
+
+
+class IPAMPool(dict):
+ """
+ Create an IPAM pool config dictionary to be added to the
+ ``pool_configs`` parameter of
+ :py:class:`~docker.types.IPAMConfig`.
+
+ Args:
+
+ subnet (str): Custom subnet for this IPAM pool using the CIDR
+ notation. Defaults to ``None``.
+ iprange (str): Custom IP range for endpoints in this IPAM pool using
+ the CIDR notation. Defaults to ``None``.
+ gateway (str): Custom IP address for the pool's gateway.
+ aux_addresses (dict): A dictionary of ``key -> ip_address``
+ relationships specifying auxiliary addresses that need to be
+ allocated by the IPAM driver.
+
+ Example:
+
+ >>> ipam_pool = docker.types.IPAMPool(
+ subnet='124.42.0.0/16',
+ iprange='124.42.0.0/24',
+ gateway='124.42.0.254',
+ aux_addresses={
+ 'reserved1': '124.42.1.1'
+ }
+ )
+ >>> ipam_config = docker.types.IPAMConfig(
+ pool_configs=[ipam_pool])
+ """
+ def __init__(self, subnet=None, iprange=None, gateway=None,
+ aux_addresses=None):
+ self.update({
+ 'Subnet': subnet,
+ 'IPRange': iprange,
+ 'Gateway': gateway,
+ 'AuxiliaryAddresses': aux_addresses
+ })
diff --git a/docker/types/services.py b/docker/types/services.py
index 0ede776..5041f89 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -1,9 +1,26 @@
import six
from .. import errors
+from ..utils import format_environment, split_command
class TaskTemplate(dict):
+ """
+ Describe the task specification to be used when creating or updating a
+ service.
+
+ Args:
+
+ container_spec (ContainerSpec): Container settings for containers
+ started as part of this task.
+ log_driver (DriverConfig): Log configuration for containers created as
+ part of the service.
+ resources (Resources): Resource requirements which apply to each
+ individual container created as part of the service.
+ restart_policy (RestartPolicy): Specification for the restart policy
+ which applies to containers created as part of this service.
+ placement (list): A list of constraints.
+ """
def __init__(self, container_spec, resources=None, restart_policy=None,
placement=None, log_driver=None):
self['ContainerSpec'] = container_spec
@@ -36,11 +53,27 @@ class TaskTemplate(dict):
class ContainerSpec(dict):
+ """
+ Describes the behavior of containers that are part of a task, and is used
+ when declaring a :py:class:`~docker.types.TaskTemplate`.
+
+ Args:
+
+ image (string): The image name to use for the container.
+ command (string or list): The command to be run in the image.
+ args (list): Arguments to the command.
+ env (dict): Environment variables.
+ dir (string): The working directory for commands to run in.
+ user (string): The user inside the container.
+ labels (dict): A map of labels to associate with the service.
+ mounts (list): A list of specifications for mounts to be added to
+ containers created as part of the service. See the
+ :py:class:`~docker.types.Mount` class for details.
+ stop_grace_period (int): Amount of time to wait for the container to
+ terminate before forcefully killing it.
+ """
def __init__(self, image, command=None, args=None, env=None, workdir=None,
user=None, labels=None, mounts=None, stop_grace_period=None):
- from ..utils import split_command # FIXME: circular import
- from ..utils import format_environment # FIXME: circular import
-
self['Image'] = image
if isinstance(command, six.string_types):
@@ -70,6 +103,28 @@ class ContainerSpec(dict):
class Mount(dict):
+ """
+ Describes a mounted folder's configuration inside a container. A list of
+ ``Mount``s would be used as part of a
+ :py:class:`~docker.types.ContainerSpec`.
+
+ Args:
+
+ target (string): Container path.
+ source (string): Mount source (e.g. a volume name or a host path).
+ type (string): The mount type (``bind`` or ``volume``).
+ Default: ``volume``.
+ read_only (bool): Whether the mount should be read-only.
+ propagation (string): A propagation mode with the value ``[r]private``,
+ ``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.
+ no_copy (bool): False if the volume should be populated with the data
+ from the target. Default: ``False``. Only valid for the ``volume``
+ type.
+ labels (dict): User-defined name and labels for the volume. Only valid
+ for the ``volume`` type.
+ driver_config (DriverConfig): Volume driver configuration. Only valid
+ for the ``volume`` type.
+ """
def __init__(self, target, source, type='volume', read_only=False,
propagation=None, no_copy=False, labels=None,
driver_config=None):
@@ -98,7 +153,7 @@ class Mount(dict):
if labels:
volume_opts['Labels'] = labels
if driver_config:
- volume_opts['driver_config'] = driver_config
+ volume_opts['DriverConfig'] = driver_config
if volume_opts:
self['VolumeOptions'] = volume_opts
if propagation:
@@ -124,6 +179,17 @@ class Mount(dict):
class Resources(dict):
+ """
+ Configures resource allocation for containers when made part of a
+ :py:class:`~docker.types.ContainerSpec`.
+
+ Args:
+
+ cpu_limit (int): CPU limit in units of 10^9 CPU shares.
+ mem_limit (int): Memory limit in Bytes.
+ cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
+ mem_reservation (int): Memory reservation in Bytes.
+ """
def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
mem_reservation=None):
limits = {}
@@ -144,6 +210,19 @@ class Resources(dict):
class UpdateConfig(dict):
+ """
+
+ Used to specify the way container updates should be performed by a service.
+
+ Args:
+
+ parallelism (int): Maximum number of tasks to be updated in one
+ iteration (0 means unlimited parallelism). Default: 0.
+ delay (int): Amount of time between updates.
+ failure_action (string): Action to take if an updated task fails to
+ run, or stops running during the update. Acceptable values are
+ ``continue`` and ``pause``. Default: ``continue``
+ """
def __init__(self, parallelism=0, delay=None, failure_action='continue'):
self['Parallelism'] = parallelism
if delay is not None:
@@ -165,6 +244,21 @@ class RestartConditionTypesEnum(object):
class RestartPolicy(dict):
+ """
+ Used when creating a :py:class:`~docker.types.ContainerSpec`,
+ dictates whether a container should restart after stopping or failing.
+
+ Args:
+
+ condition (string): Condition for restart (``none``, ``on-failure``,
+            or ``any``). Default: ``none``.
+ delay (int): Delay between restart attempts. Default: 0
+ attempts (int): Maximum attempts to restart a given container before
+ giving up. Default value is 0, which is ignored.
+ window (int): Time window used to evaluate the restart policy. Default
+ value is 0, which is unbounded.
+ """
+
condition_types = RestartConditionTypesEnum
def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,
@@ -181,6 +275,17 @@ class RestartPolicy(dict):
class DriverConfig(dict):
+ """
+ Indicates which driver to use, as well as its configuration. Can be used
+ as ``log_driver`` in a :py:class:`~docker.types.ContainerSpec`,
+ and for the `driver_config` in a volume
+ :py:class:`~docker.types.Mount`.
+
+ Args:
+
+ name (string): Name of the driver to use.
+ options (dict): Driver-specific options. Default: ``None``.
+ """
def __init__(self, name, options=None):
self['Name'] = name
if options:
@@ -188,6 +293,19 @@ class DriverConfig(dict):
class EndpointSpec(dict):
+ """
+ Describes properties to access and load-balance a service.
+
+ Args:
+
+ mode (string): The mode of resolution to use for internal load
+ balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to
+ ``'vip'`` if not provided.
+ ports (dict): Exposed ports that this service is accessible on from the
+ outside, in the form of ``{ target_port: published_port }`` or
+ ``{ target_port: (published_port, protocol) }``. Ports can only be
+ provided if the ``vip`` resolution mode is used.
+ """
def __init__(self, mode=None, ports=None):
if ports:
self['Ports'] = convert_service_ports(ports)
diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py
index e834505..747743c 100644
--- a/docker/utils/__init__.py
+++ b/docker/utils/__init__.py
@@ -3,12 +3,10 @@ from .utils import (
compare_version, convert_port_bindings, convert_volume_binds,
mkbuildcontext, tar, exclude_paths, parse_repository_tag, parse_host,
kwargs_from_env, convert_filters, datetime_to_timestamp,
- create_host_config, create_container_config, parse_bytes, ping_registry,
- parse_env_file, version_lt, version_gte, decode_json_header, split_command,
- create_ipam_config, create_ipam_pool, parse_devices, normalize_links,
- convert_service_networks, format_environment,
+ create_host_config, parse_bytes, ping_registry, parse_env_file, version_lt,
+ version_gte, decode_json_header, split_command, create_ipam_config,
+ create_ipam_pool, parse_devices, normalize_links, convert_service_networks,
+ format_environment
)
-from ..types import LogConfig, Ulimit
-from ..types import SwarmExternalCA, SwarmSpec
from .decorators import check_resource, minimum_version, update_headers
diff --git a/docker/utils/json_stream.py b/docker/utils/json_stream.py
new file mode 100644
index 0000000..f97ab9e
--- /dev/null
+++ b/docker/utils/json_stream.py
@@ -0,0 +1,79 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import json
+import json.decoder
+
+import six
+
+from ..errors import StreamParseError
+
+
+json_decoder = json.JSONDecoder()
+
+
+def stream_as_text(stream):
+ """Given a stream of bytes or text, if any of the items in the stream
+ are bytes convert them to text.
+ This function can be removed once docker-py returns text streams instead
+ of byte streams.
+ """
+ for data in stream:
+ if not isinstance(data, six.text_type):
+ data = data.decode('utf-8', 'replace')
+ yield data
+
+
+def json_splitter(buffer):
+ """Attempt to parse a json object from a buffer. If there is at least one
+ object, return it and the rest of the buffer, otherwise return None.
+ """
+ buffer = buffer.strip()
+ try:
+ obj, index = json_decoder.raw_decode(buffer)
+ rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
+ return obj, rest
+ except ValueError:
+ return None
+
+
+def json_stream(stream):
+ """Given a stream of text, return a stream of json objects.
+ This handles streams which are inconsistently buffered (some entries may
+ be newline delimited, and others are not).
+ """
+ return split_buffer(stream, json_splitter, json_decoder.decode)
+
+
+def line_splitter(buffer, separator=u'\n'):
+ index = buffer.find(six.text_type(separator))
+ if index == -1:
+ return None
+ return buffer[:index + 1], buffer[index + 1:]
+
+
+def split_buffer(stream, splitter=None, decoder=lambda a: a):
+ """Given a generator which yields strings and a splitter function,
+ joins all input, splits on the separator and yields each chunk.
+ Unlike string.split(), each chunk includes the trailing
+ separator, except for the last one if none was found on the end
+ of the input.
+ """
+ splitter = splitter or line_splitter
+ buffered = six.text_type('')
+
+ for data in stream_as_text(stream):
+ buffered += data
+ while True:
+ buffer_split = splitter(buffered)
+ if buffer_split is None:
+ break
+
+ item, buffered = buffer_split
+ yield item
+
+ if buffered:
+ try:
+ yield decoder(buffered)
+ except Exception as e:
+ raise StreamParseError(e)
diff --git a/docker/utils/ports/ports.py b/docker/utils/ports.py
index 326ef94..326ef94 100644
--- a/docker/utils/ports/ports.py
+++ b/docker/utils/ports.py
diff --git a/docker/utils/ports/__init__.py b/docker/utils/ports/__init__.py
deleted file mode 100644
index 485feec..0000000
--- a/docker/utils/ports/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .ports import (
- split_port,
- build_port_bindings
-) # flake8: noqa
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
index 164b845..4080f25 100644
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -69,7 +69,11 @@ def frames_iter(socket):
"""
Returns a generator of frames read from socket
"""
- n = next_frame_size(socket)
- while n > 0:
- yield read(socket, n)
+ while True:
n = next_frame_size(socket)
+ if n == 0:
+ break
+ while n > 0:
+ result = read(socket, n)
+ n -= len(result)
+ yield result
diff --git a/docker/utils/types.py b/docker/utils/types.py
deleted file mode 100644
index 8098c47..0000000
--- a/docker/utils/types.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Compatibility module. See https://github.com/docker/docker-py/issues/1196
-
-import warnings
-
-from ..types import Ulimit, LogConfig # flake8: noqa
-
-warnings.warn('docker.utils.types is now docker.types', ImportWarning)
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 97261cd..4e5f454 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -15,10 +15,8 @@ from fnmatch import fnmatch
import requests
import six
-from .. import constants
from .. import errors
from .. import tls
-from ..types import Ulimit, LogConfig
if six.PY2:
from urllib import splitnport
@@ -37,21 +35,18 @@ BYTE_UNITS = {
}
-def create_ipam_pool(subnet=None, iprange=None, gateway=None,
- aux_addresses=None):
- return {
- 'Subnet': subnet,
- 'IPRange': iprange,
- 'Gateway': gateway,
- 'AuxiliaryAddresses': aux_addresses
- }
+def create_ipam_pool(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_ipam_pool has been removed. Please use a '
+ 'docker.types.IPAMPool object instead.'
+ )
-def create_ipam_config(driver='default', pool_configs=None):
- return {
- 'Driver': driver,
- 'Config': pool_configs or []
- }
+def create_ipam_config(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_ipam_config has been removed. Please use a '
+ 'docker.types.IPAMConfig object instead.'
+ )
def mkbuildcontext(dockerfile):
@@ -163,6 +158,31 @@ def should_include(path, exclude_patterns, include_patterns):
return True
+def should_check_directory(directory_path, exclude_patterns, include_patterns):
+ """
+ Given a directory path, a list of exclude patterns, and a list of inclusion
+ patterns:
+
+ 1. Returns True if the directory path should be included according to
+ should_include.
+ 2. Returns True if the directory path is the prefix for an inclusion
+ pattern
+ 3. Returns False otherwise
+ """
+
+    # To account for exception rules, check directories if their path is a
+    # prefix to an inclusion pattern. This logic conforms with the current
+ # docker logic (2016-10-27):
+ # https://github.com/docker/docker/blob/bc52939b0455116ab8e0da67869ec81c1a1c3e2c/pkg/archive/archive.go#L640-L671
+
+ path_with_slash = directory_path + os.sep
+ possible_child_patterns = [pattern for pattern in include_patterns if
+ (pattern + os.sep).startswith(path_with_slash)]
+ directory_included = should_include(directory_path, exclude_patterns,
+ include_patterns)
+ return directory_included or len(possible_child_patterns) > 0
+
+
def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
paths = []
@@ -171,25 +191,13 @@ def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
if parent == '.':
parent = ''
- # If exception rules exist, we can't skip recursing into ignored
- # directories, as we need to look for exceptions in them.
- #
- # It may be possible to optimize this further for exception patterns
- # that *couldn't* match within ignored directores.
- #
- # This matches the current docker logic (as of 2015-11-24):
- # https://github.com/docker/docker/blob/37ba67bf636b34dc5c0c0265d62a089d0492088f/pkg/archive/archive.go#L555-L557
-
- if not has_exceptions:
-
- # Remove excluded patterns from the list of directories to traverse
- # by mutating the dirs we're iterating over.
- # This looks strange, but is considered the correct way to skip
- # traversal. See https://docs.python.org/2/library/os.html#os.walk
-
- dirs[:] = [d for d in dirs if
- should_include(os.path.join(parent, d),
- exclude_patterns, include_patterns)]
+ # Remove excluded patterns from the list of directories to traverse
+ # by mutating the dirs we're iterating over.
+ # This looks strange, but is considered the correct way to skip
+ # traversal. See https://docs.python.org/2/library/os.html#os.walk
+ dirs[:] = [d for d in dirs if
+ should_check_directory(os.path.join(parent, d),
+ exclude_patterns, include_patterns)]
for path in dirs:
if should_include(os.path.join(parent, path),
@@ -605,330 +613,6 @@ def parse_bytes(s):
return s
-def host_config_type_error(param, param_value, expected):
- error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
- return TypeError(error_msg.format(param, expected, type(param_value)))
-
-
-def host_config_version_error(param, version, less_than=True):
- operator = '<' if less_than else '>'
- error_msg = '{0} param is not supported in API versions {1} {2}'
- return errors.InvalidVersion(error_msg.format(param, operator, version))
-
-
-def host_config_value_error(param, param_value):
- error_msg = 'Invalid value for {0} param: {1}'
- return ValueError(error_msg.format(param, param_value))
-
-
-def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
- publish_all_ports=False, links=None, privileged=False,
- dns=None, dns_search=None, volumes_from=None,
- network_mode=None, restart_policy=None, cap_add=None,
- cap_drop=None, devices=None, extra_hosts=None,
- read_only=None, pid_mode=None, ipc_mode=None,
- security_opt=None, ulimits=None, log_config=None,
- mem_limit=None, memswap_limit=None,
- mem_reservation=None, kernel_memory=None,
- mem_swappiness=None, cgroup_parent=None,
- group_add=None, cpu_quota=None,
- cpu_period=None, blkio_weight=None,
- blkio_weight_device=None, device_read_bps=None,
- device_write_bps=None, device_read_iops=None,
- device_write_iops=None, oom_kill_disable=False,
- shm_size=None, sysctls=None, version=None, tmpfs=None,
- oom_score_adj=None, dns_opt=None, cpu_shares=None,
- cpuset_cpus=None, userns_mode=None, pids_limit=None):
-
- host_config = {}
-
- if not version:
- warnings.warn(
- 'docker.utils.create_host_config() is deprecated. Please use '
- 'Client.create_host_config() instead.'
- )
- version = constants.DEFAULT_DOCKER_API_VERSION
-
- if mem_limit is not None:
- host_config['Memory'] = parse_bytes(mem_limit)
-
- if memswap_limit is not None:
- host_config['MemorySwap'] = parse_bytes(memswap_limit)
-
- if mem_reservation:
- if version_lt(version, '1.21'):
- raise host_config_version_error('mem_reservation', '1.21')
-
- host_config['MemoryReservation'] = parse_bytes(mem_reservation)
-
- if kernel_memory:
- if version_lt(version, '1.21'):
- raise host_config_version_error('kernel_memory', '1.21')
-
- host_config['KernelMemory'] = parse_bytes(kernel_memory)
-
- if mem_swappiness is not None:
- if version_lt(version, '1.20'):
- raise host_config_version_error('mem_swappiness', '1.20')
- if not isinstance(mem_swappiness, int):
- raise host_config_type_error(
- 'mem_swappiness', mem_swappiness, 'int'
- )
-
- host_config['MemorySwappiness'] = mem_swappiness
-
- if shm_size is not None:
- if isinstance(shm_size, six.string_types):
- shm_size = parse_bytes(shm_size)
-
- host_config['ShmSize'] = shm_size
-
- if pid_mode not in (None, 'host'):
- raise host_config_value_error('pid_mode', pid_mode)
- elif pid_mode:
- host_config['PidMode'] = pid_mode
-
- if ipc_mode:
- host_config['IpcMode'] = ipc_mode
-
- if privileged:
- host_config['Privileged'] = privileged
-
- if oom_kill_disable:
- if version_lt(version, '1.20'):
- raise host_config_version_error('oom_kill_disable', '1.19')
-
- host_config['OomKillDisable'] = oom_kill_disable
-
- if oom_score_adj:
- if version_lt(version, '1.22'):
- raise host_config_version_error('oom_score_adj', '1.22')
- if not isinstance(oom_score_adj, int):
- raise host_config_type_error(
- 'oom_score_adj', oom_score_adj, 'int'
- )
- host_config['OomScoreAdj'] = oom_score_adj
-
- if publish_all_ports:
- host_config['PublishAllPorts'] = publish_all_ports
-
- if read_only is not None:
- host_config['ReadonlyRootfs'] = read_only
-
- if dns_search:
- host_config['DnsSearch'] = dns_search
-
- if network_mode:
- host_config['NetworkMode'] = network_mode
- elif network_mode is None and compare_version('1.19', version) > 0:
- host_config['NetworkMode'] = 'default'
-
- if restart_policy:
- if not isinstance(restart_policy, dict):
- raise host_config_type_error(
- 'restart_policy', restart_policy, 'dict'
- )
-
- host_config['RestartPolicy'] = restart_policy
-
- if cap_add:
- host_config['CapAdd'] = cap_add
-
- if cap_drop:
- host_config['CapDrop'] = cap_drop
-
- if devices:
- host_config['Devices'] = parse_devices(devices)
-
- if group_add:
- if version_lt(version, '1.20'):
- raise host_config_version_error('group_add', '1.20')
-
- host_config['GroupAdd'] = [six.text_type(grp) for grp in group_add]
-
- if dns is not None:
- host_config['Dns'] = dns
-
- if dns_opt is not None:
- if version_lt(version, '1.21'):
- raise host_config_version_error('dns_opt', '1.21')
-
- host_config['DnsOptions'] = dns_opt
-
- if security_opt is not None:
- if not isinstance(security_opt, list):
- raise host_config_type_error('security_opt', security_opt, 'list')
-
- host_config['SecurityOpt'] = security_opt
-
- if sysctls:
- if not isinstance(sysctls, dict):
- raise host_config_type_error('sysctls', sysctls, 'dict')
- host_config['Sysctls'] = {}
- for k, v in six.iteritems(sysctls):
- host_config['Sysctls'][k] = six.text_type(v)
-
- if volumes_from is not None:
- if isinstance(volumes_from, six.string_types):
- volumes_from = volumes_from.split(',')
-
- host_config['VolumesFrom'] = volumes_from
-
- if binds is not None:
- host_config['Binds'] = convert_volume_binds(binds)
-
- if port_bindings is not None:
- host_config['PortBindings'] = convert_port_bindings(port_bindings)
-
- if extra_hosts is not None:
- if isinstance(extra_hosts, dict):
- extra_hosts = [
- '{0}:{1}'.format(k, v)
- for k, v in sorted(six.iteritems(extra_hosts))
- ]
-
- host_config['ExtraHosts'] = extra_hosts
-
- if links is not None:
- host_config['Links'] = normalize_links(links)
-
- if isinstance(lxc_conf, dict):
- formatted = []
- for k, v in six.iteritems(lxc_conf):
- formatted.append({'Key': k, 'Value': str(v)})
- lxc_conf = formatted
-
- if lxc_conf is not None:
- host_config['LxcConf'] = lxc_conf
-
- if cgroup_parent is not None:
- host_config['CgroupParent'] = cgroup_parent
-
- if ulimits is not None:
- if not isinstance(ulimits, list):
- raise host_config_type_error('ulimits', ulimits, 'list')
- host_config['Ulimits'] = []
- for l in ulimits:
- if not isinstance(l, Ulimit):
- l = Ulimit(**l)
- host_config['Ulimits'].append(l)
-
- if log_config is not None:
- if not isinstance(log_config, LogConfig):
- if not isinstance(log_config, dict):
- raise host_config_type_error(
- 'log_config', log_config, 'LogConfig'
- )
- log_config = LogConfig(**log_config)
-
- host_config['LogConfig'] = log_config
-
- if cpu_quota:
- if not isinstance(cpu_quota, int):
- raise host_config_type_error('cpu_quota', cpu_quota, 'int')
- if version_lt(version, '1.19'):
- raise host_config_version_error('cpu_quota', '1.19')
-
- host_config['CpuQuota'] = cpu_quota
-
- if cpu_period:
- if not isinstance(cpu_period, int):
- raise host_config_type_error('cpu_period', cpu_period, 'int')
- if version_lt(version, '1.19'):
- raise host_config_version_error('cpu_period', '1.19')
-
- host_config['CpuPeriod'] = cpu_period
-
- if cpu_shares:
- if version_lt(version, '1.18'):
- raise host_config_version_error('cpu_shares', '1.18')
-
- if not isinstance(cpu_shares, int):
- raise host_config_type_error('cpu_shares', cpu_shares, 'int')
-
- host_config['CpuShares'] = cpu_shares
-
- if cpuset_cpus:
- if version_lt(version, '1.18'):
- raise host_config_version_error('cpuset_cpus', '1.18')
-
- host_config['CpuSetCpus'] = cpuset_cpus
-
- if blkio_weight:
- if not isinstance(blkio_weight, int):
- raise host_config_type_error('blkio_weight', blkio_weight, 'int')
- if version_lt(version, '1.22'):
- raise host_config_version_error('blkio_weight', '1.22')
- host_config["BlkioWeight"] = blkio_weight
-
- if blkio_weight_device:
- if not isinstance(blkio_weight_device, list):
- raise host_config_type_error(
- 'blkio_weight_device', blkio_weight_device, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('blkio_weight_device', '1.22')
- host_config["BlkioWeightDevice"] = blkio_weight_device
-
- if device_read_bps:
- if not isinstance(device_read_bps, list):
- raise host_config_type_error(
- 'device_read_bps', device_read_bps, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('device_read_bps', '1.22')
- host_config["BlkioDeviceReadBps"] = device_read_bps
-
- if device_write_bps:
- if not isinstance(device_write_bps, list):
- raise host_config_type_error(
- 'device_write_bps', device_write_bps, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('device_write_bps', '1.22')
- host_config["BlkioDeviceWriteBps"] = device_write_bps
-
- if device_read_iops:
- if not isinstance(device_read_iops, list):
- raise host_config_type_error(
- 'device_read_iops', device_read_iops, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('device_read_iops', '1.22')
- host_config["BlkioDeviceReadIOps"] = device_read_iops
-
- if device_write_iops:
- if not isinstance(device_write_iops, list):
- raise host_config_type_error(
- 'device_write_iops', device_write_iops, 'list'
- )
- if version_lt(version, '1.22'):
- raise host_config_version_error('device_write_iops', '1.22')
- host_config["BlkioDeviceWriteIOps"] = device_write_iops
-
- if tmpfs:
- if version_lt(version, '1.22'):
- raise host_config_version_error('tmpfs', '1.22')
- host_config["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
-
- if userns_mode:
- if version_lt(version, '1.23'):
- raise host_config_version_error('userns_mode', '1.23')
-
- if userns_mode != "host":
- raise host_config_value_error("userns_mode", userns_mode)
- host_config['UsernsMode'] = userns_mode
-
- if pids_limit:
- if not isinstance(pids_limit, int):
- raise host_config_type_error('pids_limit', pids_limit, 'int')
- if version_lt(version, '1.23'):
- raise host_config_version_error('pids_limit', '1.23')
- host_config["PidsLimit"] = pids_limit
-
- return host_config
-
-
def normalize_links(links):
if isinstance(links, dict):
links = six.iteritems(links)
@@ -936,50 +620,6 @@ def normalize_links(links):
return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
-def create_networking_config(endpoints_config=None):
- networking_config = {}
-
- if endpoints_config:
- networking_config["EndpointsConfig"] = endpoints_config
-
- return networking_config
-
-
-def create_endpoint_config(version, aliases=None, links=None,
- ipv4_address=None, ipv6_address=None,
- link_local_ips=None):
- if version_lt(version, '1.22'):
- raise errors.InvalidVersion(
- 'Endpoint config is not supported for API version < 1.22'
- )
- endpoint_config = {}
-
- if aliases:
- endpoint_config["Aliases"] = aliases
-
- if links:
- endpoint_config["Links"] = normalize_links(links)
-
- ipam_config = {}
- if ipv4_address:
- ipam_config['IPv4Address'] = ipv4_address
-
- if ipv6_address:
- ipam_config['IPv6Address'] = ipv6_address
-
- if link_local_ips is not None:
- if version_lt(version, '1.24'):
- raise errors.InvalidVersion(
- 'link_local_ips is not supported for API version < 1.24'
- )
- ipam_config['LinkLocalIPs'] = link_local_ips
-
- if ipam_config:
- endpoint_config['IPAMConfig'] = ipam_config
-
- return endpoint_config
-
-
def parse_env_file(env_file):
"""
Reads a line-separated environment file.
@@ -993,7 +633,11 @@ def parse_env_file(env_file):
if line[0] == '#':
continue
- parse_line = line.strip().split('=', 1)
+ line = line.strip()
+ if not line:
+ continue
+
+ parse_line = line.split('=', 1)
if len(parse_line) == 2:
k, v = parse_line
environment[k] = v
@@ -1022,147 +666,8 @@ def format_environment(environment):
return [format_env(*var) for var in six.iteritems(environment)]
-def create_container_config(
- version, image, command, hostname=None, user=None, detach=False,
- stdin_open=False, tty=False, mem_limit=None, ports=None, environment=None,
- dns=None, volumes=None, volumes_from=None, network_disabled=False,
- entrypoint=None, cpu_shares=None, working_dir=None, domainname=None,
- memswap_limit=None, cpuset=None, host_config=None, mac_address=None,
- labels=None, volume_driver=None, stop_signal=None, networking_config=None,
-):
- if isinstance(command, six.string_types):
- command = split_command(command)
-
- if isinstance(entrypoint, six.string_types):
- entrypoint = split_command(entrypoint)
-
- if isinstance(environment, dict):
- environment = format_environment(environment)
-
- if labels is not None and compare_version('1.18', version) < 0:
- raise errors.InvalidVersion(
- 'labels were only introduced in API version 1.18'
- )
-
- if cpuset is not None or cpu_shares is not None:
- if version_gte(version, '1.18'):
- warnings.warn(
- 'The cpuset_cpus and cpu_shares options have been moved to '
- 'host_config in API version 1.18, and will be removed',
- DeprecationWarning
- )
-
- if stop_signal is not None and compare_version('1.21', version) < 0:
- raise errors.InvalidVersion(
- 'stop_signal was only introduced in API version 1.21'
- )
-
- if compare_version('1.19', version) < 0:
- if volume_driver is not None:
- raise errors.InvalidVersion(
- 'Volume drivers were only introduced in API version 1.19'
- )
- mem_limit = mem_limit if mem_limit is not None else 0
- memswap_limit = memswap_limit if memswap_limit is not None else 0
- else:
- if mem_limit is not None:
- raise errors.InvalidVersion(
- 'mem_limit has been moved to host_config in API version 1.19'
- )
-
- if memswap_limit is not None:
- raise errors.InvalidVersion(
- 'memswap_limit has been moved to host_config in API '
- 'version 1.19'
- )
-
- if isinstance(labels, list):
- labels = dict((lbl, six.text_type('')) for lbl in labels)
-
- if mem_limit is not None:
- mem_limit = parse_bytes(mem_limit)
-
- if memswap_limit is not None:
- memswap_limit = parse_bytes(memswap_limit)
-
- if isinstance(ports, list):
- exposed_ports = {}
- for port_definition in ports:
- port = port_definition
- proto = 'tcp'
- if isinstance(port_definition, tuple):
- if len(port_definition) == 2:
- proto = port_definition[1]
- port = port_definition[0]
- exposed_ports['{0}/{1}'.format(port, proto)] = {}
- ports = exposed_ports
-
- if isinstance(volumes, six.string_types):
- volumes = [volumes, ]
-
- if isinstance(volumes, list):
- volumes_dict = {}
- for vol in volumes:
- volumes_dict[vol] = {}
- volumes = volumes_dict
-
- if volumes_from:
- if not isinstance(volumes_from, six.string_types):
- volumes_from = ','.join(volumes_from)
- else:
- # Force None, an empty list or dict causes client.start to fail
- volumes_from = None
-
- attach_stdin = False
- attach_stdout = False
- attach_stderr = False
- stdin_once = False
-
- if not detach:
- attach_stdout = True
- attach_stderr = True
-
- if stdin_open:
- attach_stdin = True
- stdin_once = True
-
- if compare_version('1.10', version) >= 0:
- message = ('{0!r} parameter has no effect on create_container().'
- ' It has been moved to host_config')
- if dns is not None:
- raise errors.InvalidVersion(message.format('dns'))
- if volumes_from is not None:
- raise errors.InvalidVersion(message.format('volumes_from'))
-
- return {
- 'Hostname': hostname,
- 'Domainname': domainname,
- 'ExposedPorts': ports,
- 'User': six.text_type(user) if user else None,
- 'Tty': tty,
- 'OpenStdin': stdin_open,
- 'StdinOnce': stdin_once,
- 'Memory': mem_limit,
- 'AttachStdin': attach_stdin,
- 'AttachStdout': attach_stdout,
- 'AttachStderr': attach_stderr,
- 'Env': environment,
- 'Cmd': command,
- 'Dns': dns,
- 'Image': image,
- 'Volumes': volumes,
- 'VolumesFrom': volumes_from,
- 'NetworkDisabled': network_disabled,
- 'Entrypoint': entrypoint,
- 'CpuShares': cpu_shares,
- 'Cpuset': cpuset,
- 'CpusetCpus': cpuset,
- 'WorkingDir': working_dir,
- 'MemorySwap': memswap_limit,
- 'HostConfig': host_config,
- 'NetworkingConfig': networking_config,
- 'MacAddress': mac_address,
- 'Labels': labels,
- 'VolumeDriver': volume_driver,
- 'StopSignal': stop_signal
- }
+def create_host_config(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_host_config has been removed. Please use a '
+ 'docker.types.HostConfig object instead.'
+ )
diff --git a/docker/version.py b/docker/version.py
index 3bbd804..ab6838f 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "1.11.0-dev"
+version = "2.0.0-dev"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
diff --git a/docs-requirements.txt b/docs-requirements.txt
index aede1cb..d69373d 100644
--- a/docs-requirements.txt
+++ b/docs-requirements.txt
@@ -1 +1,2 @@
-mkdocs==0.15.3
+recommonmark==0.4.0
+Sphinx==1.4.6
diff --git a/docs/_static/custom.css b/docs/_static/custom.css
new file mode 100644
index 0000000..5d711ee
--- /dev/null
+++ b/docs/_static/custom.css
@@ -0,0 +1,3 @@
+dl.hide-signature > dt {
+ display: none;
+}
diff --git a/docs/_templates/page.html b/docs/_templates/page.html
new file mode 100644
index 0000000..cf0264c
--- /dev/null
+++ b/docs/_templates/page.html
@@ -0,0 +1,2 @@
+{% extends "!page.html" %}
+{% set css_files = css_files + ["_static/custom.css"] %}
diff --git a/docs/api.md b/docs/api.md
deleted file mode 100644
index fdf3e27..0000000
--- a/docs/api.md
+++ /dev/null
@@ -1,1237 +0,0 @@
-# Client API
-
-To instantiate a `Client` class that will allow you to communicate with a
-Docker daemon, simply do:
-
-```python
->>> from docker import Client
->>> cli = Client(base_url='unix://var/run/docker.sock')
-```
-
-**Params**:
-
-* base_url (str): Refers to the protocol+hostname+port where the Docker server
-is hosted.
-* version (str): The version of the API the client will use. Specify `'auto'`
- to use the API version provided by the server.
-* timeout (int): The HTTP request timeout, in seconds.
-* tls (bool or [TLSConfig](tls.md#TLSConfig)): Equivalent CLI options: `docker --tls ...`
-* user_agent (str): Set a custom user agent for requests to the server.
-
-
-****
-
-## attach
-
-The `.logs()` function is a wrapper around this method, which you can use
-instead if you want to fetch/stream container output without first retrieving
-the entire backlog.
-
-**Params**:
-
-* container (str): The container to attach to
-* stdout (bool): Get STDOUT
-* stderr (bool): Get STDERR
-* stream (bool): Return an iterator
-* logs (bool): Get all previous output
-
-**Returns** (generator or str): The logs or output for the image
-
-## build
-
-Similar to the `docker build` command. Either `path` or `fileobj` needs to be
-set. `path` can be a local path (to a directory containing a Dockerfile) or a
-remote URL. `fileobj` must be a readable file-like object to a Dockerfile.
-
-If you have a tar file for the Docker build context (including a Dockerfile)
-already, pass a readable file-like object to `fileobj` and also pass
-`custom_context=True`. If the stream is compressed also, set `encoding` to the
-correct value (e.g `gzip`).
-
-**Params**:
-
-* path (str): Path to the directory containing the Dockerfile
-* tag (str): A tag to add to the final image
-* quiet (bool): Whether to return the status
-* fileobj: A file object to use as the Dockerfile. (Or a file-like object)
-* nocache (bool): Don't use the cache when set to `True`
-* rm (bool): Remove intermediate containers. The `docker build` command now
- defaults to ``--rm=true``, but we have kept the old default of `False`
- to preserve backward compatibility
-* stream (bool): *Deprecated for API version > 1.8 (always True)*.
- Return a blocking generator you can iterate over to retrieve build output as
- it happens
-* timeout (int): HTTP timeout
-* custom_context (bool): Optional if using `fileobj`
-* encoding (str): The encoding for a stream. Set to `gzip` for compressing
-* pull (bool): Downloads any updates to the FROM image in Dockerfiles
-* forcerm (bool): Always remove intermediate containers, even after unsuccessful builds
-* dockerfile (str): path within the build context to the Dockerfile
-* buildargs (dict): A dictionary of build arguments
-* container_limits (dict): A dictionary of limits applied to each container
- created by the build process. Valid keys:
- - memory (int): set memory limit for build
- - memswap (int): Total memory (memory + swap), -1 to disable swap
- - cpushares (int): CPU shares (relative weight)
- - cpusetcpus (str): CPUs in which to allow execution, e.g., `"0-3"`, `"0,1"`
-* decode (bool): If set to `True`, the returned stream will be decoded into
- dicts on the fly. Default `False`.
-* shmsize (int): Size of /dev/shm in bytes. The size must be greater
- than 0. If omitted the system uses 64MB.
-* labels (dict): A dictionary of labels to set on the image
-
-**Returns** (generator): A generator for the build output
-
-```python
->>> from io import BytesIO
->>> from docker import Client
->>> dockerfile = '''
-... # Shared Volume
-... FROM busybox:buildroot-2014.02
-... MAINTAINER first last, first.last@yourdomain.com
-... VOLUME /data
-... CMD ["/bin/sh"]
-... '''
->>> f = BytesIO(dockerfile.encode('utf-8'))
->>> cli = Client(base_url='tcp://127.0.0.1:2375')
->>> response = [line for line in cli.build(
-... fileobj=f, rm=True, tag='yourname/volume'
-... )]
->>> response
-['{"stream":" ---\\u003e a9eb17255234\\n"}',
-'{"stream":"Step 1 : MAINTAINER first last, first.last@yourdomain.com\\n"}',
-'{"stream":" ---\\u003e Running in 08787d0ee8b1\\n"}',
-'{"stream":" ---\\u003e 23e5e66a4494\\n"}',
-'{"stream":"Removing intermediate container 08787d0ee8b1\\n"}',
-'{"stream":"Step 2 : VOLUME /data\\n"}',
-'{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
-'{"stream":" ---\\u003e 713bca62012e\\n"}',
-'{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
-'{"stream":"Step 3 : CMD [\\"/bin/sh\\"]\\n"}',
-'{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
-'{"stream":" ---\\u003e 032b8b2855fc\\n"}',
-'{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
-'{"stream":"Successfully built 032b8b2855fc\\n"}']
-```
-
-**Raises:** [TypeError](
-https://docs.python.org/3.5/library/exceptions.html#TypeError) if `path` nor
-`fileobj` are specified
-
-## commit
-
-Identical to the `docker commit` command.
-
-**Params**:
-
-* container (str): The image hash of the container
-* repository (str): The repository to push the image to
-* tag (str): The tag to push
-* message (str): A commit message
-* author (str): The name of the author
-* changes (str): Dockerfile instructions to apply while committing
-* conf (dict): The configuration for the container. See the [Docker remote api](
-https://docs.docker.com/reference/api/docker_remote_api/) for full details.
-
-## containers
-
-List containers. Identical to the `docker ps` command.
-
-**Params**:
-
-* quiet (bool): Only display numeric Ids
-* all (bool): Show all containers. Only running containers are shown by default
-* trunc (bool): Truncate output
-* latest (bool): Show only the latest created container, include non-running
-ones.
-* since (str): Show only containers created since Id or Name, include
-non-running ones
-* before (str): Show only container created before Id or Name, include
-non-running ones
-* limit (int): Show `limit` last created containers, include non-running ones
-* size (bool): Display sizes
-* filters (dict): Filters to be processed on the image list. Available filters:
- - `exited` (int): Only containers with specified exit code
- - `status` (str): One of `restarting`, `running`, `paused`, `exited`
- - `label` (str): format either `"key"` or `"key=value"`
- - `id` (str): The id of the container.
- - `name` (str): The name of the container.
- - `ancestor` (str): Filter by container ancestor. Format of `<image-name>[:tag]`, `<image-id>`, or `<image@digest>`.
- - `before` (str): Only containers created before a particular container. Give the container name or id.
- - `since` (str): Only containers created after a particular container. Give container name or id.
-
- A comprehensive list can be found [here](https://docs.docker.com/engine/reference/commandline/ps/)
-
-**Returns** (dict): The system's containers
-
-```python
->>> from docker import Client
->>> cli = Client(base_url='tcp://127.0.0.1:2375')
->>> cli.containers()
-[{'Command': '/bin/sleep 30',
- 'Created': 1412574844,
- 'Id': '6e276c9e6e5759e12a6a9214efec6439f80b4f37618e1a6547f28a3da34db07a',
- 'Image': 'busybox:buildroot-2014.02',
- 'Names': ['/grave_mayer'],
- 'Ports': [],
- 'Status': 'Up 1 seconds'}]
-```
-
-## connect_container_to_network
-
-Connect a container to a network.
-
-**Params**:
-
-* container (str): container-id/name to be connected to the network
-* net_id (str): network id
-* aliases (list): A list of aliases for this endpoint. Names in that list can
- be used within the network to reach the container. Defaults to `None`.
-* links (list): A list of links for this endpoint. Containers declared in this
- list will be [linked](https://docs.docker.com/engine/userguide/networking/work-with-networks/#linking-containers-in-user-defined-networks)
- to this container. Defaults to `None`.
-* ipv4_address (str): The IP address of this container on the network,
- using the IPv4 protocol. Defaults to `None`.
-* ipv6_address (str): The IP address of this container on the network,
- using the IPv6 protocol. Defaults to `None`.
-* link_local_ips (list): A list of link-local (IPv4/IPv6) addresses.
-
-## copy
-Identical to the `docker cp` command. Get files/folders from the container.
-**Deprecated for API version >= 1.20** &ndash; Consider using
-[`get_archive`](#get_archive) **instead.**
-
-**Params**:
-
-* container (str): The container to copy from
-* resource (str): The path within the container
-
-**Returns** (str): The contents of the file as a string
-
-## create_container
-
-Creates a container that can then be `.start()` ed. Parameters are similar to
-those for the `docker run` command except it doesn't support the attach
-options (`-a`).
-
-See [Port bindings](port-bindings.md) and [Using volumes](volumes.md) for more
-information on how to create port bindings and volume mappings.
-
-The `mem_limit` variable accepts float values (which represent the memory limit
-of the created container in bytes) or a string with a units identification char
-('100000b', '1000k', '128m', '1g'). If a string is specified without a units
-character, bytes are assumed as an intended unit.
-
-`volumes_from` and `dns` arguments raise [TypeError](
-https://docs.python.org/3.5/library/exceptions.html#TypeError) exception if
-they are used against v1.10 and above of the Docker remote API. Those
-arguments should be passed as part of the `host_config` dictionary.
-
-**Params**:
-
-* image (str): The image to run
-* command (str or list): The command to be run in the container
-* hostname (str): Optional hostname for the container
-* user (str or int): Username or UID
-* detach (bool): Detached mode: run container in the background and print new
-container Id
-* stdin_open (bool): Keep STDIN open even if not attached
-* tty (bool): Allocate a pseudo-TTY
-* mem_limit (float or str): Memory limit (format: [number][optional unit],
-where unit = b, k, m, or g)
-* ports (list of ints): A list of port numbers
-* environment (dict or list): A dictionary or a list of strings in the
-following format `["PASSWORD=xxx"]` or `{"PASSWORD": "xxx"}`.
-* dns (list): DNS name servers
-* dns_opt (list): Additional options to be added to the container's `resolv.conf` file
-* volumes (str or list):
-* volumes_from (str or list): List of container names or Ids to get volumes
-from. Optionally a single string joining container id's with commas
-* network_disabled (bool): Disable networking
-* name (str): A name for the container
-* entrypoint (str or list): An entrypoint
-* working_dir (str): Path to the working directory
-* domainname (str or list): Set custom DNS search domains
-* memswap_limit (int):
-* host_config (dict): A [HostConfig](hostconfig.md) dictionary
-* mac_address (str): The Mac Address to assign the container
-* labels (dict or list): A dictionary of name-value labels (e.g. `{"label1": "value1", "label2": "value2"}`) or a list of names of labels to set with empty values (e.g. `["label1", "label2"]`)
-* volume_driver (str): The name of a volume driver/plugin.
-* stop_signal (str): The stop signal to use to stop the container (e.g. `SIGINT`).
-* networking_config (dict): A [NetworkingConfig](networks.md) dictionary
-
-**Returns** (dict): A dictionary with an image 'Id' key and a 'Warnings' key.
-
-```python
->>> from docker import Client
->>> cli = Client(base_url='tcp://127.0.0.1:2375')
->>> container = cli.create_container(image='busybox:latest', command='/bin/sleep 30')
->>> print(container)
-{'Id': '8a61192da2b3bb2d922875585e29b74ec0dc4e0117fcbf84c962204e97564cd7',
- 'Warnings': None}
-```
-
-### docker.utils.parse_env_file
-
-A utility for parsing an environment file.
-
-The expected format of the file is as follows:
-
-```
-USERNAME=jdoe
-PASSWORD=secret
-```
-
-The utility can be used as follows:
-
-```python
->>> import docker.utils
->>> my_envs = docker.utils.parse_env_file('/path/to/file')
->>> client.create_container('myimage', 'command', environment=my_envs)
-```
-
-## create_network
-
-Create a network, similar to the `docker network create` command. See the
-[networks documentation](networks.md) for details.
-
-**Params**:
-
-* name (str): Name of the network
-* driver (str): Name of the driver used to create the network
-* options (dict): Driver options as a key-value dictionary
-* ipam (dict): Optional custom IP scheme for the network
-* check_duplicate (bool): Request daemon to check for networks with same name.
- Default: `True`.
-* internal (bool): Restrict external access to the network. Default `False`.
-* labels (dict): Map of labels to set on the network. Default `None`.
-* enable_ipv6 (bool): Enable IPv6 on the network. Default `False`.
-
-**Returns** (dict): The created network reference object
-
-## create_service
-
-Create a service, similar to the `docker service create` command. See the
-[services documentation](services.md#Clientcreate_service) for details.
-
-## create_volume
-
-Create and register a named volume
-
-**Params**:
-
-* name (str): Name of the volume
-* driver (str): Name of the driver used to create the volume
-* driver_opts (dict): Driver options as a key-value dictionary
-* labels (dict): Labels to set on the volume
-
-**Returns** (dict): The created volume reference object
-
-```python
->>> from docker import Client
->>> cli = Client()
->>> volume = cli.create_volume(
- name='foobar', driver='local', driver_opts={'foo': 'bar', 'baz': 'false'},
- labels={"key": "value"}
-)
->>> print(volume)
-{
- u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
- u'Driver': u'local',
- u'Name': u'foobar',
- u'Labels': {u'key': u'value'}
-}
-```
-
-## diff
-
-Inspect changes on a container's filesystem.
-
-**Params**:
-
-* container (str): The container to diff
-
-**Returns** (str):
-
-## disconnect_container_from_network
-
-**Params**:
-
-* container (str): container-id/name to be disconnected from a network
-* net_id (str): network id
-* force (bool): Force the container to disconnect from a network.
- Default: `False`
-
-## events
-
-Identical to the `docker events` command: get real-time events from the server. The `events`
-function returns a blocking generator you can iterate over to retrieve events as they happen.
-
-**Params**:
-
-* since (UTC datetime or int): get events from this point
-* until (UTC datetime or int): get events until this point
-* filters (dict): filter the events by event time, container or image
-* decode (bool): If set to true, stream will be decoded into dicts on the
- fly. False by default.
-
-**Returns** (generator):
-
-```python
-{u'status': u'start',
- u'from': u'image/with:tag',
- u'id': u'container-id',
- u'time': 1423339459}
-```
-
-## execute
-
-This command is deprecated for docker-py >= 1.2.0 ; use `exec_create` and
-`exec_start` instead.
-
-## exec_create
-
-Sets up an exec instance in a running container.
-
-**Params**:
-
-* container (str): Target container where exec instance will be created
-* cmd (str or list): Command to be executed
-* stdout (bool): Attach to stdout of the exec command if true. Default: True
-* stderr (bool): Attach to stderr of the exec command if true. Default: True
-* since (UTC datetime or int): Output logs from this timestamp. Default: `None` (all logs are given)
-* tty (bool): Allocate a pseudo-TTY. Default: False
-* user (str): User to execute command as. Default: root
-
-**Returns** (dict): A dictionary with an exec 'Id' key.
-
-
-## exec_inspect
-
-Return low-level information about an exec command.
-
-**Params**:
-
-* exec_id (str): ID of the exec instance
-
-**Returns** (dict): Dictionary of values returned by the endpoint.
-
-
-## exec_resize
-
-Resize the tty session used by the specified exec command.
-
-**Params**:
-
-* exec_id (str): ID of the exec instance
-* height (int): Height of tty session
-* width (int): Width of tty session
-
-## exec_start
-
-Start a previously set up exec instance.
-
-**Params**:
-
-* exec_id (str): ID of the exec instance
-* detach (bool): If true, detach from the exec command. Default: False
-* tty (bool): Allocate a pseudo-TTY. Default: False
-* stream (bool): Stream response data. Default: False
-
-**Returns** (generator or str): If `stream=True`, a generator yielding response
-chunks. A string containing response data otherwise.
-
-## export
-
-Export the contents of a filesystem as a tar archive to STDOUT.
-
-**Params**:
-
-* container (str): The container to export
-
-**Returns** (str): The filesystem tar archive as a str
-
-## get_archive
-
-Retrieve a file or folder from a container in the form of a tar archive.
-
-**Params**:
-
-* container (str): The container where the file is located
-* path (str): Path to the file or folder to retrieve
-
-**Returns** (tuple): First element is a raw tar data stream. Second element is
-a dict containing `stat` information on the specified `path`.
-
-```python
->>> import docker
->>> cli = docker.Client()
->>> ctnr = cli.create_container('busybox', 'true')
->>> strm, stat = cli.get_archive(ctnr, '/bin/sh')
->>> print(stat)
-{u'linkTarget': u'', u'mode': 493, u'mtime': u'2015-09-16T12:34:23-07:00', u'name': u'sh', u'size': 962860}
-```
-
-## get_image
-
-Get an image from the docker daemon. Similar to the `docker save` command.
-
-**Params**:
-
-* image (str): Image name to get
-
-**Returns** (urllib3.response.HTTPResponse object): The response from the docker daemon
-
-An example of how to get (save) an image to a file.
-```python
->>> from docker import Client
->>> cli = Client(base_url='unix://var/run/docker.sock')
->>> image = cli.get_image("fedora:latest")
->>> image_tar = open('/tmp/fedora-latest.tar', 'w')
->>> image_tar.write(image.data)
->>> image_tar.close()
-```
-
-## history
-
-Show the history of an image.
-
-**Params**:
-
-* image (str): The image to show history for
-
-**Returns** (str): The history of the image
-
-## images
-
-List images. Identical to the `docker images` command.
-
-**Params**:
-
-* name (str): Only show images belonging to the repository `name`
-* quiet (bool): Only show numeric Ids. Returns a list
-* all (bool): Show all images (by default filter out the intermediate image
-layers)
-* filters (dict): Filters to be processed on the image list. Available filters:
- - `dangling` (bool)
- - `label` (str): format either `"key"` or `"key=value"`
-
-**Returns** (dict or list): A list if `quiet=True`, otherwise a dict.
-
-```python
-[{'Created': 1401926735,
-'Id': 'a9eb172552348a9a49180694790b33a1097f546456d041b6e82e4d7716ddb721',
-'ParentId': '120e218dd395ec314e7b6249f39d2853911b3d6def6ea164ae05722649f34b16',
-'RepoTags': ['busybox:buildroot-2014.02', 'busybox:latest'],
-'Size': 0,
-'VirtualSize': 2433303},
-...
-```
-
-## import_image
-
-Similar to the `docker import` command.
-
-If `src` is a string or unicode string, it will first be treated as a path to
-a tarball on the local system. If there is an error reading from that file,
-src will be treated as a URL instead to fetch the image from. You can also pass
-an open file handle as 'src', in which case the data will be read from that
-file.
-
-If `src` is unset but `image` is set, the `image` parameter will be taken as
-the name of an existing image to import from.
-
-**Params**:
-
-* src (str or file): Path to tarfile, URL, or file-like object
-* repository (str): The repository to create
-* tag (str): The tag to apply
-* image (str): Use another image like the `FROM` Dockerfile parameter
-
-## import_image_from_data
-
-Like `.import_image()`, but allows importing in-memory bytes data.
-
-**Params**:
-
-* data (bytes collection): Bytes collection containing valid tar data
-* repository (str): The repository to create
-* tag (str): The tag to apply
-
-## import_image_from_file
-
-Like `.import_image()`, but only supports importing from a tar file on
-disk. If the file doesn't exist it will raise `IOError`.
-
-**Params**:
-
-* filename (str): Full path to a tar file.
-* repository (str): The repository to create
-* tag (str): The tag to apply
-
-## import_image_from_url
-
-Like `.import_image()`, but only supports importing from a URL.
-
-**Params**:
-
-* url (str): A URL pointing to a tar file.
-* repository (str): The repository to create
-* tag (str): The tag to apply
-
-## import_image_from_image
-
-Like `.import_image()`, but only supports importing from another image,
-like the `FROM` Dockerfile parameter.
-
-**Params**:
-
-* image (str): Image name to import from
-* repository (str): The repository to create
-* tag (str): The tag to apply
-
-## info
-
-Display system-wide information. Identical to the `docker info` command.
-
-**Returns** (dict): The info as a dict
-
-```
->>> from docker import Client
->>> cli = Client(base_url='tcp://127.0.0.1:2375')
->>> cli.info()
-{'Containers': 3,
- 'Debug': 1,
- 'Driver': 'aufs',
- 'DriverStatus': [['Root Dir', '/mnt/sda1/var/lib/docker/aufs'],
- ['Dirs', '225']],
- 'ExecutionDriver': 'native-0.2',
- 'IPv4Forwarding': 1,
- 'Images': 219,
- 'IndexServerAddress': 'https://index.docker.io/v1/',
- 'InitPath': '/usr/local/bin/docker',
- 'InitSha1': '',
- 'KernelVersion': '3.16.1-tinycore64',
- 'MemoryLimit': 1,
- 'NEventsListener': 0,
- 'NFd': 11,
- 'NGoroutines': 12,
- 'OperatingSystem': 'Boot2Docker 1.2.0 (TCL 5.3);',
- 'SwapLimit': 1}
-```
-
-## init_swarm
-
-Initialize a new Swarm using the current connected engine as the first node.
-See the [Swarm documentation](swarm.md#clientinit_swarm).
-
-## insert
-*DEPRECATED*
-
-## inspect_container
-
-Identical to the `docker inspect` command, but only for containers.
-
-**Params**:
-
-* container (str): The container to inspect
-
-**Returns** (dict): Nearly the same output as `docker inspect`, just as a
-single dict
-
-## inspect_image
-
-Identical to the `docker inspect` command, but only for images.
-
-**Params**:
-
-* image (str): The image to inspect
-
-**Returns** (dict): Nearly the same output as `docker inspect`, just as a
-single dict
-
-## inspect_network
-
-Retrieve network info by id.
-
-**Params**:
-
-* net_id (str): network id
-
-**Returns** (dict): Network information dictionary
-
-## inspect_node
-
-Retrieve low-level information about a Swarm node.
-See the [Swarm documentation](swarm.md#clientinspect_node).
-
-## inspect_service
-
-Inspect a service, similar to the `docker service inspect` command. See the
-[services documentation](services.md#clientinspect_service) for details.
-
-## inspect_swarm
-
-Retrieve information about the current Swarm.
-See the [Swarm documentation](swarm.md#clientinspect_swarm).
-
-## inspect_task
-
-Retrieve information about a task.
-
-**Params**:
-
-* task (str): Task identifier
-
-**Returns** (dict): Task information dictionary
-
-## inspect_volume
-
-Retrieve volume info by name.
-
-**Params**:
-
-* name (str): volume name
-
-**Returns** (dict): Volume information dictionary
-
-```python
->>> cli.inspect_volume('foobar')
-{u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', u'Driver': u'local', u'Name': u'foobar'}
-```
-
-## join_swarm
-
-Join an existing Swarm.
-See the [Swarm documentation](swarm.md#clientjoin_swarm).
-
-## kill
-
-Kill a container or send a signal to a container.
-
-**Params**:
-
-* container (str): The container to kill
-* signal (str or int): The signal to send. Defaults to `SIGKILL`
-
-## leave_swarm
-
-Leave the current Swarm.
-See the [Swarm documentation](swarm.md#clientleave_swarm).
-
-## load_image
-
-Load an image that was previously saved using `Client.get_image`
-(or `docker save`). Similar to `docker load`.
-
-**Params**:
-
-* data (binary): Image data to be loaded
-
-## login
-
-Nearly identical to the `docker login` command, but non-interactive.
-
-**Params**:
-
-* username (str): The registry username
-* password (str): The plaintext password
-* email (str): The email for the registry account
-* registry (str): URL to the registry. Ex:`https://index.docker.io/v1/`
-* reauth (bool): Whether to refresh existing authentication on the docker server.
-* dockercfg_path (str): Use a custom path for the .dockercfg file
- (default `$HOME/.dockercfg`)
-
-**Returns** (dict): The response from the login request
-
-## logs
-
-Identical to the `docker logs` command. The `stream` parameter makes the `logs`
-function return a blocking generator you can iterate over to retrieve log
-output as it happens.
-
-**Params**:
-
-* container (str): The container to get logs from
-* stdout (bool): Get STDOUT
-* stderr (bool): Get STDERR
-* stream (bool): Stream the response
-* timestamps (bool): Show timestamps
-* tail (str or int): Output specified number of lines at the end of logs: `"all"` or `number`. Default `"all"`
-* since (datetime or int): Show logs since a given datetime or integer epoch (in seconds)
-* follow (bool): Follow log output
-
-**Returns** (generator or str):
-
-## networks
-
-List networks currently registered by the docker daemon. Similar to the `docker network ls` command.
-
-**Params**
-
-* names (list): List of names to filter by
-* ids (list): List of ids to filter by
-
-The above are combined to create a filters dict.
-
-**Returns** (dict): List of network objects.
-
-## nodes
-
-List Swarm nodes. See the [Swarm documentation](swarm.md#clientnodes).
-
-## pause
-
-Pauses all processes within a container.
-
-**Params**:
-
-* container (str): The container to pause
-
-
-## ping
-
-Hits the `/_ping` endpoint of the remote API and returns the result. An
-exception will be raised if the endpoint isn't responding.
-
-**Returns** (bool)
-
-## port
-Lookup the public-facing port that is NAT-ed to `private_port`. Identical to
-the `docker port` command.
-
-**Params**:
-
-* container (str): The container to look up
-* private_port (int): The private port to inspect
-
-**Returns** (list of dict): The mapping for the host ports
-
-```bash
-$ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30
-7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b
-```
-```python
->>> cli.port('7174d6347063', 80)
-[{'HostIp': '0.0.0.0', 'HostPort': '80'}]
-```
-
-## pull
-
-Identical to the `docker pull` command.
-
-**Params**:
-
-* repository (str): The repository to pull
-* tag (str): The tag to pull
-* stream (bool): Stream the output as a generator
-* insecure_registry (bool): Use an insecure registry
-* auth_config (dict): Override the credentials that Client.login has set for this request
- `auth_config` should contain the `username` and `password` keys to be valid.
-
-**Returns** (generator or str): The output
-
-```python
->>> from docker import Client
->>> cli = Client(base_url='tcp://127.0.0.1:2375')
->>> for line in cli.pull('busybox', stream=True):
-... print(json.dumps(json.loads(line), indent=4))
-{
- "status": "Pulling image (latest) from busybox",
- "progressDetail": {},
- "id": "e72ac664f4f0"
-}
-{
- "status": "Pulling image (latest) from busybox, endpoint: ...",
- "progressDetail": {},
- "id": "e72ac664f4f0"
-}
-```
-
-## push
-
-Push an image or a repository to the registry. Identical to the `docker push`
-command.
-
-**Params**:
-
-* repository (str): The repository to push to
-* tag (str): An optional tag to push
-* stream (bool): Stream the output as a blocking generator
-* insecure_registry (bool): Use `http://` to connect to the registry
-* auth_config (dict): Override the credentials that Client.login has set for this request
- `auth_config` should contain the `username` and `password` keys to be valid.
-
-**Returns** (generator or str): The output of the upload
-
-```python
->>> from docker import Client
->>> cli = Client(base_url='tcp://127.0.0.1:2375')
->>> response = [line for line in cli.push('yourname/app', stream=True)]
->>> response
-['{"status":"Pushing repository yourname/app (1 tags)"}\\n',
- '{"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}\\n',
- '{"status":"Image already pushed, skipping","progressDetail":{},
- "id":"511136ea3c5a"}\\n',
- ...
- '{"status":"Pushing tag for rev [918af568e6e5] on {
- https://cdn-registry-1.docker.io/v1/repositories/
- yourname/app/tags/latest}"}\\n']
-```
-
-## put_archive
-
-Insert a file or folder in an existing container using a tar archive as source.
-
-**Params**:
-
-* container (str): The container where the file(s) will be extracted
-* path (str): Path inside the container where the file(s) will be extracted.
- Must exist.
-* data (bytes): tar data to be extracted
-
-**Returns** (bool): True if the call succeeds. `docker.errors.APIError` will
-be raised if an error occurs.
-
-## remove_container
-
-Remove a container. Similar to the `docker rm` command.
-
-**Params**:
-
-* container (str): The container to remove
-* v (bool): Remove the volumes associated with the container
-* link (bool): Remove the specified link and not the underlying container
-* force (bool): Force the removal of a running container (uses SIGKILL)
-
-## remove_image
-
-Remove an image. Similar to the `docker rmi` command.
-
-**Params**:
-
-* image (str): The image to remove
-* force (bool): Force removal of the image
-* noprune (bool): Do not delete untagged parents
-
-## remove_network
-
-Remove a network. Similar to the `docker network rm` command.
-
-**Params**:
-
-* net_id (str): The network's id
-
-Failure to remove will raise a `docker.errors.APIError` exception.
-
-## remove_service
-
-Remove a service, similar to the `docker service rm` command. See the
-[services documentation](services.md#clientremove_service) for details.
-
-## remove_volume
-
-Remove a volume. Similar to the `docker volume rm` command.
-
-**Params**:
-
-* name (str): The volume's name
-
-Failure to remove will raise a `docker.errors.APIError` exception.
-
-## rename
-
-Rename a container. Similar to the `docker rename` command.
-
-**Params**:
-
-* container (str): ID of the container to rename
-* name (str): New name for the container
-
-## resize
-
-Resize the tty session.
-
-**Params**:
-
-* container (str or dict): The container to resize
-* height (int): Height of tty session
-* width (int): Width of tty session
-
-## restart
-
-Restart a container. Similar to the `docker restart` command.
-
-If `container` is a dict, the `Id` key is used.
-
-**Params**:
-
-* container (str or dict): The container to restart
-* timeout (int): Number of seconds to try to stop for before killing the
-container. Once killed it will then be restarted. Default is 10 seconds.
-
-## search
-Identical to the `docker search` command.
-
-**Params**:
-
-* term (str): A term to search for
-
-**Returns** (list of dicts): The response of the search
-
-```python
->>> from docker import Client
->>> cli = Client(base_url='tcp://127.0.0.1:2375')
->>> response = cli.search('nginx')
->>> response[:2]
-[{'description': 'Official build of Nginx.',
- 'is_official': True,
- 'is_trusted': False,
- 'name': 'nginx',
- 'star_count': 266},
- {'description': 'Trusted automated Nginx (http://nginx.org/) ...',
- 'is_official': False,
- 'is_trusted': True,
- 'name': 'dockerfile/nginx',
- 'star_count': 60},
- ...
-```
-
-## services
-
-List services, similar to the `docker service ls` command. See the
-[services documentation](services.md#clientservices) for details.
-
-## start
-
-Similar to the `docker start` command, but doesn't support attach options. Use
-`.logs()` to recover `stdout`/`stderr`.
-
-**Params**:
-
-* container (str): The container to start
-
-**Deprecation warning:** For API version > 1.15, it is highly recommended to
- provide host config options in the
- [`host_config` parameter of `create_container`](#create_container)
-
-```python
->>> from docker import Client
->>> cli = Client(base_url='tcp://127.0.0.1:2375')
->>> container = cli.create_container(
-... image='busybox:latest',
-... command='/bin/sleep 30')
->>> response = cli.start(container=container.get('Id'))
->>> print(response)
-None
-```
-
-## stats
-
-The Docker API parallel to the `docker stats` command.
-This will stream statistics for a specific container.
-
-**Params**:
-
-* container (str): The container to stream statistics for
-* decode (bool): If set to true, stream will be decoded into dicts on the
- fly. False by default.
-* stream (bool): If set to false, only the current stats will be returned
- instead of a stream. True by default.
-
-```python
->>> from docker import Client
->>> cli = Client(base_url='tcp://127.0.0.1:2375')
->>> stats_obj = cli.stats('elasticsearch')
->>> for stat in stats_obj:
->>> print(stat)
-{"read":"2015-02-11T21:47:30.49388286+02:00","networks":{"eth0":{"rx_bytes":648,"rx_packets":8 ...
-...
-...
-...
-```
-
-## stop
-
-Stops a container. Similar to the `docker stop` command.
-
-**Params**:
-
-* container (str): The container to stop
-* timeout (int): Timeout in seconds to wait for the container to stop before
-sending a `SIGKILL`. Default: 10
-
-## tag
-
-Tag an image into a repository. Identical to the `docker tag` command.
-
-**Params**:
-
-* image (str): The image to tag
-* repository (str): The repository to set for the tag
-* tag (str): The tag name
-* force (bool): Force
-
-**Returns** (bool): True if successful
-
-## tasks
-
-Retrieve a list of tasks.
-
-**Params**:
-
-* filters (dict): A map of filters to process on the tasks list. Valid filters:
- `id`, `name`, `service`, `node`, `label` and `desired-state`.
-
-**Returns** (list): List of task dictionaries.
-
-## top
-Display the running processes of a container.
-
-**Params**:
-
-* container (str): The container to inspect
-* ps_args (str): Optional arguments passed to ps (e.g., aux)
-
-**Returns** (str): The output of the top
-
-```python
->>> from docker import Client
->>> cli = Client(base_url='tcp://127.0.0.1:2375')
->>> cli.create_container('busybox:latest', '/bin/sleep 30', name='sleeper')
->>> cli.start('sleeper')
->>> cli.top('sleeper')
-{'Processes': [['952', 'root', '/bin/sleep 30']],
- 'Titles': ['PID', 'USER', 'COMMAND']}
-```
-
-## unpause
-
-Unpause all processes within a container.
-
-**Params**:
-
-* container (str): The container to unpause
-
-## update_container
-
-Update resource configs of one or more containers.
-
-**Params**:
-
-* container (str): The container to inspect
-* blkio_weight (int): Block IO (relative weight), between 10 and 1000
-* cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
-* cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
-* cpu_shares (int): CPU shares (relative weight)
-* cpuset_cpus (str): CPUs in which to allow execution
-* cpuset_mems (str): MEMs in which to allow execution
-* mem_limit (int or str): Memory limit
-* mem_reservation (int or str): Memory soft limit
-* memswap_limit (int or str): Total memory (memory + swap), -1 to disable swap
-* kernel_memory (int or str): Kernel memory limit
-* restart_policy (dict): Restart policy dictionary
-
-**Returns** (dict): Dictionary containing a `Warnings` key.
-
-## update_node
-
-Update a node.
-See the [Swarm documentation](swarm.md#clientupdate_node).
-
-## update_service
-
-Update a service, similar to the `docker service update` command. See the
-[services documentation](services.md#clientupdate_service) for details.
-
-## update_swarm
-
-Update the current Swarm.
-See the [Swarm documentation](swarm.md#clientupdate_swarm).
-
-## version
-
-Nearly identical to the `docker version` command.
-
-**Returns** (dict): The server version information
-
-```python
->>> from docker import Client
->>> cli = Client(base_url='tcp://127.0.0.1:2375')
->>> cli.version()
-{
- "KernelVersion": "3.16.4-tinycore64",
- "Arch": "amd64",
- "ApiVersion": "1.15",
- "Version": "1.3.0",
- "GitCommit": "c78088f",
- "Os": "linux",
- "GoVersion": "go1.3.3"
-}
-```
-
-## volumes
-
-List volumes currently registered by the docker daemon. Similar to the `docker volume ls` command.
-
-**Params**
-
-* filters (dict): Server-side list filtering options.
-
-**Returns** (dict): Dictionary with list of volume objects as value of the `Volumes` key.
-
-```python
->>> cli.volumes()
-{u'Volumes': [
- {u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', u'Driver': u'local', u'Name': u'foobar'},
- {u'Mountpoint': u'/var/lib/docker/volumes/baz/_data', u'Driver': u'local', u'Name': u'baz'}
-]}
-```
-
-## wait
-Identical to the `docker wait` command. Block until a container stops, then
-return its exit code. Returns the value `-1` if the API responds without a
-`StatusCode` attribute.
-
-If `container` is a dict, the `Id` key is used.
-
-If the timeout value is exceeded, a `requests.exceptions.ReadTimeout`
-exception will be raised.
-
-**Params**:
-
-* container (str or dict): The container to wait on
-* timeout (int): Request timeout
-
-**Returns** (int): The exit code of the container
-
-
-<!---
-TODO:
-
-* load_image
-
--->
-
-****
-
-## Version mismatch
-
-You may encounter an error like this:
-
-```text
-client is newer than server (client API version: 1.24, server API version: 1.23)
-```
-
-To fix this, you have to either supply the exact version your server supports
-when instantiating the `Client`:
-
-```python
-client = docker.Client(version="1.23")
-```
-
-or let the client automatically detect the newest version server supports:
-
-```python
-client = docker.Client(version="auto")
-```
diff --git a/docs/api.rst b/docs/api.rst
new file mode 100644
index 0000000..5e59aa7
--- /dev/null
+++ b/docs/api.rst
@@ -0,0 +1,116 @@
+Low-level API
+=============
+
+The main object-oriented API is built on top of :py:class:`APIClient`. Each method on :py:class:`APIClient` maps one-to-one with a REST API endpoint, and returns the response from that endpoint.
+
+It's possible to use :py:class:`APIClient` directly. Some basic things (e.g. running a container) consist of several API calls and are complex to do with the low-level API, but it's useful if you need extra flexibility and power.
+
+.. py:module:: docker.api
+
+.. autoclass:: docker.api.client.APIClient
+
+Containers
+----------
+
+.. py:module:: docker.api.container
+
+.. rst-class:: hide-signature
+.. autoclass:: ContainerApiMixin
+ :members:
+ :undoc-members:
+
+.. py:module:: docker.api.image
+
+Images
+------
+
+.. py:module:: docker.api.image
+
+.. rst-class:: hide-signature
+.. autoclass:: ImageApiMixin
+ :members:
+ :undoc-members:
+
+Building images
+---------------
+
+.. py:module:: docker.api.build
+
+.. rst-class:: hide-signature
+.. autoclass:: BuildApiMixin
+ :members:
+ :undoc-members:
+
+Networks
+--------
+
+.. rst-class:: hide-signature
+.. autoclass:: docker.api.network.NetworkApiMixin
+ :members:
+ :undoc-members:
+
+Volumes
+-------
+
+.. py:module:: docker.api.volume
+
+.. rst-class:: hide-signature
+.. autoclass:: VolumeApiMixin
+ :members:
+ :undoc-members:
+
+Executing commands in containers
+--------------------------------
+
+.. py:module:: docker.api.exec_api
+
+.. rst-class:: hide-signature
+.. autoclass:: ExecApiMixin
+ :members:
+ :undoc-members:
+
+Swarms
+------
+
+.. py:module:: docker.api.swarm
+
+.. rst-class:: hide-signature
+.. autoclass:: SwarmApiMixin
+ :members:
+ :undoc-members:
+
+Services
+--------
+
+.. py:module:: docker.api.service
+
+.. rst-class:: hide-signature
+.. autoclass:: ServiceApiMixin
+ :members:
+ :undoc-members:
+
+The Docker daemon
+-----------------
+
+.. py:module:: docker.api.daemon
+
+.. rst-class:: hide-signature
+.. autoclass:: DaemonApiMixin
+ :members:
+ :undoc-members:
+
+Configuration types
+-------------------
+
+.. py:module:: docker.types
+
+.. autoclass:: IPAMConfig
+.. autoclass:: IPAMPool
+.. autoclass:: ContainerSpec
+.. autoclass:: DriverConfig
+.. autoclass:: EndpointSpec
+.. autoclass:: Mount
+.. autoclass:: Resources
+.. autoclass:: RestartPolicy
+.. autoclass:: TaskTemplate
+.. autoclass:: UpdateConfig
diff --git a/docs/change_log.md b/docs/change-log.md
index e32df1e..a7bb0b0 100644
--- a/docs/change_log.md
+++ b/docs/change-log.md
@@ -1,4 +1,4 @@
-Change Log
+Change log
==========
1.10.3
diff --git a/docs/client.rst b/docs/client.rst
new file mode 100644
index 0000000..63bce2c
--- /dev/null
+++ b/docs/client.rst
@@ -0,0 +1,30 @@
+Client
+======
+.. py:module:: docker.client
+
+
+Creating a client
+-----------------
+
+To communicate with the Docker daemon, you first need to instantiate a client. The easiest way to do that is by calling the function :py:func:`~docker.client.from_env`. It can also be configured manually by instantiating a :py:class:`~docker.client.DockerClient` class.
+
+.. autofunction:: from_env()
+
+Client reference
+----------------
+
+.. autoclass:: DockerClient()
+
+ .. autoattribute:: containers
+ .. autoattribute:: images
+ .. autoattribute:: networks
+ .. autoattribute:: nodes
+ .. autoattribute:: services
+ .. autoattribute:: swarm
+ .. autoattribute:: volumes
+
+ .. automethod:: events()
+ .. automethod:: info()
+ .. automethod:: login()
+ .. automethod:: ping()
+ .. automethod:: version()
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..4901279
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,365 @@
+# -*- coding: utf-8 -*-
+#
+# docker-sdk-python documentation build configuration file, created by
+# sphinx-quickstart on Wed Sep 14 15:48:58 2016.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import datetime
+import os
+import sys
+sys.path.insert(0, os.path.abspath('..'))
+
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.napoleon',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+
+from recommonmark.parser import CommonMarkParser
+
+source_parsers = {
+ '.md': CommonMarkParser,
+}
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+source_suffix = ['.rst', '.md']
+# source_suffix = '.md'
+
+# The encoding of source files.
+#
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Docker SDK for Python'
+year = datetime.datetime.now().year
+copyright = u'%d Docker Inc' % year
+author = u'Docker Inc'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = u'2.0'
+# The full version, including alpha/beta/rc tags.
+release = u'2.0'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#
+# today = ''
+#
+# Else, today_fmt is used as the format for a strftime call.
+#
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This patterns also effect to html_static_path and html_extra_path
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#
+add_module_names = False
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+html_theme_options = {
+ 'description': 'A Python library for the Docker Remote API',
+ 'fixed_sidebar': True,
+}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents.
+# "<project> v<release> documentation" by default.
+#
+# html_title = u'docker-sdk-python v2.0'
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#
+# html_logo = None
+
+# The name of an image file (relative to this directory) to use as a favicon of
+# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#
+# html_extra_path = []
+
+# If not None, a 'Last updated on:' timestamp is inserted at every page
+# bottom, using the given strftime format.
+# The empty string is equivalent to '%b %d, %Y'.
+#
+# html_last_updated_fmt = None
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#
+html_sidebars = {
+ '**': [
+ 'about.html',
+ 'navigation.html',
+ 'searchbox.html',
+ ]
+}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+#
+# html_domain_indices = True
+
+# If false, no index is generated.
+#
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
+#
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# 'ja' uses this config value.
+# 'zh' user can custom change `jieba` dictionary path.
+#
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'docker-sdk-pythondoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'docker-sdk-python.tex', u'docker-sdk-python Documentation',
+ u'Docker Inc.', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+#
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#
+# latex_appendices = []
+
+# If false, will not define \strong, \code, \titleref, \crossref ... but only
+# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
+# packages.
+#
+# latex_keep_old_macro_names = True
+
+# If false, no module index is generated.
+#
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'docker-sdk-python', u'docker-sdk-python Documentation',
+ [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'docker-sdk-python', u'docker-sdk-python Documentation',
+ author, 'docker-sdk-python', 'A Python library for the Docker Remote API.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+#
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#
+# texinfo_no_detailmenu = False
+
+
+# Napoleon settings
+napoleon_google_docstring = True
+napoleon_numpy_docstring = False
diff --git a/docs/containers.rst b/docs/containers.rst
new file mode 100644
index 0000000..eb51ae4
--- /dev/null
+++ b/docs/containers.rst
@@ -0,0 +1,51 @@
+Containers
+==========
+
+.. py:module:: docker.models.containers
+
+Run and manage containers on the server.
+
+Methods available on ``client.containers``:
+
+.. rst-class:: hide-signature
+.. autoclass:: ContainerCollection
+
+ .. automethod:: run(image, command=None, **kwargs)
+ .. automethod:: create(image, command=None, **kwargs)
+ .. automethod:: get(id_or_name)
+ .. automethod:: list(**kwargs)
+
+Container objects
+-----------------
+
+.. autoclass:: Container()
+
+ .. autoattribute:: id
+ .. autoattribute:: short_id
+ .. autoattribute:: name
+ .. autoattribute:: status
+ .. py:attribute:: attrs
+
+ The raw representation of this object from the server.
+
+ .. automethod:: attach
+ .. automethod:: attach_socket
+ .. automethod:: commit
+ .. automethod:: diff
+ .. automethod:: exec_run
+ .. automethod:: export
+ .. automethod:: get_archive
+ .. automethod:: kill
+ .. automethod:: logs
+ .. automethod:: pause
+ .. automethod:: put_archive
+ .. automethod:: remove
+ .. automethod:: rename
+ .. automethod:: resize
+ .. automethod:: restart
+ .. automethod:: start
+ .. automethod:: stats
+ .. automethod:: stop
+ .. automethod:: top
+ .. automethod:: unpause
+ .. automethod:: update
diff --git a/docs/contributing.md b/docs/contributing.md
deleted file mode 100644
index e776458..0000000
--- a/docs/contributing.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Contributing
-See the [Docker contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
-The following is specific to docker-py.
-
-## Running the tests & Code Quality
-
-
-To get the source source code and run the unit tests, run:
-```
-$ git clone git://github.com/docker/docker-py.git
-$ cd docker-py
-$ pip install tox
-$ tox
-```
-
-## Building the docs
-Docs are built with [MkDocs](http://www.mkdocs.org/). For development, you can
-run the following in the project directory:
-```
-$ pip install -r docs-requirements.txt
-$ mkdocs serve
-```
-
-## Release Checklist
-
-Before a new release, please go through the following checklist:
-
-* Bump version in docker/version.py
-* Add a release note in docs/change_log.md
-* Git tag the version
-* Upload to pypi
-
-## Vulnerability Reporting
-For any security issues, please do NOT file an issue or pull request on github!
-Please contact [security@docker.com](mailto:security@docker.com) or read [the
-Docker security page](https://www.docker.com/resources/security/).
diff --git a/docs/host-devices.md b/docs/host-devices.md
deleted file mode 100644
index 150a686..0000000
--- a/docs/host-devices.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Access to devices on the host
-
-If you need to directly expose some host devices to a container, you can use
-the devices parameter in the `host_config` param in `Client.create_container`
-as shown below:
-
-```python
-cli.create_container(
- 'busybox', 'true', host_config=cli.create_host_config(devices=[
- '/dev/sda:/dev/xvda:rwm'
- ])
-)
-```
-
-Each string is a single mapping using the following format:
-`<path_on_host>:<path_in_container>:<cgroup_permissions>`
-The above example allows the container to have read-write access to
-the host's `/dev/sda` via a node named `/dev/xvda` inside the container.
-
-As a more verbose alternative, each host device definition can be specified as
-a dictionary with the following keys:
-
-```python
-{
- 'PathOnHost': '/dev/sda1',
- 'PathInContainer': '/dev/xvda',
- 'CgroupPermissions': 'rwm'
-}
-```
diff --git a/docs/hostconfig.md b/docs/hostconfig.md
deleted file mode 100644
index 008d5cf..0000000
--- a/docs/hostconfig.md
+++ /dev/null
@@ -1,141 +0,0 @@
-# HostConfig object
-
-The Docker Remote API introduced [support for HostConfig in version 1.15](http://docs.docker.com/reference/api/docker_remote_api_v1.15/#create-a-container).
-This object contains all the parameters you could previously pass to `Client.start`.
-*It is highly recommended that users pass the HostConfig in the `host_config`*
-*param of `Client.create_container` instead of `Client.start`*
-
-## HostConfig helper
-
-### Client.create_host_config
-
-Creates a HostConfig dictionary to be used with `Client.create_container`.
-
-`binds` allows to bind a directory in the host to the container. See [Using
-volumes](volumes.md) for more information.
-
-`port_bindings` exposes container ports to the host.
-See [Port bindings](port-bindings.md) for more information.
-
-`lxc_conf` allows to pass LXC configuration options using a dictionary.
-
-`privileged` starts the container in privileged mode.
-
-[Links](http://docs.docker.io/en/latest/use/working_with_links_names/) can be
-specified with the `links` argument. They can either be specified as a
-dictionary mapping name to alias or as a list of `(name, alias)` tuples.
-
-`dns` and `volumes_from` are only available if they are used with version v1.10
-of docker remote API. Otherwise they are ignored.
-
-`network_mode` is available since v1.11 and sets the Network mode for the
-container ('bridge': creates a new network stack for the container on the
-Docker bridge, 'none': no networking for this container, 'container:[name|id]':
-reuses another container network stack, 'host': use the host network stack
-inside the container or any name that identifies an existing Docker network).
-
-`restart_policy` is available since v1.2.0 and sets the container's *RestartPolicy*
-which defines the conditions under which a container should be restarted upon exit.
-If no *RestartPolicy* is defined, the container will not be restarted when it exits.
-The *RestartPolicy* is specified as a dict. For example, if the container
-should always be restarted:
-```python
-{
- "MaximumRetryCount": 0,
- "Name": "always"
-}
-```
-
-It is possible to restart the container only on failure as well as limit the number
-of restarts. For example:
-```python
-{
- "MaximumRetryCount": 5,
- "Name": "on-failure"
-}
-```
-
-`cap_add` and `cap_drop` are available since v1.2.0 and can be used to add or
-drop certain capabilities. The user may specify the capabilities as an array
-for example:
-```python
-[
- "SYS_ADMIN",
- "MKNOD"
-]
-```
-
-
-**Params**
-
-* binds: Volumes to bind. See [Using volumes](volumes.md) for more information.
-* port_bindings (dict): Port bindings. See [Port bindings](port-bindings.md)
- for more information.
-* lxc_conf (dict): LXC config
-* oom_kill_disable (bool): Whether to disable OOM killer
-* oom_score_adj (int): An integer value containing the score given to the
- container in order to tune OOM killer preferences
-* publish_all_ports (bool): Whether to publish all ports to the host
-* links (dict or list of tuples): either as a dictionary mapping name to alias
- or as a list of `(name, alias)` tuples
-* privileged (bool): Give extended privileges to this container
-* dns (list): Set custom DNS servers
-* dns_search (list): DNS search domains
-* volumes_from (str or list): List of container names or Ids to get volumes
- from. Optionally a single string joining container id's with commas
-* network_mode (str): One of `['bridge', 'none', 'container:<name|id>', 'host']`
-* restart_policy (dict): "Name" param must be one of
- `['on-failure', 'always']`
-* cap_add (list of str): Add kernel capabilities
-* cap_drop (list of str): Drop kernel capabilities
-* extra_hosts (dict): custom host-to-IP mappings (host:ip)
-* read_only (bool): mount the container's root filesystem as read only
-* pid_mode (str): if set to "host", use the host PID namespace inside the
- container
-* ipc_mode (str): Set the IPC mode for the container
-* security_opt (list): A list of string values to customize labels for MLS
- systems, such as SELinux.
-* ulimits (list): A list of dicts or `docker.utils.Ulimit` objects. A list
- of ulimits to be set in the container.
-* log_config (`docker.utils.LogConfig` or dict): Logging configuration to
- container
-* mem_limit (str or int): Maximum amount of memory container is allowed to
- consume. (e.g. `'1G'`)
-* memswap_limit (str or int): Maximum amount of memory + swap a container is
- allowed to consume.
-* mem_swappiness (int): Tune a container's memory swappiness behavior.
- Accepts number between 0 and 100.
-* shm_size (str or int): Size of /dev/shm. (e.g. `'1G'`)
-* cpu_group (int): The length of a CPU period in microseconds.
-* cpu_period (int): Microseconds of CPU time that the container can get in a
- CPU period.
-* cpu_shares (int): CPU shares (relative weight)
-* cpuset_cpus (str): CPUs in which to allow execution (0-3, 0,1)
-* blkio_weight: Block IO weight (relative weight), accepts a weight value
- between 10 and 1000.
-* blkio_weight_device: Block IO weight (relative device weight) in the form of:
- `[{"Path": "device_path", "Weight": weight}]`
-* device_read_bps: Limit read rate (bytes per second) from a device in the
- form of: `[{"Path": "device_path", "Rate": rate}]`
-* device_write_bps: Limit write rate (bytes per second) from a device.
-* device_read_iops: Limit read rate (IO per second) from a device.
-* device_write_iops: Limit write rate (IO per second) from a device.
-* group_add (list): List of additional group names and/or IDs that the
- container process will run as.
-* devices (list): Host device bindings. See [host devices](host-devices.md)
- for more information.
-* tmpfs: Temporary filesystems to mount. See [Using tmpfs](tmpfs.md) for more
- information.
-* sysctls (dict): Kernel parameters to set in the container.
-* userns_mode (str): Sets the user namespace mode for the container when user
- namespace remapping option is enabled. Supported values are: `host`
-* pids_limit (int): Tune a container’s pids limit. Set -1 for unlimited.
-
-**Returns** (dict) HostConfig dictionary
-
-```python
->>> from docker import Client
->>> cli = Client()
->>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'], volumes_from=['nostalgic_newton'])
-{'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True, 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
-```
diff --git a/docs/images.rst b/docs/images.rst
new file mode 100644
index 0000000..7572c2d
--- /dev/null
+++ b/docs/images.rst
@@ -0,0 +1,40 @@
+Images
+======
+
+.. py:module:: docker.models.images
+
+Manage images on the server.
+
+Methods available on ``client.images``:
+
+.. rst-class:: hide-signature
+.. py:class:: ImageCollection
+
+ .. automethod:: build
+ .. automethod:: get
+ .. automethod:: list(**kwargs)
+ .. automethod:: load
+ .. automethod:: pull
+ .. automethod:: push
+ .. automethod:: remove
+ .. automethod:: search
+
+
+Image objects
+-------------
+
+.. autoclass:: Image()
+
+ .. autoattribute:: id
+ .. autoattribute:: short_id
+ .. autoattribute:: tags
+ .. py:attribute:: attrs
+
+ The raw representation of this object from the server.
+
+
+ .. automethod:: history
+ .. automethod:: reload
+ .. automethod:: save
+ .. automethod:: tag
diff --git a/docs/index.md b/docs/index.md
deleted file mode 100644
index 5b851f0..0000000
--- a/docs/index.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# docker-py documentation
-
-An API client for docker written in Python
-
-## Installation
-
-Our latest stable is always available on PyPi.
-
- pip install docker-py
-
-## Documentation
-Full documentation is available in the `/docs/` directory.
-
-## License
-Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..7eadf4c
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,93 @@
+Docker SDK for Python
+=====================
+
+A Python library for the Docker Remote API. It lets you do anything the ``docker`` command does, but from within Python apps – run containers, manage them, manage Swarms, etc.
+
+For more information about the Remote API, `see its documentation <https://docs.docker.com/engine/reference/api/docker_remote_api/>`_.
+
+Installation
+------------
+
+The latest stable version `is available on PyPI <https://pypi.python.org/pypi/docker/>`_. Either add ``docker`` to your ``requirements.txt`` file or install with pip::
+
+ pip install docker
+
+Getting started
+---------------
+
+To talk to a Docker daemon, you first need to instantiate a client. You can use :py:func:`~docker.client.from_env` to connect using the default socket or the configuration in your environment:
+
+.. code-block:: python
+
+ import docker
+ client = docker.from_env()
+
+You can now run containers:
+
+.. code-block:: python
+
+ >>> client.containers.run("ubuntu", "echo hello world")
+ 'hello world\n'
+
+You can run containers in the background:
+
+.. code-block:: python
+
+ >>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+ <Container '45e6d2de7c54'>
+
+You can manage containers:
+
+.. code-block:: python
+
+ >>> client.containers.list()
+ [<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+ >>> container = client.containers.get('45e6d2de7c54')
+
+ >>> container.attrs['Config']['Image']
+ "bfirsh/reticulate-splines"
+
+ >>> container.logs()
+ "Reticulating spline 1...\n"
+
+ >>> container.stop()
+
+You can stream logs:
+
+.. code-block:: python
+
+ >>> for line in container.logs(stream=True):
+ ... print(line.strip())
+ Reticulating spline 2...
+ Reticulating spline 3...
+ ...
+
+You can manage images:
+
+.. code-block:: python
+
+ >>> client.images.pull('nginx')
+ <Image 'nginx'>
+
+ >>> client.images.list()
+ [<Image 'ubuntu'>, <Image 'nginx'>, ...]
+
+That's just a taster of what you can do with the Docker SDK for Python. For more, :doc:`take a look at the reference <client>`.
+
+.. toctree::
+ :hidden:
+ :maxdepth: 2
+
+ Home <index>
+ client
+ containers
+ images
+ networks
+ nodes
+ services
+ swarm
+ volumes
+ api
+ tls
+ change-log
diff --git a/docs/machine.md b/docs/machine.md
deleted file mode 100644
index 6c0bcbb..0000000
--- a/docs/machine.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Using with Docker Toolbox and Machine
-
-In development, Docker recommends using
-[Docker Toolbox](https://www.docker.com/products/docker-toolbox) to set up
-Docker. It includes a tool called Machine which will create a VM running
-Docker Engine and point your shell at it using environment variables.
-
-To configure docker-py with these environment variables
-
-First use Machine to set up the environment variables:
-```bash
-$ eval "$(docker-machine env)"
-```
-
-You can then use docker-py like this:
-```python
-import docker
-client = docker.from_env(assert_hostname=False)
-print client.version()
-```
-
-**Note:** This snippet is disabling TLS hostname checking with
-`assert\_hostname=False`. Machine provides us with the exact certificate
-the server is using so this is safe. If you are not using Machine and verifying
-the host against a certificate authority, you'll want to enable hostname
-verification.
diff --git a/docs/networks.md b/docs/networks.md
deleted file mode 100644
index fb0e9f4..0000000
--- a/docs/networks.md
+++ /dev/null
@@ -1,177 +0,0 @@
-# Using Networks
-
-## Network creation
-
-With the release of Docker 1.9 you can now manage custom networks.
-
-
-Here you can see how to create a network named `network1` using
-the `bridge` driver
-
-```python
-docker_client.create_network("network1", driver="bridge")
-```
-
-You can also create more advanced networks with custom IPAM configurations.
-For example, setting the subnet to `192.168.52.0/24` and gateway address
-to `192.168.52.254`
-
-```python
-ipam_pool = docker.utils.create_ipam_pool(
- subnet='192.168.52.0/24',
- gateway='192.168.52.254'
-)
-ipam_config = docker.utils.create_ipam_config(
- pool_configs=[ipam_pool]
-)
-
-docker_client.create_network("network1", driver="bridge", ipam=ipam_config)
-```
-
-By default, when you connect a container to an overlay network, Docker also
-connects a bridge network to it to provide external connectivity. If you want
-to create an externally isolated overlay network, with Docker 1.10 you can
-create an internal network.
-
-```python
-
-docker_client.create_network("network1", driver="bridge", internal=True)
-```
-
-## Container network configuration
-
-In order to specify which network a container will be connected to, and
-additional configuration, use the `networking_config` parameter in
-`Client.create_container`. Note that at the time of creation, you can
-only connect a container to a single network. Later on, you may create more
-connections using `Client.connect_container_to_network`.
-
-
-```python
-networking_config = docker_client.create_networking_config({
- 'network1': docker_client.create_endpoint_config(
- ipv4_address='172.28.0.124',
- aliases=['foo', 'bar'],
- links=['container2']
- )
-})
-
-ctnr = docker_client.create_container(
- img, command, networking_config=networking_config
-)
-
-```
-
-## Network API documentation
-
-### Client.create_networking_config
-
-Create a networking config dictionary to be used as the `networking_config`
-parameter in `Client.create_container_config`
-
-**Params**:
-
-* endpoints_config (dict): A dictionary of `network_name -> endpoint_config`
- relationships. Values should be endpoint config dictionaries created by
- `Client.create_endpoint_config`. Defaults to `None` (default config).
-
-**Returns** A networking config dictionary.
-
-```python
-
-docker_client.create_network('network1')
-
-networking_config = docker_client.create_networking_config({
- 'network1': docker_client.create_endpoint_config()
-})
-
-container = docker_client.create_container(
- img, command, networking_config=networking_config
-)
-```
-
-
-### Client.create_endpoint_config
-
-Create an endpoint config dictionary to be used with
-`Client.create_networking_config`.
-
-**Params**:
-
-* aliases (list): A list of aliases for this endpoint. Names in that list can
- be used within the network to reach the container. Defaults to `None`.
-* links (list): A list of links for this endpoint. Containers declared in this
- list will be [linked](https://docs.docker.com/engine/userguide/networking/work-with-networks/#linking-containers-in-user-defined-networks)
- to this container. Defaults to `None`.
-* ipv4_address (str): The IP address of this container on the network,
- using the IPv4 protocol. Defaults to `None`.
-* ipv6_address (str): The IP address of this container on the network,
- using the IPv6 protocol. Defaults to `None`.
-* link_local_ips (list): A list of link-local (IPv4/IPv6) addresses.
-
-**Returns** An endpoint config dictionary.
-
-```python
-endpoint_config = docker_client.create_endpoint_config(
- aliases=['web', 'app'],
- links=['app_db'],
- ipv4_address='132.65.0.123'
-)
-
-docker_client.create_network('network1')
-networking_config = docker_client.create_networking_config({
- 'network1': endpoint_config
-})
-container = docker_client.create_container(
- img, command, networking_config=networking_config
-)
-```
-### docker.utils.create_ipam_config
-
-Create an IPAM (IP Address Management) config dictionary to be used with
-`Client.create_network`.
-
-
-**Params**:
-
-* driver (str): The IPAM driver to use. Defaults to `'default'`.
-* pool_configs (list): A list of pool configuration dictionaries as created
- by `docker.utils.create_ipam_pool`. Defaults to empty list.
-
-**Returns** An IPAM config dictionary
-
-```python
-ipam_config = docker.utils.create_ipam_config(driver='default')
-network = docker_client.create_network('network1', ipam=ipam_config)
-```
-
-### docker.utils.create_ipam_pool
-
-Create an IPAM pool config dictionary to be added to the `pool_configs` param
-in `docker.utils.create_ipam_config`.
-
-**Params**:
-
-* subnet (str): Custom subnet for this IPAM pool using the CIDR notation.
- Defaults to `None`.
-* iprange (str): Custom IP range for endpoints in this IPAM pool using the
- CIDR notation. Defaults to `None`.
-* gateway (str): Custom IP address for the pool's gateway.
-* aux_addresses (dict): A dictionary of `key -> ip_address` relationships
- specifying auxiliary addresses that need to be allocated by the
- IPAM driver.
-
-**Returns** An IPAM pool config dictionary
-
-```python
-ipam_pool = docker.utils.create_ipam_pool(
- subnet='124.42.0.0/16',
- iprange='124.42.0.0/24',
- gateway='124.42.0.254',
- aux_addresses={
- 'reserved1': '124.42.1.1'
- }
-)
-ipam_config = docker.utils.create_ipam_config(pool_configs=[ipam_pool])
-network = docker_client.create_network('network1', ipam=ipam_config)
-```
diff --git a/docs/networks.rst b/docs/networks.rst
new file mode 100644
index 0000000..f6de38b
--- /dev/null
+++ b/docs/networks.rst
@@ -0,0 +1,33 @@
+Networks
+========
+
+.. py:module:: docker.models.networks
+
+Create and manage networks on the server. For more information about networks, `see the Engine documentation <https://docs.docker.com/engine/userguide/networking/>`_.
+
+Methods available on ``client.networks``:
+
+.. rst-class:: hide-signature
+.. py:class:: NetworkCollection
+
+ .. automethod:: create
+ .. automethod:: get
+ .. automethod:: list
+
+Network objects
+---------------
+
+.. autoclass:: Network()
+
+ .. autoattribute:: id
+ .. autoattribute:: short_id
+ .. autoattribute:: name
+ .. autoattribute:: containers
+ .. py:attribute:: attrs
+
+ The raw representation of this object from the server.
+
+ .. automethod:: connect
+ .. automethod:: disconnect
+ .. automethod:: reload
+ .. automethod:: remove
diff --git a/docs/nodes.rst b/docs/nodes.rst
new file mode 100644
index 0000000..8ef1e20
--- /dev/null
+++ b/docs/nodes.rst
@@ -0,0 +1,30 @@
+Nodes
+=====
+
+.. py:module:: docker.models.nodes
+
+Get and list nodes in a swarm. Before you can use these methods, you first need to :doc:`join or initialize a swarm <swarm>`.
+
+Methods available on ``client.nodes``:
+
+.. rst-class:: hide-signature
+.. py:class:: NodeCollection
+
+ .. automethod:: get(id_or_name)
+ .. automethod:: list(**kwargs)
+
+Node objects
+------------
+
+.. autoclass:: Node()
+
+ .. autoattribute:: id
+ .. autoattribute:: short_id
+ .. py:attribute:: attrs
+
+ The raw representation of this object from the server.
+
+ .. autoattribute:: version
+
+ .. automethod:: reload
+ .. automethod:: update
diff --git a/docs/port-bindings.md b/docs/port-bindings.md
deleted file mode 100644
index d31760c..0000000
--- a/docs/port-bindings.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# Port bindings
-Port bindings is done in two parts. Firstly, by providing a list of ports to
-open inside the container in the `Client().create_container()` method.
-Bindings are declared in the `host_config` parameter.
-
-```python
-container_id = cli.create_container(
- 'busybox', 'ls', ports=[1111, 2222],
- host_config=cli.create_host_config(port_bindings={
- 1111: 4567,
- 2222: None
- })
-)
-```
-
-
-You can limit the host address on which the port will be exposed like such:
-
-```python
-cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
-```
-
-Or without host port assignment:
-
-```python
-cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
-```
-
-If you wish to use UDP instead of TCP (default), you need to declare ports
-as such in both the config and host config:
-
-```python
-container_id = cli.create_container(
- 'busybox', 'ls', ports=[(1111, 'udp'), 2222],
- host_config=cli.create_host_config(port_bindings={
- '1111/udp': 4567, 2222: None
- })
-)
-```
-
-To bind multiple host ports to a single container port, use the following syntax:
-
-```python
-cli.create_host_config(port_bindings={
- 1111: [1234, 4567]
-})
-```
-
-You can also bind multiple IPs to a single container port:
-
-```python
-cli.create_host_config(port_bindings={
- 1111: [
- ('192.168.0.100', 1234),
- ('192.168.0.101', 1234)
- ]
-})
-```
diff --git a/docs/services.md b/docs/services.md
deleted file mode 100644
index 69e0649..0000000
--- a/docs/services.md
+++ /dev/null
@@ -1,268 +0,0 @@
-# Swarm services
-
-Starting with Engine version 1.12 (API 1.24), it is possible to manage services
-using the Docker Engine API. Note that the engine needs to be part of a
-[Swarm cluster](swarm.md) before you can use the service-related methods.
-
-## Creating a service
-
-The `Client.create_service` method lets you create a new service inside the
-cluster. The method takes several arguments, `task_template` being mandatory.
-This dictionary of values is most easily produced by instantiating a
-`TaskTemplate` object.
-
-```python
-container_spec = docker.types.ContainerSpec(
- image='busybox', command=['echo', 'hello']
-)
-task_tmpl = docker.types.TaskTemplate(container_spec)
-service_id = client.create_service(task_tmpl, name=name)
-```
-
-## Listing services
-
-List all existing services using the `Client.services` method.
-
-```python
-client.services(filters={'name': 'mysql'})
-```
-
-## Retrieving service configuration
-
-To retrieve detailed information and configuration for a specific service, you
-may use the `Client.inspect_service` method using the service's ID or name.
-
-```python
-client.inspect_service(service='my_service_name')
-```
-
-## Updating service configuration
-
-The `Client.update_service` method lets you update a service's configuration.
-The mandatory `version` argument (used to prevent concurrent writes) can be
-retrieved using `Client.inspect_service`.
-
-```python
-container_spec = docker.types.ContainerSpec(
- image='busybox', command=['echo', 'hello world']
-)
-task_tmpl = docker.types.TaskTemplate(container_spec)
-
-svc_version = client.inspect_service(svc_id)['Version']['Index']
-
-client.update_service(
- svc_id, svc_version, name='new_name', task_template=task_tmpl
-)
-```
-
-## Removing a service
-
-A service may be removed simply using the `Client.remove_service` method.
-Either the service name or service ID can be used as argument.
-
-```python
-client.remove_service('my_service_name')
-```
-
-## Service API documentation
-
-### Client.create_service
-
-Create a service.
-
-**Params:**
-
-* task_template (dict): Specification of the task to start as part of the new
- service. See the [TaskTemplate class](#TaskTemplate) for details.
-* name (string): User-defined name for the service. Optional.
-* labels (dict): A map of labels to associate with the service. Optional.
-* mode (string): Scheduling mode for the service (`replicated` or `global`).
- Defaults to `replicated`.
-* update_config (dict): Specification for the update strategy of the service.
- See the [UpdateConfig class](#UpdateConfig) for details. Default: `None`.
-* networks (list): List of network names or IDs to attach the service to.
- Default: `None`.
-* endpoint_spec (dict): Properties that can be configured to access and load
- balance a service. Default: `None`.
-
-**Returns:** A dictionary containing an `ID` key for the newly created service.
-
-### Client.inspect_service
-
-Return information on a service.
-
-**Params:**
-
-* service (string): A service identifier (either its name or service ID)
-
-**Returns:** A dictionary containing data about the service. Raises an
-  `APIError` if it fails.
-
-### Client.remove_service
-
-Stop and remove a service.
-
-**Params:**
-
-* service (string): A service identifier (either its name or service ID)
-
-**Returns:** `True` if successful. Raises an `APIError` otherwise.
-
-### Client.services
-
-List services.
-
-**Params:**
-
-* filters (dict): Filters to process on the nodes list. Valid filters:
- `id` and `name`. Default: `None`.
-
-**Returns:** A list of dictionaries containing data about each service.
-
-### Client.update_service
-
-Update a service.
-
-**Params:**
-
-* service (string): A service identifier (either its name or service ID).
-* version (int): The version number of the service object being updated. This
- is required to avoid conflicting writes.
-* task_template (dict): Specification of the updated task to start as part of
- the service. See the [TaskTemplate class](#TaskTemplate) for details.
-* name (string): New name for the service. Optional.
-* labels (dict): A map of labels to associate with the service. Optional.
-* mode (string): Scheduling mode for the service (`replicated` or `global`).
- Defaults to `replicated`.
-* update_config (dict): Specification for the update strategy of the service.
- See the [UpdateConfig class](#UpdateConfig) for details. Default: `None`.
-* networks (list): List of network names or IDs to attach the service to.
- Default: `None`.
-* endpoint_spec (dict): Properties that can be configured to access and load
- balance a service. Default: `None`.
-
-**Returns:** `True` if successful. Raises an `APIError` otherwise.
-
-### Configuration objects (`docker.types`)
-
-#### ContainerSpec
-
-A `ContainerSpec` object describes the behavior of containers that are part
-of a task, and is used when declaring a `TaskTemplate`.
-
-**Params:**
-
-* image (string): The image name to use for the container.
-* command (string or list): The command to be run in the image.
-* args (list): Arguments to the command.
-* env (dict): Environment variables.
-* dir (string): The working directory for commands to run in.
-* user (string): The user inside the container.
-* labels (dict): A map of labels to associate with the service.
-* mounts (list): A list of specifications for mounts to be added to containers
- created as part of the service. See the [Mount class](#Mount) for details.
-* stop_grace_period (int): Amount of time to wait for the container to
- terminate before forcefully killing it.
-
-#### DriverConfig
-
-A `DriverConfig` object indicates which driver to use, as well as its
-configuration. It can be used for the `log_driver` in a `ContainerSpec`,
-and for the `driver_config` in a volume `Mount`.
-
-**Params:**
-
-* name (string): Name of the logging driver to use.
-* options (dict): Driver-specific options. Default: `None`.
-
-#### EndpointSpec
-
-An `EndpointSpec` object describes properties to access and load-balance a
-service.
-
-**Params:**
-
-* mode (string): The mode of resolution to use for internal load balancing
- between tasks (`'vip'` or `'dnsrr'`). Defaults to `'vip'` if not provided.
-* ports (dict): Exposed ports that this service is accessible on from the
- outside, in the form of `{ target_port: published_port }` or
- `{ target_port: (published_port, protocol) }`. Ports can only be provided if
- the `vip` resolution mode is used.
-
-#### Mount
-
-A `Mount` object describes a mounted folder's configuration inside a
-container. A list of `Mount`s would be used as part of a `ContainerSpec`.
-
-* target (string): Container path.
-* source (string): Mount source (e.g. a volume name or a host path).
-* type (string): The mount type (`bind` or `volume`). Default: `volume`.
-* read_only (bool): Whether the mount should be read-only.
-* propagation (string): A propagation mode with the value `[r]private`,
- `[r]shared`, or `[r]slave`. Only valid for the `bind` type.
-* no_copy (bool): False if the volume should be populated with the data from
- the target. Default: `False`. Only valid for the `volume` type.
-* labels (dict): User-defined name and labels for the volume. Only valid for
- the `volume` type.
-* driver_config (dict): Volume driver configuration.
- See the [DriverConfig class](#DriverConfig) for details. Only valid for the
- `volume` type.
-
-#### Resources
-
-A `Resources` object configures resource allocation for containers when
-made part of a `ContainerSpec`.
-
-**Params:**
-
-* cpu_limit (int): CPU limit in units of 10^9 CPU shares.
-* mem_limit (int): Memory limit in Bytes.
-* cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
-* mem_reservation (int): Memory reservation in Bytes.
-
-#### RestartPolicy
-
-A `RestartPolicy` object is used when creating a `ContainerSpec`. It dictates
-whether a container should restart after stopping or failing.
-
-* condition (string): Condition for restart (`none`, `on-failure`, or `any`).
- Default: `none`.
-* delay (int): Delay between restart attempts. Default: 0
-* attempts (int): Maximum attempts to restart a given container before giving
- up. Default value is 0, which is ignored.
-* window (int): Time window used to evaluate the restart policy. Default value
- is 0, which is unbounded.
-
-
-#### TaskTemplate
-
-A `TaskTemplate` object can be used to describe the task specification to be
-used when creating or updating a service.
-
-**Params:**
-
-* container_spec (dict): Container settings for containers started as part of
- this task. See the [ContainerSpec class](#ContainerSpec) for details.
-* log_driver (dict): Log configuration for containers created as part of the
- service. See the [DriverConfig class](#DriverConfig) for details.
-* resources (dict): Resource requirements which apply to each individual
- container created as part of the service. See the
- [Resources class](#Resources) for details.
-* restart_policy (dict): Specification for the restart policy which applies
- to containers created as part of this service. See the
- [RestartPolicy class](#RestartPolicy) for details.
-* placement (list): A list of constraints.
-
-
-#### UpdateConfig
-
-An `UpdateConfig` object can be used to specify the way container updates
-should be performed by a service.
-
-**Params:**
-
-* parallelism (int): Maximum number of tasks to be updated in one iteration
- (0 means unlimited parallelism). Default: 0.
-* delay (int): Amount of time between updates.
-* failure_action (string): Action to take if an updated task fails to run, or
- stops running during the update. Acceptable values are `continue` and
- `pause`. Default: `continue`
diff --git a/docs/services.rst b/docs/services.rst
new file mode 100644
index 0000000..d8e5285
--- /dev/null
+++ b/docs/services.rst
@@ -0,0 +1,36 @@
+Services
+========
+
+.. py:module:: docker.models.services
+
+Manage services on a swarm. For more information about services, `see the Engine documentation <https://docs.docker.com/engine/swarm/services/>`_.
+
+Before you can use any of these methods, you first need to :doc:`join or initialize a swarm <swarm>`.
+
+Methods available on ``client.services``:
+
+.. rst-class:: hide-signature
+.. py:class:: ServiceCollection
+
+ .. automethod:: create
+ .. automethod:: get
+ .. automethod:: list
+
+Service objects
+---------------
+
+.. autoclass:: Service()
+
+ .. autoattribute:: id
+ .. autoattribute:: short_id
+ .. autoattribute:: name
+ .. autoattribute:: version
+ .. py:attribute:: attrs
+
+ The raw representation of this object from the server.
+
+
+ .. automethod:: reload
+ .. automethod:: remove
+ .. automethod:: tasks
+ .. automethod:: update
diff --git a/docs/swarm.md b/docs/swarm.md
deleted file mode 100644
index 20c3945..0000000
--- a/docs/swarm.md
+++ /dev/null
@@ -1,274 +0,0 @@
-# Swarm management
-
-Starting with Engine version 1.12 (API 1.24), it is possible to manage the
-engine's associated Swarm cluster using the API.
-
-## Initializing a new Swarm
-
-You can initialize a new Swarm by calling `Client.init_swarm`. An advertising
-address needs to be provided, usually simply by indicating which network
-interface needs to be used. Advanced options are provided using the
-`swarm_spec` parameter, which can easily be created using
-`Client.create_swarm_spec`.
-
-```python
-spec = client.create_swarm_spec(
- snapshot_interval=5000, log_entries_for_slow_followers=1200
-)
-client.init_swarm(
- advertise_addr='eth0', listen_addr='0.0.0.0:5000', force_new_cluster=False,
- swarm_spec=spec
-)
-```
-
-## Joining an existing Swarm
-
-If you're looking to have the engine your client is connected to join an
-existing Swarm, this can be accomplished by using the `Client.join_swarm`
-method. You will need to provide a list of at least one remote address
-corresponding to other machines already part of the swarm as well as the
-`join_token`. In most cases, a `listen_addr` and `advertise_addr` for your
-node are also required.
-
-```python
-client.join_swarm(
- remote_addrs=['192.168.14.221:2377'], join_token='SWMTKN-1-redacted',
- listen_addr='0.0.0.0:5000', advertise_addr='eth0:5000'
-)
-```
-
-## Leaving the Swarm
-
-To leave the swarm you are currently a member of, simply use
-`Client.leave_swarm`. Note that if your engine is the Swarm's manager,
-you will need to specify `force=True` to be able to leave.
-
-```python
-client.leave_swarm(force=False)
-```
-
-## Retrieving Swarm status
-
-You can retrieve information about your current Swarm status by calling
-`Client.inspect_swarm`. This method takes no arguments.
-
-```python
-client.inspect_swarm()
-```
-
-## Listing Swarm nodes
-
-List all nodes that are part of the current Swarm using `Client.nodes`.
-The `filters` argument allows to filter the results.
-
-```python
-client.nodes(filters={'role': 'manager'})
-```
-
-## Swarm API documentation
-
-### Client.init_swarm
-
-Initialize a new Swarm using the current connected engine as the first node.
-
-**Params:**
-
-* advertise_addr (string): Externally reachable address advertised to other
- nodes. This can either be an address/port combination in the form
- `192.168.1.1:4567`, or an interface followed by a port number, like
- `eth0:4567`. If the port number is omitted, the port number from the listen
- address is used. If `advertise_addr` is not specified, it will be
- automatically detected when possible. Default: None
-* listen_addr (string): Listen address used for inter-manager communication,
- as well as determining the networking interface used for the VXLAN Tunnel
- Endpoint (VTEP). This can either be an address/port combination in the form
- `192.168.1.1:4567`, or an interface followed by a port number, like
- `eth0:4567`. If the port number is omitted, the default swarm listening port
- is used. Default: '0.0.0.0:2377'
-* force_new_cluster (bool): Force creating a new Swarm, even if already part of
- one. Default: False
-* swarm_spec (dict): Configuration settings of the new Swarm. Use
- `Client.create_swarm_spec` to generate a valid configuration. Default: None
-
-**Returns:** `True` if the request went through. Raises an `APIError` if it
- fails.
-
-#### Client.create_swarm_spec
-
-Create a `docker.types.SwarmSpec` instance that can be used as the `swarm_spec`
-argument in `Client.init_swarm`.
-
-**Params:**
-
-* task_history_retention_limit (int): Maximum number of tasks history stored.
-* snapshot_interval (int): Number of logs entries between snapshot.
-* keep_old_snapshots (int): Number of snapshots to keep beyond the current
- snapshot.
-* log_entries_for_slow_followers (int): Number of log entries to keep around
- to sync up slow followers after a snapshot is created.
-* heartbeat_tick (int): Amount of ticks (in seconds) between each heartbeat.
-* election_tick (int): Amount of ticks (in seconds) needed without a leader to
- trigger a new election.
-* dispatcher_heartbeat_period (int): The delay for an agent to send a
- heartbeat to the dispatcher.
-* node_cert_expiry (int): Automatic expiry for nodes certificates.
-* external_ca (dict): Configuration for forwarding signing requests to an
- external certificate authority. Use `docker.types.SwarmExternalCA`.
-* name (string): Swarm's name
-
-**Returns:** `docker.types.SwarmSpec` instance.
-
-#### docker.types.SwarmExternalCA
-
-Create a configuration dictionary for the `external_ca` argument in a
-`SwarmSpec`.
-
-**Params:**
-
-* protocol (string): Protocol for communication with the external CA (currently
- only “cfssl” is supported).
-* url (string): URL where certificate signing requests should be sent.
-* options (dict): An object with key/value pairs that are interpreted as
- protocol-specific options for the external CA driver.
-
-### Client.inspect_node
-
-Retrieve low-level information about a Swarm node
-
-**Params:**
-
-* node_id (string): ID of the node to be inspected.
-
-**Returns:** A dictionary containing data about this node. See sample below.
-
-```python
-{u'CreatedAt': u'2016-08-11T23:28:39.695834296Z',
- u'Description': {u'Engine': {u'EngineVersion': u'1.12.0',
- u'Plugins': [{u'Name': u'bridge', u'Type': u'Network'},
- {u'Name': u'host', u'Type': u'Network'},
- {u'Name': u'null', u'Type': u'Network'},
- {u'Name': u'overlay', u'Type': u'Network'},
- {u'Name': u'local', u'Type': u'Volume'}]},
- u'Hostname': u'dockerserv-1.local.net',
- u'Platform': {u'Architecture': u'x86_64', u'OS': u'linux'},
- u'Resources': {u'MemoryBytes': 8052109312, u'NanoCPUs': 4000000000}},
- u'ID': u'1kqami616p23dz4hd7km35w63',
- u'ManagerStatus': {u'Addr': u'10.0.131.127:2377',
- u'Leader': True,
- u'Reachability': u'reachable'},
- u'Spec': {u'Availability': u'active', u'Role': u'manager'},
- u'Status': {u'State': u'ready'},
- u'UpdatedAt': u'2016-08-11T23:28:39.979829529Z',
- u'Version': {u'Index': 9}}
-```
-
-### Client.inspect_swarm
-
-Retrieve information about the current Swarm.
-
-**Returns:** A dictionary containing information about the Swarm. See sample
- below.
-
-```python
-{u'CreatedAt': u'2016-08-04T21:26:18.779800579Z',
- u'ID': u'8hk6e9wh4iq214qtbgvbp84a9',
- u'JoinTokens': {u'Manager': u'SWMTKN-1-redacted-1',
- u'Worker': u'SWMTKN-1-redacted-2'},
- u'Spec': {u'CAConfig': {u'NodeCertExpiry': 7776000000000000},
- u'Dispatcher': {u'HeartbeatPeriod': 5000000000},
- u'Name': u'default',
- u'Orchestration': {u'TaskHistoryRetentionLimit': 10},
- u'Raft': {u'ElectionTick': 3,
- u'HeartbeatTick': 1,
- u'LogEntriesForSlowFollowers': 500,
- u'SnapshotInterval': 10000},
- u'TaskDefaults': {}},
- u'UpdatedAt': u'2016-08-04T21:26:19.391623265Z',
- u'Version': {u'Index': 11}}
-```
-
-### Client.join_swarm
-
-Join an existing Swarm.
-
-**Params:**
-
-* remote_addrs (list): Addresses of one or more manager nodes already
- participating in the Swarm to join.
-* join_token (string): Secret token for joining this Swarm.
-* listen_addr (string): Listen address used for inter-manager communication
- if the node gets promoted to manager, as well as determining the networking
- interface used for the VXLAN Tunnel Endpoint (VTEP). Default: `None`
-* advertise_addr (string): Externally reachable address advertised to other
- nodes. This can either be an address/port combination in the form
- `192.168.1.1:4567`, or an interface followed by a port number, like
- `eth0:4567`. If the port number is omitted, the port number from the listen
- address is used. If AdvertiseAddr is not specified, it will be automatically
- detected when possible. Default: `None`
-
-**Returns:** `True` if the request went through. Raises an `APIError` if it
- fails.
-
-### Client.leave_swarm
-
-Leave a Swarm.
-
-**Params:**
-
-* force (bool): Leave the Swarm even if this node is a manager.
- Default: `False`
-
-**Returns:** `True` if the request went through. Raises an `APIError` if it
- fails.
-
-### Client.nodes
-
-List Swarm nodes
-
-**Params:**
-
-* filters (dict): Filters to process on the nodes list. Valid filters:
- `id`, `name`, `membership` and `role`. Default: `None`
-
-**Returns:** A list of dictionaries containing data about each swarm node.
-
-### Client.update_node
-
-Update the Node's configuration
-
-**Params:**
-
-* version (int): The version number of the node object being updated. This
- is required to avoid conflicting writes.
-* node_spec (dict): Configuration settings to update. Any values not provided
- will be removed. See the official [Docker API documentation](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/update-a-node) for more details.
- Default: `None`.
-
-**Returns:** `True` if the request went through. Raises an `APIError` if it
- fails.
-
-```python
-node_spec = {'Availability': 'active',
- 'Name': 'node-name',
- 'Role': 'manager',
- 'Labels': {'foo': 'bar'}
- }
-client.update_node(node_id='24ifsmvkjbyhk', version=8, node_spec=node_spec)
-```
-
-### Client.update_swarm
-
-Update the Swarm's configuration
-
-**Params:**
-
-* version (int): The version number of the swarm object being updated. This
- is required to avoid conflicting writes.
-* swarm_spec (dict): Configuration settings to update. Use
- `Client.create_swarm_spec` to generate a valid configuration.
- Default: `None`.
-* rotate_worker_token (bool): Rotate the worker join token. Default: `False`.
-* rotate_manager_token (bool): Rotate the manager join token. Default: `False`.
-
-**Returns:** `True` if the request went through. Raises an `APIError` if it
- fails.
diff --git a/docs/swarm.rst b/docs/swarm.rst
new file mode 100644
index 0000000..0c21bae
--- /dev/null
+++ b/docs/swarm.rst
@@ -0,0 +1,24 @@
+Swarm
+=====
+
+.. py:module:: docker.models.swarm
+
+Manage `Docker Engine's swarm mode <https://docs.docker.com/engine/swarm/>`_.
+
+To use any swarm methods, you first need to make the Engine part of a swarm. This can be done by either initializing a new swarm with :py:meth:`~Swarm.init`, or joining an existing swarm with :py:meth:`~Swarm.join`.
+
+These methods are available on ``client.swarm``:
+
+.. rst-class:: hide-signature
+.. py:class:: Swarm
+
+ .. automethod:: init()
+ .. automethod:: join()
+ .. automethod:: leave()
+ .. automethod:: update()
+ .. automethod:: reload()
+
+ .. autoattribute:: version
+ .. py:attribute:: attrs
+
+ The raw representation of this object from the server.
diff --git a/docs/tls.md b/docs/tls.md
deleted file mode 100644
index 147e674..0000000
--- a/docs/tls.md
+++ /dev/null
@@ -1,86 +0,0 @@
-## Connection to daemon using HTTPS
-
-**Note:** *These instructions are docker-py specific. Please refer to
-[http://docs.docker.com/articles/https/](http://docs.docker.com/articles/https/)
-first.*
-
-## TLSConfig
-
-**Params**:
-
-* client_cert (tuple of str): Path to client cert, path to client key
-* ca_cert (str): Path to CA cert file
-* verify (bool or str): This can be `False` or a path to a CA Cert file
-* ssl_version (int): A valid [SSL version](
-https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1)
-* assert_hostname (bool): Verify hostname of docker daemon
-
-### configure_client
-
-**Params**:
-
-* client: ([Client](api.md#client-api)): A client to apply this config to
-
-
-## Authenticate server based on public/default CA pool
-
-```python
-client = docker.Client(base_url='<https_url>', tls=True)
-```
-
-Equivalent CLI options:
-```bash
-docker --tls ...
-```
-
-If you want to use TLS but don't want to verify the server certificate
-(for example when testing with a self-signed certificate):
-
-```python
-tls_config = docker.tls.TLSConfig(verify=False)
-client = docker.Client(base_url='<https_url>', tls=tls_config)
-```
-
-## Authenticate server based on given CA
-
-```python
-tls_config = docker.tls.TLSConfig(ca_cert='/path/to/ca.pem')
-client = docker.Client(base_url='<https_url>', tls=tls_config)
-```
-
-Equivalent CLI options:
-```bash
-docker --tlsverify --tlscacert /path/to/ca.pem ...
-```
-
-## Authenticate with client certificate, do not authenticate server based on given CA
-
-```python
-tls_config = docker.tls.TLSConfig(
- client_cert=('/path/to/client-cert.pem', '/path/to/client-key.pem')
-)
-client = docker.Client(base_url='<https_url>', tls=tls_config)
-```
-
-Equivalent CLI options:
-```bash
-docker --tls --tlscert /path/to/client-cert.pem --tlskey /path/to/client-key.pem ...
-```
-
-## Authenticate with client certificate, authenticate server based on given CA
-
-```python
-tls_config = docker.tls.TLSConfig(
- client_cert=('/path/to/client-cert.pem', '/path/to/client-key.pem'),
- verify='/path/to/ca.pem'
-)
-client = docker.Client(base_url='<https_url>', tls=tls_config)
-```
-
-Equivalent CLI options:
-```bash
-docker --tlsverify \
- --tlscert /path/to/client-cert.pem \
- --tlskey /path/to/client-key.pem \
- --tlscacert /path/to/ca.pem ...
-```
diff --git a/docs/tls.rst b/docs/tls.rst
new file mode 100644
index 0000000..2e2f1ea
--- /dev/null
+++ b/docs/tls.rst
@@ -0,0 +1,37 @@
+Using TLS
+=========
+
+.. py:module:: docker.tls
+
+Both the main :py:class:`~docker.client.DockerClient` and low-level
+:py:class:`~docker.api.client.APIClient` can connect to the Docker daemon with TLS.
+
+This is all configured automatically for you if you're using :py:func:`~docker.client.from_env`, but if you need some extra control it is possible to configure it manually by using a :py:class:`TLSConfig` object.
+
+Examples
+--------
+
+For example, to check the server against a specific CA certificate:
+
+.. code-block:: python
+
+ tls_config = docker.tls.TLSConfig(ca_cert='/path/to/ca.pem')
+ client = docker.DockerClient(base_url='<https_url>', tls=tls_config)
+
+This is the equivalent of ``docker --tlsverify --tlscacert /path/to/ca.pem ...``.
+
+To authenticate with client certs:
+
+.. code-block:: python
+
+ tls_config = docker.tls.TLSConfig(
+ client_cert=('/path/to/client-cert.pem', '/path/to/client-key.pem')
+ )
+ client = docker.DockerClient(base_url='<https_url>', tls=tls_config)
+
+This is the equivalent of ``docker --tls --tlscert /path/to/client-cert.pem --tlskey /path/to/client-key.pem ...``.
+
+Reference
+---------
+
+.. autoclass:: TLSConfig()
diff --git a/docs/tmpfs.md b/docs/tmpfs.md
deleted file mode 100644
index d8be9b6..0000000
--- a/docs/tmpfs.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Using tmpfs
-
-When creating a container, you can specify paths to be mounted with tmpfs using
-the `tmpfs` argument to `create_host_config`, similarly to the `--tmpfs`
-argument to `docker run`.
-
-This capability is supported in Docker Engine 1.10 and up.
-
-`tmpfs` can be either a list or a dictionary. If it's a list, each item is a
-string specifying the path and (optionally) any configuration for the mount:
-
-```python
-client.create_container(
- 'busybox', 'ls',
- host_config=client.create_host_config(tmpfs=[
- '/mnt/vol2',
- '/mnt/vol1:size=3G,uid=1000'
- ])
-)
-```
-
-Alternatively, if it's a dictionary, each key is a path and each value contains
-the mount options:
-
-```python
-client.create_container(
- 'busybox', 'ls',
- host_config=client.create_host_config(tmpfs={
- '/mnt/vol2': '',
- '/mnt/vol1': 'size=3G,uid=1000'
- })
-)
-```
diff --git a/docs/user_guides/swarm_services.md b/docs/user_guides/swarm_services.md
new file mode 100644
index 0000000..9bd4dca
--- /dev/null
+++ b/docs/user_guides/swarm_services.md
@@ -0,0 +1,65 @@
+# Swarm services
+
+Starting with Engine version 1.12 (API 1.24), it is possible to manage services
+using the Docker Engine API. Note that the engine needs to be part of a
+[Swarm cluster](../swarm.rst) before you can use the service-related methods.
+
+## Creating a service
+
+The `APIClient.create_service` method lets you create a new service inside the
+cluster. The method takes several arguments, `task_template` being mandatory.
+This dictionary of values is most easily produced by instantiating a
+`TaskTemplate` object.
+
+```python
+container_spec = docker.types.ContainerSpec(
+ image='busybox', command=['echo', 'hello']
+)
+task_tmpl = docker.types.TaskTemplate(container_spec)
+service_id = client.create_service(task_tmpl, name=name)
+```
+
+## Listing services
+
+List all existing services using the `APIClient.services` method.
+
+```python
+client.services(filters={'name': 'mysql'})
+```
+
+## Retrieving service configuration
+
+To retrieve detailed information and configuration for a specific service, you
+may use the `APIClient.inspect_service` method using the service's ID or name.
+
+```python
+client.inspect_service(service='my_service_name')
+```
+
+## Updating service configuration
+
+The `APIClient.update_service` method lets you update a service's configuration.
+The mandatory `version` argument (used to prevent concurrent writes) can be
+retrieved using `APIClient.inspect_service`.
+
+```python
+container_spec = docker.types.ContainerSpec(
+ image='busybox', command=['echo', 'hello world']
+)
+task_tmpl = docker.types.TaskTemplate(container_spec)
+
+svc_version = client.inspect_service(svc_id)['Version']['Index']
+
+client.update_service(
+ svc_id, svc_version, name='new_name', task_template=task_tmpl
+)
+```
+
+## Removing a service
+
+A service may be removed simply using the `APIClient.remove_service` method.
+Either the service name or service ID can be used as argument.
+
+```python
+client.remove_service('my_service_name')
+``` \ No newline at end of file
diff --git a/docs/volumes.md b/docs/volumes.md
deleted file mode 100644
index 04273d8..0000000
--- a/docs/volumes.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Using volumes
-
-Volume declaration is done in two parts. Provide a list of mountpoints to
-the `Client().create_container()` method, and declare mappings in the
-`host_config` section.
-
-```python
-container_id = cli.create_container(
- 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
- host_config=cli.create_host_config(binds={
- '/home/user1/': {
- 'bind': '/mnt/vol2',
- 'mode': 'rw',
- },
- '/var/www': {
- 'bind': '/mnt/vol1',
- 'mode': 'ro',
- }
- })
-)
-```
-
-You can alternatively specify binds as a list. This code is equivalent to the
-example above:
-
-```python
-container_id = cli.create_container(
- 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
- host_config=cli.create_host_config(binds=[
- '/home/user1/:/mnt/vol2',
- '/var/www:/mnt/vol1:ro',
- ])
-)
-```
diff --git a/docs/volumes.rst b/docs/volumes.rst
new file mode 100644
index 0000000..8c0574b
--- /dev/null
+++ b/docs/volumes.rst
@@ -0,0 +1,31 @@
+Volumes
+=======
+
+.. py:module:: docker.models.volumes
+
+Manage volumes on the server.
+
+Methods available on ``client.volumes``:
+
+.. rst-class:: hide-signature
+.. py:class:: VolumeCollection
+
+ .. automethod:: create
+ .. automethod:: get
+ .. automethod:: list
+
+Volume objects
+--------------
+
+.. autoclass:: Volume()
+
+ .. autoattribute:: id
+ .. autoattribute:: short_id
+ .. autoattribute:: name
+ .. py:attribute:: attrs
+
+ The raw representation of this object from the server.
+
+
+ .. automethod:: reload
+ .. automethod:: remove
diff --git a/mkdocs.yml b/mkdocs.yml
deleted file mode 100644
index 6cfaa54..0000000
--- a/mkdocs.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-site_name: docker-py Documentation
-site_description: An API client for Docker written in Python
-site_favicon: favicon_whale.png
-site_url: https://docker-py.readthedocs.io
-repo_url: https://github.com/docker/docker-py/
-theme: readthedocs
-pages:
-- Home: index.md
-- Client API: api.md
-- Port Bindings: port-bindings.md
-- Using Volumes: volumes.md
-- Using TLS: tls.md
-- Host devices: host-devices.md
-- Host configuration: hostconfig.md
-- Network configuration: networks.md
-- Swarm management: swarm.md
-- Swarm services: services.md
-- Using tmpfs: tmpfs.md
-- Using with Docker Machine: machine.md
-- Change Log: change_log.md
-- Contributing: contributing.md
diff --git a/setup.py b/setup.py
index 96bce6a..b82a74f 100644
--- a/setup.py
+++ b/setup.py
@@ -1,15 +1,16 @@
#!/usr/bin/env python
+import codecs
import os
import sys
-from setuptools import setup
+from setuptools import setup, find_packages
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
- 'requests >= 2.5.2, != 2.11.0',
+ 'requests >= 2.5.2, != 2.11.0, != 2.12.2',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
'docker-pycreds >= 0.2.1'
@@ -35,7 +36,7 @@ with open('./test-requirements.txt') as test_reqs_txt:
long_description = ''
try:
- with open('./README.rst') as readme_rst:
+ with codecs.open('./README.rst', encoding='utf-8') as readme_rst:
long_description = readme_rst.read()
except IOError:
# README.rst is only generated on release. Its absence should not prevent
@@ -43,29 +44,24 @@ except IOError:
pass
setup(
- name="docker-py",
+ name="docker",
version=version,
- description="Python client for Docker.",
+ description="A Python library for the Docker Engine API.",
long_description=long_description,
- url='https://github.com/docker/docker-py/',
- packages=[
- 'docker', 'docker.api', 'docker.auth', 'docker.transport',
- 'docker.utils', 'docker.utils.ports', 'docker.ssladapter',
- 'docker.types',
- ],
+ url='https://github.com/docker/docker-py',
+ packages=find_packages(exclude=["tests.*", "tests"]),
install_requires=requirements,
tests_require=test_requirements,
extras_require=extras_require,
zip_safe=False,
test_suite='tests',
classifiers=[
- 'Development Status :: 4 - Beta',
+ 'Development Status :: 5 - Production/Stable',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
diff --git a/tests/base.py b/tests/base.py
deleted file mode 100644
index cac65fd..0000000
--- a/tests/base.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import sys
-import unittest
-
-import six
-
-
-class BaseTestCase(unittest.TestCase):
- def assertIn(self, object, collection):
- if six.PY2 and sys.version_info[1] <= 6:
- return self.assertTrue(object in collection)
- return super(BaseTestCase, self).assertIn(object, collection)
-
-
-class Cleanup(object):
- if sys.version_info < (2, 7):
- # Provide a basic implementation of addCleanup for Python < 2.7
- def __init__(self, *args, **kwargs):
- super(Cleanup, self).__init__(*args, **kwargs)
- self._cleanups = []
-
- def tearDown(self):
- super(Cleanup, self).tearDown()
- ok = True
- while self._cleanups:
- fn, args, kwargs = self._cleanups.pop(-1)
- try:
- fn(*args, **kwargs)
- except KeyboardInterrupt:
- raise
- except:
- ok = False
- if not ok:
- raise
-
- def addCleanup(self, function, *args, **kwargs):
- self._cleanups.append((function, args, kwargs))
diff --git a/tests/helpers.py b/tests/helpers.py
index 529b727..1d24577 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -1,7 +1,9 @@
import os
import os.path
+import random
import tarfile
import tempfile
+import time
import docker
import pytest
@@ -47,3 +49,28 @@ def requires_api_version(version):
),
reason="API version is too low (< {0})".format(version)
)
+
+
+def wait_on_condition(condition, delay=0.1, timeout=40):
+ start_time = time.time()
+ while not condition():
+ if time.time() - start_time > timeout:
+ raise AssertionError("Timeout: %s" % condition)
+ time.sleep(delay)
+
+
+def random_name():
+ return u'dockerpytest_{0:x}'.format(random.getrandbits(64))
+
+
+def force_leave_swarm(client):
+ """Actually force leave a Swarm. There seems to be a bug in Swarm that
+ occasionally throws "context deadline exceeded" errors when leaving."""
+ while True:
+ try:
+ return client.swarm.leave(force=True)
+ except docker.errors.APIError as e:
+ if e.explanation == "context deadline exceeded":
+ continue
+ else:
+ raise
diff --git a/tests/integration/build_test.py b/tests/integration/api_build_test.py
index 2695b92..3dac0e9 100644
--- a/tests/integration/build_test.py
+++ b/tests/integration/api_build_test.py
@@ -8,14 +8,13 @@ import six
from docker import errors
from ..helpers import requires_api_version
-from .base import BaseIntegrationTest
+from .base import BaseAPIIntegrationTest
-class BuildTest(BaseIntegrationTest):
+class BuildTest(BaseAPIIntegrationTest):
def test_build_streaming(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -32,7 +31,6 @@ class BuildTest(BaseIntegrationTest):
return
script = io.StringIO(six.text_type('\n').join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -54,7 +52,6 @@ class BuildTest(BaseIntegrationTest):
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
- 'MAINTAINER docker-py',
'ADD . /test',
]))
@@ -182,7 +179,6 @@ class BuildTest(BaseIntegrationTest):
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
- 'MAINTAINER docker-py',
'ADD . /test',
]))
diff --git a/tests/integration/api_test.py b/tests/integration/api_client_test.py
index f20d30b..dab8ddf 100644
--- a/tests/integration/api_test.py
+++ b/tests/integration/api_client_test.py
@@ -8,10 +8,10 @@ import warnings
import docker
from docker.utils import kwargs_from_env
-from .base import BaseIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, BUSYBOX
-class InformationTest(BaseIntegrationTest):
+class InformationTest(BaseAPIIntegrationTest):
def test_version(self):
res = self.client.version()
self.assertIn('GoVersion', res)
@@ -25,7 +25,7 @@ class InformationTest(BaseIntegrationTest):
self.assertIn('Debug', res)
def test_search(self):
- client = docker.from_env(timeout=10)
+ client = docker.APIClient(timeout=10, **kwargs_from_env())
res = client.search('busybox')
self.assertTrue(len(res) >= 1)
base_img = [x for x in res if x['name'] == 'busybox']
@@ -33,7 +33,7 @@ class InformationTest(BaseIntegrationTest):
self.assertIn('description', base_img[0])
-class LinkTest(BaseIntegrationTest):
+class LinkTest(BaseAPIIntegrationTest):
def test_remove_link(self):
# Create containers
container1 = self.client.create_container(
@@ -75,7 +75,7 @@ class LinkTest(BaseIntegrationTest):
self.assertEqual(len(retrieved), 2)
-class LoadConfigTest(BaseIntegrationTest):
+class LoadConfigTest(BaseAPIIntegrationTest):
def test_load_legacy_config(self):
folder = tempfile.mkdtemp()
self.tmp_folders.append(folder)
@@ -114,7 +114,7 @@ class LoadConfigTest(BaseIntegrationTest):
class AutoDetectVersionTest(unittest.TestCase):
def test_client_init(self):
- client = docker.from_env(version='auto')
+ client = docker.APIClient(version='auto', **kwargs_from_env())
client_version = client._version
api_version = client.version(api_version=False)['ApiVersion']
self.assertEqual(client_version, api_version)
@@ -122,22 +122,11 @@ class AutoDetectVersionTest(unittest.TestCase):
self.assertEqual(client_version, api_version_2)
client.close()
- def test_auto_client(self):
- client = docker.AutoVersionClient(**kwargs_from_env())
- client_version = client._version
- api_version = client.version(api_version=False)['ApiVersion']
- self.assertEqual(client_version, api_version)
- api_version_2 = client.version()['ApiVersion']
- self.assertEqual(client_version, api_version_2)
- client.close()
- with self.assertRaises(docker.errors.DockerException):
- docker.AutoVersionClient(version='1.11', **kwargs_from_env())
-
class ConnectionTimeoutTest(unittest.TestCase):
def setUp(self):
self.timeout = 0.5
- self.client = docker.client.Client(base_url='http://192.168.10.2:4243',
+ self.client = docker.api.APIClient(base_url='http://192.168.10.2:4243',
timeout=self.timeout)
def test_timeout(self):
@@ -166,7 +155,7 @@ class UnixconnTest(unittest.TestCase):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- client = docker.from_env()
+ client = docker.APIClient(**kwargs_from_env())
client.images()
client.close()
del client
diff --git a/tests/integration/container_test.py b/tests/integration/api_container_test.py
index 838ec36..bebadb7 100644
--- a/tests/integration/container_test.py
+++ b/tests/integration/api_container_test.py
@@ -11,10 +11,10 @@ import six
from ..helpers import requires_api_version
from .. import helpers
-from .base import BaseIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, BUSYBOX
-class ListContainersTest(BaseIntegrationTest):
+class ListContainersTest(BaseAPIIntegrationTest):
def test_list_containers(self):
res0 = self.client.containers(all=True)
size = len(res0)
@@ -34,7 +34,7 @@ class ListContainersTest(BaseIntegrationTest):
self.assertIn('Status', retrieved)
-class CreateContainerTest(BaseIntegrationTest):
+class CreateContainerTest(BaseAPIIntegrationTest):
def test_create(self):
res = self.client.create_container(BUSYBOX, 'true')
@@ -255,7 +255,7 @@ class CreateContainerTest(BaseIntegrationTest):
self.assertIn('1001', groups)
def test_valid_log_driver_and_log_opt(self):
- log_config = docker.utils.LogConfig(
+ log_config = docker.types.LogConfig(
type='json-file',
config={'max-file': '100'}
)
@@ -274,7 +274,7 @@ class CreateContainerTest(BaseIntegrationTest):
self.assertEqual(container_log_config['Config'], log_config.config)
def test_invalid_log_driver_raises_exception(self):
- log_config = docker.utils.LogConfig(
+ log_config = docker.types.LogConfig(
type='asdf-nope',
config={}
)
@@ -292,7 +292,7 @@ class CreateContainerTest(BaseIntegrationTest):
assert excinfo.value.explanation == expected_msg
def test_valid_no_log_driver_specified(self):
- log_config = docker.utils.LogConfig(
+ log_config = docker.types.LogConfig(
type="",
config={'max-file': '100'}
)
@@ -311,7 +311,7 @@ class CreateContainerTest(BaseIntegrationTest):
self.assertEqual(container_log_config['Config'], log_config.config)
def test_valid_no_config_specified(self):
- log_config = docker.utils.LogConfig(
+ log_config = docker.types.LogConfig(
type="json-file",
config=None
)
@@ -361,13 +361,6 @@ class CreateContainerTest(BaseIntegrationTest):
host_config = inspect['HostConfig']
self.assertIn('MemorySwappiness', host_config)
- def test_create_host_config_exception_raising(self):
- self.assertRaises(TypeError,
- self.client.create_host_config, mem_swappiness='40')
-
- self.assertRaises(ValueError,
- self.client.create_host_config, pid_mode='40')
-
def test_create_with_environment_variable_no_value(self):
container = self.client.create_container(
BUSYBOX,
@@ -397,8 +390,19 @@ class CreateContainerTest(BaseIntegrationTest):
config = self.client.inspect_container(container)
assert config['HostConfig']['Tmpfs'] == tmpfs
+ @requires_api_version('1.24')
+ def test_create_with_isolation(self):
+ container = self.client.create_container(
+ BUSYBOX, ['echo'], host_config=self.client.create_host_config(
+ isolation='default'
+ )
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['Isolation'] == 'default'
+
-class VolumeBindTest(BaseIntegrationTest):
+class VolumeBindTest(BaseAPIIntegrationTest):
def setUp(self):
super(VolumeBindTest, self).setUp()
@@ -493,7 +497,7 @@ class VolumeBindTest(BaseIntegrationTest):
@requires_api_version('1.20')
-class ArchiveTest(BaseIntegrationTest):
+class ArchiveTest(BaseAPIIntegrationTest):
def test_get_file_archive_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
@@ -573,7 +577,7 @@ class ArchiveTest(BaseIntegrationTest):
self.assertIn('bar/', results)
-class RenameContainerTest(BaseIntegrationTest):
+class RenameContainerTest(BaseAPIIntegrationTest):
def test_rename_container(self):
version = self.client.version()['Version']
name = 'hong_meiling'
@@ -589,7 +593,7 @@ class RenameContainerTest(BaseIntegrationTest):
self.assertEqual('/{0}'.format(name), inspect['Name'])
-class StartContainerTest(BaseIntegrationTest):
+class StartContainerTest(BaseAPIIntegrationTest):
def test_start_container(self):
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
@@ -643,7 +647,7 @@ class StartContainerTest(BaseIntegrationTest):
self.assertEqual(exitcode, 0, msg=cmd)
-class WaitTest(BaseIntegrationTest):
+class WaitTest(BaseAPIIntegrationTest):
def test_wait(self):
res = self.client.create_container(BUSYBOX, ['sleep', '3'])
id = res['Id']
@@ -671,7 +675,7 @@ class WaitTest(BaseIntegrationTest):
self.assertEqual(inspect['State']['ExitCode'], exitcode)
-class LogsTest(BaseIntegrationTest):
+class LogsTest(BaseAPIIntegrationTest):
def test_logs(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
@@ -743,7 +747,7 @@ Line2'''
self.assertEqual(logs, ''.encode(encoding='ascii'))
-class DiffTest(BaseIntegrationTest):
+class DiffTest(BaseAPIIntegrationTest):
def test_diff(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
@@ -771,7 +775,7 @@ class DiffTest(BaseIntegrationTest):
self.assertEqual(test_diff[0]['Kind'], 1)
-class StopTest(BaseIntegrationTest):
+class StopTest(BaseAPIIntegrationTest):
def test_stop(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
@@ -798,7 +802,7 @@ class StopTest(BaseIntegrationTest):
self.assertEqual(state['Running'], False)
-class KillTest(BaseIntegrationTest):
+class KillTest(BaseAPIIntegrationTest):
def test_kill(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
@@ -875,7 +879,7 @@ class KillTest(BaseIntegrationTest):
self.assertEqual(state['Running'], False, state)
-class PortTest(BaseIntegrationTest):
+class PortTest(BaseAPIIntegrationTest):
def test_port(self):
port_bindings = {
@@ -906,7 +910,7 @@ class PortTest(BaseIntegrationTest):
self.client.kill(id)
-class ContainerTopTest(BaseIntegrationTest):
+class ContainerTopTest(BaseAPIIntegrationTest):
def test_top(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60']
@@ -946,7 +950,7 @@ class ContainerTopTest(BaseIntegrationTest):
self.assertEqual(res['Processes'][0][10], 'sleep 60')
-class RestartContainerTest(BaseIntegrationTest):
+class RestartContainerTest(BaseAPIIntegrationTest):
def test_restart(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
@@ -987,7 +991,7 @@ class RestartContainerTest(BaseIntegrationTest):
self.client.kill(id)
-class RemoveContainerTest(BaseIntegrationTest):
+class RemoveContainerTest(BaseAPIIntegrationTest):
def test_remove(self):
container = self.client.create_container(BUSYBOX, ['true'])
id = container['Id']
@@ -1009,7 +1013,7 @@ class RemoveContainerTest(BaseIntegrationTest):
self.assertEqual(len(res), 0)
-class AttachContainerTest(BaseIntegrationTest):
+class AttachContainerTest(BaseAPIIntegrationTest):
def test_run_container_streaming(self):
container = self.client.create_container(BUSYBOX, '/bin/sh',
detach=True, stdin_open=True)
@@ -1040,7 +1044,7 @@ class AttachContainerTest(BaseIntegrationTest):
self.assertEqual(data.decode('utf-8'), line)
-class PauseTest(BaseIntegrationTest):
+class PauseTest(BaseAPIIntegrationTest):
def test_pause_unpause(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
@@ -1069,7 +1073,7 @@ class PauseTest(BaseIntegrationTest):
self.assertEqual(state['Paused'], False)
-class GetContainerStatsTest(BaseIntegrationTest):
+class GetContainerStatsTest(BaseAPIIntegrationTest):
@requires_api_version('1.19')
def test_get_container_stats_no_stream(self):
container = self.client.create_container(
@@ -1100,7 +1104,7 @@ class GetContainerStatsTest(BaseIntegrationTest):
self.assertIn(key, chunk)
-class ContainerUpdateTest(BaseIntegrationTest):
+class ContainerUpdateTest(BaseAPIIntegrationTest):
@requires_api_version('1.22')
def test_update_container(self):
old_mem_limit = 400 * 1024 * 1024
@@ -1147,7 +1151,7 @@ class ContainerUpdateTest(BaseIntegrationTest):
)
-class ContainerCPUTest(BaseIntegrationTest):
+class ContainerCPUTest(BaseAPIIntegrationTest):
@requires_api_version('1.18')
def test_container_cpu_shares(self):
cpu_shares = 512
diff --git a/tests/integration/exec_test.py b/tests/integration/api_exec_test.py
index f2a8b1f..0ceeefa 100644
--- a/tests/integration/exec_test.py
+++ b/tests/integration/api_exec_test.py
@@ -1,10 +1,10 @@
from docker.utils.socket import next_frame_size
from docker.utils.socket import read_exactly
-from .base import BaseIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, BUSYBOX
-class ExecTest(BaseIntegrationTest):
+class ExecTest(BaseAPIIntegrationTest):
def test_execute_command(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
diff --git a/tests/integration/api_healthcheck_test.py b/tests/integration/api_healthcheck_test.py
new file mode 100644
index 0000000..afe1dea
--- /dev/null
+++ b/tests/integration/api_healthcheck_test.py
@@ -0,0 +1,50 @@
+from .base import BaseAPIIntegrationTest, BUSYBOX
+from .. import helpers
+
+SECOND = 1000000000
+
+
+def wait_on_health_status(client, container, status):
+ def condition():
+ res = client.inspect_container(container)
+ return res['State']['Health']['Status'] == status
+ return helpers.wait_on_condition(condition)
+
+
+class HealthcheckTest(BaseAPIIntegrationTest):
+
+ @helpers.requires_api_version('1.24')
+ def test_healthcheck_shell_command(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(test='echo "hello world"'))
+ self.tmp_containers.append(container)
+
+ res = self.client.inspect_container(container)
+ assert res['Config']['Healthcheck']['Test'] == \
+ ['CMD-SHELL', 'echo "hello world"']
+
+ @helpers.requires_api_version('1.24')
+ def test_healthcheck_passes(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(
+ test="true",
+ interval=1*SECOND,
+ timeout=1*SECOND,
+ retries=1,
+ ))
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ wait_on_health_status(self.client, container, "healthy")
+
+ @helpers.requires_api_version('1.24')
+ def test_healthcheck_fails(self):
+ container = self.client.create_container(
+ BUSYBOX, 'top', healthcheck=dict(
+ test="false",
+ interval=1*SECOND,
+ timeout=1*SECOND,
+ retries=1,
+ ))
+ self.tmp_containers.append(container)
+ self.client.start(container)
+ wait_on_health_status(self.client, container, "unhealthy")
diff --git a/tests/integration/image_test.py b/tests/integration/api_image_test.py
index 31d2218..135f115 100644
--- a/tests/integration/image_test.py
+++ b/tests/integration/api_image_test.py
@@ -14,10 +14,10 @@ from six.moves import socketserver
import docker
-from .base import BaseIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, BUSYBOX
-class ListImagesTest(BaseIntegrationTest):
+class ListImagesTest(BaseAPIIntegrationTest):
def test_images(self):
res1 = self.client.images(all=True)
self.assertIn('Id', res1[0])
@@ -35,7 +35,7 @@ class ListImagesTest(BaseIntegrationTest):
self.assertEqual(type(res1[0]), six.text_type)
-class PullImageTest(BaseIntegrationTest):
+class PullImageTest(BaseAPIIntegrationTest):
def test_pull(self):
try:
self.client.remove_image('hello-world')
@@ -66,7 +66,7 @@ class PullImageTest(BaseIntegrationTest):
self.assertIn('Id', img_info)
-class CommitTest(BaseIntegrationTest):
+class CommitTest(BaseAPIIntegrationTest):
def test_commit(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
@@ -101,7 +101,7 @@ class CommitTest(BaseIntegrationTest):
assert img['Config']['Cmd'] == ['bash']
-class RemoveImageTest(BaseIntegrationTest):
+class RemoveImageTest(BaseAPIIntegrationTest):
def test_remove(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
@@ -117,7 +117,7 @@ class RemoveImageTest(BaseIntegrationTest):
self.assertEqual(len(res), 0)
-class ImportImageTest(BaseIntegrationTest):
+class ImportImageTest(BaseAPIIntegrationTest):
'''Base class for `docker import` test cases.'''
TAR_SIZE = 512 * 1024
diff --git a/tests/integration/network_test.py b/tests/integration/api_network_test.py
index 2ff5f02..b1ac52c 100644
--- a/tests/integration/network_test.py
+++ b/tests/integration/api_network_test.py
@@ -1,17 +1,14 @@
-import random
-
import docker
-from docker.utils import create_ipam_config
-from docker.utils import create_ipam_pool
+from docker.types import IPAMConfig, IPAMPool
import pytest
-from ..helpers import requires_api_version
-from .base import BaseIntegrationTest
+from ..helpers import random_name, requires_api_version
+from .base import BaseAPIIntegrationTest
-class TestNetworks(BaseIntegrationTest):
+class TestNetworks(BaseAPIIntegrationTest):
def create_network(self, *args, **kwargs):
- net_name = u'dockerpy{}'.format(random.getrandbits(24))[:14]
+ net_name = random_name()
net_id = self.client.create_network(net_name, *args, **kwargs)['Id']
self.tmp_networks.append(net_id)
return (net_name, net_id)
@@ -47,10 +44,10 @@ class TestNetworks(BaseIntegrationTest):
@requires_api_version('1.21')
def test_create_network_with_ipam_config(self):
_, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
pool_configs=[
- create_ipam_pool(
+ IPAMPool(
subnet="172.28.0.0/16",
iprange="172.28.5.0/24",
gateway="172.28.5.254",
@@ -84,10 +81,8 @@ class TestNetworks(BaseIntegrationTest):
@requires_api_version('1.21')
def test_create_network_with_host_driver_fails(self):
- net_name = 'dockerpy{}'.format(random.getrandbits(24))[:14]
-
with pytest.raises(docker.errors.APIError):
- self.client.create_network(net_name, driver='host')
+ self.client.create_network(random_name(), driver='host')
@requires_api_version('1.21')
def test_remove_network(self):
@@ -221,9 +216,9 @@ class TestNetworks(BaseIntegrationTest):
@requires_api_version('1.22')
def test_create_with_ipv4_address(self):
net_name, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
- pool_configs=[create_ipam_pool(subnet="132.124.0.0/16")],
+ pool_configs=[IPAMPool(subnet="132.124.0.0/16")],
),
)
container = self.client.create_container(
@@ -250,9 +245,9 @@ class TestNetworks(BaseIntegrationTest):
@requires_api_version('1.22')
def test_create_with_ipv6_address(self):
net_name, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
- pool_configs=[create_ipam_pool(subnet="2001:389::1/64")],
+ pool_configs=[IPAMPool(subnet="2001:389::1/64")],
),
)
container = self.client.create_container(
@@ -357,10 +352,10 @@ class TestNetworks(BaseIntegrationTest):
@requires_api_version('1.22')
def test_connect_with_ipv4_address(self):
net_name, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
pool_configs=[
- create_ipam_pool(
+ IPAMPool(
subnet="172.28.0.0/16", iprange="172.28.5.0/24",
gateway="172.28.5.254"
)
@@ -385,10 +380,10 @@ class TestNetworks(BaseIntegrationTest):
@requires_api_version('1.22')
def test_connect_with_ipv6_address(self):
net_name, net_id = self.create_network(
- ipam=create_ipam_config(
+ ipam=IPAMConfig(
driver='default',
pool_configs=[
- create_ipam_pool(
+ IPAMPool(
subnet="2001:389::1/64", iprange="2001:389::0/96",
gateway="2001:389::ffff"
)
diff --git a/tests/integration/service_test.py b/tests/integration/api_service_test.py
index 0725663..bdf7c01 100644
--- a/tests/integration/service_test.py
+++ b/tests/integration/api_service_test.py
@@ -3,10 +3,10 @@ import random
import docker
from ..helpers import requires_api_version
-from .base import BaseIntegrationTest
+from .base import BaseAPIIntegrationTest
-class ServiceTest(BaseIntegrationTest):
+class ServiceTest(BaseAPIIntegrationTest):
def setUp(self):
super(ServiceTest, self).setUp()
self.client.leave_swarm(force=True)
diff --git a/tests/integration/swarm_test.py b/tests/integration/api_swarm_test.py
index d623b83..24c566f 100644
--- a/tests/integration/swarm_test.py
+++ b/tests/integration/api_swarm_test.py
@@ -3,10 +3,10 @@ import docker
import pytest
from ..helpers import requires_api_version
-from .base import BaseIntegrationTest
+from .base import BaseAPIIntegrationTest
-class SwarmTest(BaseIntegrationTest):
+class SwarmTest(BaseAPIIntegrationTest):
def setUp(self):
super(SwarmTest, self).setUp()
self.client.leave_swarm(force=True)
diff --git a/tests/integration/volume_test.py b/tests/integration/api_volume_test.py
index 329b4e0..bc97f46 100644
--- a/tests/integration/volume_test.py
+++ b/tests/integration/api_volume_test.py
@@ -2,11 +2,11 @@ import docker
import pytest
from ..helpers import requires_api_version
-from .base import BaseIntegrationTest
+from .base import BaseAPIIntegrationTest
@requires_api_version('1.21')
-class TestVolumes(BaseIntegrationTest):
+class TestVolumes(BaseAPIIntegrationTest):
def test_create_volume(self):
name = 'perfectcherryblossom'
self.tmp_volumes.append(name)
diff --git a/tests/integration/base.py b/tests/integration/base.py
index 3fb25b5..ea43d05 100644
--- a/tests/integration/base.py
+++ b/tests/integration/base.py
@@ -2,6 +2,7 @@ import shutil
import unittest
import docker
+from docker.utils import kwargs_from_env
import six
@@ -10,20 +11,14 @@ BUSYBOX = 'busybox:buildroot-2014.02'
class BaseIntegrationTest(unittest.TestCase):
"""
- A base class for integration test cases.
-
- It sets up a Docker client and cleans up the Docker server after itself.
+ A base class for integration test cases. It cleans up the Docker server
+ after itself.
"""
- tmp_imgs = []
- tmp_containers = []
- tmp_folders = []
- tmp_volumes = []
def setUp(self):
if six.PY2:
self.assertRegex = self.assertRegexpMatches
self.assertCountEqual = self.assertItemsEqual
- self.client = docker.from_env(timeout=60)
self.tmp_imgs = []
self.tmp_containers = []
self.tmp_folders = []
@@ -31,32 +26,41 @@ class BaseIntegrationTest(unittest.TestCase):
self.tmp_networks = []
def tearDown(self):
+ client = docker.from_env()
for img in self.tmp_imgs:
try:
- self.client.remove_image(img)
+ client.api.remove_image(img)
except docker.errors.APIError:
pass
for container in self.tmp_containers:
try:
- self.client.stop(container, timeout=1)
- self.client.remove_container(container)
+ client.api.remove_container(container, force=True)
except docker.errors.APIError:
pass
for network in self.tmp_networks:
try:
- self.client.remove_network(network)
+ client.api.remove_network(network)
except docker.errors.APIError:
pass
- for folder in self.tmp_folders:
- shutil.rmtree(folder)
-
for volume in self.tmp_volumes:
try:
- self.client.remove_volume(volume)
+ client.api.remove_volume(volume)
except docker.errors.APIError:
pass
- self.client.close()
+ for folder in self.tmp_folders:
+ shutil.rmtree(folder)
+
+
+class BaseAPIIntegrationTest(BaseIntegrationTest):
+ """
+ A test case for `APIClient` integration tests. It sets up an `APIClient`
+ as `self.client`.
+ """
+
+ def setUp(self):
+ super(BaseAPIIntegrationTest, self).setUp()
+ self.client = docker.APIClient(timeout=60, **kwargs_from_env())
def run_container(self, *args, **kwargs):
container = self.client.create_container(*args, **kwargs)
diff --git a/tests/integration/client_test.py b/tests/integration/client_test.py
new file mode 100644
index 0000000..dfced9b
--- /dev/null
+++ b/tests/integration/client_test.py
@@ -0,0 +1,20 @@
+import unittest
+
+import docker
+
+
+class ClientTest(unittest.TestCase):
+
+ def test_info(self):
+ client = docker.from_env()
+ info = client.info()
+ assert 'ID' in info
+ assert 'Name' in info
+
+ def test_ping(self):
+ client = docker.from_env()
+ assert client.ping() is True
+
+ def test_version(self):
+ client = docker.from_env()
+ assert 'Version' in client.version()
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index c488f90..7217fe0 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -4,6 +4,7 @@ import sys
import warnings
import docker.errors
+from docker.utils import kwargs_from_env
import pytest
from .base import BUSYBOX
@@ -12,7 +13,7 @@ from .base import BUSYBOX
@pytest.fixture(autouse=True, scope='session')
def setup_test_session():
warnings.simplefilter('error')
- c = docker.from_env()
+ c = docker.APIClient(**kwargs_from_env())
try:
c.inspect_image(BUSYBOX)
except docker.errors.NotFound:
diff --git a/tests/integration/errors_test.py b/tests/integration/errors_test.py
index 4adfa32..dc5cef4 100644
--- a/tests/integration/errors_test.py
+++ b/tests/integration/errors_test.py
@@ -1,8 +1,8 @@
from docker.errors import APIError
-from .base import BaseIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, BUSYBOX
-class ErrorsTest(BaseIntegrationTest):
+class ErrorsTest(BaseAPIIntegrationTest):
def test_api_error_parses_json(self):
container = self.client.create_container(BUSYBOX, ['sleep', '10'])
self.client.start(container['Id'])
diff --git a/tests/integration/models_containers_test.py b/tests/integration/models_containers_test.py
new file mode 100644
index 0000000..d8b4c62
--- /dev/null
+++ b/tests/integration/models_containers_test.py
@@ -0,0 +1,204 @@
+import docker
+from .base import BaseIntegrationTest
+
+
+class ContainerCollectionTest(BaseIntegrationTest):
+
+ def test_run(self):
+ client = docker.from_env()
+ self.assertEqual(
+ client.containers.run("alpine", "echo hello world", remove=True),
+ b'hello world\n'
+ )
+
+ def test_run_detach(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.attrs['Config']['Image'] == "alpine"
+ assert container.attrs['Config']['Cmd'] == ['sleep', '300']
+
+ def test_run_with_error(self):
+ client = docker.from_env()
+ with self.assertRaises(docker.errors.ContainerError) as cm:
+ client.containers.run("alpine", "cat /test", remove=True)
+ assert cm.exception.exit_status == 1
+ assert "cat /test" in str(cm.exception)
+ assert "alpine" in str(cm.exception)
+ assert "No such file or directory" in str(cm.exception)
+
+ def test_run_with_image_that_does_not_exist(self):
+ client = docker.from_env()
+ with self.assertRaises(docker.errors.ImageNotFound):
+ client.containers.run("dockerpytest_does_not_exist")
+
+ def test_get(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ assert client.containers.get(container.id).attrs[
+ 'Config']['Image'] == "alpine"
+
+ def test_list(self):
+ client = docker.from_env()
+ container_id = client.containers.run(
+ "alpine", "sleep 300", detach=True).id
+ self.tmp_containers.append(container_id)
+ containers = [c for c in client.containers.list() if c.id ==
+ container_id]
+ assert len(containers) == 1
+
+ container = containers[0]
+ assert container.attrs['Config']['Image'] == 'alpine'
+
+ container.kill()
+ container.remove()
+ assert container_id not in [c.id for c in client.containers.list()]
+
+
+class ContainerTest(BaseIntegrationTest):
+
+ def test_commit(self):
+ client = docker.from_env()
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /test'",
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+ container.wait()
+ image = container.commit()
+ self.assertEqual(
+ client.containers.run(image.id, "cat /test", remove=True),
+ b"hello\n"
+ )
+
+ def test_diff(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "touch /test", detach=True)
+ self.tmp_containers.append(container.id)
+ container.wait()
+ assert container.diff() == [{'Path': '/test', 'Kind': 1}]
+
+ def test_exec_run(self):
+ client = docker.from_env()
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /test; sleep 60'", detach=True
+ )
+ self.tmp_containers.append(container.id)
+ assert container.exec_run("cat /test") == b"hello\n"
+
+ def test_kill(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ while container.status != 'running':
+ container.reload()
+ assert container.status == 'running'
+ container.kill()
+ container.reload()
+ assert container.status == 'exited'
+
+ def test_logs(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "echo hello world",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ container.wait()
+ assert container.logs() == b"hello world\n"
+
+ def test_pause(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ container.pause()
+ container.reload()
+ assert container.status == "paused"
+ container.unpause()
+ container.reload()
+ assert container.status == "running"
+
+ def test_remove(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "echo hello", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.id in [c.id for c in client.containers.list(all=True)]
+ container.wait()
+ container.remove()
+ containers = client.containers.list(all=True)
+ assert container.id not in [c.id for c in containers]
+
+ def test_rename(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "echo hello", name="test1",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.name == "test1"
+ container.rename("test2")
+ container.reload()
+ assert container.name == "test2"
+
+ def test_restart(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "sleep 100", detach=True)
+ self.tmp_containers.append(container.id)
+ first_started_at = container.attrs['State']['StartedAt']
+ container.restart()
+ container.reload()
+ second_started_at = container.attrs['State']['StartedAt']
+ assert first_started_at != second_started_at
+
+ def test_start(self):
+ client = docker.from_env()
+ container = client.containers.create("alpine", "sleep 50", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.status == "created"
+ container.start()
+ container.reload()
+ assert container.status == "running"
+
+ def test_stats(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "sleep 100", detach=True)
+ self.tmp_containers.append(container.id)
+ stats = container.stats(stream=False)
+ for key in ['read', 'networks', 'precpu_stats', 'cpu_stats',
+ 'memory_stats', 'blkio_stats']:
+ assert key in stats
+
+ def test_stop(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "top", detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.status in ("running", "created")
+ container.stop(timeout=2)
+ container.reload()
+ assert container.status == "exited"
+
+ def test_top(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "sleep 60", detach=True)
+ self.tmp_containers.append(container.id)
+ top = container.top()
+ assert len(top['Processes']) == 1
+ assert 'sleep 60' in top['Processes'][0]
+
+ def test_update(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "sleep 60", detach=True,
+ cpu_shares=2)
+ self.tmp_containers.append(container.id)
+ assert container.attrs['HostConfig']['CpuShares'] == 2
+ container.update(cpu_shares=3)
+ container.reload()
+ assert container.attrs['HostConfig']['CpuShares'] == 3
+
+ def test_wait(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "sh -c 'exit 0'",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.wait() == 0
+ container = client.containers.run("alpine", "sh -c 'exit 1'",
+ detach=True)
+ self.tmp_containers.append(container.id)
+ assert container.wait() == 1
diff --git a/tests/integration/models_images_test.py b/tests/integration/models_images_test.py
new file mode 100644
index 0000000..2be6232
--- /dev/null
+++ b/tests/integration/models_images_test.py
@@ -0,0 +1,67 @@
+import io
+import docker
+from .base import BaseIntegrationTest
+
+
+class ImageCollectionTest(BaseIntegrationTest):
+
+ def test_build(self):
+ client = docker.from_env()
+ image = client.images.build(fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "CMD echo hello world".encode('ascii')
+ ))
+ self.tmp_imgs.append(image.id)
+ assert client.containers.run(image) == b"hello world\n"
+
+ def test_build_with_error(self):
+ client = docker.from_env()
+ with self.assertRaises(docker.errors.BuildError) as cm:
+ client.images.build(fileobj=io.BytesIO(
+ "FROM alpine\n"
+ "NOTADOCKERFILECOMMAND".encode('ascii')
+ ))
+ assert str(cm.exception) == ("Unknown instruction: "
+ "NOTADOCKERFILECOMMAND")
+
+ def test_list(self):
+ client = docker.from_env()
+ image = client.images.pull('alpine:latest')
+ assert image.id in get_ids(client.images.list())
+
+ def test_list_with_repository(self):
+ client = docker.from_env()
+ image = client.images.pull('alpine:latest')
+ assert image.id in get_ids(client.images.list('alpine'))
+ assert image.id in get_ids(client.images.list('alpine:latest'))
+
+ def test_pull(self):
+ client = docker.from_env()
+ image = client.images.pull('alpine:latest')
+ assert 'alpine:latest' in image.attrs['RepoTags']
+
+
+class ImageTest(BaseIntegrationTest):
+
+ def test_tag_and_remove(self):
+ repo = 'dockersdk.tests.images.test_tag'
+ tag = 'some-tag'
+ identifier = '{}:{}'.format(repo, tag)
+
+ client = docker.from_env()
+ image = client.images.pull('alpine:latest')
+
+ image.tag(repo, tag)
+ self.tmp_imgs.append(identifier)
+ assert image.id in get_ids(client.images.list(repo))
+ assert image.id in get_ids(client.images.list(identifier))
+
+ client.images.remove(identifier)
+ assert image.id not in get_ids(client.images.list(repo))
+ assert image.id not in get_ids(client.images.list(identifier))
+
+ assert image.id in get_ids(client.images.list('alpine:latest'))
+
+
+def get_ids(images):
+ return [i.id for i in images]
diff --git a/tests/integration/models_networks_test.py b/tests/integration/models_networks_test.py
new file mode 100644
index 0000000..771ee7d
--- /dev/null
+++ b/tests/integration/models_networks_test.py
@@ -0,0 +1,64 @@
+import docker
+from .. import helpers
+from .base import BaseIntegrationTest
+
+
+class NetworkCollectionTest(BaseIntegrationTest):
+
+ def test_create(self):
+ client = docker.from_env()
+ name = helpers.random_name()
+ network = client.networks.create(name, labels={'foo': 'bar'})
+ self.tmp_networks.append(network.id)
+ assert network.name == name
+ assert network.attrs['Labels']['foo'] == "bar"
+
+ def test_get(self):
+ client = docker.from_env()
+ name = helpers.random_name()
+ network_id = client.networks.create(name).id
+ self.tmp_networks.append(network_id)
+ network = client.networks.get(network_id)
+ assert network.name == name
+
+ def test_list_remove(self):
+ client = docker.from_env()
+ name = helpers.random_name()
+ network = client.networks.create(name)
+ self.tmp_networks.append(network.id)
+ assert network.id in [n.id for n in client.networks.list()]
+ assert network.id not in [
+ n.id for n in
+ client.networks.list(ids=["fdhjklfdfdshjkfds"])
+ ]
+ assert network.id in [
+ n.id for n in
+ client.networks.list(ids=[network.id])
+ ]
+ assert network.id not in [
+ n.id for n in
+ client.networks.list(names=["fdshjklfdsjhkl"])
+ ]
+ assert network.id in [
+ n.id for n in
+ client.networks.list(names=[name])
+ ]
+ network.remove()
+ assert network.id not in [n.id for n in client.networks.list()]
+
+
+class NetworkTest(BaseIntegrationTest):
+
+ def test_connect_disconnect(self):
+ client = docker.from_env()
+ network = client.networks.create(helpers.random_name())
+ self.tmp_networks.append(network.id)
+ container = client.containers.create("alpine", "sleep 300")
+ self.tmp_containers.append(container.id)
+ assert network.containers == []
+ network.connect(container)
+ container.start()
+ assert client.networks.get(network.id).containers == [container]
+ network.disconnect(container)
+ assert network.containers == []
+ assert client.networks.get(network.id).containers == []
diff --git a/tests/integration/models_nodes_test.py b/tests/integration/models_nodes_test.py
new file mode 100644
index 0000000..0199d69
--- /dev/null
+++ b/tests/integration/models_nodes_test.py
@@ -0,0 +1,34 @@
+import unittest
+import docker
+from .. import helpers
+
+
+class NodesTest(unittest.TestCase):
+ def setUp(self):
+ helpers.force_leave_swarm(docker.from_env())
+
+ def tearDown(self):
+ helpers.force_leave_swarm(docker.from_env())
+
+ def test_list_get_update(self):
+ client = docker.from_env()
+ client.swarm.init()
+ nodes = client.nodes.list()
+ assert len(nodes) == 1
+ assert nodes[0].attrs['Spec']['Role'] == 'manager'
+
+ node = client.nodes.get(nodes[0].id)
+ assert node.id == nodes[0].id
+ assert node.attrs['Spec']['Role'] == 'manager'
+ assert node.version > 0
+
+ node = client.nodes.list()[0]
+ assert not node.attrs['Spec'].get('Labels')
+ node.update({
+ 'Availability': 'active',
+ 'Name': 'node-name',
+ 'Role': 'manager',
+ 'Labels': {'foo': 'bar'}
+ })
+ node.reload()
+ assert node.attrs['Spec']['Labels'] == {'foo': 'bar'}
diff --git a/tests/integration/models_resources_test.py b/tests/integration/models_resources_test.py
new file mode 100644
index 0000000..b8eba81
--- /dev/null
+++ b/tests/integration/models_resources_test.py
@@ -0,0 +1,16 @@
+import docker
+from .base import BaseIntegrationTest
+
+
+class ModelTest(BaseIntegrationTest):
+
+ def test_reload(self):
+ client = docker.from_env()
+ container = client.containers.run("alpine", "sleep 300", detach=True)
+ self.tmp_containers.append(container.id)
+ first_started_at = container.attrs['State']['StartedAt']
+ container.kill()
+ container.start()
+ assert container.attrs['State']['StartedAt'] == first_started_at
+ container.reload()
+ assert container.attrs['State']['StartedAt'] != first_started_at
diff --git a/tests/integration/models_services_test.py b/tests/integration/models_services_test.py
new file mode 100644
index 0000000..99cffc0
--- /dev/null
+++ b/tests/integration/models_services_test.py
@@ -0,0 +1,100 @@
+import unittest
+import docker
+from .. import helpers
+
+
+class ServiceTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ client = docker.from_env()
+ helpers.force_leave_swarm(client)
+ client.swarm.init()
+
+ @classmethod
+ def tearDownClass(cls):
+ helpers.force_leave_swarm(docker.from_env())
+
+ def test_create(self):
+ client = docker.from_env()
+ name = helpers.random_name()
+ service = client.services.create(
+ # create arguments
+ name=name,
+ labels={'foo': 'bar'},
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300",
+ container_labels={'container': 'label'}
+ )
+ assert service.name == name
+ assert service.attrs['Spec']['Labels']['foo'] == 'bar'
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert container_spec['Image'] == "alpine"
+ assert container_spec['Labels'] == {'container': 'label'}
+
+ def test_get(self):
+ client = docker.from_env()
+ name = helpers.random_name()
+ service = client.services.create(
+ name=name,
+ image="alpine",
+ command="sleep 300"
+ )
+ service = client.services.get(service.id)
+ assert service.name == name
+
+ def test_list_remove(self):
+ client = docker.from_env()
+ service = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300"
+ )
+ assert service in client.services.list()
+ service.remove()
+ assert service not in client.services.list()
+
+ def test_tasks(self):
+ client = docker.from_env()
+ service1 = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300"
+ )
+ service2 = client.services.create(
+ name=helpers.random_name(),
+ image="alpine",
+ command="sleep 300"
+ )
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service1.tasks()
+ assert len(tasks) == 1
+ assert tasks[0]['ServiceID'] == service1.id
+
+ tasks = []
+ while len(tasks) == 0:
+ tasks = service2.tasks()
+ assert len(tasks) == 1
+ assert tasks[0]['ServiceID'] == service2.id
+
+ def test_update(self):
+ client = docker.from_env()
+ service = client.services.create(
+ # create arguments
+ name=helpers.random_name(),
+ # ContainerSpec arguments
+ image="alpine",
+ command="sleep 300"
+ )
+ new_name = helpers.random_name()
+ service.update(
+ # create argument
+ name=new_name,
+ # ContainerSpec argument
+ command="sleep 600"
+ )
+ service.reload()
+ assert service.name == new_name
+ container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
+ assert container_spec['Command'] == ["sleep", "600"]
diff --git a/tests/integration/models_swarm_test.py b/tests/integration/models_swarm_test.py
new file mode 100644
index 0000000..abdff41
--- /dev/null
+++ b/tests/integration/models_swarm_test.py
@@ -0,0 +1,22 @@
+import unittest
+import docker
+from .. import helpers
+
+
+class SwarmTest(unittest.TestCase):
+ def setUp(self):
+ helpers.force_leave_swarm(docker.from_env())
+
+ def tearDown(self):
+ helpers.force_leave_swarm(docker.from_env())
+
+ def test_init_update_leave(self):
+ client = docker.from_env()
+ client.swarm.init(snapshot_interval=5000)
+ assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 5000
+ client.swarm.update(snapshot_interval=10000)
+ assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 10000
+ assert client.swarm.leave(force=True)
+ with self.assertRaises(docker.errors.APIError) as cm:
+ client.swarm.reload()
+ assert cm.exception.response.status_code == 406
diff --git a/tests/integration/models_volumes_test.py b/tests/integration/models_volumes_test.py
new file mode 100644
index 0000000..094e68f
--- /dev/null
+++ b/tests/integration/models_volumes_test.py
@@ -0,0 +1,30 @@
+import docker
+from .base import BaseIntegrationTest
+
+
+class VolumesTest(BaseIntegrationTest):
+ def test_create_get(self):
+ client = docker.from_env()
+ volume = client.volumes.create(
+ 'dockerpytest_1',
+ driver='local',
+ labels={'labelkey': 'labelvalue'}
+ )
+ self.tmp_volumes.append(volume.id)
+ assert volume.id
+ assert volume.name == 'dockerpytest_1'
+ assert volume.attrs['Labels'] == {'labelkey': 'labelvalue'}
+
+ volume = client.volumes.get(volume.id)
+ assert volume.name == 'dockerpytest_1'
+
+ def test_list_remove(self):
+ client = docker.from_env()
+ volume = client.volumes.create('dockerpytest_1')
+ self.tmp_volumes.append(volume.id)
+ assert volume in client.volumes.list()
+ assert volume in client.volumes.list(filters={'name': 'dockerpytest_'})
+ assert volume not in client.volumes.list(filters={'name': 'foobar'})
+
+ volume.remove()
+ assert volume not in client.volumes.list()
diff --git a/tests/integration/regression_test.py b/tests/integration/regression_test.py
index 0672c4f..e3e6d9b 100644
--- a/tests/integration/regression_test.py
+++ b/tests/integration/regression_test.py
@@ -4,10 +4,10 @@ import random
import docker
import six
-from .base import BaseIntegrationTest, BUSYBOX
+from .base import BaseAPIIntegrationTest, BUSYBOX
-class TestRegressions(BaseIntegrationTest):
+class TestRegressions(BaseAPIIntegrationTest):
def test_443_handle_nonchunked_response_in_stream(self):
dfile = io.BytesIO()
with self.assertRaises(docker.errors.APIError) as exc:
diff --git a/tests/unit/build_test.py b/tests/unit/api_build_test.py
index b2705eb..927aa97 100644
--- a/tests/unit/build_test.py
+++ b/tests/unit/api_build_test.py
@@ -4,14 +4,13 @@ import io
import docker
from docker import auth
-from .api_test import DockerClientTest, fake_request, url_prefix
+from .api_test import BaseAPIClientTest, fake_request, url_prefix
-class BuildTest(DockerClientTest):
+class BuildTest(BaseAPIClientTest):
def test_build_container(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -23,7 +22,6 @@ class BuildTest(DockerClientTest):
def test_build_container_pull(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -35,7 +33,6 @@ class BuildTest(DockerClientTest):
def test_build_container_stream(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -47,7 +44,6 @@ class BuildTest(DockerClientTest):
def test_build_container_custom_context(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
@@ -60,7 +56,6 @@ class BuildTest(DockerClientTest):
def test_build_container_custom_context_gzip(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
- 'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
diff --git a/tests/unit/container_test.py b/tests/unit/api_container_test.py
index 51e8cbb..abf3613 100644
--- a/tests/unit/container_test.py
+++ b/tests/unit/api_container_test.py
@@ -11,7 +11,7 @@ import six
from . import fake_api
from ..helpers import requires_api_version
from .api_test import (
- DockerClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
+ BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
fake_inspect_container
)
@@ -25,7 +25,7 @@ def fake_inspect_container_tty(self, container):
return fake_inspect_container(self, container, tty=True)
-class StartContainerTest(DockerClientTest):
+class StartContainerTest(BaseAPIClientTest):
def test_start_container(self):
self.client.start(fake_api.FAKE_CONTAINER_ID)
@@ -34,10 +34,7 @@ class StartContainerTest(DockerClientTest):
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
- self.assertEqual(json.loads(args[1]['data']), {})
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert 'data' not in args[1]
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
@@ -63,25 +60,21 @@ class StartContainerTest(DockerClientTest):
self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})
def test_start_container_with_lxc_conf(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_lxc_conf_compat(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_binds_ro(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {
@@ -91,22 +84,18 @@ class StartContainerTest(DockerClientTest):
}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_binds_rw(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {"bind": '/mnt', "ro": False}
}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_port_binds(self):
self.maxDiff = None
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
@@ -116,18 +105,14 @@ class StartContainerTest(DockerClientTest):
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
- pytest.deprecated_call(call_start)
-
def test_start_container_with_links(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, links={'path': 'alias'}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_multiple_links(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
@@ -136,21 +121,15 @@ class StartContainerTest(DockerClientTest):
}
)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_links_as_list_of_tuples(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[('path', 'alias')])
- pytest.deprecated_call(call_start)
-
def test_start_container_privileged(self):
- def call_start():
+ with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
- pytest.deprecated_call(call_start)
-
def test_start_container_with_dict_instead_of_id(self):
self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
@@ -159,16 +138,13 @@ class StartContainerTest(DockerClientTest):
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
- self.assertEqual(json.loads(args[1]['data']), {})
- self.assertEqual(
- args[1]['headers'], {'Content-Type': 'application/json'}
- )
+ assert 'data' not in args[1]
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
-class CreateContainerTest(DockerClientTest):
+class CreateContainerTest(BaseAPIClientTest):
def test_create_container(self):
self.client.create_container('busybox', 'true')
@@ -1180,7 +1156,7 @@ class CreateContainerTest(DockerClientTest):
self.assertEqual(json.loads(args[1]['data'])['Env'], expected)
-class ContainerTest(DockerClientTest):
+class ContainerTest(BaseAPIClientTest):
def test_list_containers(self):
self.client.containers(all=True)
@@ -1244,7 +1220,7 @@ class ContainerTest(DockerClientTest):
)
def test_logs(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
@@ -1263,7 +1239,7 @@ class ContainerTest(DockerClientTest):
)
def test_logs_with_dict_instead_of_id(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
@@ -1282,7 +1258,7 @@ class ContainerTest(DockerClientTest):
)
def test_log_streaming(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
follow=False)
@@ -1297,7 +1273,7 @@ class ContainerTest(DockerClientTest):
)
def test_log_following(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=True)
@@ -1312,7 +1288,7 @@ class ContainerTest(DockerClientTest):
)
def test_log_following_backwards(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
@@ -1326,7 +1302,7 @@ class ContainerTest(DockerClientTest):
)
def test_log_streaming_and_following(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
follow=True)
@@ -1342,7 +1318,7 @@ class ContainerTest(DockerClientTest):
def test_log_tail(self):
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, tail=10)
@@ -1358,7 +1334,7 @@ class ContainerTest(DockerClientTest):
def test_log_since(self):
ts = 809222400
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, since=ts)
@@ -1375,7 +1351,7 @@ class ContainerTest(DockerClientTest):
def test_log_since_with_datetime(self):
ts = 809222400
time = datetime.datetime.utcfromtimestamp(ts)
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, since=time)
@@ -1391,9 +1367,9 @@ class ContainerTest(DockerClientTest):
def test_log_tty(self):
m = mock.Mock()
- with mock.patch('docker.Client.inspect_container',
+ with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container_tty):
- with mock.patch('docker.Client._stream_raw_result',
+ with mock.patch('docker.api.client.APIClient._stream_raw_result',
m):
self.client.logs(fake_api.FAKE_CONTAINER_ID,
follow=True, stream=True)
diff --git a/tests/unit/exec_test.py b/tests/unit/api_exec_test.py
index 6ba2a3d..41ee940 100644
--- a/tests/unit/exec_test.py
+++ b/tests/unit/api_exec_test.py
@@ -2,11 +2,11 @@ import json
from . import fake_api
from .api_test import (
- DockerClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
+ BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
)
-class ExecTest(DockerClientTest):
+class ExecTest(BaseAPIClientTest):
def test_exec_create(self):
self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
diff --git a/tests/unit/image_test.py b/tests/unit/api_image_test.py
index cca519e..fbfb146 100644
--- a/tests/unit/image_test.py
+++ b/tests/unit/api_image_test.py
@@ -4,7 +4,7 @@ import pytest
from . import fake_api
from docker import auth
from .api_test import (
- DockerClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
+ BaseAPIClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
fake_resolve_authconfig
)
@@ -14,7 +14,7 @@ except ImportError:
import mock
-class ImageTest(DockerClientTest):
+class ImageTest(BaseAPIClientTest):
def test_image_viz(self):
with pytest.raises(Exception):
self.client.images('busybox', viz=True)
@@ -228,7 +228,7 @@ class ImageTest(DockerClientTest):
)
def test_push_image(self):
- with mock.patch('docker.auth.auth.resolve_authconfig',
+ with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME)
@@ -245,7 +245,7 @@ class ImageTest(DockerClientTest):
)
def test_push_image_with_tag(self):
- with mock.patch('docker.auth.auth.resolve_authconfig',
+ with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
@@ -289,7 +289,7 @@ class ImageTest(DockerClientTest):
)
def test_push_image_stream(self):
- with mock.patch('docker.auth.auth.resolve_authconfig',
+ with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
diff --git a/tests/unit/network_test.py b/tests/unit/api_network_test.py
index 93f03da..037edb5 100644
--- a/tests/unit/network_test.py
+++ b/tests/unit/api_network_test.py
@@ -2,9 +2,9 @@ import json
import six
+from .api_test import BaseAPIClientTest, url_prefix, response
from ..helpers import requires_api_version
-from .api_test import DockerClientTest, url_prefix, response
-from docker.utils import create_ipam_config, create_ipam_pool
+from docker.types import IPAMConfig, IPAMPool
try:
from unittest import mock
@@ -12,7 +12,7 @@ except ImportError:
import mock
-class NetworkTest(DockerClientTest):
+class NetworkTest(BaseAPIClientTest):
@requires_api_version('1.21')
def test_list_networks(self):
networks = [
@@ -33,7 +33,7 @@ class NetworkTest(DockerClientTest):
get = mock.Mock(return_value=response(
status_code=200, content=json.dumps(networks).encode('utf-8')))
- with mock.patch('docker.Client.get', get):
+ with mock.patch('docker.api.client.APIClient.get', get):
self.assertEqual(self.client.networks(), networks)
self.assertEqual(get.call_args[0][0], url_prefix + 'networks')
@@ -59,7 +59,7 @@ class NetworkTest(DockerClientTest):
network_response = response(status_code=200, content=network_data)
post = mock.Mock(return_value=network_response)
- with mock.patch('docker.Client.post', post):
+ with mock.patch('docker.api.client.APIClient.post', post):
result = self.client.create_network('foo')
self.assertEqual(result, network_data)
@@ -81,9 +81,9 @@ class NetworkTest(DockerClientTest):
json.loads(post.call_args[1]['data']),
{"Name": "foo", "Driver": "bridge", "Options": opts})
- ipam_pool_config = create_ipam_pool(subnet="192.168.52.0/24",
- gateway="192.168.52.254")
- ipam_config = create_ipam_config(pool_configs=[ipam_pool_config])
+ ipam_pool_config = IPAMPool(subnet="192.168.52.0/24",
+ gateway="192.168.52.254")
+ ipam_config = IPAMConfig(pool_configs=[ipam_pool_config])
self.client.create_network("bar", driver="bridge",
ipam=ipam_config)
@@ -109,7 +109,7 @@ class NetworkTest(DockerClientTest):
network_id = 'abc12345'
delete = mock.Mock(return_value=response(status_code=200))
- with mock.patch('docker.Client.delete', delete):
+ with mock.patch('docker.api.client.APIClient.delete', delete):
self.client.remove_network(network_id)
args = delete.call_args
@@ -130,7 +130,7 @@ class NetworkTest(DockerClientTest):
network_response = response(status_code=200, content=network_data)
get = mock.Mock(return_value=network_response)
- with mock.patch('docker.Client.get', get):
+ with mock.patch('docker.api.client.APIClient.get', get):
result = self.client.inspect_network(network_id)
self.assertEqual(result, network_data)
@@ -145,7 +145,7 @@ class NetworkTest(DockerClientTest):
post = mock.Mock(return_value=response(status_code=201))
- with mock.patch('docker.Client.post', post):
+ with mock.patch('docker.api.client.APIClient.post', post):
self.client.connect_container_to_network(
{'Id': container_id},
network_id,
@@ -174,7 +174,7 @@ class NetworkTest(DockerClientTest):
post = mock.Mock(return_value=response(status_code=201))
- with mock.patch('docker.Client.post', post):
+ with mock.patch('docker.api.client.APIClient.post', post):
self.client.disconnect_container_from_network(
{'Id': container_id}, network_id)
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
index 94092dd..67373ba 100644
--- a/tests/unit/api_test.py
+++ b/tests/unit/api_test.py
@@ -1,21 +1,21 @@
import datetime
import json
+import io
import os
import re
import shutil
import socket
-import sys
import tempfile
import threading
import time
-import io
+import unittest
import docker
+from docker.api import APIClient
import requests
from requests.packages import urllib3
import six
-from .. import base
from . import fake_api
import pytest
@@ -86,21 +86,25 @@ def fake_delete(self, url, *args, **kwargs):
def fake_read_from_socket(self, response, stream):
return six.binary_type()
+
url_base = '{0}/'.format(fake_api.prefix)
url_prefix = '{0}v{1}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
-class DockerClientTest(base.Cleanup, base.BaseTestCase):
+class BaseAPIClientTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.multiple(
- 'docker.Client', get=fake_get, post=fake_post, put=fake_put,
+ 'docker.api.client.APIClient',
+ get=fake_get,
+ post=fake_post,
+ put=fake_put,
delete=fake_delete,
_read_from_socket=fake_read_from_socket
)
self.patcher.start()
- self.client = docker.Client()
+ self.client = APIClient()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
@@ -108,11 +112,6 @@ class DockerClientTest(base.Cleanup, base.BaseTestCase):
self.client.close()
self.patcher.stop()
- def assertIn(self, object, collection):
- if six.PY2 and sys.version_info[1] <= 6:
- return self.assertTrue(object in collection)
- return super(DockerClientTest, self).assertIn(object, collection)
-
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
@@ -124,10 +123,10 @@ class DockerClientTest(base.Cleanup, base.BaseTestCase):
}
-class DockerApiTest(DockerClientTest):
+class DockerApiTest(BaseAPIClientTest):
def test_ctor(self):
with pytest.raises(docker.errors.DockerException) as excinfo:
- docker.Client(version=1.12)
+ APIClient(version=1.12)
self.assertEqual(
str(excinfo.value),
@@ -194,7 +193,7 @@ class DockerApiTest(DockerClientTest):
)
def test_retrieve_server_version(self):
- client = docker.Client(version="auto")
+ client = APIClient(version="auto")
self.assertTrue(isinstance(client._version, six.string_types))
self.assertFalse(client._version == "auto")
client.close()
@@ -274,27 +273,27 @@ class DockerApiTest(DockerClientTest):
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
- c = docker.Client(base_url="unix://socket")
+ c = APIClient(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
- c = docker.Client(base_url="unix:///socket")
+ c = APIClient(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
- c = docker.Client(base_url="http+unix:///socket")
+ c = APIClient(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
- c = docker.Client(base_url="http://hostname:1234")
+ c = APIClient(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
- c = docker.Client(base_url="tcp://hostname:1234")
+ c = APIClient(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
@@ -355,7 +354,7 @@ class DockerApiTest(DockerClientTest):
self.assertEqual(result, content)
-class StreamTest(base.Cleanup, base.BaseTestCase):
+class StreamTest(unittest.TestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
@@ -440,7 +439,7 @@ class StreamTest(base.Cleanup, base.BaseTestCase):
b'\r\n'
) + b'\r\n'.join(lines)
- with docker.Client(base_url="http+unix://" + self.socket_file) \
+ with APIClient(base_url="http+unix://" + self.socket_file) \
as client:
for i in range(5):
try:
@@ -457,10 +456,10 @@ class StreamTest(base.Cleanup, base.BaseTestCase):
str(i).encode() for i in range(50)])
-class UserAgentTest(base.BaseTestCase):
+class UserAgentTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.object(
- docker.Client,
+ APIClient,
'send',
return_value=fake_resp("GET", "%s/version" % fake_api.prefix)
)
@@ -470,7 +469,7 @@ class UserAgentTest(base.BaseTestCase):
self.patcher.stop()
def test_default_user_agent(self):
- client = docker.Client()
+ client = APIClient()
client.version()
self.assertEqual(self.mock_send.call_count, 1)
@@ -479,9 +478,53 @@ class UserAgentTest(base.BaseTestCase):
self.assertEqual(headers['User-Agent'], expected)
def test_custom_user_agent(self):
- client = docker.Client(user_agent='foo/bar')
+ client = APIClient(user_agent='foo/bar')
client.version()
self.assertEqual(self.mock_send.call_count, 1)
headers = self.mock_send.call_args[0][0].headers
self.assertEqual(headers['User-Agent'], 'foo/bar')
+
+
+class DisableSocketTest(unittest.TestCase):
+ class DummySocket(object):
+ def __init__(self, timeout=60):
+ self.timeout = timeout
+
+ def settimeout(self, timeout):
+ self.timeout = timeout
+
+ def gettimeout(self):
+ return self.timeout
+
+ def setUp(self):
+ self.client = APIClient()
+
+ def test_disable_socket_timeout(self):
+ """Test that the timeout is disabled on a generic socket object."""
+ socket = self.DummySocket()
+
+ self.client._disable_socket_timeout(socket)
+
+ self.assertEqual(socket.timeout, None)
+
+ def test_disable_socket_timeout2(self):
+ """Test that the timeouts are disabled on a generic socket object
+        and its _sock object if present."""
+ socket = self.DummySocket()
+ socket._sock = self.DummySocket()
+
+ self.client._disable_socket_timeout(socket)
+
+ self.assertEqual(socket.timeout, None)
+ self.assertEqual(socket._sock.timeout, None)
+
+ def test_disable_socket_timout_non_blocking(self):
+ """Test that a non-blocking socket does not get set to blocking."""
+ socket = self.DummySocket()
+ socket._sock = self.DummySocket(0.0)
+
+ self.client._disable_socket_timeout(socket)
+
+ self.assertEqual(socket.timeout, None)
+ self.assertEqual(socket._sock.timeout, 0.0)
diff --git a/tests/unit/volume_test.py b/tests/unit/api_volume_test.py
index 3909977..cb72cb2 100644
--- a/tests/unit/volume_test.py
+++ b/tests/unit/api_volume_test.py
@@ -3,10 +3,10 @@ import json
import pytest
from ..helpers import requires_api_version
-from .api_test import DockerClientTest, url_prefix, fake_request
+from .api_test import BaseAPIClientTest, url_prefix, fake_request
-class VolumeTest(DockerClientTest):
+class VolumeTest(BaseAPIClientTest):
@requires_api_version('1.21')
def test_list_volumes(self):
volumes = self.client.volumes()
diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
index f395133..e4c93b7 100644
--- a/tests/unit/auth_test.py
+++ b/tests/unit/auth_test.py
@@ -7,12 +7,9 @@ import os.path
import random
import shutil
import tempfile
+import unittest
-from docker import auth
-from docker.auth.auth import parse_auth
-from docker import errors
-
-from .. import base
+from docker import auth, errors
try:
from unittest import mock
@@ -20,7 +17,7 @@ except ImportError:
import mock
-class RegressionTest(base.BaseTestCase):
+class RegressionTest(unittest.TestCase):
def test_803_urlsafe_encode(self):
auth_data = {
'username': 'root',
@@ -31,7 +28,7 @@ class RegressionTest(base.BaseTestCase):
assert b'_' in encoded
-class ResolveRepositoryNameTest(base.BaseTestCase):
+class ResolveRepositoryNameTest(unittest.TestCase):
def test_resolve_repository_name_hub_library_image(self):
self.assertEqual(
auth.resolve_repository_name('image'),
@@ -117,12 +114,12 @@ def encode_auth(auth_info):
auth_info.get('password', '').encode('utf-8'))
-class ResolveAuthTest(base.BaseTestCase):
+class ResolveAuthTest(unittest.TestCase):
index_config = {'auth': encode_auth({'username': 'indexuser'})}
private_config = {'auth': encode_auth({'username': 'privateuser'})}
legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
- auth_config = parse_auth({
+ auth_config = auth.parse_auth({
'https://index.docker.io/v1/': index_config,
'my.registry.net': private_config,
'http://legacy.registry.url/v1/': legacy_config,
@@ -272,7 +269,7 @@ class ResolveAuthTest(base.BaseTestCase):
)
-class LoadConfigTest(base.Cleanup, base.BaseTestCase):
+class LoadConfigTest(unittest.TestCase):
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py
index 6ceb8cb..b79c68e 100644
--- a/tests/unit/client_test.py
+++ b/tests/unit/client_test.py
@@ -1,14 +1,78 @@
+import datetime
+import docker
+from docker.utils import kwargs_from_env
import os
-from docker.client import Client
-from .. import base
+import unittest
-TEST_CERT_DIR = os.path.join(
- os.path.dirname(__file__),
- 'testdata/certs',
-)
+from . import fake_api
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+TEST_CERT_DIR = os.path.join(os.path.dirname(__file__), 'testdata/certs')
+
+
+class ClientTest(unittest.TestCase):
+
+ @mock.patch('docker.api.APIClient.events')
+ def test_events(self, mock_func):
+ since = datetime.datetime(2016, 1, 1, 0, 0)
+ mock_func.return_value = fake_api.get_fake_events()[1]
+ client = docker.from_env()
+ assert client.events(since=since) == mock_func.return_value
+ mock_func.assert_called_with(since=since)
+
+ @mock.patch('docker.api.APIClient.info')
+ def test_info(self, mock_func):
+ mock_func.return_value = fake_api.get_fake_info()[1]
+ client = docker.from_env()
+ assert client.info() == mock_func.return_value
+ mock_func.assert_called_with()
+
+ @mock.patch('docker.api.APIClient.ping')
+ def test_ping(self, mock_func):
+ mock_func.return_value = True
+ client = docker.from_env()
+ assert client.ping() is True
+ mock_func.assert_called_with()
+
+ @mock.patch('docker.api.APIClient.version')
+ def test_version(self, mock_func):
+ mock_func.return_value = fake_api.get_fake_version()[1]
+ client = docker.from_env()
+ assert client.version() == mock_func.return_value
+ mock_func.assert_called_with()
+
+ def test_call_api_client_method(self):
+ client = docker.from_env()
+ with self.assertRaises(AttributeError) as cm:
+ client.create_container()
+ s = str(cm.exception)
+ assert "'DockerClient' object has no attribute 'create_container'" in s
+ assert "this method is now on the object APIClient" in s
+
+ with self.assertRaises(AttributeError) as cm:
+ client.abcdef()
+ s = str(cm.exception)
+ assert "'DockerClient' object has no attribute 'abcdef'" in s
+ assert "this method is now on the object APIClient" not in s
+
+ def test_call_containers(self):
+ client = docker.DockerClient(**kwargs_from_env())
+
+ with self.assertRaises(TypeError) as cm:
+ client.containers()
+
+ s = str(cm.exception)
+ assert "'ContainerCollection' object is not callable" in s
+ assert "docker.APIClient" in s
+
+
+class FromEnvTest(unittest.TestCase):
-class ClientTest(base.BaseTestCase):
def setUp(self):
self.os_environ = os.environ.copy()
@@ -22,57 +86,13 @@ class ClientTest(base.BaseTestCase):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
- client = Client.from_env()
- self.assertEqual(client.base_url, "https://192.168.59.103:2376")
+ client = docker.from_env()
+ self.assertEqual(client.api.base_url, "https://192.168.59.103:2376")
def test_from_env_with_version(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
- client = Client.from_env(version='2.32')
- self.assertEqual(client.base_url, "https://192.168.59.103:2376")
- self.assertEqual(client._version, '2.32')
-
-
-class DisableSocketTest(base.BaseTestCase):
- class DummySocket(object):
- def __init__(self, timeout=60):
- self.timeout = timeout
-
- def settimeout(self, timeout):
- self.timeout = timeout
-
- def gettimeout(self):
- return self.timeout
-
- def setUp(self):
- self.client = Client()
-
- def test_disable_socket_timeout(self):
- """Test that the timeout is disabled on a generic socket object."""
- socket = self.DummySocket()
-
- self.client._disable_socket_timeout(socket)
-
- self.assertEqual(socket.timeout, None)
-
- def test_disable_socket_timeout2(self):
- """Test that the timeouts are disabled on a generic socket object
- and it's _sock object if present."""
- socket = self.DummySocket()
- socket._sock = self.DummySocket()
-
- self.client._disable_socket_timeout(socket)
-
- self.assertEqual(socket.timeout, None)
- self.assertEqual(socket._sock.timeout, None)
-
- def test_disable_socket_timout_non_blocking(self):
- """Test that a non-blocking socket does not get set to blocking."""
- socket = self.DummySocket()
- socket._sock = self.DummySocket(0.0)
-
- self.client._disable_socket_timeout(socket)
-
- self.assertEqual(socket.timeout, None)
- self.assertEqual(socket._sock.timeout, 0.0)
+ client = docker.from_env(version='2.32')
+ self.assertEqual(client.api.base_url, "https://192.168.59.103:2376")
+ self.assertEqual(client.api._version, '2.32')
diff --git a/tests/unit/dockertypes_test.py b/tests/unit/dockertypes_test.py
new file mode 100644
index 0000000..2480b9e
--- /dev/null
+++ b/tests/unit/dockertypes_test.py
@@ -0,0 +1,255 @@
+# -*- coding: utf-8 -*-
+
+import unittest
+
+import pytest
+
+from docker.constants import DEFAULT_DOCKER_API_VERSION
+from docker.errors import InvalidVersion
+from docker.types import (
+ EndpointConfig, HostConfig, IPAMConfig, IPAMPool, LogConfig, Ulimit,
+)
+
+
+def create_host_config(*args, **kwargs):
+ return HostConfig(*args, **kwargs)
+
+
+class HostConfigTest(unittest.TestCase):
+ def test_create_host_config_no_options(self):
+ config = create_host_config(version='1.19')
+ self.assertFalse('NetworkMode' in config)
+
+ def test_create_host_config_no_options_newer_api_version(self):
+ config = create_host_config(version='1.20')
+ self.assertEqual(config['NetworkMode'], 'default')
+
+ def test_create_host_config_invalid_cpu_cfs_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.20', cpu_quota='0')
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.20', cpu_period='0')
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.20', cpu_quota=23.11)
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.20', cpu_period=1999.0)
+
+ def test_create_host_config_with_cpu_quota(self):
+ config = create_host_config(version='1.20', cpu_quota=1999)
+ self.assertEqual(config.get('CpuQuota'), 1999)
+
+ def test_create_host_config_with_cpu_period(self):
+ config = create_host_config(version='1.20', cpu_period=1999)
+ self.assertEqual(config.get('CpuPeriod'), 1999)
+
+ def test_create_host_config_with_blkio_constraints(self):
+ blkio_rate = [{"Path": "/dev/sda", "Rate": 1000}]
+ config = create_host_config(version='1.22',
+ blkio_weight=1999,
+ blkio_weight_device=blkio_rate,
+ device_read_bps=blkio_rate,
+ device_write_bps=blkio_rate,
+ device_read_iops=blkio_rate,
+ device_write_iops=blkio_rate)
+
+ self.assertEqual(config.get('BlkioWeight'), 1999)
+ self.assertTrue(config.get('BlkioWeightDevice') is blkio_rate)
+ self.assertTrue(config.get('BlkioDeviceReadBps') is blkio_rate)
+ self.assertTrue(config.get('BlkioDeviceWriteBps') is blkio_rate)
+ self.assertTrue(config.get('BlkioDeviceReadIOps') is blkio_rate)
+ self.assertTrue(config.get('BlkioDeviceWriteIOps') is blkio_rate)
+ self.assertEqual(blkio_rate[0]['Path'], "/dev/sda")
+ self.assertEqual(blkio_rate[0]['Rate'], 1000)
+
+ def test_create_host_config_with_shm_size(self):
+ config = create_host_config(version='1.22', shm_size=67108864)
+ self.assertEqual(config.get('ShmSize'), 67108864)
+
+ def test_create_host_config_with_shm_size_in_mb(self):
+ config = create_host_config(version='1.22', shm_size='64M')
+ self.assertEqual(config.get('ShmSize'), 67108864)
+
+ def test_create_host_config_with_oom_kill_disable(self):
+ config = create_host_config(version='1.20', oom_kill_disable=True)
+ self.assertEqual(config.get('OomKillDisable'), True)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(version='1.18.3',
+ oom_kill_disable=True))
+
+ def test_create_host_config_with_userns_mode(self):
+ config = create_host_config(version='1.23', userns_mode='host')
+ self.assertEqual(config.get('UsernsMode'), 'host')
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(version='1.22',
+ userns_mode='host'))
+ self.assertRaises(
+ ValueError, lambda: create_host_config(version='1.23',
+ userns_mode='host12'))
+
+ def test_create_host_config_with_oom_score_adj(self):
+ config = create_host_config(version='1.22', oom_score_adj=100)
+ self.assertEqual(config.get('OomScoreAdj'), 100)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(version='1.21',
+ oom_score_adj=100))
+ self.assertRaises(
+ TypeError, lambda: create_host_config(version='1.22',
+ oom_score_adj='100'))
+
+ def test_create_host_config_with_dns_opt(self):
+
+ tested_opts = ['use-vc', 'no-tld-query']
+ config = create_host_config(version='1.21', dns_opt=tested_opts)
+ dns_opts = config.get('DnsOptions')
+
+ self.assertTrue('use-vc' in dns_opts)
+ self.assertTrue('no-tld-query' in dns_opts)
+
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(version='1.20',
+ dns_opt=tested_opts))
+
+ def test_create_host_config_with_mem_reservation(self):
+ config = create_host_config(version='1.21', mem_reservation=67108864)
+ self.assertEqual(config.get('MemoryReservation'), 67108864)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(
+ version='1.20', mem_reservation=67108864))
+
+ def test_create_host_config_with_kernel_memory(self):
+ config = create_host_config(version='1.21', kernel_memory=67108864)
+ self.assertEqual(config.get('KernelMemory'), 67108864)
+ self.assertRaises(
+ InvalidVersion, lambda: create_host_config(
+ version='1.20', kernel_memory=67108864))
+
+ def test_create_host_config_with_pids_limit(self):
+ config = create_host_config(version='1.23', pids_limit=1024)
+ self.assertEqual(config.get('PidsLimit'), 1024)
+
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.22', pids_limit=1024)
+ with pytest.raises(TypeError):
+ create_host_config(version='1.23', pids_limit='1024')
+
+ def test_create_host_config_with_isolation(self):
+ config = create_host_config(version='1.24', isolation='hyperv')
+ self.assertEqual(config.get('Isolation'), 'hyperv')
+
+ with pytest.raises(InvalidVersion):
+ create_host_config(version='1.23', isolation='hyperv')
+ with pytest.raises(TypeError):
+ create_host_config(
+ version='1.24', isolation={'isolation': 'hyperv'}
+ )
+
+ def test_create_host_config_pid_mode(self):
+ with pytest.raises(ValueError):
+ create_host_config(version='1.23', pid_mode='baccab125')
+
+ config = create_host_config(version='1.23', pid_mode='host')
+ assert config.get('PidMode') == 'host'
+ config = create_host_config(version='1.24', pid_mode='baccab125')
+ assert config.get('PidMode') == 'baccab125'
+
+ def test_create_host_config_invalid_mem_swappiness(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.24', mem_swappiness='40')
+
+
+class UlimitTest(unittest.TestCase):
+ def test_create_host_config_dict_ulimit(self):
+ ulimit_dct = {'name': 'nofile', 'soft': 8096}
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ self.assertIn('Ulimits', config)
+ self.assertEqual(len(config['Ulimits']), 1)
+ ulimit_obj = config['Ulimits'][0]
+ self.assertTrue(isinstance(ulimit_obj, Ulimit))
+ self.assertEqual(ulimit_obj.name, ulimit_dct['name'])
+ self.assertEqual(ulimit_obj.soft, ulimit_dct['soft'])
+ self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
+
+ def test_create_host_config_dict_ulimit_capitals(self):
+ ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ self.assertIn('Ulimits', config)
+ self.assertEqual(len(config['Ulimits']), 1)
+ ulimit_obj = config['Ulimits'][0]
+ self.assertTrue(isinstance(ulimit_obj, Ulimit))
+ self.assertEqual(ulimit_obj.name, ulimit_dct['Name'])
+ self.assertEqual(ulimit_obj.soft, ulimit_dct['Soft'])
+ self.assertEqual(ulimit_obj.hard, ulimit_dct['Hard'])
+ self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
+
+ def test_create_host_config_obj_ulimit(self):
+ ulimit_dct = Ulimit(name='nofile', soft=8096)
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ self.assertIn('Ulimits', config)
+ self.assertEqual(len(config['Ulimits']), 1)
+ ulimit_obj = config['Ulimits'][0]
+ self.assertTrue(isinstance(ulimit_obj, Ulimit))
+ self.assertEqual(ulimit_obj, ulimit_dct)
+
+ def test_ulimit_invalid_type(self):
+ self.assertRaises(ValueError, lambda: Ulimit(name=None))
+ self.assertRaises(ValueError, lambda: Ulimit(name='hello', soft='123'))
+ self.assertRaises(ValueError, lambda: Ulimit(name='hello', hard='456'))
+
+
+class LogConfigTest(unittest.TestCase):
+ def test_create_host_config_dict_logconfig(self):
+ dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
+ config = create_host_config(
+ version=DEFAULT_DOCKER_API_VERSION, log_config=dct
+ )
+ self.assertIn('LogConfig', config)
+ self.assertTrue(isinstance(config['LogConfig'], LogConfig))
+ self.assertEqual(dct['type'], config['LogConfig'].type)
+
+ def test_create_host_config_obj_logconfig(self):
+ obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
+ config = create_host_config(
+ version=DEFAULT_DOCKER_API_VERSION, log_config=obj
+ )
+ self.assertIn('LogConfig', config)
+ self.assertTrue(isinstance(config['LogConfig'], LogConfig))
+ self.assertEqual(obj, config['LogConfig'])
+
+ def test_logconfig_invalid_config_type(self):
+ with pytest.raises(ValueError):
+ LogConfig(type=LogConfig.types.JSON, config='helloworld')
+
+
+class EndpointConfigTest(unittest.TestCase):
+ def test_create_endpoint_config_with_aliases(self):
+ config = EndpointConfig(version='1.22', aliases=['foo', 'bar'])
+ assert config == {'Aliases': ['foo', 'bar']}
+
+ with pytest.raises(InvalidVersion):
+ EndpointConfig(version='1.21', aliases=['foo', 'bar'])
+
+
+class IPAMConfigTest(unittest.TestCase):
+ def test_create_ipam_config(self):
+ ipam_pool = IPAMPool(subnet='192.168.52.0/24',
+ gateway='192.168.52.254')
+
+ ipam_config = IPAMConfig(pool_configs=[ipam_pool])
+ self.assertEqual(ipam_config, {
+ 'Driver': 'default',
+ 'Config': [{
+ 'Subnet': '192.168.52.0/24',
+ 'Gateway': '192.168.52.254',
+ 'AuxiliaryAddresses': None,
+ 'IPRange': None,
+ }]
+ })
diff --git a/tests/unit/errors_test.py b/tests/unit/errors_test.py
new file mode 100644
index 0000000..876ede3
--- /dev/null
+++ b/tests/unit/errors_test.py
@@ -0,0 +1,22 @@
+import unittest
+
+from docker.errors import (APIError, DockerException,
+ create_unexpected_kwargs_error)
+
+
+class APIErrorTest(unittest.TestCase):
+ def test_api_error_is_caught_by_dockerexception(self):
+ try:
+ raise APIError("this should be caught by DockerException")
+ except DockerException:
+ pass
+
+
+class CreateUnexpectedKwargsErrorTest(unittest.TestCase):
+ def test_create_unexpected_kwargs_error_single(self):
+ e = create_unexpected_kwargs_error('f', {'foo': 'bar'})
+ assert str(e) == "f() got an unexpected keyword argument 'foo'"
+
+ def test_create_unexpected_kwargs_error_multiple(self):
+ e = create_unexpected_kwargs_error('f', {'foo': 'bar', 'baz': 'bosh'})
+ assert str(e) == "f() got unexpected keyword arguments 'baz', 'foo'"
diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py
index a8fb60b..cf3f7d7 100644
--- a/tests/unit/fake_api.py
+++ b/tests/unit/fake_api.py
@@ -6,6 +6,7 @@ CURRENT_VERSION = 'v{0}'.format(constants.DEFAULT_DOCKER_API_VERSION)
FAKE_CONTAINER_ID = '3cc2351ab11b'
FAKE_IMAGE_ID = 'e9aa60c60128'
FAKE_EXEC_ID = 'd5d177f121dc'
+FAKE_NETWORK_ID = '33fb6a3462b8'
FAKE_IMAGE_NAME = 'test_image'
FAKE_TARBALL_PATH = '/path/to/tarball'
FAKE_REPO_NAME = 'repo'
@@ -46,6 +47,17 @@ def get_fake_info():
return status_code, response
+def post_fake_auth():
+ status_code = 200
+ response = {'Status': 'Login Succeeded',
+ 'IdentityToken': '9cbaf023786cd7'}
+ return status_code, response
+
+
+def get_fake_ping():
+ return 200, "OK"
+
+
def get_fake_search():
status_code = 200
response = [{'Name': 'busybox', 'Description': 'Fake Description'}]
@@ -125,7 +137,9 @@ def get_fake_inspect_container(tty=False):
'Config': {'Privileged': True, 'Tty': tty},
'ID': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
+ 'Name': 'foobar',
"State": {
+ "Status": "running",
"Running": True,
"Pid": 0,
"ExitCode": 0,
@@ -140,11 +154,11 @@ def get_fake_inspect_container(tty=False):
def get_fake_inspect_image():
status_code = 200
response = {
- 'id': FAKE_IMAGE_ID,
- 'parent': "27cf784147099545",
- 'created': "2013-03-23T22:24:18.818426-07:00",
- 'container': FAKE_CONTAINER_ID,
- 'container_config':
+ 'Id': FAKE_IMAGE_ID,
+ 'Parent': "27cf784147099545",
+ 'Created': "2013-03-23T22:24:18.818426-07:00",
+ 'Container': FAKE_CONTAINER_ID,
+ 'ContainerConfig':
{
"Hostname": "",
"User": "",
@@ -411,6 +425,61 @@ def post_fake_update_node():
return 200, None
+def get_fake_network_list():
+ return 200, [{
+ "Name": "bridge",
+ "Id": FAKE_NETWORK_ID,
+ "Scope": "local",
+ "Driver": "bridge",
+ "EnableIPv6": False,
+ "Internal": False,
+ "IPAM": {
+ "Driver": "default",
+ "Config": [
+ {
+ "Subnet": "172.17.0.0/16"
+ }
+ ]
+ },
+ "Containers": {
+ FAKE_CONTAINER_ID: {
+ "EndpointID": "ed2419a97c1d99",
+ "MacAddress": "02:42:ac:11:00:02",
+ "IPv4Address": "172.17.0.2/16",
+ "IPv6Address": ""
+ }
+ },
+ "Options": {
+ "com.docker.network.bridge.default_bridge": "true",
+ "com.docker.network.bridge.enable_icc": "true",
+ "com.docker.network.bridge.enable_ip_masquerade": "true",
+ "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+ "com.docker.network.bridge.name": "docker0",
+ "com.docker.network.driver.mtu": "1500"
+ }
+ }]
+
+
+def get_fake_network():
+ return 200, get_fake_network_list()[1][0]
+
+
+def post_fake_network():
+ return 201, {"Id": FAKE_NETWORK_ID, "Warnings": []}
+
+
+def delete_fake_network():
+ return 204, None
+
+
+def post_fake_network_connect():
+ return 200, None
+
+
+def post_fake_network_disconnect():
+ return 200, None
+
+
# Maps real api url to fake response callback
prefix = 'http+docker://localunixsocket'
if constants.IS_WINDOWS_PLATFORM:
@@ -423,6 +492,10 @@ fake_responses = {
get_fake_version,
'{1}/{0}/info'.format(CURRENT_VERSION, prefix):
get_fake_info,
+ '{1}/{0}/auth'.format(CURRENT_VERSION, prefix):
+ post_fake_auth,
+ '{1}/{0}/_ping'.format(CURRENT_VERSION, prefix):
+ get_fake_ping,
'{1}/{0}/images/search'.format(CURRENT_VERSION, prefix):
get_fake_search,
'{1}/{0}/images/json'.format(CURRENT_VERSION, prefix):
@@ -516,4 +589,24 @@ fake_responses = {
CURRENT_VERSION, prefix, FAKE_NODE_ID
), 'POST'):
post_fake_update_node,
+ ('{1}/{0}/networks'.format(CURRENT_VERSION, prefix), 'GET'):
+ get_fake_network_list,
+ ('{1}/{0}/networks/create'.format(CURRENT_VERSION, prefix), 'POST'):
+ post_fake_network,
+ ('{1}/{0}/networks/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'GET'):
+ get_fake_network,
+ ('{1}/{0}/networks/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'DELETE'):
+ delete_fake_network,
+ ('{1}/{0}/networks/{2}/connect'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'POST'):
+ post_fake_network_connect,
+ ('{1}/{0}/networks/{2}/disconnect'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'POST'):
+ post_fake_network_disconnect,
}
diff --git a/tests/unit/fake_api_client.py b/tests/unit/fake_api_client.py
new file mode 100644
index 0000000..47890ac
--- /dev/null
+++ b/tests/unit/fake_api_client.py
@@ -0,0 +1,61 @@
+import copy
+import docker
+
+from . import fake_api
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class CopyReturnMagicMock(mock.MagicMock):
+ """
+ A MagicMock which deep copies every return value.
+ """
+ def _mock_call(self, *args, **kwargs):
+ ret = super(CopyReturnMagicMock, self)._mock_call(*args, **kwargs)
+ if isinstance(ret, (dict, list)):
+ ret = copy.deepcopy(ret)
+ return ret
+
+
+def make_fake_api_client():
+ """
+    Returns a non-complete fake APIClient.
+
+ This returns most of the default cases correctly, but most arguments that
+ change behaviour will not work.
+ """
+ api_client = docker.APIClient()
+ mock_client = CopyReturnMagicMock(**{
+ 'build.return_value': fake_api.FAKE_IMAGE_ID,
+ 'commit.return_value': fake_api.post_fake_commit()[1],
+ 'containers.return_value': fake_api.get_fake_containers()[1],
+ 'create_container.return_value':
+ fake_api.post_fake_create_container()[1],
+ 'create_host_config.side_effect': api_client.create_host_config,
+ 'create_network.return_value': fake_api.post_fake_network()[1],
+ 'exec_create.return_value': fake_api.post_fake_exec_create()[1],
+ 'exec_start.return_value': fake_api.post_fake_exec_start()[1],
+ 'images.return_value': fake_api.get_fake_images()[1],
+ 'inspect_container.return_value':
+ fake_api.get_fake_inspect_container()[1],
+ 'inspect_image.return_value': fake_api.get_fake_inspect_image()[1],
+ 'inspect_network.return_value': fake_api.get_fake_network()[1],
+ 'logs.return_value': 'hello world\n',
+ 'networks.return_value': fake_api.get_fake_network_list()[1],
+ 'start.return_value': None,
+ 'wait.return_value': 0,
+ })
+ mock_client._version = docker.constants.DEFAULT_DOCKER_API_VERSION
+ return mock_client
+
+
+def make_fake_client():
+ """
+ Returns a Client with a fake APIClient.
+ """
+ client = docker.DockerClient()
+ client.api = make_fake_api_client()
+ return client
diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py
new file mode 100644
index 0000000..c3086c6
--- /dev/null
+++ b/tests/unit/models_containers_test.py
@@ -0,0 +1,465 @@
+import docker
+from docker.models.containers import Container, _create_container_args
+from docker.models.images import Image
+import unittest
+
+from .fake_api import FAKE_CONTAINER_ID, FAKE_IMAGE_ID, FAKE_EXEC_ID
+from .fake_api_client import make_fake_client
+
+
+class ContainerCollectionTest(unittest.TestCase):
+ def test_run(self):
+ client = make_fake_client()
+ out = client.containers.run("alpine", "echo hello world")
+
+ assert out == 'hello world\n'
+
+ client.api.create_container.assert_called_with(
+ image="alpine",
+ command="echo hello world",
+ detach=False,
+ host_config={'NetworkMode': 'default'}
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.start.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.wait.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.logs.assert_called_with(
+ FAKE_CONTAINER_ID,
+ stderr=False,
+ stdout=True
+ )
+
+ def test_create_container_args(self):
+ create_kwargs = _create_container_args(dict(
+ image='alpine',
+ command='echo hello world',
+ blkio_weight_device=[{'Path': 'foo', 'Weight': 3}],
+ blkio_weight=2,
+ cap_add=['foo'],
+ cap_drop=['bar'],
+ cgroup_parent='foobar',
+ cpu_period=1,
+ cpu_quota=2,
+ cpu_shares=5,
+ cpuset_cpus='0-3',
+ detach=False,
+ device_read_bps=[{'Path': 'foo', 'Rate': 3}],
+ device_read_iops=[{'Path': 'foo', 'Rate': 3}],
+ device_write_bps=[{'Path': 'foo', 'Rate': 3}],
+ device_write_iops=[{'Path': 'foo', 'Rate': 3}],
+ devices=['/dev/sda:/dev/xvda:rwm'],
+ dns=['8.8.8.8'],
+ domainname='example.com',
+ dns_opt=['foo'],
+ dns_search=['example.com'],
+ entrypoint='/bin/sh',
+ environment={'FOO': 'BAR'},
+ extra_hosts={'foo': '1.2.3.4'},
+ group_add=['blah'],
+ ipc_mode='foo',
+ kernel_memory=123,
+ labels={'key': 'value'},
+ links={'foo': 'bar'},
+ log_config={'Type': 'json-file', 'Config': {}},
+ lxc_conf={'foo': 'bar'},
+ healthcheck={'test': 'true'},
+ hostname='somehost',
+ mac_address='abc123',
+ mem_limit=123,
+ mem_reservation=123,
+ mem_swappiness=2,
+ memswap_limit=456,
+ name='somename',
+ network_disabled=False,
+ network_mode='blah',
+ networks=['foo'],
+ oom_kill_disable=True,
+ oom_score_adj=5,
+ pid_mode='host',
+ pids_limit=500,
+ ports={
+ 1111: 4567,
+ 2222: None
+ },
+ privileged=True,
+ publish_all_ports=True,
+ read_only=True,
+ restart_policy={'Name': 'always'},
+ security_opt=['blah'],
+ shm_size=123,
+ stdin_open=True,
+ stop_signal=9,
+ sysctls={'foo': 'bar'},
+ tmpfs={'/blah': ''},
+ tty=True,
+ ulimits=[{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
+ user='bob',
+ userns_mode='host',
+ version='1.23',
+ volume_driver='some_driver',
+ volumes=[
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ ],
+ volumes_from=['container'],
+ working_dir='/code'
+ ))
+
+ expected = dict(
+ image='alpine',
+ command='echo hello world',
+ domainname='example.com',
+ detach=False,
+ entrypoint='/bin/sh',
+ environment={'FOO': 'BAR'},
+ host_config={
+ 'Binds': [
+ '/home/user1/:/mnt/vol2',
+ '/var/www:/mnt/vol1:ro',
+ ],
+ 'BlkioDeviceReadBps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioDeviceReadIOps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioDeviceWriteBps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioDeviceWriteIOps': [{'Path': 'foo', 'Rate': 3}],
+ 'BlkioWeightDevice': [{'Path': 'foo', 'Weight': 3}],
+ 'BlkioWeight': 2,
+ 'CapAdd': ['foo'],
+ 'CapDrop': ['bar'],
+ 'CgroupParent': 'foobar',
+ 'CpuPeriod': 1,
+ 'CpuQuota': 2,
+ 'CpuShares': 5,
+ 'CpuSetCpus': '0-3',
+ 'Devices': [{'PathOnHost': '/dev/sda',
+ 'CgroupPermissions': 'rwm',
+ 'PathInContainer': '/dev/xvda'}],
+ 'Dns': ['8.8.8.8'],
+ 'DnsOptions': ['foo'],
+ 'DnsSearch': ['example.com'],
+ 'ExtraHosts': ['foo:1.2.3.4'],
+ 'GroupAdd': ['blah'],
+ 'IpcMode': 'foo',
+ 'KernelMemory': 123,
+ 'Links': ['foo:bar'],
+ 'LogConfig': {'Type': 'json-file', 'Config': {}},
+ 'LxcConf': [{'Key': 'foo', 'Value': 'bar'}],
+ 'Memory': 123,
+ 'MemoryReservation': 123,
+ 'MemorySwap': 456,
+ 'MemorySwappiness': 2,
+ 'NetworkMode': 'blah',
+ 'OomKillDisable': True,
+ 'OomScoreAdj': 5,
+ 'PidMode': 'host',
+ 'PidsLimit': 500,
+ 'PortBindings': {
+ '1111/tcp': [{'HostIp': '', 'HostPort': '4567'}],
+ '2222/tcp': [{'HostIp': '', 'HostPort': ''}]
+ },
+ 'Privileged': True,
+ 'PublishAllPorts': True,
+ 'ReadonlyRootfs': True,
+ 'RestartPolicy': {'Name': 'always'},
+ 'SecurityOpt': ['blah'],
+ 'ShmSize': 123,
+ 'Sysctls': {'foo': 'bar'},
+ 'Tmpfs': {'/blah': ''},
+ 'Ulimits': [{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
+ 'UsernsMode': 'host',
+ 'VolumesFrom': ['container'],
+ },
+ healthcheck={'test': 'true'},
+ hostname='somehost',
+ labels={'key': 'value'},
+ mac_address='abc123',
+ name='somename',
+ network_disabled=False,
+ networking_config={'foo': None},
+ ports=[('1111', 'tcp'), ('2222', 'tcp')],
+ stdin_open=True,
+ stop_signal=9,
+ tty=True,
+ user='bob',
+ volume_driver='some_driver',
+ volumes=['/home/user1/', '/var/www'],
+ working_dir='/code'
+ )
+
+ assert create_kwargs == expected
+
+ def test_run_detach(self):
+ client = make_fake_client()
+ container = client.containers.run('alpine', 'sleep 300', detach=True)
+ assert isinstance(container, Container)
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.create_container.assert_called_with(
+ image='alpine',
+ command='sleep 300',
+ detach=True,
+ host_config={
+ 'NetworkMode': 'default',
+ }
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+ client.api.start.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_run_pull(self):
+ client = make_fake_client()
+
+ # raise exception on first call, then return normal value
+ client.api.create_container.side_effect = [
+ docker.errors.ImageNotFound(""),
+ client.api.create_container.return_value
+ ]
+
+ container = client.containers.run('alpine', 'sleep 300', detach=True)
+
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.pull.assert_called_with('alpine')
+
+ def test_run_with_error(self):
+ client = make_fake_client()
+ client.api.logs.return_value = "some error"
+ client.api.wait.return_value = 1
+
+ with self.assertRaises(docker.errors.ContainerError) as cm:
+ client.containers.run('alpine', 'echo hello world')
+ assert cm.exception.exit_status == 1
+ assert "some error" in str(cm.exception)
+
+ def test_run_with_image_object(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ client.containers.run(image)
+ client.api.create_container.assert_called_with(
+ image=image.id,
+ command=None,
+ detach=False,
+ host_config={
+ 'NetworkMode': 'default',
+ }
+ )
+
+ def test_run_remove(self):
+ client = make_fake_client()
+ client.containers.run("alpine")
+ client.api.remove_container.assert_not_called()
+
+ client = make_fake_client()
+ client.api.wait.return_value = 1
+ with self.assertRaises(docker.errors.ContainerError):
+ client.containers.run("alpine")
+ client.api.remove_container.assert_not_called()
+
+ client = make_fake_client()
+ client.containers.run("alpine", remove=True)
+ client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ client = make_fake_client()
+ client.api.wait.return_value = 1
+ with self.assertRaises(docker.errors.ContainerError):
+ client.containers.run("alpine", remove=True)
+ client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ client = make_fake_client()
+ with self.assertRaises(RuntimeError):
+ client.containers.run("alpine", detach=True, remove=True)
+
+ def test_create(self):
+ client = make_fake_client()
+ container = client.containers.create(
+ 'alpine',
+ 'echo hello world',
+ environment={'FOO': 'BAR'}
+ )
+ assert isinstance(container, Container)
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.create_container.assert_called_with(
+ image='alpine',
+ command='echo hello world',
+ environment={'FOO': 'BAR'},
+ host_config={'NetworkMode': 'default'}
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_create_with_image_object(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ client.containers.create(image)
+ client.api.create_container.assert_called_with(
+ image=image.id,
+ command=None,
+ host_config={'NetworkMode': 'default'}
+ )
+
+ def test_get(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert isinstance(container, Container)
+ assert container.id == FAKE_CONTAINER_ID
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_list(self):
+ client = make_fake_client()
+ containers = client.containers.list(all=True)
+ client.api.containers.assert_called_with(
+ all=True,
+ before=None,
+ filters=None,
+ limit=-1,
+ since=None
+ )
+ client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
+ assert len(containers) == 1
+ assert isinstance(containers[0], Container)
+ assert containers[0].id == FAKE_CONTAINER_ID
+
+
+class ContainerTest(unittest.TestCase):
+ def test_name(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.name == 'foobar'
+
+ def test_status(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ assert container.status == "running"
+
+ def test_attach(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.attach(stream=True)
+ client.api.attach.assert_called_with(FAKE_CONTAINER_ID, stream=True)
+
+ def test_commit(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ image = container.commit()
+ client.api.commit.assert_called_with(FAKE_CONTAINER_ID,
+ repository=None,
+ tag=None)
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_diff(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.diff()
+ client.api.diff.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_exec_run(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.exec_run("echo hello world", privileged=True, stream=True)
+ client.api.exec_create.assert_called_with(
+ FAKE_CONTAINER_ID, "echo hello world", stdout=True, stderr=True,
+ stdin=False, tty=False, privileged=True, user=''
+ )
+ client.api.exec_start.assert_called_with(
+ FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False
+ )
+
+ def test_export(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.export()
+ client.api.export.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_get_archive(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.get_archive('foo')
+ client.api.get_archive.assert_called_with(FAKE_CONTAINER_ID, 'foo')
+
+ def test_kill(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.kill(signal=5)
+ client.api.kill.assert_called_with(FAKE_CONTAINER_ID, signal=5)
+
+ def test_logs(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.logs()
+ client.api.logs.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_pause(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.pause()
+ client.api.pause.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_put_archive(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.put_archive('path', 'foo')
+ client.api.put_archive.assert_called_with(FAKE_CONTAINER_ID,
+ 'path', 'foo')
+
+ def test_remove(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.remove()
+ client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_rename(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.rename("foo")
+ client.api.rename.assert_called_with(FAKE_CONTAINER_ID, "foo")
+
+ def test_resize(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.resize(1, 2)
+ client.api.resize.assert_called_with(FAKE_CONTAINER_ID, 1, 2)
+
+ def test_restart(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.restart()
+ client.api.restart.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_start(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.start()
+ client.api.start.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_stats(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.stats()
+ client.api.stats.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_stop(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.stop()
+ client.api.stop.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_top(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.top()
+ client.api.top.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_unpause(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.unpause()
+ client.api.unpause.assert_called_with(FAKE_CONTAINER_ID)
+
+ def test_update(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.update(cpu_shares=2)
+ client.api.update_container.assert_called_with(FAKE_CONTAINER_ID,
+ cpu_shares=2)
+
+ def test_wait(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.wait()
+ client.api.wait.assert_called_with(FAKE_CONTAINER_ID)
diff --git a/tests/unit/models_images_test.py b/tests/unit/models_images_test.py
new file mode 100644
index 0000000..392c58d
--- /dev/null
+++ b/tests/unit/models_images_test.py
@@ -0,0 +1,102 @@
+from docker.models.images import Image
+import unittest
+
+from .fake_api import FAKE_IMAGE_ID
+from .fake_api_client import make_fake_client
+
+
+class ImageCollectionTest(unittest.TestCase):
+ def test_build(self):
+ client = make_fake_client()
+ image = client.images.build()
+ client.api.build.assert_called_with()
+ client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_get(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_list(self):
+ client = make_fake_client()
+ images = client.images.list(all=True)
+ client.api.images.assert_called_with(all=True, name=None, filters=None)
+ assert len(images) == 1
+ assert isinstance(images[0], Image)
+ assert images[0].id == FAKE_IMAGE_ID
+
+ def test_load(self):
+ client = make_fake_client()
+ client.images.load('byte stream')
+ client.api.load_image.assert_called_with('byte stream')
+
+ def test_pull(self):
+ client = make_fake_client()
+ image = client.images.pull('test_image')
+ client.api.pull.assert_called_with('test_image')
+ client.api.inspect_image.assert_called_with('test_image')
+ assert isinstance(image, Image)
+ assert image.id == FAKE_IMAGE_ID
+
+ def test_push(self):
+ client = make_fake_client()
+ client.images.push('foobar', insecure_registry=True)
+ client.api.push.assert_called_with(
+ 'foobar',
+ tag=None,
+ insecure_registry=True
+ )
+
+ def test_remove(self):
+ client = make_fake_client()
+ client.images.remove('test_image')
+ client.api.remove_image.assert_called_with('test_image')
+
+ def test_search(self):
+ client = make_fake_client()
+ client.images.search('test')
+ client.api.search.assert_called_with('test')
+
+
+class ImageTest(unittest.TestCase):
+ def test_short_id(self):
+ image = Image(attrs={'Id': 'sha256:b6846070672ce4e8f1f91564ea6782bd675'
+ 'f69d65a6f73ef6262057ad0a15dcd'})
+ assert image.short_id == 'sha256:b684607067'
+
+ image = Image(attrs={'Id': 'b6846070672ce4e8f1f91564ea6782bd675'
+ 'f69d65a6f73ef6262057ad0a15dcd'})
+ assert image.short_id == 'b684607067'
+
+ def test_tags(self):
+ image = Image(attrs={
+ 'RepoTags': ['test_image:latest']
+ })
+ assert image.tags == ['test_image:latest']
+
+ image = Image(attrs={
+ 'RepoTags': ['<none>:<none>']
+ })
+ assert image.tags == []
+
+ def test_history(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ image.history()
+ client.api.history.assert_called_with(FAKE_IMAGE_ID)
+
+ def test_save(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ image.save()
+ client.api.get_image.assert_called_with(FAKE_IMAGE_ID)
+
+ def test_tag(self):
+ client = make_fake_client()
+ image = client.images.get(FAKE_IMAGE_ID)
+ image.tag('foo')
+ client.api.tag.assert_called_with(FAKE_IMAGE_ID, 'foo', tag=None)
diff --git a/tests/unit/models_networks_test.py b/tests/unit/models_networks_test.py
new file mode 100644
index 0000000..943b904
--- /dev/null
+++ b/tests/unit/models_networks_test.py
@@ -0,0 +1,64 @@
+import unittest
+
+from .fake_api import FAKE_NETWORK_ID, FAKE_CONTAINER_ID
+from .fake_api_client import make_fake_client
+
+
+class NetworkCollectionTest(unittest.TestCase):
+
+ def test_create(self):
+ client = make_fake_client()
+ network = client.networks.create("foobar", labels={'foo': 'bar'})
+ assert network.id == FAKE_NETWORK_ID
+ client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID)
+ client.api.create_network.assert_called_once_with(
+ "foobar",
+ labels={'foo': 'bar'}
+ )
+
+ def test_get(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ assert network.id == FAKE_NETWORK_ID
+ client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID)
+
+ def test_list(self):
+ client = make_fake_client()
+ networks = client.networks.list()
+ assert networks[0].id == FAKE_NETWORK_ID
+ client.api.networks.assert_called_once_with()
+
+ client = make_fake_client()
+ client.networks.list(ids=["abc"])
+ client.api.networks.assert_called_once_with(ids=["abc"])
+
+ client = make_fake_client()
+ client.networks.list(names=["foobar"])
+ client.api.networks.assert_called_once_with(names=["foobar"])
+
+
+class NetworkTest(unittest.TestCase):
+
+ def test_connect(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ network.connect(FAKE_CONTAINER_ID)
+ client.api.connect_container_to_network.assert_called_once_with(
+ FAKE_CONTAINER_ID,
+ FAKE_NETWORK_ID
+ )
+
+ def test_disconnect(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ network.disconnect(FAKE_CONTAINER_ID)
+ client.api.disconnect_container_from_network.assert_called_once_with(
+ FAKE_CONTAINER_ID,
+ FAKE_NETWORK_ID
+ )
+
+ def test_remove(self):
+ client = make_fake_client()
+ network = client.networks.get(FAKE_NETWORK_ID)
+ network.remove()
+ client.api.remove_network.assert_called_once_with(FAKE_NETWORK_ID)
diff --git a/tests/unit/models_resources_test.py b/tests/unit/models_resources_test.py
new file mode 100644
index 0000000..25c6a3e
--- /dev/null
+++ b/tests/unit/models_resources_test.py
@@ -0,0 +1,14 @@
+import unittest
+
+from .fake_api import FAKE_CONTAINER_ID
+from .fake_api_client import make_fake_client
+
+
+class ModelTest(unittest.TestCase):
+ def test_reload(self):
+ client = make_fake_client()
+ container = client.containers.get(FAKE_CONTAINER_ID)
+ container.attrs['Name'] = "oldname"
+ container.reload()
+ assert client.api.inspect_container.call_count == 2
+ assert container.attrs['Name'] == "foobar"
diff --git a/tests/unit/models_services_test.py b/tests/unit/models_services_test.py
new file mode 100644
index 0000000..c3b63ae
--- /dev/null
+++ b/tests/unit/models_services_test.py
@@ -0,0 +1,52 @@
+import unittest
+from docker.models.services import _get_create_service_kwargs
+
+
+class CreateServiceKwargsTest(unittest.TestCase):
+ def test_get_create_service_kwargs(self):
+ kwargs = _get_create_service_kwargs('test', {
+ 'image': 'foo',
+ 'command': 'true',
+ 'name': 'somename',
+ 'labels': {'key': 'value'},
+ 'mode': 'global',
+ 'update_config': {'update': 'config'},
+ 'networks': ['somenet'],
+ 'endpoint_spec': {'blah': 'blah'},
+ 'container_labels': {'containerkey': 'containervalue'},
+ 'resources': {'foo': 'bar'},
+ 'restart_policy': {'restart': 'policy'},
+ 'log_driver': 'logdriver',
+ 'log_driver_options': {'foo': 'bar'},
+ 'args': ['some', 'args'],
+ 'env': {'FOO': 'bar'},
+ 'workdir': '/',
+ 'user': 'bob',
+ 'mounts': [{'some': 'mounts'}],
+ 'stop_grace_period': 5,
+ 'constraints': ['foo=bar'],
+ })
+
+ task_template = kwargs.pop('task_template')
+
+ assert kwargs == {
+ 'name': 'somename',
+ 'labels': {'key': 'value'},
+ 'mode': 'global',
+ 'update_config': {'update': 'config'},
+ 'networks': ['somenet'],
+ 'endpoint_spec': {'blah': 'blah'},
+ }
+ assert set(task_template.keys()) == set([
+ 'ContainerSpec', 'Resources', 'RestartPolicy', 'Placement',
+ 'LogDriver'
+ ])
+ assert task_template['Placement'] == {'Constraints': ['foo=bar']}
+ assert task_template['LogDriver'] == {
+ 'Name': 'logdriver',
+ 'Options': {'foo': 'bar'}
+ }
+ assert set(task_template['ContainerSpec'].keys()) == set([
+ 'Image', 'Command', 'Args', 'Env', 'Dir', 'User', 'Labels',
+ 'Mounts', 'StopGracePeriod'
+ ])
diff --git a/tests/unit/ssladapter_test.py b/tests/unit/ssladapter_test.py
index 2ad1cad..2b7ce52 100644
--- a/tests/unit/ssladapter_test.py
+++ b/tests/unit/ssladapter_test.py
@@ -1,4 +1,5 @@
-from docker.ssladapter import ssladapter
+import unittest
+from docker.transport import ssladapter
try:
from backports.ssl_match_hostname import (
@@ -16,19 +17,18 @@ except ImportError:
OP_NO_SSLv3 = 0x2000000
OP_NO_TLSv1 = 0x4000000
-from .. import base
-
-class SSLAdapterTest(base.BaseTestCase):
+class SSLAdapterTest(unittest.TestCase):
def test_only_uses_tls(self):
ssl_context = ssladapter.urllib3.util.ssl_.create_urllib3_context()
assert ssl_context.options & OP_NO_SSLv3
- assert ssl_context.options & OP_NO_SSLv2
+ # if OpenSSL is compiled without SSL2 support, OP_NO_SSLv2 will be 0
+ assert not bool(OP_NO_SSLv2) or ssl_context.options & OP_NO_SSLv2
assert not ssl_context.options & OP_NO_TLSv1
-class MatchHostnameTest(base.BaseTestCase):
+class MatchHostnameTest(unittest.TestCase):
cert = {
'issuer': (
(('countryName', u'US'),),
diff --git a/tests/unit/swarm_test.py b/tests/unit/swarm_test.py
index 39d4ec4..374f8b2 100644
--- a/tests/unit/swarm_test.py
+++ b/tests/unit/swarm_test.py
@@ -4,10 +4,10 @@ import json
from . import fake_api
from ..helpers import requires_api_version
-from .api_test import (DockerClientTest, url_prefix, fake_request)
+from .api_test import BaseAPIClientTest, url_prefix, fake_request
-class SwarmTest(DockerClientTest):
+class SwarmTest(BaseAPIClientTest):
@requires_api_version('1.24')
def test_node_update(self):
node_spec = {
diff --git a/tests/unit/utils_json_stream_test.py b/tests/unit/utils_json_stream_test.py
new file mode 100644
index 0000000..f7aefd0
--- /dev/null
+++ b/tests/unit/utils_json_stream_test.py
@@ -0,0 +1,62 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from docker.utils.json_stream import json_splitter, stream_as_text, json_stream
+
+
+class TestJsonSplitter(object):
+
+ def test_json_splitter_no_object(self):
+ data = '{"foo": "bar'
+ assert json_splitter(data) is None
+
+ def test_json_splitter_with_object(self):
+ data = '{"foo": "bar"}\n \n{"next": "obj"}'
+ assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
+ def test_json_splitter_leading_whitespace(self):
+ data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}'
+ assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
+
+class TestStreamAsText(object):
+
+ def test_stream_with_non_utf_unicode_character(self):
+ stream = [b'\xed\xf3\xf3']
+ output, = stream_as_text(stream)
+ assert output == '���'
+
+ def test_stream_with_utf_character(self):
+ stream = ['ěĝ'.encode('utf-8')]
+ output, = stream_as_text(stream)
+ assert output == 'ěĝ'
+
+
+class TestJsonStream(object):
+
+ def test_with_falsy_entries(self):
+ stream = [
+ '{"one": "two"}\n{}\n',
+ "[1, 2, 3]\n[]\n",
+ ]
+ output = list(json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {},
+ [1, 2, 3],
+ [],
+ ]
+
+ def test_with_leading_whitespace(self):
+ stream = [
+ '\n \r\n {"one": "two"}{"x": 1}',
+ ' {"three": "four"}\t\t{"x": 2}'
+ ]
+ output = list(json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {'x': 1},
+ {'three': 'four'},
+ {'x': 2}
+ ]
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index 290874f..743d076 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -8,27 +8,25 @@ import shutil
import sys
import tarfile
import tempfile
+import unittest
import pytest
import six
-from docker.client import Client
-from docker.constants import (
- DEFAULT_DOCKER_API_VERSION, IS_WINDOWS_PLATFORM
-)
-from docker.errors import DockerException, InvalidVersion
+from docker.api.client import APIClient
+from docker.constants import IS_WINDOWS_PLATFORM
+from docker.errors import DockerException
from docker.utils import (
parse_repository_tag, parse_host, convert_filters, kwargs_from_env,
- create_host_config, Ulimit, LogConfig, parse_bytes, parse_env_file,
- exclude_paths, convert_volume_binds, decode_json_header, tar,
- split_command, create_ipam_config, create_ipam_pool, parse_devices,
- update_headers
+ parse_bytes, parse_env_file, exclude_paths, convert_volume_binds,
+ decode_json_header, tar, split_command, parse_devices, update_headers,
)
from docker.utils.ports import build_port_bindings, split_port
-from docker.utils.utils import create_endpoint_config, format_environment
+from docker.utils.utils import (
+ format_environment, should_check_directory
+)
-from .. import base
from ..helpers import make_tree
@@ -38,7 +36,7 @@ TEST_CERT_DIR = os.path.join(
)
-class DecoratorsTest(base.BaseTestCase):
+class DecoratorsTest(unittest.TestCase):
def test_update_headers(self):
sample_headers = {
'X-Docker-Locale': 'en-US',
@@ -47,7 +45,7 @@ class DecoratorsTest(base.BaseTestCase):
def f(self, headers=None):
return headers
- client = Client()
+ client = APIClient()
client._auth_configs = {}
g = update_headers(f)
@@ -69,204 +67,7 @@ class DecoratorsTest(base.BaseTestCase):
}
-class HostConfigTest(base.BaseTestCase):
- def test_create_host_config_no_options(self):
- config = create_host_config(version='1.19')
- self.assertFalse('NetworkMode' in config)
-
- def test_create_host_config_no_options_newer_api_version(self):
- config = create_host_config(version='1.20')
- self.assertEqual(config['NetworkMode'], 'default')
-
- def test_create_host_config_invalid_cpu_cfs_types(self):
- with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_quota='0')
-
- with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_period='0')
-
- with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_quota=23.11)
-
- with pytest.raises(TypeError):
- create_host_config(version='1.20', cpu_period=1999.0)
-
- def test_create_host_config_with_cpu_quota(self):
- config = create_host_config(version='1.20', cpu_quota=1999)
- self.assertEqual(config.get('CpuQuota'), 1999)
-
- def test_create_host_config_with_cpu_period(self):
- config = create_host_config(version='1.20', cpu_period=1999)
- self.assertEqual(config.get('CpuPeriod'), 1999)
-
- def test_create_host_config_with_blkio_constraints(self):
- blkio_rate = [{"Path": "/dev/sda", "Rate": 1000}]
- config = create_host_config(version='1.22',
- blkio_weight=1999,
- blkio_weight_device=blkio_rate,
- device_read_bps=blkio_rate,
- device_write_bps=blkio_rate,
- device_read_iops=blkio_rate,
- device_write_iops=blkio_rate)
-
- self.assertEqual(config.get('BlkioWeight'), 1999)
- self.assertTrue(config.get('BlkioWeightDevice') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceReadBps') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceWriteBps') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceReadIOps') is blkio_rate)
- self.assertTrue(config.get('BlkioDeviceWriteIOps') is blkio_rate)
- self.assertEqual(blkio_rate[0]['Path'], "/dev/sda")
- self.assertEqual(blkio_rate[0]['Rate'], 1000)
-
- def test_create_host_config_with_shm_size(self):
- config = create_host_config(version='1.22', shm_size=67108864)
- self.assertEqual(config.get('ShmSize'), 67108864)
-
- def test_create_host_config_with_shm_size_in_mb(self):
- config = create_host_config(version='1.22', shm_size='64M')
- self.assertEqual(config.get('ShmSize'), 67108864)
-
- def test_create_host_config_with_oom_kill_disable(self):
- config = create_host_config(version='1.20', oom_kill_disable=True)
- self.assertEqual(config.get('OomKillDisable'), True)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.18.3',
- oom_kill_disable=True))
-
- def test_create_host_config_with_userns_mode(self):
- config = create_host_config(version='1.23', userns_mode='host')
- self.assertEqual(config.get('UsernsMode'), 'host')
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.22',
- userns_mode='host'))
- self.assertRaises(
- ValueError, lambda: create_host_config(version='1.23',
- userns_mode='host12'))
-
- def test_create_host_config_with_oom_score_adj(self):
- config = create_host_config(version='1.22', oom_score_adj=100)
- self.assertEqual(config.get('OomScoreAdj'), 100)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.21',
- oom_score_adj=100))
- self.assertRaises(
- TypeError, lambda: create_host_config(version='1.22',
- oom_score_adj='100'))
-
- def test_create_host_config_with_dns_opt(self):
-
- tested_opts = ['use-vc', 'no-tld-query']
- config = create_host_config(version='1.21', dns_opt=tested_opts)
- dns_opts = config.get('DnsOptions')
-
- self.assertTrue('use-vc' in dns_opts)
- self.assertTrue('no-tld-query' in dns_opts)
-
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(version='1.20',
- dns_opt=tested_opts))
-
- def test_create_endpoint_config_with_aliases(self):
- config = create_endpoint_config(version='1.22', aliases=['foo', 'bar'])
- assert config == {'Aliases': ['foo', 'bar']}
-
- with pytest.raises(InvalidVersion):
- create_endpoint_config(version='1.21', aliases=['foo', 'bar'])
-
- def test_create_host_config_with_mem_reservation(self):
- config = create_host_config(version='1.21', mem_reservation=67108864)
- self.assertEqual(config.get('MemoryReservation'), 67108864)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(
- version='1.20', mem_reservation=67108864))
-
- def test_create_host_config_with_kernel_memory(self):
- config = create_host_config(version='1.21', kernel_memory=67108864)
- self.assertEqual(config.get('KernelMemory'), 67108864)
- self.assertRaises(
- InvalidVersion, lambda: create_host_config(
- version='1.20', kernel_memory=67108864))
-
- def test_create_host_config_with_pids_limit(self):
- config = create_host_config(version='1.23', pids_limit=1024)
- self.assertEqual(config.get('PidsLimit'), 1024)
-
- with pytest.raises(InvalidVersion):
- create_host_config(version='1.22', pids_limit=1024)
- with pytest.raises(TypeError):
- create_host_config(version='1.22', pids_limit='1024')
-
-
-class UlimitTest(base.BaseTestCase):
- def test_create_host_config_dict_ulimit(self):
- ulimit_dct = {'name': 'nofile', 'soft': 8096}
- config = create_host_config(
- ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
- )
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
- ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj.name, ulimit_dct['name'])
- self.assertEqual(ulimit_obj.soft, ulimit_dct['soft'])
- self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
-
- def test_create_host_config_dict_ulimit_capitals(self):
- ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
- config = create_host_config(
- ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
- )
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
- ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj.name, ulimit_dct['Name'])
- self.assertEqual(ulimit_obj.soft, ulimit_dct['Soft'])
- self.assertEqual(ulimit_obj.hard, ulimit_dct['Hard'])
- self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
-
- def test_create_host_config_obj_ulimit(self):
- ulimit_dct = Ulimit(name='nofile', soft=8096)
- config = create_host_config(
- ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
- )
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
- ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj, ulimit_dct)
-
- def test_ulimit_invalid_type(self):
- self.assertRaises(ValueError, lambda: Ulimit(name=None))
- self.assertRaises(ValueError, lambda: Ulimit(name='hello', soft='123'))
- self.assertRaises(ValueError, lambda: Ulimit(name='hello', hard='456'))
-
-
-class LogConfigTest(base.BaseTestCase):
- def test_create_host_config_dict_logconfig(self):
- dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
- config = create_host_config(
- version=DEFAULT_DOCKER_API_VERSION, log_config=dct
- )
- self.assertIn('LogConfig', config)
- self.assertTrue(isinstance(config['LogConfig'], LogConfig))
- self.assertEqual(dct['type'], config['LogConfig'].type)
-
- def test_create_host_config_obj_logconfig(self):
- obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
- config = create_host_config(
- version=DEFAULT_DOCKER_API_VERSION, log_config=obj
- )
- self.assertIn('LogConfig', config)
- self.assertTrue(isinstance(config['LogConfig'], LogConfig))
- self.assertEqual(obj, config['LogConfig'])
-
- def test_logconfig_invalid_config_type(self):
- with pytest.raises(ValueError):
- LogConfig(type=LogConfig.types.JSON, config='helloworld')
-
-
-class KwargsFromEnvTest(base.BaseTestCase):
+class KwargsFromEnvTest(unittest.TestCase):
def setUp(self):
self.os_environ = os.environ.copy()
@@ -294,7 +95,7 @@ class KwargsFromEnvTest(base.BaseTestCase):
self.assertEqual(False, kwargs['tls'].assert_hostname)
self.assertTrue(kwargs['tls'].verify)
try:
- client = Client(**kwargs)
+ client = APIClient(**kwargs)
self.assertEqual(kwargs['base_url'], client.base_url)
self.assertEqual(kwargs['tls'].ca_cert, client.verify)
self.assertEqual(kwargs['tls'].cert, client.cert)
@@ -313,7 +114,7 @@ class KwargsFromEnvTest(base.BaseTestCase):
self.assertEqual(True, kwargs['tls'].assert_hostname)
self.assertEqual(False, kwargs['tls'].verify)
try:
- client = Client(**kwargs)
+ client = APIClient(**kwargs)
self.assertEqual(kwargs['base_url'], client.base_url)
self.assertEqual(kwargs['tls'].cert, client.cert)
self.assertFalse(kwargs['tls'].verify)
@@ -366,7 +167,7 @@ class KwargsFromEnvTest(base.BaseTestCase):
assert 'tls' not in kwargs
-class ConverVolumeBindsTest(base.BaseTestCase):
+class ConverVolumeBindsTest(unittest.TestCase):
def test_convert_volume_binds_empty(self):
self.assertEqual(convert_volume_binds({}), [])
self.assertEqual(convert_volume_binds([]), [])
@@ -425,7 +226,7 @@ class ConverVolumeBindsTest(base.BaseTestCase):
)
-class ParseEnvFileTest(base.BaseTestCase):
+class ParseEnvFileTest(unittest.TestCase):
def generate_tempfile(self, file_content=None):
"""
Generates a temporary file for tests with the content
@@ -456,10 +257,18 @@ class ParseEnvFileTest(base.BaseTestCase):
def test_parse_env_file_commented_line(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\n#PASS=secret')
- get_parse_env_file = parse_env_file((env_file))
+ get_parse_env_file = parse_env_file(env_file)
self.assertEqual(get_parse_env_file, {'USER': 'jdoe'})
os.unlink(env_file)
+ def test_parse_env_file_newline(self):
+ env_file = self.generate_tempfile(
+ file_content='\nUSER=jdoe\n\n\nPASS=secret')
+ get_parse_env_file = parse_env_file(env_file)
+ self.assertEqual(get_parse_env_file,
+ {'USER': 'jdoe', 'PASS': 'secret'})
+ os.unlink(env_file)
+
def test_parse_env_file_invalid_line(self):
env_file = self.generate_tempfile(
file_content='USER jdoe')
@@ -468,7 +277,7 @@ class ParseEnvFileTest(base.BaseTestCase):
os.unlink(env_file)
-class ParseHostTest(base.BaseTestCase):
+class ParseHostTest(unittest.TestCase):
def test_parse_host(self):
invalid_hosts = [
'0.0.0.0',
@@ -530,7 +339,7 @@ class ParseHostTest(base.BaseTestCase):
assert parse_host(host_value) == expected_result
-class ParseRepositoryTagTest(base.BaseTestCase):
+class ParseRepositoryTagTest(unittest.TestCase):
sha = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
def test_index_image_no_tag(self):
@@ -576,7 +385,7 @@ class ParseRepositoryTagTest(base.BaseTestCase):
)
-class ParseDeviceTest(base.BaseTestCase):
+class ParseDeviceTest(unittest.TestCase):
def test_dict(self):
devices = parse_devices([{
'PathOnHost': '/dev/sda1',
@@ -635,7 +444,7 @@ class ParseDeviceTest(base.BaseTestCase):
})
-class ParseBytesTest(base.BaseTestCase):
+class ParseBytesTest(unittest.TestCase):
def test_parse_bytes_valid(self):
self.assertEqual(parse_bytes("512MB"), 536870912)
self.assertEqual(parse_bytes("512M"), 536870912)
@@ -655,7 +464,7 @@ class ParseBytesTest(base.BaseTestCase):
)
-class UtilsTest(base.BaseTestCase):
+class UtilsTest(unittest.TestCase):
longMessage = True
def test_convert_filters(self):
@@ -679,23 +488,8 @@ class UtilsTest(base.BaseTestCase):
decoded_data = decode_json_header(data)
self.assertEqual(obj, decoded_data)
- def test_create_ipam_config(self):
- ipam_pool = create_ipam_pool(subnet='192.168.52.0/24',
- gateway='192.168.52.254')
-
- ipam_config = create_ipam_config(pool_configs=[ipam_pool])
- self.assertEqual(ipam_config, {
- 'Driver': 'default',
- 'Config': [{
- 'Subnet': '192.168.52.0/24',
- 'Gateway': '192.168.52.254',
- 'AuxiliaryAddresses': None,
- 'IPRange': None,
- }]
- })
-
-class SplitCommandTest(base.BaseTestCase):
+class SplitCommandTest(unittest.TestCase):
def test_split_command_with_unicode(self):
self.assertEqual(split_command(u'echo μμ'), ['echo', 'μμ'])
@@ -704,7 +498,7 @@ class SplitCommandTest(base.BaseTestCase):
self.assertEqual(split_command('echo μμ'), ['echo', 'μμ'])
-class PortsTest(base.BaseTestCase):
+class PortsTest(unittest.TestCase):
def test_split_port_with_host_ip(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000")
self.assertEqual(internal_port, ["2000"])
@@ -817,7 +611,7 @@ def convert_paths(collection):
return set(map(lambda x: x.replace('/', '\\'), collection))
-class ExcludePathsTest(base.BaseTestCase):
+class ExcludePathsTest(unittest.TestCase):
dirs = [
'foo',
'foo/bar',
@@ -999,7 +793,7 @@ class ExcludePathsTest(base.BaseTestCase):
)
-class TarTest(base.Cleanup, base.BaseTestCase):
+class TarTest(unittest.TestCase):
def test_tar_with_excludes(self):
dirs = [
'foo',
@@ -1083,7 +877,79 @@ class TarTest(base.Cleanup, base.BaseTestCase):
)
-class FormatEnvironmentTest(base.BaseTestCase):
+class ShouldCheckDirectoryTest(unittest.TestCase):
+ exclude_patterns = [
+ 'exclude_rather_large_directory',
+ 'dir/with/subdir_excluded',
+ 'dir/with/exceptions'
+ ]
+
+ include_patterns = [
+ 'dir/with/exceptions/like_this_one',
+ 'dir/with/exceptions/in/descendents'
+ ]
+
+ def test_should_check_directory_not_excluded(self):
+ self.assertTrue(
+ should_check_directory('not_excluded', self.exclude_patterns,
+ self.include_patterns)
+ )
+
+ self.assertTrue(
+ should_check_directory('dir/with', self.exclude_patterns,
+ self.include_patterns)
+ )
+
+ def test_shoud_check_parent_directories_of_excluded(self):
+ self.assertTrue(
+ should_check_directory('dir', self.exclude_patterns,
+ self.include_patterns)
+ )
+ self.assertTrue(
+ should_check_directory('dir/with', self.exclude_patterns,
+ self.include_patterns)
+ )
+
+ def test_should_not_check_excluded_directories_with_no_exceptions(self):
+ self.assertFalse(
+ should_check_directory('exclude_rather_large_directory',
+ self.exclude_patterns, self.include_patterns
+ )
+ )
+ self.assertFalse(
+ should_check_directory('dir/with/subdir_excluded',
+ self.exclude_patterns, self.include_patterns
+ )
+ )
+
+ def test_should_check_excluded_directory_with_exceptions(self):
+ self.assertTrue(
+ should_check_directory('dir/with/exceptions',
+ self.exclude_patterns, self.include_patterns
+ )
+ )
+ self.assertTrue(
+ should_check_directory('dir/with/exceptions/in',
+ self.exclude_patterns, self.include_patterns
+ )
+ )
+
+ def test_should_not_check_siblings_of_exceptions(self):
+ self.assertFalse(
+ should_check_directory('dir/with/exceptions/but_not_here',
+ self.exclude_patterns, self.include_patterns
+ )
+ )
+
+ def test_should_check_subdirectories_of_exceptions(self):
+ self.assertTrue(
+ should_check_directory('dir/with/exceptions/like_this_one/subdir',
+ self.exclude_patterns, self.include_patterns
+ )
+ )
+
+
+class FormatEnvironmentTest(unittest.TestCase):
def test_format_env_binary_unicode_value(self):
env_dict = {
'ARTIST_NAME': b'\xec\x86\xa1\xec\xa7\x80\xec\x9d\x80'
diff --git a/tox.ini b/tox.ini
index be4508e..1a41c6e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py26, py27, py33, py34, py35, flake8
+envlist = py27, py33, py34, py35, flake8
skipsdist=True
[testenv]