-rw-r--r--  docker/api/client.py                         |   6
-rw-r--r--  docker/api/container.py                      |  17
-rw-r--r--  docker/api/image.py                          |   8
-rw-r--r--  docker/api/service.py                        |   5
-rw-r--r--  docker/auth.py                               |   5
-rw-r--r--  docker/constants.py                          |   1
-rw-r--r--  docker/models/containers.py                  |  20
-rw-r--r--  docker/models/images.py                      |  14
-rw-r--r--  docker/models/services.py                    |  15
-rw-r--r--  docker/types/containers.py                   |  12
-rw-r--r--  docker/types/services.py                     |  37
-rw-r--r--  docker/utils/build.py                        | 201
-rw-r--r--  docker/version.py                            |   2
-rw-r--r--  docs/change-log.md                           |  30
-rw-r--r--  requirements.txt                             |   2
-rw-r--r--  setup.py                                     |   2
-rw-r--r--  tests/integration/api_container_test.py      |  15
-rw-r--r--  tests/integration/api_service_test.py        |  52
-rw-r--r--  tests/integration/models_containers_test.py  |   3
-rw-r--r--  tests/integration/models_images_test.py      |   9
-rw-r--r--  tests/integration/models_services_test.py    |  41
-rw-r--r--  tests/unit/api_test.py                       |  14
-rw-r--r--  tests/unit/auth_test.py                      |  13
-rw-r--r--  tests/unit/models_containers_test.py         |   9
-rw-r--r--  tests/unit/models_images_test.py             |   5
-rw-r--r--  tests/unit/utils_test.py                     |  91
26 files changed, 415 insertions(+), 214 deletions(-)
diff --git a/docker/api/client.py b/docker/api/client.py
index e69d143..bddab61 100644
--- a/docker/api/client.py
+++ b/docker/api/client.py
@@ -350,10 +350,10 @@ class APIClient(
                 break
             yield data
 
-    def _stream_raw_result(self, response):
-        ''' Stream result for TTY-enabled container '''
+    def _stream_raw_result(self, response, chunk_size=1, decode=True):
+        ''' Stream result for TTY-enabled container and raw binary data'''
         self._raise_for_status(response)
-        for out in response.iter_content(chunk_size=1, decode_unicode=True):
+        for out in response.iter_content(chunk_size, decode):
             yield out
 
     def _read_from_socket(self, response, stream, tty=False):
diff --git a/docker/api/container.py b/docker/api/container.py
index 962d8cb..f8d52de 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -3,6 +3,7 @@ from datetime import datetime
 
 from .. import errors
 from .. import utils
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
 from ..types import (
     ContainerConfig, EndpointConfig, HostConfig, NetworkingConfig
 )
@@ -438,6 +439,8 @@ class ContainerApiMixin(object):
                 ``0,1``).
             cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
                 (``0-3``, ``0,1``). Only effective on NUMA systems.
+            device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
+                apply to the container.
             device_read_bps: Limit read rate (bytes per second) from a device
                 in the form of: `[{"Path": "device_path", "Rate": rate}]`
             device_read_iops: Limit read rate (IO per second) from a device.
@@ -643,12 +646,15 @@ class ContainerApiMixin(object):
         )
 
     @utils.check_resource('container')
-    def export(self, container):
+    def export(self, container, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
         """
         Export the contents of a filesystem as a tar archive.
 
         Args:
             container (str): The container to export
+            chunk_size (int): The number of bytes returned by each iteration
+                of the generator. If ``None``, data will be streamed as it is
+                received. Default: 2 MB
 
         Returns:
             (generator): The archived filesystem data stream
@@ -660,10 +666,10 @@ class ContainerApiMixin(object):
         res = self._get(
             self._url("/containers/{0}/export", container), stream=True
         )
-        return self._stream_raw_result(res)
+        return self._stream_raw_result(res, chunk_size, False)
 
     @utils.check_resource('container')
-    def get_archive(self, container, path):
+    def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
        """
         Retrieve a file or folder from a container in the form of a tar
         archive.
 
@@ -671,6 +677,9 @@ class ContainerApiMixin(object):
         Args:
             container (str): The container where the file is located
             path (str): Path to the file or folder to retrieve
+            chunk_size (int): The number of bytes returned by each iteration
+                of the generator. If ``None``, data will be streamed as it is
+                received. Default: 2 MB
 
         Returns:
             (tuple): First element is a raw tar data stream. Second element is
@@ -688,7 +697,7 @@ class ContainerApiMixin(object):
         self._raise_for_status(res)
         encoded_stat = res.headers.get('x-docker-container-path-stat')
         return (
-            self._stream_raw_result(res),
+            self._stream_raw_result(res, chunk_size, False),
             utils.decode_json_header(encoded_stat) if encoded_stat else None
         )
diff --git a/docker/api/image.py b/docker/api/image.py
index fa832a3..3ebca32 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -4,6 +4,7 @@ import os
 import six
 
 from .. import auth, errors, utils
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
 
 log = logging.getLogger(__name__)
 
@@ -11,12 +12,15 @@ log = logging.getLogger(__name__)
 class ImageApiMixin(object):
 
     @utils.check_resource('image')
-    def get_image(self, image):
+    def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
         """
         Get a tarball of an image. Similar to the ``docker save`` command.
 
         Args:
             image (str): Image name to get
+            chunk_size (int): The number of bytes returned by each iteration
+                of the generator. If ``None``, data will be streamed as it is
+                received. Default: 2 MB
 
         Returns:
             (generator): A stream of raw archive data.
@@ -34,7 +38,7 @@ class ImageApiMixin(object):
             >>> f.close()
         """
         res = self._get(self._url("/images/{0}/get", image), stream=True)
-        return self._stream_raw_result(res)
+        return self._stream_raw_result(res, chunk_size, False)
 
     @utils.check_resource('image')
     def history(self, image):
diff --git a/docker/api/service.py b/docker/api/service.py
index ceae8fc..03b0ca6 100644
--- a/docker/api/service.py
+++ b/docker/api/service.py
@@ -73,6 +73,11 @@ def _check_api_features(version, task_template, update_config, endpoint_spec):
         if container_spec.get('Isolation') is not None:
             raise_version_error('ContainerSpec.isolation', '1.35')
 
+    if task_template.get('Resources'):
+        if utils.version_lt(version, '1.32'):
+            if task_template['Resources'].get('GenericResources'):
+                raise_version_error('Resources.generic_resources', '1.32')
+
 
 def _merge_task_template(current, override):
     merged = current.copy()
diff --git a/docker/auth.py b/docker/auth.py
index 91be2b8..48fcd8b 100644
--- a/docker/auth.py
+++ b/docker/auth.py
@@ -90,9 +90,12 @@ def resolve_authconfig(authconfig, registry=None):
         log.debug(
             'Using credentials store "{0}"'.format(store_name)
         )
-        return _resolve_authconfig_credstore(
+        cfg = _resolve_authconfig_credstore(
             authconfig, registry, store_name
         )
+        if cfg is not None:
+            return cfg
+        log.debug('No entry in credstore - fetching from auth dict')
 
     # Default to the public index server
     registry = resolve_index_name(registry) if registry else INDEX_NAME
diff --git a/docker/constants.py b/docker/constants.py
index 9ab3673..7565a76 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -17,3 +17,4 @@ IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
 
 DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
 DEFAULT_NUM_POOLS = 25
+DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
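The download entry points above all funnel into `_stream_raw_result`, so the new `chunk_size` parameter controls how much data each iteration of the returned generator yields. A minimal usage sketch of the resulting 3.1.0 API, assuming a reachable Docker daemon and a locally available `busybox:latest` image:

    import docker

    client = docker.APIClient()

    # Stream the image tarball in 10 MiB chunks rather than the new 2 MB
    # default (DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048 bytes).
    with open('/tmp/busybox.tar', 'wb') as f:
        for chunk in client.get_image('busybox:latest',
                                      chunk_size=10 * 1024 * 1024):
            f.write(chunk)

Passing `chunk_size=None` yields data as it is received from the server.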
""" - return self.client.api.export(self.id) + return self.client.api.export(self.id, chunk_size) - def get_archive(self, path): + def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE): """ Retrieve a file or folder from the container in the form of a tar archive. Args: path (str): Path to the file or folder to retrieve + chunk_size (int): The number of bytes returned by each iteration + of the generator. If ``None``, data will be streamed as it is + received. Default: 2 MB Returns: (tuple): First element is a raw tar data stream. Second element is @@ -210,7 +219,7 @@ class Container(Model): :py:class:`docker.errors.APIError` If the server returns an error. """ - return self.client.api.get_archive(self.id, path) + return self.client.api.get_archive(self.id, path, chunk_size) def kill(self, signal=None): """ @@ -515,6 +524,8 @@ class ContainerCollection(Collection): (``0-3``, ``0,1``). Only effective on NUMA systems. detach (bool): Run container in the background and return a :py:class:`Container` object. + device_cgroup_rules (:py:class:`list`): A list of cgroup rules to + apply to the container. device_read_bps: Limit read rate (bytes per second) from a device in the form of: `[{"Path": "device_path", "Rate": rate}]` device_read_iops: Limit read rate (IO per second) from a device. @@ -912,6 +923,7 @@ RUN_HOST_CONFIG_KWARGS = [ 'cpuset_mems', 'cpu_rt_period', 'cpu_rt_runtime', + 'device_cgroup_rules', 'device_read_bps', 'device_read_iops', 'device_write_bps', diff --git a/docker/models/images.py b/docker/models/images.py index 0f3c71a..58d5d93 100644 --- a/docker/models/images.py +++ b/docker/models/images.py @@ -4,6 +4,7 @@ import re import six from ..api import APIClient +from ..constants import DEFAULT_DATA_CHUNK_SIZE from ..errors import BuildError, ImageLoadError from ..utils import parse_repository_tag from ..utils.json_stream import json_stream @@ -58,10 +59,15 @@ class Image(Model): """ return self.client.api.history(self.id) - def save(self): + def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE): """ Get a tarball of an image. Similar to the ``docker save`` command. + Args: + chunk_size (int): The number of bytes returned by each iteration + of the generator. If ``None``, data will be streamed as it is + received. Default: 2 MB + Returns: (generator): A stream of raw archive data. 
diff --git a/docker/models/services.py b/docker/models/services.py
index 8a633df..125896b 100644
--- a/docker/models/services.py
+++ b/docker/models/services.py
@@ -69,6 +69,11 @@ class Service(Model):
             spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
             kwargs['image'] = spec['Image']
 
+        if kwargs.get('force_update') is True:
+            task_template = self.attrs['Spec']['TaskTemplate']
+            current_value = int(task_template.get('ForceUpdate', 0))
+            kwargs['force_update'] = current_value + 1
+
         create_kwargs = _get_create_service_kwargs('update', kwargs)
 
         return self.client.api.update_service(
@@ -124,6 +129,16 @@ class Service(Model):
             service_mode,
             fetch_current_spec=True)
 
+    def force_update(self):
+        """
+        Force update the service even if no changes require it.
+
+        Returns:
+            ``True`` if successful.
+        """
+
+        return self.update(force_update=True, fetch_current_spec=True)
+
 
 class ServiceCollection(Collection):
     """Services on the Docker server."""
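Taken together, `Service.update` now translates a boolean `force_update` into the incremented `ForceUpdate` counter the Engine expects, and `force_update()` wraps that in a one-liner. A sketch against a swarm-mode daemon, with 'my-service' as a hypothetical service name:

    import docker

    client = docker.from_env()
    service = client.services.get('my-service')  # placeholder name

    # Re-fetches the current spec and bumps the ForceUpdate counter,
    # forcing the scheduler to recreate tasks even though the spec
    # itself is unchanged.
    assert service.force_update()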
""" def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None, - mem_reservation=None): + mem_reservation=None, generic_resources=None): limits = {} reservation = {} if cpu_limit is not None: @@ -319,13 +323,42 @@ class Resources(dict): reservation['NanoCPUs'] = cpu_reservation if mem_reservation is not None: reservation['MemoryBytes'] = mem_reservation - + if generic_resources is not None: + reservation['GenericResources'] = ( + _convert_generic_resources_dict(generic_resources) + ) if limits: self['Limits'] = limits if reservation: self['Reservations'] = reservation +def _convert_generic_resources_dict(generic_resources): + if isinstance(generic_resources, list): + return generic_resources + if not isinstance(generic_resources, dict): + raise errors.InvalidArgument( + 'generic_resources must be a dict or a list' + ' (found {})'.format(type(generic_resources)) + ) + resources = [] + for kind, value in six.iteritems(generic_resources): + resource_type = None + if isinstance(value, int): + resource_type = 'DiscreteResourceSpec' + elif isinstance(value, str): + resource_type = 'NamedResourceSpec' + else: + raise errors.InvalidArgument( + 'Unsupported generic resource reservation ' + 'type: {}'.format({kind: value}) + ) + resources.append({ + resource_type: {'Kind': kind, 'Value': value} + }) + return resources + + class UpdateConfig(dict): """ diff --git a/docker/utils/build.py b/docker/utils/build.py index d4223e7..1da56fb 100644 --- a/docker/utils/build.py +++ b/docker/utils/build.py @@ -1,20 +1,24 @@ import os +import re from ..constants import IS_WINDOWS_PLATFORM -from .fnmatch import fnmatch +from fnmatch import fnmatch +from itertools import chain from .utils import create_archive def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False): root = os.path.abspath(path) exclude = exclude or [] - return create_archive( files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)), root=root, fileobj=fileobj, gzip=gzip ) +_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/') + + def exclude_paths(root, patterns, dockerfile=None): """ Given a root directory path and a list of .dockerignore patterns, return @@ -23,121 +27,90 @@ def exclude_paths(root, patterns, dockerfile=None): All paths returned are relative to the root. """ + if dockerfile is None: dockerfile = 'Dockerfile' - patterns = [p.lstrip('/') for p in patterns] - exceptions = [p for p in patterns if p.startswith('!')] - - include_patterns = [p[1:] for p in exceptions] - include_patterns += [dockerfile, '.dockerignore'] - - exclude_patterns = list(set(patterns) - set(exceptions)) - - paths = get_paths(root, exclude_patterns, include_patterns, - has_exceptions=len(exceptions) > 0) - - return set(paths).union( - # If the Dockerfile is in a subdirectory that is excluded, get_paths - # will not descend into it and the file will be skipped. This ensures - # it doesn't happen. - set([dockerfile.replace('/', os.path.sep)]) - if os.path.exists(os.path.join(root, dockerfile)) else set() - ) - - -def should_include(path, exclude_patterns, include_patterns): - """ - Given a path, a list of exclude patterns, and a list of inclusion patterns: - - 1. Returns True if the path doesn't match any exclusion pattern - 2. Returns False if the path matches an exclusion pattern and doesn't match - an inclusion pattern - 3. Returns true if the path matches an exclusion pattern and matches an - inclusion pattern + def normalize(p): + # Leading and trailing slashes are not relevant. 
diff --git a/docker/types/services.py b/docker/types/services.py
index d530e61..09eb05e 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -306,9 +306,13 @@ class Resources(dict):
         mem_limit (int): Memory limit in Bytes.
         cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
         mem_reservation (int): Memory reservation in Bytes.
+        generic_resources (dict or :py:class:`list`): Node level generic
+            resources, for example a GPU, using the following format:
+            ``{ resource_name: resource_value }``. Alternatively, a list
+            of resource specifications as defined by the Engine API.
     """
     def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
-                 mem_reservation=None):
+                 mem_reservation=None, generic_resources=None):
         limits = {}
         reservation = {}
         if cpu_limit is not None:
@@ -319,13 +323,42 @@
             reservation['NanoCPUs'] = cpu_reservation
         if mem_reservation is not None:
             reservation['MemoryBytes'] = mem_reservation
-
+        if generic_resources is not None:
+            reservation['GenericResources'] = (
+                _convert_generic_resources_dict(generic_resources)
+            )
         if limits:
             self['Limits'] = limits
         if reservation:
             self['Reservations'] = reservation
 
 
+def _convert_generic_resources_dict(generic_resources):
+    if isinstance(generic_resources, list):
+        return generic_resources
+    if not isinstance(generic_resources, dict):
+        raise errors.InvalidArgument(
+            'generic_resources must be a dict or a list'
+            ' (found {})'.format(type(generic_resources))
+        )
+    resources = []
+    for kind, value in six.iteritems(generic_resources):
+        resource_type = None
+        if isinstance(value, int):
+            resource_type = 'DiscreteResourceSpec'
+        elif isinstance(value, str):
+            resource_type = 'NamedResourceSpec'
+        else:
+            raise errors.InvalidArgument(
+                'Unsupported generic resource reservation '
+                'type: {}'.format({kind: value})
+            )
+        resources.append({
+            resource_type: {'Kind': kind, 'Value': value}
+        })
+    return resources
+
+
 class UpdateConfig(dict):
     """
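The conversion helper above maps a dict entry to `DiscreteResourceSpec` for integer values and to `NamedResourceSpec` for strings, while a list is passed through untouched. A small sketch of both forms:

    from docker.types import Resources

    # Dict form: the int value becomes a DiscreteResourceSpec.
    res = Resources(generic_resources={'gpu': 2})
    assert res['Reservations']['GenericResources'] == [
        {'DiscreteResourceSpec': {'Kind': 'gpu', 'Value': 2}}
    ]

    # List form: already-shaped Engine API specs are used as-is.
    res = Resources(generic_resources=[
        {'NamedResourceSpec': {'Kind': 'gpu', 'Value': 'titan-x'}}
    ])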
diff --git a/docker/utils/build.py b/docker/utils/build.py
index d4223e7..1da56fb 100644
--- a/docker/utils/build.py
+++ b/docker/utils/build.py
@@ -1,20 +1,24 @@
 import os
+import re
 
 from ..constants import IS_WINDOWS_PLATFORM
-from .fnmatch import fnmatch
+from fnmatch import fnmatch
+from itertools import chain
 from .utils import create_archive
 
 
 def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
     root = os.path.abspath(path)
     exclude = exclude or []
-
     return create_archive(
         files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),
         root=root, fileobj=fileobj, gzip=gzip
     )
 
 
+_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
+
+
 def exclude_paths(root, patterns, dockerfile=None):
     """
     Given a root directory path and a list of .dockerignore patterns, return
@@ -23,121 +27,90 @@ def exclude_paths(root, patterns, dockerfile=None):
 
     All paths returned are relative to the root.
     """
+
     if dockerfile is None:
         dockerfile = 'Dockerfile'
 
-    patterns = [p.lstrip('/') for p in patterns]
-    exceptions = [p for p in patterns if p.startswith('!')]
-
-    include_patterns = [p[1:] for p in exceptions]
-    include_patterns += [dockerfile, '.dockerignore']
-
-    exclude_patterns = list(set(patterns) - set(exceptions))
-
-    paths = get_paths(root, exclude_patterns, include_patterns,
-                      has_exceptions=len(exceptions) > 0)
-
-    return set(paths).union(
-        # If the Dockerfile is in a subdirectory that is excluded, get_paths
-        # will not descend into it and the file will be skipped. This ensures
-        # it doesn't happen.
-        set([dockerfile.replace('/', os.path.sep)])
-        if os.path.exists(os.path.join(root, dockerfile)) else set()
-    )
-
-
-def should_include(path, exclude_patterns, include_patterns):
-    """
-    Given a path, a list of exclude patterns, and a list of inclusion patterns:
-
-    1. Returns True if the path doesn't match any exclusion pattern
-    2. Returns False if the path matches an exclusion pattern and doesn't match
-       an inclusion pattern
-    3. Returns true if the path matches an exclusion pattern and matches an
-       inclusion pattern
+    def normalize(p):
+        # Leading and trailing slashes are not relevant. Yes,
+        # "foo.py/" must exclude the "foo.py" regular file. "."
+        # components are not relevant either, even if the whole
+        # pattern is only ".", as the Docker reference states: "For
+        # historical reasons, the pattern . is ignored."
+        split = [pt for pt in re.split(_SEP, p) if pt and pt != '.']
+        # ".." component must be cleared with the potential previous
+        # component, regardless of whether it exists: "A preprocessing
+        # step [...] eliminates . and .. elements using Go's
+        # filepath.".
+        i = 0
+        while i < len(split):
+            if split[i] == '..':
+                del split[i]
+                if i > 0:
+                    del split[i - 1]
+                    i -= 1
+            else:
+                i += 1
+        return split
+
+    patterns = (
+        (True, normalize(p[1:]))
+        if p.startswith('!') else
+        (False, normalize(p))
+        for p in patterns)
+    patterns = list(reversed(list(chain(
+        # Exclude empty patterns such as "." or the empty string.
+        filter(lambda p: p[1], patterns),
+        # Always include the Dockerfile and .dockerignore
+        [(True, dockerfile.split('/')), (True, ['.dockerignore'])]))))
+    return set(walk(root, patterns))
+
+
+def walk(root, patterns, default=True):
     """
-    for pattern in exclude_patterns:
-        if match_path(path, pattern):
-            for pattern in include_patterns:
-                if match_path(path, pattern):
-                    return True
-            return False
-    return True
-
-
-def should_check_directory(directory_path, exclude_patterns, include_patterns):
+    A collection of files lying below root that should be included according
+    to patterns.
     """
-    Given a directory path, a list of exclude patterns, and a list of inclusion
-    patterns:
-
-    1. Returns True if the directory path should be included according to
-       should_include.
-    2. Returns True if the directory path is the prefix for an inclusion
-       pattern
-    3. Returns False otherwise
-    """
-
-    # To account for exception rules, check directories if their path is a
-    # a prefix to an inclusion pattern. This logic conforms with the current
-    # docker logic (2016-10-27):
-    # https://github.com/docker/docker/blob/bc52939b0455116ab8e0da67869ec81c1a1c3e2c/pkg/archive/archive.go#L640-L671
-
-    def normalize_path(path):
-        return path.replace(os.path.sep, '/')
-
-    path_with_slash = normalize_path(directory_path) + '/'
-    possible_child_patterns = [
-        pattern for pattern in map(normalize_path, include_patterns)
-        if (pattern + '/').startswith(path_with_slash)
-    ]
-    directory_included = should_include(
-        directory_path, exclude_patterns, include_patterns
-    )
-    return directory_included or len(possible_child_patterns) > 0
-
-
-def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
-    paths = []
-
-    for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
-        parent = os.path.relpath(parent, root)
-        if parent == '.':
-            parent = ''
-
-        # Remove excluded patterns from the list of directories to traverse
-        # by mutating the dirs we're iterating over.
-        # This looks strange, but is considered the correct way to skip
-        # traversal. See https://docs.python.org/2/library/os.html#os.walk
-        dirs[:] = [
-            d for d in dirs if should_check_directory(
-                os.path.join(parent, d), exclude_patterns, include_patterns
-            )
-        ]
-
-        for path in dirs:
-            if should_include(os.path.join(parent, path),
-                              exclude_patterns, include_patterns):
-                paths.append(os.path.join(parent, path))
-
-        for path in files:
-            if should_include(os.path.join(parent, path),
-                              exclude_patterns, include_patterns):
-                paths.append(os.path.join(parent, path))
-
-    return paths
-
-
-def match_path(path, pattern):
-    pattern = pattern.rstrip('/' + os.path.sep)
-    if pattern:
-        pattern = os.path.relpath(pattern)
-
-    pattern_components = pattern.split(os.path.sep)
-    if len(pattern_components) == 1 and IS_WINDOWS_PLATFORM:
-        pattern_components = pattern.split('/')
-    if '**' not in pattern:
-        path_components = path.split(os.path.sep)[:len(pattern_components)]
-    else:
-        path_components = path.split(os.path.sep)
-    return fnmatch('/'.join(path_components), '/'.join(pattern_components))
+    def match(p):
+        if p[1][0] == '**':
+            rec = (p[0], p[1][1:])
+            return [p] + (match(rec) if rec[1] else [rec])
+        elif fnmatch(f, p[1][0]):
+            return [(p[0], p[1][1:])]
+        else:
+            return []
+
+    for f in os.listdir(root):
+        cur = os.path.join(root, f)
+        # The patterns to apply if recursing into that directory.
+        sub = list(chain(*(match(p) for p in patterns)))
+        # Whether this file is explicitly included / excluded.
+        hit = next((p[0] for p in sub if not p[1]), None)
+        # Whether this file is implicitly included / excluded.
+        matched = default if hit is None else hit
+        sub = list(filter(lambda p: p[1], sub))
+        if os.path.isdir(cur):
+            # Entirely skip directories if there is no chance any subfile
+            # will be included.
+            if all(not p[0] for p in sub) and not matched:
+                continue
+            # I think this would greatly speed up dockerignore handling by
+            # not recursing into directories we are sure would be entirely
+            # included, and only yielding the directory itself, which will
+            # be recursively archived anyway. However the current unit test
+            # expects the full list of subfiles and I'm not 100% sure it
+            # would make no difference yet.
+            # if all(p[0] for p in sub) and matched:
+            #     yield f
+            #     continue
+            children = False
+            for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):
+                yield r
+                children = True
+            # The current unit tests expect directories only under those
+            # conditions. It might be simplifiable though.
+            if (not sub or not children) and hit or hit is None and default:
+                yield f
+        elif matched:
+            yield f
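The rewritten matcher evaluates patterns in reverse order and takes the first full match, which is what gives `.dockerignore` its last-line-wins semantics. A small sketch against a throwaway directory (it mirrors the `test_last_line_precedence` unit test added later in this diff):

    import os
    import tempfile

    from docker.utils.build import exclude_paths

    # Build a tiny context: three files, no Dockerfile.
    root = tempfile.mkdtemp()
    for name in ('README.md', 'README-secret.md', 'notes.md'):
        open(os.path.join(root, name), 'w').close()

    # '*.md' excludes everything, '!README*.md' re-includes the READMEs,
    # and the last line re-excludes the secret one.
    assert exclude_paths(
        root, ['*.md', '!README*.md', 'README-secret.md']
    ) == {'README.md'}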
diff --git a/docker/version.py b/docker/version.py
index 635e84c..c79cf93 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "3.0.1"
+version = "3.1.0"
 version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
diff --git a/docs/change-log.md b/docs/change-log.md
index 8ae88ef..ceab083 100644
--- a/docs/change-log.md
+++ b/docs/change-log.md
@@ -1,6 +1,36 @@
 Change log
 ==========
 
+3.1.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/44?closed=1)
+
+### Features
+
+* Added support for `device_cgroup_rules` in host config.
+* Added support for `generic_resources` when creating a `Resources`
+  object.
+* Added support for a configurable `chunk_size` parameter in `export`,
+  `get_archive` and `get_image` (`Image.save`).
+* Added a `force_update` method to the `Service` class.
+* In `Service.update`, when the `force_update` parameter is set to `True`,
+  the current `force_update` counter is incremented by one in the update
+  request.
+
+### Bugfixes
+
+* Fixed a bug where authentication through `login()` was being ignored if the
+  SDK was configured to use a credential store.
+* Fixed a bug where download methods would use an absurdly small chunk size,
+  leading to slow data retrieval.
+* Fixed a bug where using `DockerClient.images.pull` to pull an image by
+  digest would lead to an exception being raised.
+* `.dockerignore` rules should now be respected as defined by the spec,
+  including respect for last-line precedence and proper handling of absolute
+  paths.
+* The `pass` credential store is now properly supported.
+
 3.0.1
 -----
diff --git a/requirements.txt b/requirements.txt
index 1602750..2b281ae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@ asn1crypto==0.22.0
 backports.ssl-match-hostname==3.5.0.1
 cffi==1.10.0
 cryptography==1.9
-docker-pycreds==0.2.1
+docker-pycreds==0.2.2
 enum34==1.1.6
 idna==2.5
 ipaddress==1.0.18
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@ requirements = [
     'requests >= 2.14.2, != 2.18.0',
     'six >= 1.4.0',
     'websocket-client >= 0.32.0',
-    'docker-pycreds >= 0.2.1'
+    'docker-pycreds >= 0.2.2'
 ]
 
 extras_require = {
diff --git a/tests/integration/api_container_test.py b/tests/integration/api_container_test.py
index 01780a7..8447aa5 100644
--- a/tests/integration/api_container_test.py
+++ b/tests/integration/api_container_test.py
@@ -474,6 +474,21 @@
         assert config['HostConfig']['CpuRealtimeRuntime'] == 500
         assert config['HostConfig']['CpuRealtimePeriod'] == 1000
 
+    @requires_api_version('1.28')
+    def test_create_with_device_cgroup_rules(self):
+        rule = 'c 7:128 rwm'
+        ctnr = self.client.create_container(
+            BUSYBOX, 'cat /sys/fs/cgroup/devices/devices.list',
+            host_config=self.client.create_host_config(
+                device_cgroup_rules=[rule]
+            )
+        )
+        self.tmp_containers.append(ctnr)
+        config = self.client.inspect_container(ctnr)
+        assert config['HostConfig']['DeviceCgroupRules'] == [rule]
+        self.client.start(ctnr)
+        assert rule in self.client.logs(ctnr).decode('utf-8')
+
 
 class VolumeBindTest(BaseAPIIntegrationTest):
     def setUp(self):
diff --git a/tests/integration/api_service_test.py b/tests/integration/api_service_test.py
index 5cc3fc1..85f9dcc 100644
--- a/tests/integration/api_service_test.py
+++ b/tests/integration/api_service_test.py
@@ -4,6 +4,7 @@ import random
 import time
 
 import docker
+import pytest
 import six
 
 from ..helpers import (
@@ -212,6 +213,57 @@ class ServiceTest(BaseAPIIntegrationTest):
             'Reservations'
         ]
 
+    def _create_service_with_generic_resources(self, generic_resources):
+        container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
+
+        resources = docker.types.Resources(
+            generic_resources=generic_resources
+        )
+        task_tmpl = docker.types.TaskTemplate(
+            container_spec, resources=resources
+        )
+        name = self.get_service_name()
+        svc_id = self.client.create_service(task_tmpl, name=name)
+        return resources, self.client.inspect_service(svc_id)
+
+    @requires_api_version('1.32')
+    def test_create_service_with_generic_resources(self):
+        successful = [{
+            'input': [
+                {'DiscreteResourceSpec': {'Kind': 'gpu', 'Value': 1}},
+                {'NamedResourceSpec': {'Kind': 'gpu', 'Value': 'test'}}
+            ]}, {
+            'input': {'gpu': 2, 'mpi': 'latest'},
+            'expected': [
+                {'DiscreteResourceSpec': {'Kind': 'gpu', 'Value': 2}},
+                {'NamedResourceSpec': {'Kind': 'mpi', 'Value': 'latest'}}
+            ]}
+        ]
+
+        for test in successful:
+            t = test['input']
+            resrcs, svc_info = self._create_service_with_generic_resources(t)
+
+            assert 'TaskTemplate' in svc_info['Spec']
+            res_template = svc_info['Spec']['TaskTemplate']
+            assert 'Resources' in res_template
+            res_reservations = res_template['Resources']['Reservations']
+            assert res_reservations == resrcs['Reservations']
+            assert 'GenericResources' in res_reservations
+
+            def _key(d, specs=('DiscreteResourceSpec', 'NamedResourceSpec')):
+                return [d.get(s, {}).get('Kind', '') for s in specs]
+
+            actual = res_reservations['GenericResources']
+            expected = test.get('expected', test['input'])
+            assert sorted(actual, key=_key) == sorted(expected, key=_key)
+
+    @requires_api_version('1.32')
+    def test_create_service_with_invalid_generic_resources(self):
+        for test_input in ['1', 1.0, lambda: '1', {1, 2}]:
+            with pytest.raises(docker.errors.InvalidArgument):
+                self._create_service_with_generic_resources(test_input)
+
     def test_create_service_with_update_config(self):
         container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
         task_tmpl = docker.types.TaskTemplate(container_spec)
diff --git a/tests/integration/models_containers_test.py b/tests/integration/models_containers_test.py
index a4d9f9c..f9f59c4 100644
--- a/tests/integration/models_containers_test.py
+++ b/tests/integration/models_containers_test.py
@@ -55,7 +55,8 @@ class ContainerCollectionTest(BaseIntegrationTest):
 
     def test_run_with_named_volume(self):
         client = docker.from_env(version=TEST_API_VERSION)
-        client.volumes.create(name="somevolume")
+        volume = client.volumes.create(name="somevolume")
+        self.tmp_volumes.append(volume.id)
 
         container = client.containers.run(
             "alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
diff --git a/tests/integration/models_images_test.py b/tests/integration/models_images_test.py
index 2fa71a7..ae735ba 100644
--- a/tests/integration/models_images_test.py
+++ b/tests/integration/models_images_test.py
@@ -74,6 +74,15 @@ class ImageCollectionTest(BaseIntegrationTest):
         image = client.images.pull('alpine', tag='3.3')
         assert 'alpine:3.3' in image.attrs['RepoTags']
 
+    def test_pull_with_sha(self):
+        image_ref = (
+            'hello-world@sha256:083de497cff944f969d8499ab94f07134c50bcf5e6b95'
+            '59b27182d3fa80ce3f7'
+        )
+        client = docker.from_env(version=TEST_API_VERSION)
+        image = client.images.pull(image_ref)
+        assert image_ref in image.attrs['RepoDigests']
+
     def test_pull_multiple(self):
         client = docker.from_env(version=TEST_API_VERSION)
         images = client.images.pull('hello-world')
diff --git a/tests/integration/models_services_test.py b/tests/integration/models_services_test.py
index cb8eca2..36caa85 100644
--- a/tests/integration/models_services_test.py
+++ b/tests/integration/models_services_test.py
@@ -276,7 +276,7 @@ class ServiceTest(unittest.TestCase):
         assert spec.get('Command') == ['sleep', '300']
 
     @helpers.requires_api_version('1.25')
-    def test_restart_service(self):
+    def test_force_update_service(self):
         client = docker.from_env(version=TEST_API_VERSION)
         service = client.services.create(
             # create arguments
@@ -286,7 +286,7 @@
             command="sleep 300"
         )
         initial_version = service.version
-        service.update(
+        assert service.update(
             # create argument
             name=service.name,
             # task template argument
@@ -296,3 +296,40 @@
         )
         service.reload()
         assert service.version > initial_version
+
+    @helpers.requires_api_version('1.25')
+    def test_force_update_service_using_bool(self):
+        client = docker.from_env(version=TEST_API_VERSION)
+        service = client.services.create(
+            # create arguments
+            name=helpers.random_name(),
+            # ContainerSpec arguments
+            image="alpine",
+            command="sleep 300"
+        )
+        initial_version = service.version
+        assert service.update(
+            # create argument
+            name=service.name,
+            # task template argument
+            force_update=True,
+            # ContainerSpec argument
+            command="sleep 600"
+        )
+        service.reload()
+        assert service.version > initial_version
+
+    @helpers.requires_api_version('1.25')
+    def test_force_update_service_using_shorthand_method(self):
+        client = docker.from_env(version=TEST_API_VERSION)
+        service = client.services.create(
+            # create arguments
+            name=helpers.random_name(),
+            # ContainerSpec arguments
+            image="alpine",
+            command="sleep 300"
+        )
+        initial_version = service.version
+        assert service.force_update()
+        service.reload()
+        assert service.version > initial_version
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
index f65e13e..46cbd68 100644
--- a/tests/unit/api_test.py
+++ b/tests/unit/api_test.py
@@ -214,13 +214,13 @@ class DockerApiTest(BaseAPIClientTest):
 
     def test_login(self):
         self.client.login('sakuya', 'izayoi')
-        fake_request.assert_called_with(
-            'POST', url_prefix + 'auth',
-            data=json.dumps({'username': 'sakuya', 'password': 'izayoi'}),
-            timeout=DEFAULT_TIMEOUT_SECONDS,
-            headers={'Content-Type': 'application/json'}
-        )
-
+        args = fake_request.call_args
+        assert args[0][0] == 'POST'
+        assert args[0][1] == url_prefix + 'auth'
+        assert json.loads(args[1]['data']) == {
+            'username': 'sakuya', 'password': 'izayoi'
+        }
+        assert args[1]['headers'] == {'Content-Type': 'application/json'}
         assert self.client._auth_configs['auths'] == {
             'docker.io': {
                 'email': None,
diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
index d6981cd..ee32ca0 100644
--- a/tests/unit/auth_test.py
+++ b/tests/unit/auth_test.py
@@ -210,6 +210,19 @@ class ResolveAuthTest(unittest.TestCase):
             self.auth_config, auth.resolve_repository_name(image)[0]
         ) is None
 
+    def test_resolve_auth_with_empty_credstore_and_auth_dict(self):
+        auth_config = {
+            'auths': auth.parse_auth({
+                'https://index.docker.io/v1/': self.index_config,
+            }),
+            'credsStore': 'blackbox'
+        }
+        with mock.patch('docker.auth._resolve_authconfig_credstore') as m:
+            m.return_value = None
+            assert 'indexuser' == auth.resolve_authconfig(
+                auth_config, None
+            )['username']
+
 
 class CredStoreTest(unittest.TestCase):
     def test_get_credential_store(self):
diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py
index f79f5d5..2b0b499 100644
--- a/tests/unit/models_containers_test.py
+++ b/tests/unit/models_containers_test.py
@@ -1,4 +1,5 @@
 import docker
+from docker.constants import DEFAULT_DATA_CHUNK_SIZE
 from docker.models.containers import Container, _create_container_args
 from docker.models.images import Image
 import unittest
@@ -422,13 +423,17 @@
         client = make_fake_client()
         container = client.containers.get(FAKE_CONTAINER_ID)
         container.export()
-        client.api.export.assert_called_with(FAKE_CONTAINER_ID)
+        client.api.export.assert_called_with(
+            FAKE_CONTAINER_ID, DEFAULT_DATA_CHUNK_SIZE
+        )
 
     def test_get_archive(self):
         client = make_fake_client()
        container = client.containers.get(FAKE_CONTAINER_ID)
         container.get_archive('foo')
-        client.api.get_archive.assert_called_with(FAKE_CONTAINER_ID, 'foo')
+        client.api.get_archive.assert_called_with(
+            FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE
+        )
 
     def test_image(self):
         client = make_fake_client()
diff --git a/tests/unit/models_images_test.py b/tests/unit/models_images_test.py
index dacd72b..6783279 100644
--- a/tests/unit/models_images_test.py
+++ b/tests/unit/models_images_test.py
@@ -1,3 +1,4 @@
+from docker.constants import DEFAULT_DATA_CHUNK_SIZE
 from docker.models.images import Image
 
 import unittest
@@ -116,7 +117,9 @@ class ImageTest(unittest.TestCase):
         client = make_fake_client()
         image = client.images.get(FAKE_IMAGE_ID)
         image.save()
-        client.api.get_image.assert_called_with(FAKE_IMAGE_ID)
+        client.api.get_image.assert_called_with(
+            FAKE_IMAGE_ID, DEFAULT_DATA_CHUNK_SIZE
+        )
 
     def test_tag(self):
         client = make_fake_client()
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index eedcf71..8a4b193 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -23,7 +23,6 @@ from docker.utils import (
     decode_json_header, tar, split_command, parse_devices, update_headers,
 )
 
-from docker.utils.build import should_check_directory
 from docker.utils.ports import build_port_bindings, split_port
 from docker.utils.utils import format_environment
 
@@ -758,6 +757,13 @@
             self.all_paths - set(['foo/a.py'])
         )
 
+    def test_exclude_include_absolute_path(self):
+        base = make_tree([], ['a.py', 'b.py'])
+        assert exclude_paths(
+            base,
+            ['/*', '!/*.py']
+        ) == set(['a.py', 'b.py'])
+
     def test_single_subdir_with_path_traversal(self):
         assert self.exclude(['foo/whoops/../a.py']) == convert_paths(
             self.all_paths - set(['foo/a.py'])
@@ -876,6 +882,26 @@
             )
         )
 
+    def test_include_wildcard(self):
+        base = make_tree(['a'], ['a/b.py'])
+        assert exclude_paths(
+            base,
+            ['*', '!*/b.py']
+        ) == convert_paths(['a/b.py'])
+
+    def test_last_line_precedence(self):
+        base = make_tree(
+            [],
+            ['garbage.md',
+             'thrash.md',
+             'README.md',
+             'README-bis.md',
+             'README-secret.md'])
+        assert exclude_paths(
+            base,
+            ['*.md', '!README*.md', 'README-secret.md']
+        ) == set(['README.md', 'README-bis.md'])
+
 
 class TarTest(unittest.TestCase):
     def test_tar_with_excludes(self):
@@ -1012,69 +1038,6 @@
         assert tar_data.getmember('th.txt').mtime == -3600
 
 
-class ShouldCheckDirectoryTest(unittest.TestCase):
-    exclude_patterns = [
-        'exclude_rather_large_directory',
-        'dir/with/subdir_excluded',
-        'dir/with/exceptions'
-    ]
-
-    include_patterns = [
-        'dir/with/exceptions/like_this_one',
-        'dir/with/exceptions/in/descendents'
-    ]
-
-    def test_should_check_directory_not_excluded(self):
-        assert should_check_directory(
-            'not_excluded', self.exclude_patterns, self.include_patterns
-        )
-        assert should_check_directory(
-            convert_path('dir/with'), self.exclude_patterns,
-            self.include_patterns
-        )
-
-    def test_shoud_check_parent_directories_of_excluded(self):
-        assert should_check_directory(
-            'dir', self.exclude_patterns, self.include_patterns
-        )
-        assert should_check_directory(
-            convert_path('dir/with'), self.exclude_patterns,
-            self.include_patterns
-        )
-
-    def test_should_not_check_excluded_directories_with_no_exceptions(self):
-        assert not should_check_directory(
-            'exclude_rather_large_directory', self.exclude_patterns,
-            self.include_patterns
-        )
-        assert not should_check_directory(
-            convert_path('dir/with/subdir_excluded'), self.exclude_patterns,
-            self.include_patterns
-        )
-
-    def test_should_check_excluded_directory_with_exceptions(self):
-        assert should_check_directory(
-            convert_path('dir/with/exceptions'), self.exclude_patterns,
-            self.include_patterns
-        )
-        assert should_check_directory(
-            convert_path('dir/with/exceptions/in'), self.exclude_patterns,
-            self.include_patterns
-        )
-
-    def test_should_not_check_siblings_of_exceptions(self):
-        assert not should_check_directory(
-            convert_path('dir/with/exceptions/but_not_here'),
-            self.exclude_patterns, self.include_patterns
-        )
-
-    def test_should_check_subdirectories_of_exceptions(self):
-        assert should_check_directory(
-            convert_path('dir/with/exceptions/like_this_one/subdir'),
-            self.exclude_patterns, self.include_patterns
-        )
-
-
 class FormatEnvironmentTest(unittest.TestCase):
     def test_format_env_binary_unicode_value(self):
         env_dict = {