Mirror of https://github.com/docker/compose.git (synced 2026-02-11 19:19:23 +08:00)

Compare commits: 1.23.2...1.24.1-pat (161 commits)
.circleci/config.yml

@@ -10,7 +10,7 @@ jobs:
           command: ./script/setup/osx
       - run:
           name: install tox
-          command: sudo pip install --upgrade tox==2.1.1
+          command: sudo pip install --upgrade tox==2.1.1 virtualenv==16.2.0
       - run:
           name: unit tests
           command: tox -e py27,py36,py37 -- tests/unit
@@ -22,7 +22,7 @@ jobs:
       - checkout
       - run:
          name: upgrade python tools
-         command: sudo pip install --upgrade pip virtualenv
+         command: sudo pip install --upgrade pip virtualenv==16.2.0
      - run:
         name: setup script
         command: DEPLOYMENT_TARGET=10.11 ./script/setup/osx
new file: .github/ISSUE_TEMPLATE/bug_report.md (60 lines, vendored)

@@ -0,0 +1,60 @@
+---
+name: Bug report
+about: Report a bug encountered while using docker-compose
+
+---
+
+<!--
+Welcome to the docker-compose issue tracker! Before creating an issue, please heed the following:
+
+1. This tracker should only be used to report bugs and request features / enhancements to docker-compose
+   - For questions and general support, use https://forums.docker.com
+   - For documentation issues, use https://github.com/docker/docker.github.io
+   - For issues with the `docker stack` commands and version 3 of the Compose file, use
+     https://github.com/docker/cli
+2. Use the search function before creating a new issue. Duplicates will be closed and directed to
+   the original discussion.
+3. When making a bug report, make sure you provide all required information. The easier it is for
+   maintainers to reproduce, the faster it'll be fixed.
+-->
+
+## Description of the issue
+
+## Context information (for bug reports)
+
+**Output of `docker-compose version`**
+```
+(paste here)
+```
+
+**Output of `docker version`**
+```
+(paste here)
+```
+
+**Output of `docker-compose config`**
+(Make sure to add the relevant `-f` and other flags)
+```
+(paste here)
+```
+
+## Steps to reproduce the issue
+
+1.
+2.
+3.
+
+### Observed result
+
+### Expected result
+
+### Stacktrace / full error message
+
+```
+(paste here)
+```
+
+## Additional information
+
+OS version / distribution, `docker-compose` install method, etc.
new file: .github/ISSUE_TEMPLATE/feature_request.md (29 lines, vendored)

@@ -0,0 +1,29 @@
+---
+name: Feature request
+about: Suggest an idea to improve Compose
+
+---
+
+<!--
+Welcome to the docker-compose issue tracker! Before creating an issue, please heed the following:
+
+1. This tracker should only be used to report bugs and request features / enhancements to docker-compose
+   - For questions and general support, use https://forums.docker.com
+   - For documentation issues, use https://github.com/docker/docker.github.io
+   - For issues with the `docker stack` commands and version 3 of the Compose file, use
+     https://github.com/docker/cli
+2. Use the search function before creating a new issue. Duplicates will be closed and directed to
+   the original discussion.
+-->
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
new file: .github/ISSUE_TEMPLATE/question-about-using-compose.md (9 lines, vendored)

@@ -0,0 +1,9 @@
+---
+name: Question about using Compose
+about: This is not the appropriate channel
+
+---
+
+Please post on our forums: https://forums.docker.com for questions about using `docker-compose`.
+
+Posts that are not a bug report or a feature/enhancement request will not be addressed on this issue tracker.
.pre-commit-config.yaml

@@ -14,7 +14,7 @@
     - id: requirements-txt-fixer
     - id: trailing-whitespace
 - repo: git://github.com/asottile/reorder_python_imports
-  sha: v0.3.5
+  sha: v1.3.4
   hooks:
     - id: reorder-python-imports
       language_version: 'python2.7'
CHANGELOG.md

@@ -1,6 +1,58 @@
 Change log
 ==========

+1.24.0 (2019-03-22)
+-------------------
+
+### Features
+
+- Added support for connecting to the Docker Engine using the `ssh` protocol.
+
+- Added a `--all` flag to `docker-compose ps` to include stopped one-off containers
+  in the command's output.
+
+- Added bash completion for `ps --all|-a`.
+
+- Added support for `credential_spec`.
+
+- Added `--parallel` to `docker build`'s options in `bash` and `zsh` completion.
+
+### Bugfixes
+
+- Fixed a bug where some valid credential helpers weren't properly handled by Compose
+  when attempting to pull images from private registries.
+
+- Fixed an issue where the output of `docker-compose start` before containers were
+  created was misleading.
+
+- To match the Docker CLI behavior and to avoid confusing issues, Compose will no longer
+  accept whitespace in variable names sourced from environment files.
+
+- Compose will now report a configuration error if a service attempts to declare
+  duplicate mount points in the volumes section.
+
+- Fixed an issue with the containerized version of Compose that prevented users from
+  writing to stdin during interactive sessions started by `run` or `exec`.
+
+- One-off containers started by `run` no longer adopt the restart policy of the service,
+  and are instead set to never restart.
+
+- Fixed an issue that caused some container events to not appear in the output of
+  the `docker-compose events` command.
+
+- Missing images will no longer stop the execution of `docker-compose down` commands
+  (a warning will be displayed instead).
+
+- Forced the `virtualenv` version for macOS CI.
+
+- Fixed merging of Compose files when a network has `None` config.
+
+- Fixed `CTRL+C` issues by enabling `bootloader_ignore_signals` in `pyinstaller`.
+
+- Bumped the `docker-py` version to `3.7.2` to fix SSH and proxy config issues.
+
+- Fixed the release script and some typos in the release documentation.
+
 1.23.2 (2018-11-28)
 -------------------
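The `ssh` support arrives via the new `docker[ssh]` extra (paramiko) in setup.py. A minimal way to sanity-check the transport from Python — a sketch assuming docker-py >= 3.7 with the ssh extra installed, where `user@host` is a placeholder for a real SSH-reachable Docker host:

```python
import docker

# 'ssh://user@host' is a hypothetical endpoint; substitute a host you can
# actually reach over SSH with Docker installed.
client = docker.DockerClient(base_url='ssh://user@host')
print(client.version()['Version'])
```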
Dockerfile

@@ -17,6 +17,8 @@ ENV LANG en_US.UTF-8
 RUN useradd -d /home/user -m -s /bin/bash user
 WORKDIR /code/

+# FIXME(chris-crone): virtualenv 16.3.0 breaks build, force 16.2.0 until fixed
+RUN pip install virtualenv==16.2.0
 RUN pip install tox==2.1.1

 ADD requirements.txt /code/
@@ -25,6 +27,7 @@ ADD .pre-commit-config.yaml /code/
 ADD setup.py /code/
 ADD tox.ini /code/
 ADD compose /code/compose/
+ADD README.md /code/
 RUN tox --notest

 ADD . /code/
MANIFEST.in

@@ -4,8 +4,7 @@ include requirements.txt
 include requirements-dev.txt
 include tox.ini
 include *.md
-exclude README.md
-include README.rst
+include README.md
 include compose/config/*.json
 include compose/GITSHA
 recursive-include contrib/completion *
README.md

@@ -35,7 +35,7 @@ A `docker-compose.yml` looks like this:
         image: redis

 For more information about the Compose file, see the
-[Compose file reference](https://github.com/docker/docker.github.io/blob/master/compose/compose-file/compose-versioning.md)
+[Compose file reference](https://github.com/docker/docker.github.io/blob/master/compose/compose-file/compose-versioning.md).

 Compose has commands for managing the whole lifecycle of your application:

@@ -48,9 +48,8 @@ Installation and documentation
 ------------------------------

 - Full documentation is available on [Docker's website](https://docs.docker.com/compose/).
 - If you have any questions, you can talk in real-time with other developers in the #docker-compose IRC channel on Freenode. [Click here to join using IRCCloud.](https://www.irccloud.com/invite?hostname=irc.freenode.net&channel=%23docker-compose)
-- Code repository for Compose is on [GitHub](https://github.com/docker/compose)
-- If you find any problems please fill out an [issue](https://github.com/docker/compose/issues/new)
+- Code repository for Compose is on [GitHub](https://github.com/docker/compose).
+- If you find any problems please fill out an [issue](https://github.com/docker/compose/issues/new/choose). Thank you!

 Contributing
 ------------
compose/__init__.py

@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals

-__version__ = '1.23.2'
+__version__ = '1.24.0'
compose/cli/errors.py

@@ -67,7 +67,9 @@ def handle_connection_errors(client):


 def log_windows_pipe_error(exc):
-    if exc.winerror == 232:  # https://github.com/docker/compose/issues/5005
+    if exc.winerror == 2:
+        log.error("Couldn't connect to Docker daemon. You might need to start Docker for Windows.")
+    elif exc.winerror == 232:  # https://github.com/docker/compose/issues/5005
         log.error(
             "The current Compose file version is not compatible with your engine version. "
             "Please upgrade your Compose file to a more recent version, or set "
compose/cli/log_printer.py

@@ -236,7 +236,8 @@ def watch_events(thread_map, event_stream, presenters, thread_args):
         thread_map[event['id']] = build_thread(
             event['container'],
             next(presenters),
-            *thread_args)
+            *thread_args
+        )


 def consume_queue(queue, cascade_stop):
compose/cli/main.py

@@ -206,8 +206,8 @@ class TopLevelCommand(object):
                                   name specified in the client certificate
       --project-directory PATH    Specify an alternate working directory
                                   (default: the path of the Compose file)
-      --compatibility             If set, Compose will attempt to convert deploy
-                                  keys in v3 files to their non-Swarm equivalent
+      --compatibility             If set, Compose will attempt to convert keys
+                                  in v3 files to their non-Swarm equivalent

     Commands:
       build              Build or rebuild services
@@ -694,6 +694,7 @@ class TopLevelCommand(object):
         -q, --quiet          Only display IDs
         --services           Display services
         --filter KEY=VAL     Filter services by a property
+        -a, --all            Show all stopped containers (including those created by the run command)
         """
         if options['--quiet'] and options['--services']:
             raise UserError('--quiet and --services cannot be combined')
@@ -706,10 +707,14 @@ class TopLevelCommand(object):
             print('\n'.join(service.name for service in services))
             return

-        containers = sorted(
-            self.project.containers(service_names=options['SERVICE'], stopped=True) +
-            self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
-            key=attrgetter('name'))
+        if options['--all']:
+            containers = sorted(self.project.containers(service_names=options['SERVICE'],
+                                                        one_off=OneOffFilter.include, stopped=True))
+        else:
+            containers = sorted(
+                self.project.containers(service_names=options['SERVICE'], stopped=True) +
+                self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
+                key=attrgetter('name'))

         if options['--quiet']:
             for container in containers:
@@ -867,7 +872,7 @@ class TopLevelCommand(object):
         else:
             command = service.options.get('command')

-        container_options = build_container_options(options, detach, command)
+        container_options = build_one_off_container_options(options, detach, command)
         run_one_off_container(
             container_options, self.project, service, options,
             self.toplevel_options, self.project_dir
@@ -1262,7 +1267,7 @@ def build_action_from_opts(options):
     return BuildAction.none


-def build_container_options(options, detach, command):
+def build_one_off_container_options(options, detach, command):
     container_options = {
         'command': command,
         'tty': not (detach or options['-T'] or not sys.stdin.isatty()),
@@ -1283,8 +1288,8 @@ def build_container_options(options, detach, command):
         [""] if options['--entrypoint'] == '' else options['--entrypoint']
     )

-    if options['--rm']:
-        container_options['restart'] = None
+    # Ensure that run command remains one-off (issue #6302)
+    container_options['restart'] = None

     if options['--user']:
         container_options['user'] = options.get('--user')
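The `ps` hunk above replaces the union of two listing calls with a single call that includes stopped one-off containers. A standalone sketch of that selection logic, using stand-in container records rather than the real `Project.containers` API:

```python
from collections import namedtuple
from operator import attrgetter

# FakeContainer is a stand-in for illustration only.
FakeContainer = namedtuple('FakeContainer', 'name is_running one_off')

containers = [
    FakeContainer('web_1', True, False),
    FakeContainer('web_run_1', False, True),   # stopped one-off (from `run`)
    FakeContainer('db_1', False, False),       # stopped service container
]

def list_containers(all_flag):
    if all_flag:
        # --all: everything, including stopped one-off containers
        selected = containers
    else:
        # default: service containers (any state) plus running one-offs only
        selected = [c for c in containers if not c.one_off or c.is_running]
    return sorted(selected, key=attrgetter('name'))

assert [c.name for c in list_containers(True)] == ['db_1', 'web_1', 'web_run_1']
assert [c.name for c in list_containers(False)] == ['db_1', 'web_1']
```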
compose/config/config.py

@@ -8,6 +8,7 @@
 import os
 import string
 import sys
 from collections import namedtuple
+from operator import attrgetter

 import six
 import yaml
@@ -50,6 +51,7 @@ from .validation import match_named_volumes
 from .validation import validate_against_config_schema
 from .validation import validate_config_section
 from .validation import validate_cpu
+from .validation import validate_credential_spec
 from .validation import validate_depends_on
 from .validation import validate_extends_file_path
 from .validation import validate_healthcheck
@@ -368,7 +370,6 @@ def check_swarm_only_config(service_dicts, compatibility=False):
     )
     if not compatibility:
         check_swarm_only_key(service_dicts, 'deploy')
-        check_swarm_only_key(service_dicts, 'credential_spec')
         check_swarm_only_key(service_dicts, 'configs')
@@ -705,6 +706,7 @@ def validate_service(service_config, service_names, config_file):
     validate_depends_on(service_config, service_names)
     validate_links(service_config, service_names)
     validate_healthcheck(service_config)
+    validate_credential_spec(service_config)

     if not service_dict.get('image') and has_uppercase(service_name):
         raise ConfigurationError(
@@ -835,6 +837,17 @@ def finalize_service_volumes(service_dict, environment):
             finalized_volumes.append(MountSpec.parse(v, normalize, win_host))
         else:
             finalized_volumes.append(VolumeSpec.parse(v, normalize, win_host))

+    duplicate_mounts = []
+    mounts = [v.as_volume_spec() if isinstance(v, MountSpec) else v for v in finalized_volumes]
+    for mount in mounts:
+        if list(map(attrgetter('internal'), mounts)).count(mount.internal) > 1:
+            duplicate_mounts.append(mount.repr())
+
+    if duplicate_mounts:
+        raise ConfigurationError("Duplicate mount points: [%s]" % (
+            ', '.join(duplicate_mounts)))
+
     service_dict['volumes'] = finalized_volumes

     return service_dict
@@ -882,6 +895,7 @@ def finalize_service(service_config, service_names, version, environment, compat
         normalize_build(service_dict, service_config.working_dir, environment)

     if compatibility:
+        service_dict = translate_credential_spec_to_security_opt(service_dict)
         service_dict, ignored_keys = translate_deploy_keys_to_container_config(
             service_dict
         )
@@ -918,6 +932,25 @@ def convert_restart_policy(name):
     raise ConfigurationError('Invalid restart policy "{}"'.format(name))


+def convert_credential_spec_to_security_opt(credential_spec):
+    if 'file' in credential_spec:
+        return 'file://{file}'.format(file=credential_spec['file'])
+    return 'registry://{registry}'.format(registry=credential_spec['registry'])
+
+
+def translate_credential_spec_to_security_opt(service_dict):
+    result = []
+
+    if 'credential_spec' in service_dict:
+        spec = convert_credential_spec_to_security_opt(service_dict['credential_spec'])
+        result.append('credentialspec={spec}'.format(spec=spec))
+
+    if result:
+        service_dict['security_opt'] = result
+
+    return service_dict
+
+
 def translate_deploy_keys_to_container_config(service_dict):
+    if 'credential_spec' in service_dict:
+        del service_dict['credential_spec']
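In compatibility mode, the new translation rewrites `credential_spec` into a `security_opt` entry. A self-contained sketch of the mapping, mirroring the two functions above with plain dicts:

```python
def convert_credential_spec(credential_spec):
    # 'file' takes precedence; validation guarantees at least one key is set.
    if 'file' in credential_spec:
        return 'file://{}'.format(credential_spec['file'])
    return 'registry://{}'.format(credential_spec['registry'])

service = {'image': 'web', 'credential_spec': {'file': 'my-cred-spec.json'}}
if 'credential_spec' in service:
    service['security_opt'] = [
        'credentialspec={}'.format(convert_credential_spec(service.pop('credential_spec')))
    ]
assert service['security_opt'] == ['credentialspec=file://my-cred-spec.json']
```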
@@ -1040,7 +1073,6 @@ def merge_service_dicts(base, override, version):
     md.merge_mapping('environment', parse_environment)
     md.merge_mapping('labels', parse_labels)
     md.merge_mapping('ulimits', parse_flat_dict)
-    md.merge_mapping('networks', parse_networks)
     md.merge_mapping('sysctls', parse_sysctls)
     md.merge_mapping('depends_on', parse_depends_on)
     md.merge_mapping('storage_opt', parse_flat_dict)
@@ -1050,6 +1082,7 @@ def merge_service_dicts(base, override, version):
     md.merge_sequence('security_opt', types.SecurityOpt.parse)
     md.merge_mapping('extra_hosts', parse_extra_hosts)

+    md.merge_field('networks', merge_networks, default={})
     for field in ['volumes', 'devices']:
         md.merge_field(field, merge_path_mappings)

@@ -1154,6 +1187,22 @@ def merge_deploy(base, override):
     return dict(md)


+def merge_networks(base, override):
+    merged_networks = {}
+    all_network_names = set(base) | set(override)
+    base = {k: {} for k in base} if isinstance(base, list) else base
+    override = {k: {} for k in override} if isinstance(override, list) else override
+    for network_name in all_network_names:
+        md = MergeDict(base.get(network_name) or {}, override.get(network_name) or {})
+        md.merge_field('aliases', merge_unique_items_lists, [])
+        md.merge_field('link_local_ips', merge_unique_items_lists, [])
+        md.merge_scalar('priority')
+        md.merge_scalar('ipv4_address')
+        md.merge_scalar('ipv6_address')
+        merged_networks[network_name] = dict(md)
+    return merged_networks
+
+
 def merge_reservations(base, override):
     md = MergeDict(base, override)
     md.merge_scalar('cpus')
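`merge_networks` exists because a service's `networks` key may be either a list of names or a mapping with per-network options; normalizing the list form to empty mappings is what makes the mismatched-format tests later in this changeset pass. A reduced sketch that flattens the per-field merging into a plain dict update (the real `MergeDict` unions fields like `aliases` item by item):

```python
def merge_networks(base, override):
    # Normalize the list form (['baz']) to the mapping form ({'baz': {}}).
    base = {k: {} for k in base} if isinstance(base, list) else base
    override = {k: {} for k in override} if isinstance(override, list) else override
    merged = {}
    for name in set(base) | set(override):
        combined = dict(base.get(name) or {})
        combined.update(override.get(name) or {})
        merged[name] = combined
    return merged

assert merge_networks({'foobar': {'aliases': ['foo']}}, ['baz']) == {
    'foobar': {'aliases': ['foo']},
    'baz': {},
}
```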
@@ -1283,7 +1332,7 @@ def resolve_volume_paths(working_dir, service_dict):

 def resolve_volume_path(working_dir, volume):
     if isinstance(volume, dict):
-        if volume.get('source', '').startswith('.') and volume['type'] == 'bind':
+        if volume.get('source', '').startswith(('.', '~')) and volume['type'] == 'bind':
             volume['source'] = expand_path(working_dir, volume['source'])
         return volume
compose/config/environment.py

@@ -5,11 +5,13 @@ import codecs
 import contextlib
 import logging
 import os
+import re

 import six

 from ..const import IS_WINDOWS_PLATFORM
 from .errors import ConfigurationError
+from .errors import EnvFileNotFound

 log = logging.getLogger(__name__)

@@ -17,10 +19,16 @@ log = logging.getLogger(__name__)
 def split_env(env):
     if isinstance(env, six.binary_type):
         env = env.decode('utf-8', 'replace')
+    key = value = None
     if '=' in env:
-        return env.split('=', 1)
+        key, value = env.split('=', 1)
     else:
-        return env, None
+        key = env
+    if re.search(r'\s', key):
+        raise ConfigurationError(
+            "environment variable name '{}' may not contain whitespace.".format(key)
+        )
+    return key, value


 def env_vars_from_file(filename):
@@ -28,16 +36,19 @@ def env_vars_from_file(filename):
     Read in a line delimited file of environment variables.
     """
     if not os.path.exists(filename):
-        raise ConfigurationError("Couldn't find env file: %s" % filename)
+        raise EnvFileNotFound("Couldn't find env file: {}".format(filename))
     elif not os.path.isfile(filename):
-        raise ConfigurationError("%s is not a file." % (filename))
+        raise EnvFileNotFound("{} is not a file.".format(filename))
     env = {}
     with contextlib.closing(codecs.open(filename, 'r', 'utf-8-sig')) as fileobj:
         for line in fileobj:
             line = line.strip()
             if line and not line.startswith('#'):
-                k, v = split_env(line)
-                env[k] = v
+                try:
+                    k, v = split_env(line)
+                    env[k] = v
+                except ConfigurationError as e:
+                    raise ConfigurationError('In file {}: {}'.format(filename, e.msg))
     return env

@@ -55,9 +66,10 @@ class Environment(dict):
             env_file_path = os.path.join(base_dir, '.env')
             try:
                 return cls(env_vars_from_file(env_file_path))
-            except ConfigurationError:
+            except EnvFileNotFound:
                 pass
             return result

         instance = _initialize()
         instance.update(os.environ)
         return instance
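The parsing change is easy to demonstrate in isolation. A minimal standalone sketch of the new `.env` rule — names may no longer contain whitespace — independent of the Compose classes above:

```python
import re

def split_env(line):
    """Standalone sketch of Compose 1.24's env-file parsing rule."""
    if '=' in line:
        key, value = line.split('=', 1)
    else:
        key, value = line, None
    if re.search(r'\s', key):
        raise ValueError(
            "environment variable name '{}' may not contain whitespace".format(key))
    return key, value

assert split_env('FOO=bar') == ('FOO', 'bar')
assert split_env('FLAG') == ('FLAG', None)
# split_env('BAD NAME=x') now raises instead of silently accepting the name.
```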
compose/config/errors.py

@@ -19,6 +19,10 @@ class ConfigurationError(Exception):
         return self.msg


+class EnvFileNotFound(ConfigurationError):
+    pass
+
+
 class DependencyError(ConfigurationError):
     pass
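The subclass is the point of this hunk: `Environment.from_env_file` (above) wants to ignore only a missing `.env` file while still surfacing parse errors such as the new whitespace check. A sketch of the pattern under assumed names:

```python
class ConfigurationError(Exception):
    pass

class EnvFileNotFound(ConfigurationError):
    pass

def load_env(path_exists):
    # Hypothetical loader used only to illustrate the exception hierarchy.
    if not path_exists:
        raise EnvFileNotFound('no .env file')
    raise ConfigurationError('bad variable name')  # e.g. whitespace in a name

try:
    load_env(path_exists=False)
except EnvFileNotFound:
    pass  # a missing .env is fine; fall back to os.environ

try:
    load_env(path_exists=True)
except EnvFileNotFound:
    raise AssertionError('should not be caught here')
except ConfigurationError as e:
    assert 'bad variable name' in str(e)  # real parse errors still propagate
```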
compose/config/validation.py

@@ -240,6 +240,18 @@ def validate_depends_on(service_config, service_names):
     )


+def validate_credential_spec(service_config):
+    credential_spec = service_config.config.get('credential_spec')
+    if not credential_spec:
+        return
+
+    if 'registry' not in credential_spec and 'file' not in credential_spec:
+        raise ConfigurationError(
+            "Service '{s.name}' is missing 'credential_spec.file' or "
+            "'credential_spec.registry'".format(s=service_config)
+        )
+
+
 def get_unsupported_config_msg(path, error_key):
     msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
     if error_key in DOCKER_CONFIG_HINTS:
compose/const.py

@@ -7,7 +7,6 @@ from .version import ComposeVersion

 DEFAULT_TIMEOUT = 10
 HTTP_TIMEOUT = 60
-IMAGE_EVENTS = ['delete', 'import', 'load', 'pull', 'push', 'save', 'tag', 'untag']
 IS_WINDOWS_PLATFORM = (sys.platform == "win32")
 LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
 LABEL_ONE_OFF = 'com.docker.compose.oneoff'
compose/parallel.py

@@ -43,14 +43,17 @@ class GlobalLimit(object):
         cls.global_limiter = Semaphore(value)


-def parallel_execute_watch(events, writer, errors, results, msg, get_name):
+def parallel_execute_watch(events, writer, errors, results, msg, get_name, fail_check):
     """ Watch events from a parallel execution, update status and fill errors and results.
         Returns exception to re-raise.
     """
     error_to_reraise = None
     for obj, result, exception in events:
         if exception is None:
-            writer.write(msg, get_name(obj), 'done', green)
+            if fail_check is not None and fail_check(obj):
+                writer.write(msg, get_name(obj), 'failed', red)
+            else:
+                writer.write(msg, get_name(obj), 'done', green)
             results.append(result)
         elif isinstance(exception, ImageNotFound):
             # This is to bubble up ImageNotFound exceptions to the client so we
@@ -72,12 +75,14 @@ def parallel_execute_watch(events, writer, errors, results, msg, get_name):
     return error_to_reraise


-def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
+def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, fail_check=None):
     """Runs func on objects in parallel while ensuring that func is
     ran on object only after it is ran on all its dependencies.

     get_deps called on object must return a collection with its dependencies.
     get_name called on object must return its name.
+    fail_check is an additional failure check for cases that should display as a failure
+    in the CLI logs, but don't raise an exception (such as attempting to start 0 containers)
     """
     objects = list(objects)
     stream = get_output_stream(sys.stderr)
@@ -96,7 +101,9 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):

     errors = {}
     results = []
-    error_to_reraise = parallel_execute_watch(events, writer, errors, results, msg, get_name)
+    error_to_reraise = parallel_execute_watch(
+        events, writer, errors, results, msg, get_name, fail_check
+    )

     for obj_name, error in errors.items():
         stream.write("\nERROR: for {} {}\n".format(obj_name, error))
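The new `fail_check` hook lets a task display as failed without raising. A miniature stand-in for the status decision in `parallel_execute_watch`, with a fake service object in place of the real ones:

```python
def status_for(obj, exception, fail_check=None):
    """Sketch of the decision above: an exception wins, then fail_check,
    then 'done'."""
    if exception is not None:
        return 'error'
    if fail_check is not None and fail_check(obj):
        return 'failed'
    return 'done'

class FakeService(object):
    def __init__(self, containers):
        self._containers = containers
    def containers(self):
        return self._containers

# Starting a service that produced zero containers: no exception, but a failure,
# which is exactly how Project.start uses fail_check below.
check = lambda s: not s.containers()
assert status_for(FakeService([]), None, fail_check=check) == 'failed'
assert status_for(FakeService(['c1']), None, fail_check=check) == 'done'
```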
compose/progress_stream.py

@@ -98,14 +98,14 @@ def print_output_event(event, stream, is_terminal):


 def get_digest_from_pull(events):
+    digest = None
     for event in events:
         status = event.get('status')
         if not status or 'Digest' not in status:
             continue
-
-        _, digest = status.split(':', 1)
-        return digest.strip()
-    return None
+        else:
+            digest = status.split(':', 1)[1].strip()
+    return digest


 def get_digest_from_push(events):
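The rewritten `get_digest_from_pull` keeps scanning instead of returning at the first `Digest` status line, so the digest reported is the one from the last matching event. A sketch of the before/after behavior on assumed pull-event payloads:

```python
def first_digest(events):
    # old behavior: bail out at the first matching event
    for event in events:
        status = event.get('status') or ''
        if 'Digest' in status:
            return status.split(':', 1)[1].strip()
    return None

def last_digest(events):
    # new behavior: remember the most recent matching event
    digest = None
    for event in events:
        status = event.get('status') or ''
        if 'Digest' in status:
            digest = status.split(':', 1)[1].strip()
    return digest

events = [{'status': 'Digest: sha256:aaa'}, {'status': 'Digest: sha256:bbb'}]
assert first_digest(events) == 'sha256:aaa'
assert last_digest(events) == 'sha256:bbb'
```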
compose/project.py

@@ -10,13 +10,13 @@ from functools import reduce
 import enum
 import six
 from docker.errors import APIError
+from docker.utils import version_lt

 from . import parallel
 from .config import ConfigurationError
 from .config.config import V1
 from .config.sort_services import get_container_name_from_network_mode
 from .config.sort_services import get_service_name_from_network_mode
-from .const import IMAGE_EVENTS
 from .const import LABEL_ONE_OFF
 from .const import LABEL_PROJECT
 from .const import LABEL_SERVICE
@@ -29,6 +29,7 @@ from .service import ContainerNetworkMode
 from .service import ContainerPidMode
 from .service import ConvergenceStrategy
 from .service import NetworkMode
+from .service import parse_repository_tag
 from .service import PidMode
 from .service import Service
 from .service import ServiceNetworkMode
@@ -279,6 +280,7 @@ class Project(object):
             operator.attrgetter('name'),
             'Starting',
             get_deps,
+            fail_check=lambda obj: not obj.containers(),
         )

         return containers
@@ -401,11 +403,13 @@ class Project(object):
             detached=True,
             start=False)

-    def events(self, service_names=None):
+    def _legacy_event_processor(self, service_names):
+        # Only for v1 files or when Compose is forced to use an older API version
         def build_container_event(event, container):
             time = datetime.datetime.fromtimestamp(event['time'])
             time = time.replace(
-                microsecond=microseconds_from_time_nano(event['timeNano']))
+                microsecond=microseconds_from_time_nano(event['timeNano'])
+            )
             return {
                 'time': time,
                 'type': 'container',
@@ -424,17 +428,15 @@ class Project(object):
             filters={'label': self.labels()},
             decode=True
         ):
-            # The first part of this condition is a guard against some events
-            # broadcasted by swarm that don't have a status field.
+            # This is a guard against some events broadcasted by swarm that
+            # don't have a status field.
             # See https://github.com/docker/compose/issues/3316
-            if 'status' not in event or event['status'] in IMAGE_EVENTS:
-                # We don't receive any image events because labels aren't applied
-                # to images
+            if 'status' not in event:
                 continue

-            # TODO: get labels from the API v1.22 , see github issue 2618
             try:
-                # this can fail if the container has been removed
+                # this can fail if the container has been removed or if the event
+                # refers to an image
                 container = Container.from_id(self.client, event['id'])
             except APIError:
                 continue
@@ -442,6 +444,56 @@ class Project(object):
             continue
         yield build_container_event(event, container)

+    def events(self, service_names=None):
+        if version_lt(self.client.api_version, '1.22'):
+            # New, better event API was introduced in 1.22.
+            return self._legacy_event_processor(service_names)
+
+        def build_container_event(event):
+            container_attrs = event['Actor']['Attributes']
+            time = datetime.datetime.fromtimestamp(event['time'])
+            time = time.replace(
+                microsecond=microseconds_from_time_nano(event['timeNano'])
+            )
+
+            container = None
+            try:
+                container = Container.from_id(self.client, event['id'])
+            except APIError:
+                # Container may have been removed (e.g. if this is a destroy event)
+                pass
+
+            return {
+                'time': time,
+                'type': 'container',
+                'action': event['status'],
+                'id': event['Actor']['ID'],
+                'service': container_attrs.get(LABEL_SERVICE),
+                'attributes': dict([
+                    (k, v) for k, v in container_attrs.items()
+                    if not k.startswith('com.docker.compose.')
+                ]),
+                'container': container,
+            }
+
+        def yield_loop(service_names):
+            for event in self.client.events(
+                filters={'label': self.labels()},
+                decode=True
+            ):
+                # TODO: support other event types
+                if event.get('Type') != 'container':
+                    continue
+
+                try:
+                    if event['Actor']['Attributes'][LABEL_SERVICE] not in service_names:
+                        continue
+                except KeyError:
+                    continue
+                yield build_container_event(event)
+
+        return yield_loop(set(service_names) if service_names else self.service_names)
+
     def up(self,
            service_names=None,
            start_deps=True,
@@ -592,8 +644,15 @@ class Project(object):
             service.pull(ignore_pull_failures, silent=silent)

     def push(self, service_names=None, ignore_push_failures=False):
+        unique_images = set()
         for service in self.get_services(service_names, include_deps=False):
-            service.push(ignore_push_failures)
+            # Considering <image> and <image:latest> as the same
+            repo, tag, sep = parse_repository_tag(service.image_name)
+            service_image_name = sep.join((repo, tag)) if tag else sep.join((repo, 'latest'))
+
+            if service_image_name not in unique_images:
+                service.push(ignore_push_failures)
+                unique_images.add(service_image_name)

     def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
         ctnrs = list(filter(None, [
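The push deduplication treats `image` and `image:latest` as the same target, so two services sharing one image trigger a single push. A standalone sketch of the normalization, with a deliberately simplified tag parser standing in for the real `parse_repository_tag` (which also handles digests and registry ports):

```python
def normalize_image_name(name):
    # Simplified stand-in: no digest or registry:port handling, just enough
    # to show the <image> == <image:latest> equivalence.
    repo, _, tag = name.partition(':')
    return '{}:{}'.format(repo, tag or 'latest')

pushed = set()
for image in ['busybox', 'busybox:latest', 'busybox:1.27.2']:
    name = normalize_image_name(image)
    if name not in pushed:
        pushed.add(name)  # a real implementation would push here

assert pushed == {'busybox:latest', 'busybox:1.27.2'}
```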
compose/service.py

@@ -199,7 +199,9 @@ class Service(object):
     def __repr__(self):
         return '<Service: {}>'.format(self.name)

-    def containers(self, stopped=False, one_off=False, filters={}, labels=None):
+    def containers(self, stopped=False, one_off=False, filters=None, labels=None):
+        if filters is None:
+            filters = {}
         filters.update({'label': self.labels(one_off=one_off) + (labels or [])})

         result = list(filter(None, [
@@ -289,7 +291,7 @@ class Service(object):
             c for c in stopped_containers if self._containers_have_diverged([c])
         ]
         for c in divergent_containers:
-                c.remove()
+            c.remove()

         all_containers = list(set(all_containers) - set(divergent_containers))

@@ -459,50 +461,50 @@
(This hunk re-indents `_execute_convergence_recreate` and `_execute_convergence_start`, whose bodies were indented one level too deep; the logic is unchanged. The corrected version:)

     def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
                                       renew_anonymous_volumes):
         if scale is not None and len(containers) > scale:
             self._downscale(containers[scale:], timeout)
             containers = containers[:scale]

         def recreate(container):
             return self.recreate_container(
                 container, timeout=timeout, attach_logs=not detached,
                 start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
             )
         containers, errors = parallel_execute(
             containers,
             recreate,
             lambda c: c.name,
             "Recreating",
         )
         for error in errors.values():
             raise OperationFailedError(error)

         if scale is not None and len(containers) < scale:
             containers.extend(self._execute_convergence_create(
                 scale - len(containers), detached, start
             ))
         return containers

     def _execute_convergence_start(self, containers, scale, timeout, detached, start):
         if scale is not None and len(containers) > scale:
             self._downscale(containers[scale:], timeout)
             containers = containers[:scale]
         if start:
             _, errors = parallel_execute(
                 containers,
                 lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
                 lambda c: c.name,
                 "Starting",
             )

             for error in errors.values():
                 raise OperationFailedError(error)

         if scale is not None and len(containers) < scale:
             containers.extend(self._execute_convergence_create(
                 scale - len(containers), detached, start
             ))
         return containers

@@ -1146,6 +1148,9 @@ class Service(object):
         try:
             self.client.remove_image(self.image_name)
             return True
+        except ImageNotFound:
+            log.warning("Image %s not found.", self.image_name)
+            return False
         except APIError as e:
             log.error("Failed to remove image for service %s: %s", self.name, e)
             return False
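The `filters={}` fix in the first hunk above is the classic mutable-default pitfall: the dict is created once at function definition time, so `filters.update(...)` leaks state across calls. A minimal reproduction:

```python
def leaky(filters={}):
    # The default dict is shared by every call that omits the argument.
    filters.update({'label': ['x']})
    return filters

def safe(filters=None):
    if filters is None:
        filters = {}  # fresh dict per call, as in the fixed Service.containers
    filters.update({'label': ['x']})
    return filters

first = leaky()
first['stray'] = True
assert 'stray' in leaky()      # the default dict carried state over

second = safe()
second['stray'] = True
assert 'stray' not in safe()   # each call gets a fresh dict
```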
compose/utils.py (the mirror dropped the +/- markers on this hunk, which removes one import line)

@@ -3,7 +3,6 @@ from __future__ import unicode_literals

 import codecs
 import hashlib
 import json
 import json.decoder
 import logging
 import ntpath
contrib/completion/bash/docker-compose

@@ -114,7 +114,7 @@ _docker_compose_build() {

 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull --parallel" -- "$cur" ) )
 			;;
 		*)
 			__docker_compose_complete_services --filter source=build
@@ -361,7 +361,7 @@ _docker_compose_ps() {

 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--help --quiet -q --services --filter" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--all -a --filter --help --quiet -q --services" -- "$cur" ) )
 			;;
 		*)
 			__docker_compose_complete_services
contrib/completion/zsh/_docker-compose

@@ -117,6 +117,7 @@ __docker-compose_subcommand() {
                 '--no-cache[Do not use cache when building the image.]' \
                 '--pull[Always attempt to pull a newer version of the image.]' \
                 '--compress[Compress the build context using gzip.]' \
+                '--parallel[Build images in parallel.]' \
                 '*:services:__docker-compose_services_from_build' && ret=0
             ;;
         (bundle)
@@ -339,7 +340,7 @@ _docker-compose() {
         '(- :)'{-h,--help}'[Get help]' \
         '*'{-f,--file}"[${file_description}]:file:_files -g '*.yml'" \
         '(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
-        "--compatibility[If set, Compose will attempt to convert deploy keys in v3 files to their non-Swarm equivalent]" \
+        "--compatibility[If set, Compose will attempt to convert keys in v3 files to their non-Swarm equivalent]" \
         '(- :)'{-v,--version}'[Print version and exit]' \
         '--verbose[Show more output]' \
         '--log-level=[Set log level]:level:(DEBUG INFO WARNING ERROR CRITICAL)' \
@@ -354,7 +355,7 @@ _docker-compose() {
         '(-): :->command' \
         '(-)*:: :->option-or-argument' && ret=0

-    local -a relevant_compose_flags relevant_docker_flags compose_options docker_options
+    local -a relevant_compose_flags relevant_compose_repeatable_flags relevant_docker_flags compose_options docker_options

     relevant_compose_flags=(
         "--file" "-f"
@@ -368,6 +369,10 @@ _docker-compose() {
         "--skip-hostname-check"
     )

+    relevant_compose_repeatable_flags=(
+        "--file" "-f"
+    )
+
     relevant_docker_flags=(
         "--host" "-H"
         "--tls"
@@ -385,9 +390,18 @@ _docker-compose() {
             fi
         fi
         if [[ -n "${relevant_compose_flags[(r)$k]}" ]]; then
-            compose_options+=$k
-            if [[ -n "$opt_args[$k]" ]]; then
-                compose_options+=$opt_args[$k]
+            if [[ -n "${relevant_compose_repeatable_flags[(r)$k]}" ]]; then
+                values=("${(@s/:/)opt_args[$k]}")
+                for value in $values
+                do
+                    compose_options+=$k
+                    compose_options+=$value
+                done
+            else
+                compose_options+=$k
+                if [[ -n "$opt_args[$k]" ]]; then
+                    compose_options+=$opt_args[$k]
+                fi
             fi
         fi
     done
docker-compose.spec

@@ -98,4 +98,5 @@ exe = EXE(pyz,
           debug=False,
           strip=None,
           upx=True,
-          console=True)
+          console=True,
+          bootloader_ignore_signals=True)
requirements-dev.txt

@@ -1,5 +1,5 @@
 coverage==4.4.2
 flake8==3.5.0
-mock>=1.0.1
+mock==2.0.0
 pytest==3.6.3
 pytest-cov==2.5.1
requirements.txt

@@ -3,8 +3,8 @@ cached-property==1.3.0
 certifi==2017.4.17
 chardet==3.0.4
 colorama==0.4.0; sys_platform == 'win32'
-docker==3.6.0
-docker-pycreds==0.3.0
+docker==3.7.3
+docker-pycreds==0.4.0
 dockerpty==0.4.1
 docopt==0.6.2
 enum34==1.1.6; python_version < '3.4'
@@ -12,12 +12,13 @@ functools32==3.2.3.post2; python_version < '3.2'
 idna==2.5
 ipaddress==1.0.18
 jsonschema==2.6.0
+paramiko==2.4.2
 pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
 pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
 PySocks==1.6.7
-PyYAML==3.12
+PyYAML==4.2b1
 requests==2.20.0
 six==1.10.0
 texttable==0.9.1
 urllib3==1.21.1; python_version == '3.3'
-websocket-client==0.32.0
+websocket-client==0.56.0
script/build/linux

@@ -5,7 +5,7 @@ set -ex
 ./script/clean

 TAG="docker-compose"
-docker build -t "$TAG" . | tail -n 200
+docker build -t "$TAG" .
 docker run \
     --rm --entrypoint="script/build/linux-entrypoint" \
     -v $(pwd)/dist:/code/dist \
script/build/write-git-sha

@@ -2,6 +2,11 @@
 #
 # Write the current commit sha to the file GITSHA. This file is included in
 # packaging so that `docker-compose version` can include the git sha.
+# sets to 'unknown' and echoes a message if the command is not successful
 #
 set -e
-git rev-parse --short HEAD > compose/GITSHA
+
+DOCKER_COMPOSE_GITSHA="$(git rev-parse --short HEAD)"
+if [[ "${?}" != "0" ]]; then
+    echo "Couldn't get revision of the git repository. Setting to 'unknown' instead"
+    DOCKER_COMPOSE_GITSHA="unknown"
+fi
+echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA
script/release/README.md

@@ -40,7 +40,7 @@ This API token should be exposed to the release script through the
 ### A Bintray account and Bintray API key

 Your Bintray account will need to be an admin member of the
-[docker-compose organization](https://github.com/settings/tokens).
+[docker-compose organization](https://bintray.com/docker-compose).
 Additionally, you should generate a personal API key. To do so, click your
 username in the top-right hand corner and select "Edit profile"; on the new
 page, select "API key" in the left-side menu.
@@ -129,7 +129,7 @@ assets public), proceed to the "Finalize a release" section of this guide.
 Once you're ready to make your release public, you may execute the following
 command from the root of the Compose repository:
 ```
-./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEAE_VERSION
+./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEASE_VERSION
 ```

 Note that this command will create and publish versioned assets to the public.
@@ -26,12 +26,6 @@ if [ -z "$(command -v jq 2> /dev/null)" ]; then
 fi

-if [ -z "$(command -v pandoc 2> /dev/null)" ]; then
-    >&2 echo "$0 requires http://pandoc.org/"
-    >&2 echo "Please install it and make sure it is available on your \$PATH."
-    exit 2
-fi

 API=https://api.github.com/repos
 REPO=docker/compose
 GITHUB_REPO=git@github.com:$REPO
@@ -59,8 +53,6 @@ docker push docker/compose-tests:latest
 docker push docker/compose-tests:$VERSION

 echo "Uploading package to PyPI"
-pandoc -f markdown -t rst README.md -o README.rst
-sed -i -e 's/logo.png?raw=true/https:\/\/github.com\/docker\/compose\/raw\/master\/logo.png?raw=true/' README.rst
 ./script/build/write-git-sha
 python setup.py sdist bdist_wheel
 if [ "$(command -v twine 2> /dev/null)" ]; then
@@ -1,6 +1,6 @@
-If you're a Mac or Windows user, the best way to install Compose and keep it up-to-date is **[Docker for Mac and Windows](https://www.docker.com/products/docker)**.
+If you're a Mac or Windows user, the best way to install Compose and keep it up-to-date is **[Docker Desktop for Mac and Windows](https://www.docker.com/products/docker-desktop)**.

-Docker for Mac and Windows will automatically install the latest version of Docker Engine for you.
+Docker Desktop will automatically install the latest version of Docker Engine for you.

 Alternatively, you can use the usual commands to install or upgrade Compose:
script/release/release.py

@@ -7,9 +7,7 @@ import os
 import shutil
 import sys
 import time
-from distutils.core import run_setup

-import pypandoc
 from jinja2 import Template
 from release.bintray import BintrayAPI
 from release.const import BINTRAY_ORG
@@ -277,10 +275,8 @@ def finalize(args):

     repository.checkout_branch(br_name)

-    pypandoc.convert_file(
-        os.path.join(REPO_ROOT, 'README.md'), 'rst', outputfile=os.path.join(REPO_ROOT, 'README.rst')
-    )
-    run_setup(os.path.join(REPO_ROOT, 'setup.py'), script_args=['sdist', 'bdist_wheel'])
+    os.system('python {setup_script} sdist bdist_wheel'.format(
+        setup_script=os.path.join(REPO_ROOT, 'setup.py')))

     merge_status = pr_data.merge()
     if not merge_status.merged and not args.finalize_resume:
@@ -18,7 +18,7 @@ def pypi_upload(args):
             'dist/docker-compose-{}*.tar.gz'.format(rel)
         ])
     except HTTPError as e:
-        if e.response.status_code == 400 and 'File already exists' in e.message:
+        if e.response.status_code == 400 and 'File already exists' in str(e):
             if not args.finalize_resume:
                 raise ScriptError(
                     'Package already uploaded on PyPi.'
@@ -219,6 +219,8 @@ def get_contributors(pr_data):
     commits = pr_data.get_commits()
     authors = {}
     for commit in commits:
+        if not commit.author:
+            continue
         author = commit.author.login
         authors[author] = authors.get(author, 0) + 1
     return [x[0] for x in sorted(list(authors.items()), key=lambda x: x[1])]
script/release/setup-venv.sh

@@ -39,9 +39,9 @@ fi

 $VENV_PYTHONBIN -m pip install -U Jinja2==2.10 \
     PyGithub==1.39 \
-    pypandoc==1.4 \
     GitPython==2.1.9 \
     requests==2.18.4 \
+    setuptools==40.6.2 \
     twine==1.11.0

 $VENV_PYTHONBIN setup.py develop
script/run/run.sh

@@ -15,7 +15,7 @@

 set -e

-VERSION="1.23.2"
+VERSION="1.24.0"
 IMAGE="docker/compose:$VERSION"

@@ -47,14 +47,14 @@ if [ -n "$HOME" ]; then
 fi

 # Only allocate tty if we detect one
-if [ -t 0 ]; then
-    if [ -t 1 ]; then
-        DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -t"
-    fi
-else
-    DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -i"
-fi
+if [ -t 0 -a -t 1 ]; then
+    DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -t"
+fi
+
+# Always set -i to support piped and terminal input in run/exec
+DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -i"

 # Handle userns security
 if [ ! -z "$(docker info 2>/dev/null | grep userns)" ]; then
     DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS --userns=host"
script/setup/osx

@@ -13,13 +13,13 @@ if ! [ ${DEPLOYMENT_TARGET} == "$(macos_version)" ]; then
     SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62
 fi

-OPENSSL_VERSION=1.1.0h
+OPENSSL_VERSION=1.1.0j
 OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz
-OPENSSL_SHA1=0fc39f6aa91b6e7f4d05018f7c5e991e1d2491fd
+OPENSSL_SHA1=dcad1efbacd9a4ed67d4514470af12bbe2a1d60a

-PYTHON_VERSION=3.6.6
+PYTHON_VERSION=3.6.8
 PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz
-PYTHON_SHA1=ae1fc9ddd29ad8c1d5f7b0d799ff0787efeb9652
+PYTHON_SHA1=09fcc4edaef0915b4dedbfb462f1cd15f82d3a6f

 #
 # Install prerequisites.
setup.py

@@ -32,11 +32,11 @@ def find_version(*file_paths):
 install_requires = [
     'cached-property >= 1.2.0, < 2',
     'docopt >= 0.6.1, < 0.7',
-    'PyYAML >= 3.10, < 4',
+    'PyYAML >= 3.10, < 4.3',
     'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.21',
     'texttable >= 0.9.0, < 0.10',
     'websocket-client >= 0.32.0, < 1.0',
-    'docker >= 3.6.0, < 4.0',
+    'docker[ssh] >= 3.7.0, < 4.0',
     'dockerpty >= 0.4.1, < 0.5',
     'six >= 1.3.0, < 2',
     'jsonschema >= 2.5.1, < 3',
@@ -77,19 +77,26 @@ setup(
     name='docker-compose',
     version=find_version("compose", "__init__.py"),
     description='Multi-container orchestration for Docker',
+    long_description=read('README.md'),
+    long_description_content_type='text/markdown',
     url='https://www.docker.com/',
+    project_urls={
+        'Documentation': 'https://docs.docker.com/compose/overview',
+        'Changelog': 'https://github.com/docker/compose/blob/release/CHANGELOG.md',
+        'Source': 'https://github.com/docker/compose',
+        'Tracker': 'https://github.com/docker/compose/issues',
+    },
     author='Docker, Inc.',
     license='Apache License 2.0',
     packages=find_packages(exclude=['tests.*', 'tests']),
     include_package_data=True,
     test_suite='nose.collector',
     install_requires=install_requires,
     extras_require=extras_require,
     tests_require=tests_require,
-    entry_points="""
-    [console_scripts]
-    docker-compose=compose.cli.main:main
-    """,
+    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
+    entry_points={
+        'console_scripts': ['docker-compose=compose.cli.main:main'],
+    },
     classifiers=[
         'Development Status :: 5 - Production/Stable',
         'Environment :: Console',
tests/acceptance/cli_test.py (the mirror dropped the +/- markers on the first hunk, which removes one import line)

@@ -4,7 +4,6 @@ from __future__ import unicode_literals

 import datetime
 import json
 import os
 import os.path
 import re
 import signal
@@ -41,7 +40,7 @@ ProcessResult = namedtuple('ProcessResult', 'stdout stderr')


 BUILD_CACHE_TEXT = 'Using cache'
-BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:latest'
+BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:1.27.2'


 def start_process(base_dir, options):
@@ -599,10 +598,20 @@ class CLITestCase(DockerClientTestCase):
         assert 'with_build' in running.stdout
         assert 'with_image' in running.stdout

+    def test_ps_all(self):
+        self.project.get_service('simple').create_container(one_off='blahblah')
+        result = self.dispatch(['ps'])
+        assert 'simple-composefile_simple_run_' not in result.stdout
+
+        result2 = self.dispatch(['ps', '--all'])
+        assert 'simple-composefile_simple_run_' in result2.stdout
+
     def test_pull(self):
         result = self.dispatch(['pull'])
         assert 'Pulling simple' in result.stderr
         assert 'Pulling another' in result.stderr
+        assert 'done' in result.stderr
+        assert 'failed' not in result.stderr

     def test_pull_with_digest(self):
         result = self.dispatch(['-f', 'digest.yml', 'pull', '--no-parallel'])
@@ -649,15 +658,15 @@ class CLITestCase(DockerClientTestCase):
         self.base_dir = 'tests/fixtures/links-composefile'
         result = self.dispatch(['pull', '--no-parallel', 'web'])
         assert sorted(result.stderr.split('\n'))[1:] == [
-            'Pulling web (busybox:latest)...',
+            'Pulling web (busybox:1.27.2)...',
         ]

     def test_pull_with_include_deps(self):
         self.base_dir = 'tests/fixtures/links-composefile'
         result = self.dispatch(['pull', '--no-parallel', '--include-deps', 'web'])
         assert sorted(result.stderr.split('\n'))[1:] == [
-            'Pulling db (busybox:latest)...',
-            'Pulling web (busybox:latest)...',
+            'Pulling db (busybox:1.27.2)...',
+            'Pulling web (busybox:1.27.2)...',
         ]

     def test_build_plain(self):
@@ -2221,6 +2230,7 @@ class CLITestCase(DockerClientTestCase):

     def test_start_no_containers(self):
         result = self.dispatch(['start'], returncode=1)
+        assert 'failed' in result.stderr
         assert 'No containers to start' in result.stderr

     @v2_only()
@@ -2582,7 +2592,7 @@ class CLITestCase(DockerClientTestCase):

         container, = self.project.containers()
         expected_template = ' container {} {}'
-        expected_meta_info = ['image=busybox:latest', 'name=simple-composefile_simple_']
+        expected_meta_info = ['image=busybox:1.27.2', 'name=simple-composefile_simple_']

         assert expected_template.format('create', container.id) in lines[0]
         assert expected_template.format('start', container.id) in lines[1]
tests/fixtures (busybox images pinned from `latest` to `1.27.2`):

@@ -2,7 +2,7 @@ version: "2.2"

 services:
   service:
-    image: busybox:latest
+    image: busybox:1.27.2
     command: top

     environment:

tests/fixtures/links-composefile/docker-compose.yml

@@ -1,11 +1,11 @@
 db:
-  image: busybox:latest
+  image: busybox:1.27.2
   command: top
 web:
-  image: busybox:latest
+  image: busybox:1.27.2
   command: top
   links:
    - db:db
 console:
-  image: busybox:latest
+  image: busybox:1.27.2
   command: top

tests/fixtures/simple-composefile/docker-compose.yml

@@ -1,5 +1,5 @@
 simple:
-  image: busybox:latest
+  image: busybox:1.27.2
   command: top
 another:
   image: busybox:latest

tests/fixtures/simple-dockerfile/Dockerfile (vendored)

@@ -1,3 +1,3 @@
-FROM busybox:latest
+FROM busybox:1.27.2
 LABEL com.docker.compose.test_image=true
 CMD echo "success"

tests/fixtures/v2-simple/docker-compose.yml (vendored)

@@ -1,8 +1,8 @@
 version: "2"
 services:
   simple:
-    image: busybox:latest
+    image: busybox:1.27.2
     command: top
   another:
-    image: busybox:latest
+    image: busybox:1.27.2
     command: top
@@ -193,7 +193,7 @@ class TestConsumeQueue(object):
         queue.put(item)

         generator = consume_queue(queue, True)
-        assert next(generator) is 'foobar-1'
+        assert next(generator) == 'foobar-1'

     def test_item_is_none_when_timeout_is_hit(self):
         queue = Queue()
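The one-line fix above replaces an identity check (`is`) with an equality check (`==`). Comparing strings with `is` only passes when CPython happens to intern both objects, which is an implementation detail, as this standalone example shows:

a = 'foobar-1'
b = ''.join(['foobar', '-1'])  # equal value, built at runtime
assert a == b                  # always true: same value
# 'a is b' is typically False here: equal strings are not guaranteed
# to be the same object, so tests must not rely on interning.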
@@ -171,7 +171,10 @@ class CLITestCase(unittest.TestCase):
             '--workdir': None,
         })

-        assert mock_client.create_host_config.call_args[1]['restart_policy']['Name'] == 'always'
+        # NOTE: The "run" command is supposed to be a one-off tool; therefore restart policy "no"
+        # (the default) is enforced despite explicit wish for "always" in the project
+        # configuration file
+        assert not mock_client.create_host_config.call_args[1].get('restart_policy')

         command = TopLevelCommand(project)
         command.run({
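The NOTE in the hunk above documents the intent: `run` creates one-off containers, so a `restart: always` policy from the service definition must not be forwarded to the daemon. A minimal sketch of that override (hypothetical helper, not Compose's actual code path):

def one_off_host_config_options(service_options):
    # Copy the service options and drop any restart policy: a one-off
    # container started by 'run' must never be auto-restarted.
    options = dict(service_options)
    options.pop('restart', None)
    return options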
@@ -1085,8 +1085,43 @@ class ConfigTest(unittest.TestCase):
         details = config.ConfigDetails('.', [base_file, override_file])
         web_service = config.load(details).services[0]
         assert web_service['networks'] == {
-            'foobar': {'aliases': ['foo', 'bar']},
-            'baz': None
+            'foobar': {'aliases': ['bar', 'foo']},
+            'baz': {}
         }

+    def test_load_with_multiple_files_mismatched_networks_format_inverse_order(self):
+        base_file = config.ConfigFile(
+            'override.yaml',
+            {
+                'version': '2',
+                'services': {
+                    'web': {
+                        'networks': ['baz']
+                    }
+                }
+            }
+        )
+        override_file = config.ConfigFile(
+            'base.yaml',
+            {
+                'version': '2',
+                'services': {
+                    'web': {
+                        'image': 'example/web',
+                        'networks': {
+                            'foobar': {'aliases': ['foo', 'bar']}
+                        }
+                    }
+                },
+                'networks': {'foobar': {}, 'baz': {}}
+            }
+        )
+
+        details = config.ConfigDetails('.', [base_file, override_file])
+        web_service = config.load(details).services[0]
+        assert web_service['networks'] == {
+            'foobar': {'aliases': ['bar', 'foo']},
+            'baz': {}
+        }
+
     def test_load_with_multiple_files_v2(self):
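Both assertions expect the short list form of `networks` to be normalized to the dict form before merging, which is why `baz` ends up as `{}` rather than `None` regardless of file order. A simplified sketch of that normalization (hypothetical helper; the real logic lives in compose/config):

def normalize_service_networks(networks):
    # A compose file may declare networks as a list (['baz']) or as a
    # mapping ({'foobar': {'aliases': [...]}}); merging requires the
    # mapping form, with empty configs as {} instead of None.
    if isinstance(networks, list):
        return {name: {} for name in networks}
    return {name: cfg or {} for name, cfg in networks.items()}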
@@ -1336,6 +1371,32 @@ class ConfigTest(unittest.TestCase):
         assert mount.type == 'bind'
         assert mount.source == expected_source

+    def test_load_bind_mount_relative_path_with_tilde(self):
+        base_file = config.ConfigFile(
+            'base.yaml', {
+                'version': '3.4',
+                'services': {
+                    'web': {
+                        'image': 'busybox:latest',
+                        'volumes': [
+                            {'type': 'bind', 'source': '~/web', 'target': '/web'},
+                        ],
+                    },
+                },
+            },
+        )
+
+        details = config.ConfigDetails('.', [base_file])
+        config_data = config.load(details)
+        mount = config_data.services[0].get('volumes')[0]
+        assert mount.target == '/web'
+        assert mount.type == 'bind'
+        assert (
+            not mount.source.startswith('~') and mount.source.endswith(
+                '{}web'.format(os.path.sep)
+            )
+        )
+
     def test_config_invalid_ipam_config(self):
         with pytest.raises(ConfigurationError) as excinfo:
             config.load(
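The new test pins down tilde handling for long-form bind mounts: a `source` of `~/web` must be expanded to an absolute path before reaching the Docker API. The expected behavior boils down to this sketch (illustration only, not the actual Compose implementation):

import os

def expand_bind_source(source):
    # '~/web' -> '/home/<user>/web' (absolute, no leading '~'),
    # matching the assertions in the test above.
    return os.path.abspath(os.path.expanduser(source))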
@@ -3045,6 +3106,41 @@ class ConfigTest(unittest.TestCase):
             )
             config.load(config_details)

+    def test_config_duplicate_mount_points(self):
+        config1 = build_config_details(
+            {
+                'version': '3.5',
+                'services': {
+                    'web': {
+                        'image': 'busybox',
+                        'volumes': ['/tmp/foo:/tmp/foo', '/tmp/foo:/tmp/foo:rw']
+                    }
+                }
+            }
+        )
+
+        config2 = build_config_details(
+            {
+                'version': '3.5',
+                'services': {
+                    'web': {
+                        'image': 'busybox',
+                        'volumes': ['/x:/y', '/z:/y']
+                    }
+                }
+            }
+        )
+
+        with self.assertRaises(ConfigurationError) as e:
+            config.load(config1)
+        self.assertEquals(str(e.exception), 'Duplicate mount points: [%s]' % (
+            ', '.join(['/tmp/foo:/tmp/foo:rw']*2)))
+
+        with self.assertRaises(ConfigurationError) as e:
+            config.load(config2)
+        self.assertEquals(str(e.exception), 'Duplicate mount points: [%s]' % (
+            ', '.join(['/x:/y:rw', '/z:/y:rw'])))


 class NetworkModeTest(unittest.TestCase):
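The added test asserts that two volume entries targeting the same container path are rejected, with the error listing the normalized specs (note that `/tmp/foo:/tmp/foo` gains an explicit `:rw`). A simplified version of the check might look like this (sketch, not the actual validator; it assumes the 'host:container[:mode]' short syntax):

from collections import Counter

def assert_no_duplicate_mount_points(volume_specs):
    # Conflicts are keyed on the container path: '/x:/y' and '/z:/y'
    # collide even though the host paths differ.
    targets = Counter(spec.split(':')[1] for spec in volume_specs)
    duplicates = [t for t, n in targets.items() if n > 1]
    if duplicates:
        raise ValueError('Duplicate mount points: %s' % ', '.join(duplicates))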
@@ -3497,6 +3593,9 @@ class InterpolationTest(unittest.TestCase):
                         'reservations': {'memory': '100M'},
                     },
                 },
+                'credential_spec': {
+                    'file': 'spec.json'
+                },
             },
         },
     })
@@ -3514,7 +3613,8 @@ class InterpolationTest(unittest.TestCase):
             'mem_limit': '300M',
             'mem_reservation': '100M',
             'cpus': 0.7,
-            'name': 'foo'
+            'name': 'foo',
+            'security_opt': ['credentialspec=file://spec.json'],
         }

     @mock.patch.dict(os.environ)
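The expected service dict shows how `credential_spec` is rendered at runtime: it becomes a `security_opt` entry of the form `credentialspec=file://...`. A sketch of that translation (the `registry` variant is an assumption based on the compose-file schema, not something shown in this diff):

def credential_spec_to_security_opt(spec):
    # {'file': 'spec.json'} -> 'credentialspec=file://spec.json'
    for kind in ('file', 'registry'):
        if kind in spec:
            return 'credentialspec={}://{}'.format(kind, spec[kind])
    raise ValueError('credential_spec requires "file" or "registry"')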
@@ -3817,8 +3917,95 @@ class MergePortsTest(unittest.TestCase, MergeListsTest):

 class MergeNetworksTest(unittest.TestCase, MergeListsTest):
     config_name = 'networks'
-    base_config = ['frontend', 'backend']
-    override_config = ['monitoring']
+    base_config = {'default': {'aliases': ['foo.bar', 'foo.baz']}}
+    override_config = {'default': {'ipv4_address': '123.234.123.234'}}
+
+    def test_no_network_overrides(self):
+        service_dict = config.merge_service_dicts(
+            {self.config_name: self.base_config},
+            {self.config_name: self.override_config},
+            DEFAULT_VERSION)
+        assert service_dict[self.config_name] == {
+            'default': {
+                'aliases': ['foo.bar', 'foo.baz'],
+                'ipv4_address': '123.234.123.234'
+            }
+        }
+
+    def test_network_has_none_value(self):
+        service_dict = config.merge_service_dicts(
+            {self.config_name: {
+                'default': None
+            }},
+            {self.config_name: {
+                'default': {
+                    'aliases': []
+                }
+            }},
+            DEFAULT_VERSION)
+
+        assert service_dict[self.config_name] == {
+            'default': {
+                'aliases': []
+            }
+        }
+
+    def test_all_properties(self):
+        service_dict = config.merge_service_dicts(
+            {self.config_name: {
+                'default': {
+                    'aliases': ['foo.bar', 'foo.baz'],
+                    'link_local_ips': ['192.168.1.10', '192.168.1.11'],
+                    'ipv4_address': '111.111.111.111',
+                    'ipv6_address': 'FE80:CD00:0000:0CDE:1257:0000:211E:729C-first'
+                }
+            }},
+            {self.config_name: {
+                'default': {
+                    'aliases': ['foo.baz', 'foo.baz2'],
+                    'link_local_ips': ['192.168.1.11', '192.168.1.12'],
+                    'ipv4_address': '123.234.123.234',
+                    'ipv6_address': 'FE80:CD00:0000:0CDE:1257:0000:211E:729C-second'
+                }
+            }},
+            DEFAULT_VERSION)
+
+        assert service_dict[self.config_name] == {
+            'default': {
+                'aliases': ['foo.bar', 'foo.baz', 'foo.baz2'],
+                'link_local_ips': ['192.168.1.10', '192.168.1.11', '192.168.1.12'],
+                'ipv4_address': '123.234.123.234',
+                'ipv6_address': 'FE80:CD00:0000:0CDE:1257:0000:211E:729C-second'
+            }
+        }
+
+    def test_no_network_name_overrides(self):
+        service_dict = config.merge_service_dicts(
+            {
+                self.config_name: {
+                    'default': {
+                        'aliases': ['foo.bar', 'foo.baz'],
+                        'ipv4_address': '123.234.123.234'
+                    }
+                }
+            },
+            {
+                self.config_name: {
+                    'another_network': {
+                        'ipv4_address': '123.234.123.234'
+                    }
+                }
+            },
+            DEFAULT_VERSION)
+        assert service_dict[self.config_name] == {
+            'default': {
+                'aliases': ['foo.bar', 'foo.baz'],
+                'ipv4_address': '123.234.123.234'
+            },
+            'another_network': {
+                'ipv4_address': '123.234.123.234'
+            }
+        }


 class MergeStringsOrListsTest(unittest.TestCase):
@@ -9,6 +9,7 @@ import pytest

 from compose.config.environment import env_vars_from_file
 from compose.config.environment import Environment
+from compose.config.errors import ConfigurationError
 from tests import unittest

@@ -52,3 +53,12 @@ class EnvironmentTest(unittest.TestCase):
         assert env_vars_from_file(str(tmpdir.join('bom.env'))) == {
             'PARK_BOM': '박봄'
         }
+
+    def test_env_vars_from_file_whitespace(self):
+        tmpdir = pytest.ensuretemp('env_file')
+        self.addCleanup(tmpdir.remove)
+        with codecs.open('{}/whitespace.env'.format(str(tmpdir)), 'w', encoding='utf-8') as f:
+            f.write('WHITESPACE =yes\n')
+        with pytest.raises(ConfigurationError) as exc:
+            env_vars_from_file(str(tmpdir.join('whitespace.env')))
+        assert 'environment variable' in exc.exconly()
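The new test feeds the parser a line with whitespace before `=` (`WHITESPACE =yes`) and expects a ConfigurationError naming the bad environment variable. A minimal sketch of a parser with that strictness (illustrative, not Compose's actual env_vars_from_file):

import re

def parse_env_line(line):
    # Shell-style env files allow no whitespace around '=', so
    # 'WHITESPACE =yes' must be rejected rather than silently mangled.
    match = re.match(r'^([^\s=]+)=(.*)$', line.rstrip('\n'))
    if match is None:
        raise ValueError('Invalid environment variable definition: %r' % line)
    return match.group(1), match.group(2)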
@@ -97,22 +97,24 @@ class ProgressStreamTestCase(unittest.TestCase):
         tf.seek(0)
         assert tf.read() == '???'

-    def test_get_digest_from_push(self):
-        digest = "sha256:abcd"
-        events = [
-            {"status": "..."},
-            {"status": "..."},
-            {"progressDetail": {}, "aux": {"Digest": digest}},
-        ]
-        assert progress_stream.get_digest_from_push(events) == digest

-    def test_get_digest_from_pull(self):
-        events = list()
-        assert progress_stream.get_digest_from_pull(events) is None
-        digest = "sha256:abcd"
-        events = [
-            {"status": "..."},
-            {"status": "..."},
-            {"status": "Digest: %s" % digest},
-        ]
-        assert progress_stream.get_digest_from_pull(events) == digest

+def test_get_digest_from_push():
+    digest = "sha256:abcd"
+    events = [
+        {"status": "..."},
+        {"status": "..."},
+        {"progressDetail": {}, "aux": {"Digest": digest}},
+    ]
+    assert progress_stream.get_digest_from_push(events) == digest
+
+
+def test_get_digest_from_pull():
+    digest = "sha256:abcd"
+    events = [
+        {"status": "..."},
+        {"status": "..."},
+        {"status": "Digest: %s" % digest},
+        {"status": "..."},
+    ]
+    assert progress_stream.get_digest_from_pull(events) == digest
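The rewritten pull test now includes a trailing status event after the `Digest:` line, which pins down that the scanner keeps consuming events instead of returning on the first match. Roughly (a sketch of the behavior under test, not the exact implementation):

def get_digest_from_pull(events):
    # The digest arrives as a status line ('Digest: sha256:...') and
    # further status events may follow it, so scan the whole stream.
    digest = None
    for event in events:
        status = event.get('status') or ''
        if status.startswith('Digest'):
            digest = status.split(' ', 1)[1].strip()
    return digest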
@@ -254,9 +254,10 @@ class ProjectTest(unittest.TestCase):
             [container_ids[0] + ':rw']
         )

-    def test_events(self):
+    def test_events_legacy(self):
         services = [Service(name='web'), Service(name='db')]
         project = Project('test', services, self.mock_client)
+        self.mock_client.api_version = '1.21'
         self.mock_client.events.return_value = iter([
             {
                 'status': 'create',
@@ -362,6 +363,175 @@ class ProjectTest(unittest.TestCase):
             },
         ]

+    def test_events(self):
+        services = [Service(name='web'), Service(name='db')]
+        project = Project('test', services, self.mock_client)
+        self.mock_client.api_version = '1.35'
+        self.mock_client.events.return_value = iter([
+            {
+                'status': 'create',
+                'from': 'example/image',
+                'Type': 'container',
+                'Actor': {
+                    'ID': 'abcde',
+                    'Attributes': {
+                        'com.docker.compose.project': 'test',
+                        'com.docker.compose.service': 'web',
+                        'image': 'example/image',
+                        'name': 'test_web_1',
+                    }
+                },
+                'id': 'abcde',
+                'time': 1420092061,
+                'timeNano': 14200920610000002000,
+            },
+            {
+                'status': 'attach',
+                'from': 'example/image',
+                'Type': 'container',
+                'Actor': {
+                    'ID': 'abcde',
+                    'Attributes': {
+                        'com.docker.compose.project': 'test',
+                        'com.docker.compose.service': 'web',
+                        'image': 'example/image',
+                        'name': 'test_web_1',
+                    }
+                },
+                'id': 'abcde',
+                'time': 1420092061,
+                'timeNano': 14200920610000003000,
+            },
+            {
+                'status': 'create',
+                'from': 'example/other',
+                'Type': 'container',
+                'Actor': {
+                    'ID': 'bdbdbd',
+                    'Attributes': {
+                        'image': 'example/other',
+                        'name': 'shrewd_einstein',
+                    }
+                },
+                'id': 'bdbdbd',
+                'time': 1420092061,
+                'timeNano': 14200920610000005000,
+            },
+            {
+                'status': 'create',
+                'from': 'example/db',
+                'Type': 'container',
+                'Actor': {
+                    'ID': 'ababa',
+                    'Attributes': {
+                        'com.docker.compose.project': 'test',
+                        'com.docker.compose.service': 'db',
+                        'image': 'example/db',
+                        'name': 'test_db_1',
+                    }
+                },
+                'id': 'ababa',
+                'time': 1420092061,
+                'timeNano': 14200920610000004000,
+            },
+            {
+                'status': 'destroy',
+                'from': 'example/db',
+                'Type': 'container',
+                'Actor': {
+                    'ID': 'eeeee',
+                    'Attributes': {
+                        'com.docker.compose.project': 'test',
+                        'com.docker.compose.service': 'db',
+                        'image': 'example/db',
+                        'name': 'test_db_1',
+                    }
+                },
+                'id': 'eeeee',
+                'time': 1420092061,
+                'timeNano': 14200920610000004000,
+            },
+        ])
+
+        def dt_with_microseconds(dt, us):
+            return datetime.datetime.fromtimestamp(dt).replace(microsecond=us)
+
+        def get_container(cid):
+            if cid == 'eeeee':
+                raise NotFound(None, None, "oops")
+            if cid == 'abcde':
+                name = 'web'
+                labels = {LABEL_SERVICE: name}
+            elif cid == 'ababa':
+                name = 'db'
+                labels = {LABEL_SERVICE: name}
+            else:
+                labels = {}
+                name = ''
+            return {
+                'Id': cid,
+                'Config': {'Labels': labels},
+                'Name': '/project_%s_1' % name,
+            }
+
+        self.mock_client.inspect_container.side_effect = get_container
+
+        events = project.events()
+
+        events_list = list(events)
+        # Assert the return value is a generator
+        assert not list(events)
+        assert events_list == [
+            {
+                'type': 'container',
+                'service': 'web',
+                'action': 'create',
+                'id': 'abcde',
+                'attributes': {
+                    'name': 'test_web_1',
+                    'image': 'example/image',
+                },
+                'time': dt_with_microseconds(1420092061, 2),
+                'container': Container(None, get_container('abcde')),
+            },
+            {
+                'type': 'container',
+                'service': 'web',
+                'action': 'attach',
+                'id': 'abcde',
+                'attributes': {
+                    'name': 'test_web_1',
+                    'image': 'example/image',
+                },
+                'time': dt_with_microseconds(1420092061, 3),
+                'container': Container(None, get_container('abcde')),
+            },
+            {
+                'type': 'container',
+                'service': 'db',
+                'action': 'create',
+                'id': 'ababa',
+                'attributes': {
+                    'name': 'test_db_1',
+                    'image': 'example/db',
+                },
+                'time': dt_with_microseconds(1420092061, 4),
+                'container': Container(None, get_container('ababa')),
+            },
+            {
+                'type': 'container',
+                'service': 'db',
+                'action': 'destroy',
+                'id': 'eeeee',
+                'attributes': {
+                    'name': 'test_db_1',
+                    'image': 'example/db',
+                },
+                'time': dt_with_microseconds(1420092061, 4),
+                'container': None,
+            },
+        ]
+
     def test_net_unset(self):
         project = Project.from_config(
             name='test',
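The new `test_events` covers the richer event payloads of API 1.35: each expected dict is derived from the raw daemon event, with the nanosecond timestamp converted to a datetime and the owning service read from Compose labels. A sketch of that mapping (hypothetical helper; the real decoding lives in compose/project.py):

import datetime

def decode_container_event(raw):
    # 14200920610000002000 ns -> datetime(...) with microsecond=2,
    # matching dt_with_microseconds(1420092061, 2) in the test.
    attributes = raw['Actor']['Attributes']
    time = datetime.datetime.fromtimestamp(raw['time']).replace(
        microsecond=(raw['timeNano'] % 1000000000) // 1000)
    return {
        'type': raw['Type'],
        'service': attributes['com.docker.compose.service'],
        'action': raw['status'],
        'id': raw['Actor']['ID'],
        'attributes': {'name': attributes['name'], 'image': attributes['image']},
        'time': time,
    }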
@@ -620,3 +790,23 @@ class ProjectTest(unittest.TestCase):
         self.mock_client.pull.side_effect = OperationFailedError(b'pull error')
         with pytest.raises(ProjectError):
             project.pull(parallel_pull=True)
+
+    def test_avoid_multiple_push(self):
+        service_config_latest = {'image': 'busybox:latest', 'build': '.'}
+        service_config_default = {'image': 'busybox', 'build': '.'}
+        service_config_sha = {
+            'image': 'busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d',
+            'build': '.'
+        }
+        svc1 = Service('busy1', **service_config_latest)
+        svc1_1 = Service('busy11', **service_config_latest)
+        svc2 = Service('busy2', **service_config_default)
+        svc2_1 = Service('busy21', **service_config_default)
+        svc3 = Service('busy3', **service_config_sha)
+        svc3_1 = Service('busy31', **service_config_sha)
+        project = Project(
+            'composetest', [svc1, svc1_1, svc2, svc2_1, svc3, svc3_1], self.mock_client
+        )
+        with mock.patch('compose.service.Service.push') as fake_push:
+            project.push()
+            assert fake_push.call_count == 2
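`test_avoid_multiple_push` expects exactly two pushes for six services: duplicates of the same image reference collapse to one push each ('busybox:latest' and 'busybox' are treated as distinct strings here), and digest-pinned images are skipped because a digest cannot be pushed. One plausible reading of that policy, as a sketch:

def unique_push_targets(services):
    # Deduplicate on the exact image reference; skip images pinned
    # by digest, which are immutable and cannot be pushed.
    targets = set()
    for service in services:
        image = service['image']
        if '@sha256:' in image:
            continue
        targets.add(image)
    return targets  # two entries for the services in the test above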
@@ -5,6 +5,7 @@ import docker
 import pytest
 from docker.constants import DEFAULT_DOCKER_API_VERSION
 from docker.errors import APIError
+from docker.errors import ImageNotFound
 from docker.errors import NotFound

 from .. import mock
@@ -755,6 +756,13 @@ class ServiceTest(unittest.TestCase):
         mock_log.error.assert_called_once_with(
             "Failed to remove image for service %s: %s", web.name, error)

+    def test_remove_non_existing_image(self):
+        self.mock_client.remove_image.side_effect = ImageNotFound('image not found')
+        web = Service('web', image='example', client=self.mock_client)
+        with mock.patch('compose.service.log', autospec=True) as mock_log:
+            assert not web.remove_image(ImageType.all)
+        mock_log.warning.assert_called_once_with("Image %s not found.", web.image_name)
+
     def test_specifies_host_port_with_no_ports(self):
         service = Service(
             'foo',
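The added test establishes that removing an already-missing image is downgraded from an error to a warning. The behavior under test, as a standalone sketch (assuming docker-py's ImageNotFound, which the new import above brings in):

from docker.errors import ImageNotFound

def remove_image(client, log, image_name):
    # 'down --rmi' should not fail when the image is already gone:
    # warn and report that nothing was removed.
    try:
        client.remove_image(image_name)
        return True
    except ImageNotFound:
        log.warning("Image %s not found.", image_name)
        return False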