mirror of
https://github.com/docker/compose.git
synced 2026-02-13 03:59:29 +08:00
Compare commits
242 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
94f7016fb7 | ||
|
|
f3628c7a5e | ||
|
|
e115eaf6fc | ||
|
|
1bf0cd07de | ||
|
|
cefa239c2e | ||
|
|
6f3e4bbc6c | ||
|
|
87b6b3c139 | ||
|
|
1fb5c4b15a | ||
|
|
35ed189981 | ||
|
|
c392acc56b | ||
|
|
cd267d5121 | ||
|
|
cb076a57b9 | ||
|
|
6246a2592e | ||
|
|
606358cfb7 | ||
|
|
0488dd3709 | ||
|
|
b72f911ccf | ||
|
|
f9c5816ab8 | ||
|
|
2ecbf25445 | ||
|
|
8f842d55d7 | ||
|
|
ad19ff6c67 | ||
|
|
4fb7033d9c | ||
|
|
e1b7510e4a | ||
|
|
f7853a30bd | ||
|
|
3b4a53c959 | ||
|
|
7f54850b4a | ||
|
|
c72c966abc | ||
|
|
801167d271 | ||
|
|
408e4719e1 | ||
|
|
dbf40d8244 | ||
|
|
6021237a69 | ||
|
|
e4159cfd42 | ||
|
|
d8ec9c1572 | ||
|
|
244b303625 | ||
|
|
217f762a60 | ||
|
|
70da16103a | ||
|
|
6b71645ed7 | ||
|
|
05bf9a054a | ||
|
|
6e3d82eea6 | ||
|
|
21d114b879 | ||
|
|
72849d99c0 | ||
|
|
85e3ad2655 | ||
|
|
a3e30c3eed | ||
|
|
6fd77fa698 | ||
|
|
a7fc3e2220 | ||
|
|
db02c9f537 | ||
|
|
33cc601176 | ||
|
|
44e82edc5f | ||
|
|
53341b82f9 | ||
|
|
0de9a1b388 | ||
|
|
9bf6bc6dbd | ||
|
|
80afbd3961 | ||
|
|
de374d845e | ||
|
|
0fe82614a6 | ||
|
|
8f8c17bf66 | ||
|
|
9b7bd69cfc | ||
|
|
1ea9dda1d3 | ||
|
|
21f20cbc9b | ||
|
|
2123906586 | ||
|
|
59e96fea4f | ||
|
|
60f7e021ad | ||
|
|
61324ef308 | ||
|
|
e502417df2 | ||
|
|
6e2d1eb80e | ||
|
|
ea640f3821 | ||
|
|
427ec899df | ||
|
|
e7a8b2fed5 | ||
|
|
17b219454f | ||
|
|
dd3590180d | ||
|
|
a67ba5536d | ||
|
|
90fba58df9 | ||
|
|
c148849f0e | ||
|
|
1298b9aa5d | ||
|
|
e3e8a619cc | ||
|
|
c46737ed02 | ||
|
|
86b5ed1a84 | ||
|
|
4bb80c25d3 | ||
|
|
048360d1ed | ||
|
|
a34cd5ed54 | ||
|
|
c4229b469a | ||
|
|
842e372258 | ||
|
|
33bed5c706 | ||
|
|
7763122ecb | ||
|
|
2b5b665d3a | ||
|
|
9a39208741 | ||
|
|
ce8df9e789 | ||
|
|
0c8aeb9e05 | ||
|
|
db0a6cf2bb | ||
|
|
844b7d463f | ||
|
|
ca3aef0c84 | ||
|
|
e5645595e3 | ||
|
|
c9fe8920c9 | ||
|
|
4bf5271ae2 | ||
|
|
72f6a5c8d3 | ||
|
|
dc88e54010 | ||
|
|
4b01f6dcd6 | ||
|
|
f1603a3ee2 | ||
|
|
5fa81c4044 | ||
|
|
6c29830127 | ||
|
|
0a9ab358bf | ||
|
|
3c424b709e | ||
|
|
47a40d42c7 | ||
|
|
f316b448c2 | ||
|
|
6bfdde6855 | ||
|
|
2a08d4731e | ||
|
|
11d8093fc8 | ||
|
|
d0b46ca9b2 | ||
|
|
b7f9fc4b28 | ||
|
|
70a605acac | ||
|
|
85b85bc675 | ||
|
|
b334b6f059 | ||
|
|
0c1c338a02 | ||
|
|
f655a8af95 | ||
|
|
f7cd94d4a9 | ||
|
|
e4d2d7ed8a | ||
|
|
2a8c2c8ad6 | ||
|
|
5852db4d72 | ||
|
|
250a7a530b | ||
|
|
4e8b017283 | ||
|
|
a86a195c50 | ||
|
|
3368887a29 | ||
|
|
e5f1429ce1 | ||
|
|
65b0e5973b | ||
|
|
9cf483e224 | ||
|
|
1e164ca802 | ||
|
|
a2ded237e4 | ||
|
|
8a9ab69a1c | ||
|
|
9cfbfd55c4 | ||
|
|
d41e6e00fa | ||
|
|
3b7191f246 | ||
|
|
3bf75b7330 | ||
|
|
c1c8c70800 | ||
|
|
94bcbd1fb6 | ||
|
|
3e11a95056 | ||
|
|
310b3d9441 | ||
|
|
28fb91b344 | ||
|
|
c41f30c3ff | ||
|
|
fe17e0f948 | ||
|
|
bd7ec24e25 | ||
|
|
e4bb678875 | ||
|
|
84aa39e978 | ||
|
|
8cc7d68a00 | ||
|
|
0b24883cef | ||
|
|
2efcec776c | ||
|
|
61794ba97c | ||
|
|
87ee38ed2c | ||
|
|
7ad7eb71ca | ||
|
|
d3e645488a | ||
|
|
756ef14edc | ||
|
|
6064d200f9 | ||
|
|
84a3e2fe79 | ||
|
|
a4d3dd6197 | ||
|
|
0cd35913c3 | ||
|
|
26fe8213aa | ||
|
|
ac82597ac1 | ||
|
|
1988dfeaf0 | ||
|
|
da1d603463 | ||
|
|
75bcc382d9 | ||
|
|
b67f110620 | ||
|
|
27628f8655 | ||
|
|
55fcd1c3e3 | ||
|
|
a0aea42f75 | ||
|
|
6ff3c47630 | ||
|
|
8a34ee0eaa | ||
|
|
ba10f1cd55 | ||
|
|
cafe1315b2 | ||
|
|
984f839d33 | ||
|
|
d21e1c5a30 | ||
|
|
52fa010ac7 | ||
|
|
d4bebbb1ba | ||
|
|
6d2805917c | ||
|
|
377be5aa1f | ||
|
|
68272b0216 | ||
|
|
56c6e29819 | ||
|
|
4702703615 | ||
|
|
ad306f0479 | ||
|
|
e1356e1f6f | ||
|
|
abb5ae7fe4 | ||
|
|
e71c62b8d1 | ||
|
|
e2cb7b0237 | ||
|
|
aebb3d5d0a | ||
|
|
50287722f2 | ||
|
|
e8da6cb631 | ||
|
|
339ebc0483 | ||
|
|
ae46bf8907 | ||
|
|
276738f733 | ||
|
|
f10bc8072e | ||
|
|
7781f62ddf | ||
|
|
3d3f331404 | ||
|
|
d05feb1a4d | ||
|
|
7cfb5e7bc9 | ||
|
|
3722bb38c6 | ||
|
|
15c5bc2e6c | ||
|
|
0671b8b8c3 | ||
|
|
0e3db185cf | ||
|
|
97467c7dec | ||
|
|
4192a009da | ||
|
|
80614cff9b | ||
|
|
d4e9a3b6b1 | ||
|
|
3ca8858897 | ||
|
|
83df95d511 | ||
|
|
e5443717fb | ||
|
|
91a545813a | ||
|
|
be27e266da | ||
|
|
5450a67c2d | ||
|
|
54b6fc4219 | ||
|
|
ffab27c049 | ||
|
|
3720b50c3b | ||
|
|
af9526fb82 | ||
|
|
141b96bb31 | ||
|
|
bcdf541c8c | ||
|
|
f5b80640fe | ||
|
|
9f47e43b5c | ||
|
|
5d0aab4a8e | ||
|
|
3ef6b17bfc | ||
|
|
958f96c78a | ||
|
|
b33d7b3dd8 | ||
|
|
129fb5b356 | ||
|
|
86530287d6 | ||
|
|
c1026e815a | ||
|
|
c72d62f96d | ||
|
|
a63a05964b | ||
|
|
9509508f3e | ||
|
|
1a7a65f84d | ||
|
|
3034803258 | ||
|
|
78a8be07ad | ||
|
|
d27b82207c | ||
|
|
71c86acaa4 | ||
|
|
85e2fb63b3 | ||
|
|
024a810617 | ||
|
|
668d45c7cc | ||
|
|
09ea74245d | ||
|
|
386edd892c | ||
|
|
7116aefe43 | ||
|
|
93901ec480 | ||
|
|
8ae8f7ed4b | ||
|
|
9729c0d3c7 | ||
|
|
9d58b19ecc | ||
|
|
63b448120a | ||
|
|
0f1fb42326 | ||
|
|
a53b29467a | ||
|
|
7fc40dd7cc | ||
|
|
000eaee16a |
103
CHANGELOG.md
103
CHANGELOG.md
@@ -1,6 +1,109 @@
|
||||
Change log
|
||||
==========
|
||||
|
||||
1.8.0 (2016-06-14)
|
||||
-----------------
|
||||
|
||||
**Breaking Changes**
|
||||
|
||||
- As announced in 1.7.0, `docker-compose rm` now removes containers
|
||||
created by `docker-compose run` by default.
|
||||
|
||||
- Setting `entrypoint` on a service now empties out any default
|
||||
command that was set on the image (i.e. any `CMD` instruction in the
|
||||
Dockerfile used to build it). This makes it consistent with
|
||||
the `--entrypoint` flag to `docker run`.
|
||||
|
||||
New Features
|
||||
|
||||
- Added `docker-compose bundle`, a command that builds a bundle file
|
||||
to be consumed by the new *Docker Stack* commands in Docker 1.12.
|
||||
|
||||
- Added `docker-compose push`, a command that pushes service images
|
||||
to a registry.
|
||||
|
||||
- Compose now supports specifying a custom TLS version for
|
||||
interaction with the Docker Engine using the `COMPOSE_TLS_VERSION`
|
||||
environment variable.
|
||||
|
||||
Bug Fixes
|
||||
|
||||
- Fixed a bug where Compose would erroneously try to read `.env`
|
||||
at the project's root when it is a directory.
|
||||
|
||||
- `docker-compose run -e VAR` now passes `VAR` through from the shell
|
||||
to the container, as with `docker run -e VAR`.
|
||||
|
||||
- Improved config merging when multiple compose files are involved
|
||||
for several service sub-keys.
|
||||
|
||||
- Fixed a bug where volume mappings containing Windows drives would
|
||||
sometimes be parsed incorrectly.
|
||||
|
||||
- Fixed a bug in Windows environment where volume mappings of the
|
||||
host's root directory would be parsed incorrectly.
|
||||
|
||||
- Fixed a bug where `docker-compose config` would ouput an invalid
|
||||
Compose file if external networks were specified.
|
||||
|
||||
- Fixed an issue where unset buildargs would be assigned a string
|
||||
containing `'None'` instead of the expected empty value.
|
||||
|
||||
- Fixed a bug where yes/no prompts on Windows would not show before
|
||||
receiving input.
|
||||
|
||||
- Fixed a bug where trying to `docker-compose exec` on Windows
|
||||
without the `-d` option would exit with a stacktrace. This will
|
||||
still fail for the time being, but should do so gracefully.
|
||||
|
||||
- Fixed a bug where errors during `docker-compose up` would show
|
||||
an unrelated stacktrace at the end of the process.
|
||||
|
||||
- `docker-compose create` and `docker-compose start` show more
|
||||
descriptive error messages when something goes wrong.
|
||||
|
||||
|
||||
1.7.1 (2016-05-04)
|
||||
-----------------
|
||||
|
||||
Bug Fixes
|
||||
|
||||
- Fixed a bug where the output of `docker-compose config` for v1 files
|
||||
would be an invalid configuration file.
|
||||
|
||||
- Fixed a bug where `docker-compose config` would not check the validity
|
||||
of links.
|
||||
|
||||
- Fixed an issue where `docker-compose help` would not output a list of
|
||||
available commands and generic options as expected.
|
||||
|
||||
- Fixed an issue where filtering by service when using `docker-compose logs`
|
||||
would not apply for newly created services.
|
||||
|
||||
- Fixed a bug where unchanged services would sometimes be recreated in
|
||||
in the up phase when using Compose with Python 3.
|
||||
|
||||
- Fixed an issue where API errors encountered during the up phase would
|
||||
not be recognized as a failure state by Compose.
|
||||
|
||||
- Fixed a bug where Compose would raise a NameError because of an undefined
|
||||
exception name on non-Windows platforms.
|
||||
|
||||
- Fixed a bug where the wrong version of `docker-py` would sometimes be
|
||||
installed alongside Compose.
|
||||
|
||||
- Fixed a bug where the host value output by `docker-machine config default`
|
||||
would not be recognized as valid options by the `docker-compose`
|
||||
command line.
|
||||
|
||||
- Fixed an issue where Compose would sometimes exit unexpectedly while
|
||||
reading events broadcasted by a Swarm cluster.
|
||||
|
||||
- Corrected a statement in the docs about the location of the `.env` file,
|
||||
which is indeed read from the current directory, instead of in the same
|
||||
location as the Compose file.
|
||||
|
||||
|
||||
1.7.0 (2016-04-13)
|
||||
------------------
|
||||
|
||||
|
||||
@@ -49,11 +49,11 @@ RUN set -ex; \
|
||||
|
||||
# Install pip
|
||||
RUN set -ex; \
|
||||
curl -L https://pypi.python.org/packages/source/p/pip/pip-7.0.1.tar.gz | tar -xz; \
|
||||
cd pip-7.0.1; \
|
||||
curl -L https://pypi.python.org/packages/source/p/pip/pip-8.1.1.tar.gz | tar -xz; \
|
||||
cd pip-8.1.1; \
|
||||
python setup.py install; \
|
||||
cd ..; \
|
||||
rm -rf pip-7.0.1
|
||||
rm -rf pip-8.1.1
|
||||
|
||||
# Python3 requires a valid locale
|
||||
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
|
||||
FROM alpine:edge
|
||||
FROM alpine:3.4
|
||||
RUN apk -U add \
|
||||
python \
|
||||
py-pip
|
||||
|
||||
21
README.md
21
README.md
@@ -22,16 +22,17 @@ they can be run together in an isolated environment:
|
||||
|
||||
A `docker-compose.yml` looks like this:
|
||||
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "5000:5000"
|
||||
volumes:
|
||||
- .:/code
|
||||
links:
|
||||
- redis
|
||||
redis:
|
||||
image: redis
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "5000:5000"
|
||||
volumes:
|
||||
- .:/code
|
||||
redis:
|
||||
image: redis
|
||||
|
||||
For more information about the Compose file, see the
|
||||
[Compose file reference](https://github.com/docker/compose/blob/release/docs/compose-file.md)
|
||||
|
||||
19
ROADMAP.md
19
ROADMAP.md
@@ -1,13 +1,21 @@
|
||||
# Roadmap
|
||||
|
||||
## An even better tool for development environments
|
||||
|
||||
Compose is a great tool for development environments, but it could be even better. For example:
|
||||
|
||||
- It should be possible to define hostnames for containers which work from the host machine, e.g. “mywebcontainer.local”. This is needed by apps comprising multiple web services which generate links to one another (e.g. a frontend website and a separate admin webapp)
|
||||
|
||||
## More than just development environments
|
||||
|
||||
Over time we will extend Compose's remit to cover test, staging and production environments. This is not a simple task, and will take many incremental improvements such as:
|
||||
Compose currently works really well in development, but we want to make the Compose file format better for test, staging, and production environments. To support these use cases, there will need to be improvements to the file format, improvements to the command-line tool, integrations with other tools, and perhaps new tools altogether.
|
||||
|
||||
Some specific things we are considering:
|
||||
|
||||
- Compose currently will attempt to get your application into the correct state when running `up`, but it has a number of shortcomings:
|
||||
- It should roll back to a known good state if it fails.
|
||||
- It should allow a user to check the actions it is about to perform before running them.
|
||||
- It should be possible to partially modify the config file for different environments (dev/test/staging/prod), passing in e.g. custom ports or volume mount paths. ([#1377](https://github.com/docker/compose/issues/1377))
|
||||
- It should be possible to partially modify the config file for different environments (dev/test/staging/prod), passing in e.g. custom ports, volume mount paths, or volume drivers. ([#1377](https://github.com/docker/compose/issues/1377))
|
||||
- Compose should recommend a technique for zero-downtime deploys.
|
||||
- It should be possible to continuously attempt to keep an application in the correct state, instead of just performing `up` a single time.
|
||||
|
||||
@@ -22,10 +30,3 @@ The current state of integration is documented in [SWARM.md](SWARM.md).
|
||||
Compose works well for applications that are in a single repository and depend on services that are hosted on Docker Hub. If your application depends on another application within your organisation, Compose doesn't work as well.
|
||||
|
||||
There are several ideas about how this could work, such as [including external files](https://github.com/docker/fig/issues/318).
|
||||
|
||||
## An even better tool for development environments
|
||||
|
||||
Compose is a great tool for development environments, but it could be even better. For example:
|
||||
|
||||
- [Compose could watch your code and automatically kick off builds when something changes.](https://github.com/docker/fig/issues/184)
|
||||
- It should be possible to define hostnames for containers which work from the host machine, e.g. “mywebcontainer.local”. This is needed by apps comprising multiple web services which generate links to one another (e.g. a frontend website and a separate admin webapp)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
__version__ = '1.7.0'
|
||||
__version__ = '1.8.0'
|
||||
|
||||
257
compose/bundle.py
Normal file
257
compose/bundle.py
Normal file
@@ -0,0 +1,257 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import json
|
||||
import logging
|
||||
|
||||
import six
|
||||
from docker.utils import split_command
|
||||
from docker.utils.ports import split_port
|
||||
|
||||
from .cli.errors import UserError
|
||||
from .config.serialize import denormalize_config
|
||||
from .network import get_network_defs_for_service
|
||||
from .service import format_environment
|
||||
from .service import NoSuchImageError
|
||||
from .service import parse_repository_tag
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
SERVICE_KEYS = {
|
||||
'working_dir': 'WorkingDir',
|
||||
'user': 'User',
|
||||
'labels': 'Labels',
|
||||
}
|
||||
|
||||
IGNORED_KEYS = {'build'}
|
||||
|
||||
SUPPORTED_KEYS = {
|
||||
'image',
|
||||
'ports',
|
||||
'expose',
|
||||
'networks',
|
||||
'command',
|
||||
'environment',
|
||||
'entrypoint',
|
||||
} | set(SERVICE_KEYS)
|
||||
|
||||
VERSION = '0.1'
|
||||
|
||||
|
||||
class NeedsPush(Exception):
|
||||
def __init__(self, image_name):
|
||||
self.image_name = image_name
|
||||
|
||||
|
||||
class NeedsPull(Exception):
|
||||
def __init__(self, image_name):
|
||||
self.image_name = image_name
|
||||
|
||||
|
||||
class MissingDigests(Exception):
|
||||
def __init__(self, needs_push, needs_pull):
|
||||
self.needs_push = needs_push
|
||||
self.needs_pull = needs_pull
|
||||
|
||||
|
||||
def serialize_bundle(config, image_digests):
|
||||
return json.dumps(to_bundle(config, image_digests), indent=2, sort_keys=True)
|
||||
|
||||
|
||||
def get_image_digests(project, allow_push=False):
|
||||
digests = {}
|
||||
needs_push = set()
|
||||
needs_pull = set()
|
||||
|
||||
for service in project.services:
|
||||
try:
|
||||
digests[service.name] = get_image_digest(
|
||||
service,
|
||||
allow_push=allow_push,
|
||||
)
|
||||
except NeedsPush as e:
|
||||
needs_push.add(e.image_name)
|
||||
except NeedsPull as e:
|
||||
needs_pull.add(e.image_name)
|
||||
|
||||
if needs_push or needs_pull:
|
||||
raise MissingDigests(needs_push, needs_pull)
|
||||
|
||||
return digests
|
||||
|
||||
|
||||
def get_image_digest(service, allow_push=False):
|
||||
if 'image' not in service.options:
|
||||
raise UserError(
|
||||
"Service '{s.name}' doesn't define an image tag. An image name is "
|
||||
"required to generate a proper image digest for the bundle. Specify "
|
||||
"an image repo and tag with the 'image' option.".format(s=service))
|
||||
|
||||
_, _, separator = parse_repository_tag(service.options['image'])
|
||||
# Compose file already uses a digest, no lookup required
|
||||
if separator == '@':
|
||||
return service.options['image']
|
||||
|
||||
try:
|
||||
image = service.image()
|
||||
except NoSuchImageError:
|
||||
action = 'build' if 'build' in service.options else 'pull'
|
||||
raise UserError(
|
||||
"Image not found for service '{service}'. "
|
||||
"You might need to run `docker-compose {action} {service}`."
|
||||
.format(service=service.name, action=action))
|
||||
|
||||
if image['RepoDigests']:
|
||||
# TODO: pick a digest based on the image tag if there are multiple
|
||||
# digests
|
||||
return image['RepoDigests'][0]
|
||||
|
||||
if 'build' not in service.options:
|
||||
raise NeedsPull(service.image_name)
|
||||
|
||||
if not allow_push:
|
||||
raise NeedsPush(service.image_name)
|
||||
|
||||
return push_image(service)
|
||||
|
||||
|
||||
def push_image(service):
|
||||
try:
|
||||
digest = service.push()
|
||||
except:
|
||||
log.error(
|
||||
"Failed to push image for service '{s.name}'. Please use an "
|
||||
"image tag that can be pushed to a Docker "
|
||||
"registry.".format(s=service))
|
||||
raise
|
||||
|
||||
if not digest:
|
||||
raise ValueError("Failed to get digest for %s" % service.name)
|
||||
|
||||
repo, _, _ = parse_repository_tag(service.options['image'])
|
||||
identifier = '{repo}@{digest}'.format(repo=repo, digest=digest)
|
||||
|
||||
# only do this if RepoDigests isn't already populated
|
||||
image = service.image()
|
||||
if not image['RepoDigests']:
|
||||
# Pull by digest so that image['RepoDigests'] is populated for next time
|
||||
# and we don't have to pull/push again
|
||||
service.client.pull(identifier)
|
||||
log.info("Stored digest for {}".format(service.image_name))
|
||||
|
||||
return identifier
|
||||
|
||||
|
||||
def to_bundle(config, image_digests):
|
||||
if config.networks:
|
||||
log.warn("Unsupported top level key 'networks' - ignoring")
|
||||
|
||||
if config.volumes:
|
||||
log.warn("Unsupported top level key 'volumes' - ignoring")
|
||||
|
||||
config = denormalize_config(config)
|
||||
|
||||
return {
|
||||
'Version': VERSION,
|
||||
'Services': {
|
||||
name: convert_service_to_bundle(
|
||||
name,
|
||||
service_dict,
|
||||
image_digests[name],
|
||||
)
|
||||
for name, service_dict in config['services'].items()
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def convert_service_to_bundle(name, service_dict, image_digest):
|
||||
container_config = {'Image': image_digest}
|
||||
|
||||
for key, value in service_dict.items():
|
||||
if key in IGNORED_KEYS:
|
||||
continue
|
||||
|
||||
if key not in SUPPORTED_KEYS:
|
||||
log.warn("Unsupported key '{}' in services.{} - ignoring".format(key, name))
|
||||
continue
|
||||
|
||||
if key == 'environment':
|
||||
container_config['Env'] = format_environment({
|
||||
envkey: envvalue for envkey, envvalue in value.items()
|
||||
if envvalue
|
||||
})
|
||||
continue
|
||||
|
||||
if key in SERVICE_KEYS:
|
||||
container_config[SERVICE_KEYS[key]] = value
|
||||
continue
|
||||
|
||||
set_command_and_args(
|
||||
container_config,
|
||||
service_dict.get('entrypoint', []),
|
||||
service_dict.get('command', []))
|
||||
container_config['Networks'] = make_service_networks(name, service_dict)
|
||||
|
||||
ports = make_port_specs(service_dict)
|
||||
if ports:
|
||||
container_config['Ports'] = ports
|
||||
|
||||
return container_config
|
||||
|
||||
|
||||
# See https://github.com/docker/swarmkit/blob//agent/exec/container/container.go#L95
|
||||
def set_command_and_args(config, entrypoint, command):
|
||||
if isinstance(entrypoint, six.string_types):
|
||||
entrypoint = split_command(entrypoint)
|
||||
if isinstance(command, six.string_types):
|
||||
command = split_command(command)
|
||||
|
||||
if entrypoint:
|
||||
config['Command'] = entrypoint + command
|
||||
return
|
||||
|
||||
if command:
|
||||
config['Args'] = command
|
||||
|
||||
|
||||
def make_service_networks(name, service_dict):
|
||||
networks = []
|
||||
|
||||
for network_name, network_def in get_network_defs_for_service(service_dict).items():
|
||||
for key in network_def.keys():
|
||||
log.warn(
|
||||
"Unsupported key '{}' in services.{}.networks.{} - ignoring"
|
||||
.format(key, name, network_name))
|
||||
|
||||
networks.append(network_name)
|
||||
|
||||
return networks
|
||||
|
||||
|
||||
def make_port_specs(service_dict):
|
||||
ports = []
|
||||
|
||||
internal_ports = [
|
||||
internal_port
|
||||
for port_def in service_dict.get('ports', [])
|
||||
for internal_port in split_port(port_def)[0]
|
||||
]
|
||||
|
||||
internal_ports += service_dict.get('expose', [])
|
||||
|
||||
for internal_port in internal_ports:
|
||||
spec = make_port_spec(internal_port)
|
||||
if spec not in ports:
|
||||
ports.append(spec)
|
||||
|
||||
return ports
|
||||
|
||||
|
||||
def make_port_spec(value):
|
||||
components = six.text_type(value).partition('/')
|
||||
return {
|
||||
'Protocol': components[2] or 'tcp',
|
||||
'Port': int(components[0]),
|
||||
}
|
||||
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import ssl
|
||||
|
||||
import six
|
||||
|
||||
@@ -21,17 +22,30 @@ log = logging.getLogger(__name__)
|
||||
|
||||
def project_from_options(project_dir, options):
|
||||
environment = Environment.from_env_file(project_dir)
|
||||
host = options.get('--host')
|
||||
if host is not None:
|
||||
host = host.lstrip('=')
|
||||
return get_project(
|
||||
project_dir,
|
||||
get_config_path_from_options(project_dir, options, environment),
|
||||
project_name=options.get('--project-name'),
|
||||
verbose=options.get('--verbose'),
|
||||
host=options.get('--host'),
|
||||
host=host,
|
||||
tls_config=tls_config_from_options(options),
|
||||
environment=environment
|
||||
)
|
||||
|
||||
|
||||
def get_config_from_options(base_dir, options):
|
||||
environment = Environment.from_env_file(base_dir)
|
||||
config_path = get_config_path_from_options(
|
||||
base_dir, options, environment
|
||||
)
|
||||
return config.load(
|
||||
config.find(base_dir, config_path, environment)
|
||||
)
|
||||
|
||||
|
||||
def get_config_path_from_options(base_dir, options, environment):
|
||||
file_option = options.get('--file')
|
||||
if file_option:
|
||||
@@ -43,10 +57,29 @@ def get_config_path_from_options(base_dir, options, environment):
|
||||
return None
|
||||
|
||||
|
||||
def get_client(environment, verbose=False, version=None, tls_config=None, host=None):
|
||||
def get_tls_version(environment):
|
||||
compose_tls_version = environment.get('COMPOSE_TLS_VERSION', None)
|
||||
if not compose_tls_version:
|
||||
return None
|
||||
|
||||
tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
|
||||
if not hasattr(ssl, tls_attr_name):
|
||||
log.warn(
|
||||
'The "{}" protocol is unavailable. You may need to update your '
|
||||
'version of Python or OpenSSL. Falling back to TLSv1 (default).'
|
||||
.format(compose_tls_version)
|
||||
)
|
||||
return None
|
||||
|
||||
return getattr(ssl, tls_attr_name)
|
||||
|
||||
|
||||
def get_client(environment, verbose=False, version=None, tls_config=None, host=None,
|
||||
tls_version=None):
|
||||
|
||||
client = docker_client(
|
||||
version=version, tls_config=tls_config, host=host,
|
||||
environment=environment
|
||||
environment=environment, tls_version=get_tls_version(environment)
|
||||
)
|
||||
if verbose:
|
||||
version_info = six.iteritems(client.version())
|
||||
@@ -71,6 +104,7 @@ def get_project(project_dir, config_path=None, project_name=None, verbose=False,
|
||||
api_version = environment.get(
|
||||
'COMPOSE_API_VERSION',
|
||||
API_VERSIONS[config_data.version])
|
||||
|
||||
client = get_client(
|
||||
verbose=verbose, version=api_version, tls_config=tls_config,
|
||||
host=host, environment=environment
|
||||
|
||||
@@ -10,6 +10,7 @@ from docker.utils import kwargs_from_env
|
||||
|
||||
from ..const import HTTP_TIMEOUT
|
||||
from .errors import UserError
|
||||
from .utils import generate_user_agent
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@@ -39,17 +40,14 @@ def tls_config_from_options(options):
|
||||
return None
|
||||
|
||||
|
||||
def docker_client(environment, version=None, tls_config=None, host=None):
|
||||
def docker_client(environment, version=None, tls_config=None, host=None,
|
||||
tls_version=None):
|
||||
"""
|
||||
Returns a docker-py client configured using environment variables
|
||||
according to the same logic as the official Docker client.
|
||||
"""
|
||||
if 'DOCKER_CLIENT_TIMEOUT' in environment:
|
||||
log.warn("The DOCKER_CLIENT_TIMEOUT environment variable is deprecated. "
|
||||
"Please use COMPOSE_HTTP_TIMEOUT instead.")
|
||||
|
||||
try:
|
||||
kwargs = kwargs_from_env(environment=environment)
|
||||
kwargs = kwargs_from_env(environment=environment, ssl_version=tls_version)
|
||||
except TLSParameterError:
|
||||
raise UserError(
|
||||
"TLS configuration is invalid - make sure your DOCKER_TLS_VERIFY "
|
||||
@@ -70,4 +68,6 @@ def docker_client(environment, version=None, tls_config=None, host=None):
|
||||
else:
|
||||
kwargs['timeout'] = HTTP_TIMEOUT
|
||||
|
||||
kwargs['user_agent'] = generate_user_agent()
|
||||
|
||||
return Client(**kwargs)
|
||||
|
||||
@@ -13,8 +13,8 @@ from requests.exceptions import SSLError
|
||||
from requests.packages.urllib3.exceptions import ReadTimeoutError
|
||||
|
||||
from ..const import API_VERSION_TO_ENGINE_VERSION
|
||||
from ..const import HTTP_TIMEOUT
|
||||
from .utils import call_silently
|
||||
from .utils import is_docker_for_mac_installed
|
||||
from .utils import is_mac
|
||||
from .utils import is_ubuntu
|
||||
|
||||
@@ -46,18 +46,9 @@ def handle_connection_errors(client):
|
||||
raise ConnectionError()
|
||||
except RequestsConnectionError as e:
|
||||
if e.args and isinstance(e.args[0], ReadTimeoutError):
|
||||
log_timeout_error()
|
||||
log_timeout_error(client.timeout)
|
||||
raise ConnectionError()
|
||||
|
||||
if call_silently(['which', 'docker']) != 0:
|
||||
if is_mac():
|
||||
exit_with_error(docker_not_found_mac)
|
||||
if is_ubuntu():
|
||||
exit_with_error(docker_not_found_ubuntu)
|
||||
exit_with_error(docker_not_found_generic)
|
||||
if call_silently(['which', 'docker-machine']) == 0:
|
||||
exit_with_error(conn_error_docker_machine)
|
||||
exit_with_error(conn_error_generic.format(url=client.base_url))
|
||||
exit_with_error(get_conn_error_message(client.base_url))
|
||||
except APIError as e:
|
||||
log_api_error(e, client.api_version)
|
||||
raise ConnectionError()
|
||||
@@ -66,13 +57,13 @@ def handle_connection_errors(client):
|
||||
raise ConnectionError()
|
||||
|
||||
|
||||
def log_timeout_error():
|
||||
def log_timeout_error(timeout):
|
||||
log.error(
|
||||
"An HTTP request took too long to complete. Retry with --verbose to "
|
||||
"obtain debug information.\n"
|
||||
"If you encounter this issue regularly because of slow network "
|
||||
"conditions, consider setting COMPOSE_HTTP_TIMEOUT to a higher "
|
||||
"value (current value: %s)." % HTTP_TIMEOUT)
|
||||
"value (current value: %s)." % timeout)
|
||||
|
||||
|
||||
def log_api_error(e, client_version):
|
||||
@@ -97,6 +88,20 @@ def exit_with_error(msg):
|
||||
raise ConnectionError()
|
||||
|
||||
|
||||
def get_conn_error_message(url):
|
||||
if call_silently(['which', 'docker']) != 0:
|
||||
if is_mac():
|
||||
return docker_not_found_mac
|
||||
if is_ubuntu():
|
||||
return docker_not_found_ubuntu
|
||||
return docker_not_found_generic
|
||||
if is_docker_for_mac_installed():
|
||||
return conn_error_docker_for_mac
|
||||
if call_silently(['which', 'docker-machine']) == 0:
|
||||
return conn_error_docker_machine
|
||||
return conn_error_generic.format(url=url)
|
||||
|
||||
|
||||
docker_not_found_mac = """
|
||||
Couldn't connect to Docker daemon. You might need to install Docker:
|
||||
|
||||
@@ -122,6 +127,10 @@ conn_error_docker_machine = """
|
||||
Couldn't connect to Docker daemon - you might need to run `docker-machine start default`.
|
||||
"""
|
||||
|
||||
conn_error_docker_for_mac = """
|
||||
Couldn't connect to Docker daemon. You might need to start Docker for Mac.
|
||||
"""
|
||||
|
||||
|
||||
conn_error_generic = """
|
||||
Couldn't connect to Docker daemon at {url} - is it running?
|
||||
|
||||
@@ -14,7 +14,9 @@ from operator import attrgetter
|
||||
from . import errors
|
||||
from . import signals
|
||||
from .. import __version__
|
||||
from ..config import config
|
||||
from ..bundle import get_image_digests
|
||||
from ..bundle import MissingDigests
|
||||
from ..bundle import serialize_bundle
|
||||
from ..config import ConfigurationError
|
||||
from ..config import parse_environment
|
||||
from ..config.environment import Environment
|
||||
@@ -24,12 +26,14 @@ from ..const import IS_WINDOWS_PLATFORM
|
||||
from ..progress_stream import StreamOutputError
|
||||
from ..project import NoSuchService
|
||||
from ..project import OneOffFilter
|
||||
from ..project import ProjectError
|
||||
from ..service import BuildAction
|
||||
from ..service import BuildError
|
||||
from ..service import ConvergenceStrategy
|
||||
from ..service import ImageType
|
||||
from ..service import NeedsBuildError
|
||||
from .command import get_config_path_from_options
|
||||
from ..service import OperationFailedError
|
||||
from .command import get_config_from_options
|
||||
from .command import project_from_options
|
||||
from .docopt_command import DocoptDispatcher
|
||||
from .docopt_command import get_handler
|
||||
@@ -58,7 +62,8 @@ def main():
|
||||
except (KeyboardInterrupt, signals.ShutdownException):
|
||||
log.error("Aborting.")
|
||||
sys.exit(1)
|
||||
except (UserError, NoSuchService, ConfigurationError) as e:
|
||||
except (UserError, NoSuchService, ConfigurationError,
|
||||
ProjectError, OperationFailedError) as e:
|
||||
log.error(e.msg)
|
||||
sys.exit(1)
|
||||
except BuildError as e:
|
||||
@@ -97,7 +102,7 @@ def perform_command(options, handler, command_options):
|
||||
handler(command_options)
|
||||
return
|
||||
|
||||
if options['COMMAND'] == 'config':
|
||||
if options['COMMAND'] in ('config', 'bundle'):
|
||||
command = TopLevelCommand(None)
|
||||
handler(command, options, command_options)
|
||||
return
|
||||
@@ -142,7 +147,7 @@ class TopLevelCommand(object):
|
||||
"""Define and run multi-container applications with Docker.
|
||||
|
||||
Usage:
|
||||
docker-compose [-f=<arg>...] [options] [COMMAND] [ARGS...]
|
||||
docker-compose [-f <arg>...] [options] [COMMAND] [ARGS...]
|
||||
docker-compose -h|--help
|
||||
|
||||
Options:
|
||||
@@ -163,6 +168,7 @@ class TopLevelCommand(object):
|
||||
|
||||
Commands:
|
||||
build Build or rebuild services
|
||||
bundle Generate a Docker bundle from the Compose file
|
||||
config Validate and view the compose file
|
||||
create Create services
|
||||
down Stop and remove containers, networks, images, and volumes
|
||||
@@ -175,6 +181,7 @@ class TopLevelCommand(object):
|
||||
port Print the public port for a port binding
|
||||
ps List containers
|
||||
pull Pulls service images
|
||||
push Push service images
|
||||
restart Restart services
|
||||
rm Remove stopped containers
|
||||
run Run a one-off command
|
||||
@@ -211,6 +218,75 @@ class TopLevelCommand(object):
|
||||
pull=bool(options.get('--pull', False)),
|
||||
force_rm=bool(options.get('--force-rm', False)))
|
||||
|
||||
def bundle(self, config_options, options):
|
||||
"""
|
||||
Generate a Distributed Application Bundle (DAB) from the Compose file.
|
||||
|
||||
Images must have digests stored, which requires interaction with a
|
||||
Docker registry. If digests aren't stored for all images, you can fetch
|
||||
them with `docker-compose pull` or `docker-compose push`. To push images
|
||||
automatically when bundling, pass `--push-images`. Only services with
|
||||
a `build` option specified will have their images pushed.
|
||||
|
||||
Usage: bundle [options]
|
||||
|
||||
Options:
|
||||
--push-images Automatically push images for any services
|
||||
which have a `build` option specified.
|
||||
|
||||
-o, --output PATH Path to write the bundle file to.
|
||||
Defaults to "<project name>.dab".
|
||||
"""
|
||||
self.project = project_from_options('.', config_options)
|
||||
compose_config = get_config_from_options(self.project_dir, config_options)
|
||||
|
||||
output = options["--output"]
|
||||
if not output:
|
||||
output = "{}.dab".format(self.project.name)
|
||||
|
||||
with errors.handle_connection_errors(self.project.client):
|
||||
try:
|
||||
image_digests = get_image_digests(
|
||||
self.project,
|
||||
allow_push=options['--push-images'],
|
||||
)
|
||||
except MissingDigests as e:
|
||||
def list_images(images):
|
||||
return "\n".join(" {}".format(name) for name in sorted(images))
|
||||
|
||||
paras = ["Some images are missing digests."]
|
||||
|
||||
if e.needs_push:
|
||||
command_hint = (
|
||||
"Use `docker-compose push {}` to push them. "
|
||||
"You can do this automatically with `docker-compose bundle --push-images`."
|
||||
.format(" ".join(sorted(e.needs_push)))
|
||||
)
|
||||
paras += [
|
||||
"The following images can be pushed:",
|
||||
list_images(e.needs_push),
|
||||
command_hint,
|
||||
]
|
||||
|
||||
if e.needs_pull:
|
||||
command_hint = (
|
||||
"Use `docker-compose pull {}` to pull them. "
|
||||
.format(" ".join(sorted(e.needs_pull)))
|
||||
)
|
||||
|
||||
paras += [
|
||||
"The following images need to be pulled:",
|
||||
list_images(e.needs_pull),
|
||||
command_hint,
|
||||
]
|
||||
|
||||
raise UserError("\n\n".join(paras))
|
||||
|
||||
with open(output, 'w') as f:
|
||||
f.write(serialize_bundle(compose_config, image_digests))
|
||||
|
||||
log.info("Wrote bundle to {}".format(output))
|
||||
|
||||
def config(self, config_options, options):
|
||||
"""
|
||||
Validate and view the compose file.
|
||||
@@ -223,13 +299,7 @@ class TopLevelCommand(object):
|
||||
--services Print the service names, one per line.
|
||||
|
||||
"""
|
||||
environment = Environment.from_env_file(self.project_dir)
|
||||
config_path = get_config_path_from_options(
|
||||
self.project_dir, config_options, environment
|
||||
)
|
||||
compose_config = config.load(
|
||||
config.find(self.project_dir, config_path, environment)
|
||||
)
|
||||
compose_config = get_config_from_options(self.project_dir, config_options)
|
||||
|
||||
if options['--quiet']:
|
||||
return
|
||||
@@ -264,18 +334,29 @@ class TopLevelCommand(object):
|
||||
|
||||
def down(self, options):
|
||||
"""
|
||||
Stop containers and remove containers, networks, volumes, and images
|
||||
created by `up`. Only containers and networks are removed by default.
|
||||
Stops containers and removes containers, networks, volumes, and images
|
||||
created by `up`.
|
||||
|
||||
By default, the only things removed are:
|
||||
|
||||
- Containers for services defined in the Compose file
|
||||
- Networks defined in the `networks` section of the Compose file
|
||||
- The default network, if one is used
|
||||
|
||||
Networks and volumes defined as `external` are never removed.
|
||||
|
||||
Usage: down [options]
|
||||
|
||||
Options:
|
||||
--rmi type Remove images, type may be one of: 'all' to remove
|
||||
all images, or 'local' to remove only images that
|
||||
don't have an custom name set by the `image` field
|
||||
-v, --volumes Remove data volumes
|
||||
--remove-orphans Remove containers for services not defined in
|
||||
the Compose file
|
||||
--rmi type Remove images. Type must be one of:
|
||||
'all': Remove all images used by any service.
|
||||
'local': Remove only images that don't have a custom tag
|
||||
set by the `image` field.
|
||||
-v, --volumes Remove named volumes declared in the `volumes` section
|
||||
of the Compose file and anonymous volumes
|
||||
attached to containers.
|
||||
--remove-orphans Remove containers for services not defined in the
|
||||
Compose file
|
||||
"""
|
||||
image_type = image_type_from_opt('--rmi', options['--rmi'])
|
||||
self.project.down(image_type, options['--volumes'], options['--remove-orphans'])
|
||||
@@ -322,6 +403,13 @@ class TopLevelCommand(object):
|
||||
"""
|
||||
index = int(options.get('--index'))
|
||||
service = self.project.get_service(options['SERVICE'])
|
||||
detach = options['-d']
|
||||
|
||||
if IS_WINDOWS_PLATFORM and not detach:
|
||||
raise UserError(
|
||||
"Interactive mode is not yet supported on Windows.\n"
|
||||
"Please pass the -d flag when using `docker-compose exec`."
|
||||
)
|
||||
try:
|
||||
container = service.get_container(number=index)
|
||||
except ValueError as e:
|
||||
@@ -338,7 +426,7 @@ class TopLevelCommand(object):
|
||||
|
||||
exec_id = container.create_exec(command, **create_exec_options)
|
||||
|
||||
if options['-d']:
|
||||
if detach:
|
||||
container.start_exec(exec_id, tty=tty)
|
||||
return
|
||||
|
||||
@@ -361,10 +449,14 @@ class TopLevelCommand(object):
|
||||
"""
|
||||
Get help on a command.
|
||||
|
||||
Usage: help COMMAND
|
||||
Usage: help [COMMAND]
|
||||
"""
|
||||
handler = get_handler(cls, options['COMMAND'])
|
||||
raise SystemExit(getdoc(handler))
|
||||
if options['COMMAND']:
|
||||
subject = get_handler(cls, options['COMMAND'])
|
||||
else:
|
||||
subject = cls
|
||||
|
||||
print(getdoc(subject))
|
||||
|
||||
def kill(self, options):
|
||||
"""
|
||||
@@ -411,7 +503,8 @@ class TopLevelCommand(object):
|
||||
self.project,
|
||||
containers,
|
||||
options['--no-color'],
|
||||
log_args).run()
|
||||
log_args,
|
||||
event_stream=self.project.events(service_names=options['SERVICE'])).run()
|
||||
|
||||
def pause(self, options):
|
||||
"""
|
||||
@@ -494,12 +587,26 @@ class TopLevelCommand(object):
|
||||
ignore_pull_failures=options.get('--ignore-pull-failures')
|
||||
)
|
||||
|
||||
def push(self, options):
|
||||
"""
|
||||
Pushes images for services.
|
||||
|
||||
Usage: push [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--ignore-push-failures Push what it can and ignores images with push failures.
|
||||
"""
|
||||
self.project.push(
|
||||
service_names=options['SERVICE'],
|
||||
ignore_push_failures=options.get('--ignore-push-failures')
|
||||
)
|
||||
|
||||
def rm(self, options):
|
||||
"""
|
||||
Remove stopped service containers.
|
||||
Removes stopped service containers.
|
||||
|
||||
By default, volumes attached to containers will not be removed. You can see all
|
||||
volumes with `docker volume ls`.
|
||||
By default, anonymous volumes attached to containers will not be removed. You
|
||||
can override this with `-v`. To list all volumes, use `docker volume ls`.
|
||||
|
||||
Any data which is not in a volume will be lost.
|
||||
|
||||
@@ -507,18 +614,16 @@ class TopLevelCommand(object):
|
||||
|
||||
Options:
|
||||
-f, --force Don't ask to confirm removal
|
||||
-v Remove volumes associated with containers
|
||||
-a, --all Also remove one-off containers created by
|
||||
-v Remove any anonymous volumes attached to containers
|
||||
-a, --all Obsolete. Also remove one-off containers created by
|
||||
docker-compose run
|
||||
"""
|
||||
if options.get('--all'):
|
||||
one_off = OneOffFilter.include
|
||||
else:
|
||||
log.warn(
|
||||
'Not including one-off containers created by `docker-compose run`.\n'
|
||||
'To include them, use `docker-compose rm --all`.\n'
|
||||
'This will be the default behavior in the next version of Compose.\n')
|
||||
one_off = OneOffFilter.exclude
|
||||
'--all flag is obsolete. This is now the default behavior '
|
||||
'of `docker-compose rm`'
|
||||
)
|
||||
one_off = OneOffFilter.include
|
||||
|
||||
all_containers = self.project.containers(
|
||||
service_names=options['SERVICE'], stopped=True, one_off=one_off
|
||||
@@ -582,8 +687,10 @@ class TopLevelCommand(object):
|
||||
'can not be used togather'
|
||||
)
|
||||
|
||||
if options['COMMAND']:
|
||||
if options['COMMAND'] is not None:
|
||||
command = [options['COMMAND']] + options['ARGS']
|
||||
elif options['--entrypoint'] is not None:
|
||||
command = []
|
||||
else:
|
||||
command = service.options.get('command')
|
||||
|
||||
@@ -806,7 +913,9 @@ def build_container_options(options, detach, command):
|
||||
}
|
||||
|
||||
if options['-e']:
|
||||
container_options['environment'] = parse_environment(options['-e'])
|
||||
container_options['environment'] = Environment.from_command_line(
|
||||
parse_environment(options['-e'])
|
||||
)
|
||||
|
||||
if options['--entrypoint']:
|
||||
container_options['entrypoint'] = options.get('--entrypoint')
|
||||
|
||||
@@ -6,12 +6,19 @@ import os
|
||||
import platform
|
||||
import ssl
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
import docker
|
||||
from six.moves import input
|
||||
|
||||
import compose
|
||||
|
||||
# WindowsError is not defined on non-win32 platforms. Avoid runtime errors by
|
||||
# defining it as OSError (its parent class) if missing.
|
||||
try:
|
||||
WindowsError
|
||||
except NameError:
|
||||
WindowsError = OSError
|
||||
|
||||
|
||||
def yesno(prompt, default=None):
|
||||
"""
|
||||
@@ -35,6 +42,16 @@ def yesno(prompt, default=None):
|
||||
return None
|
||||
|
||||
|
||||
def input(prompt):
|
||||
"""
|
||||
Version of input (raw_input in Python 2) which forces a flush of sys.stdout
|
||||
to avoid problems where the prompt fails to appear due to line buffering
|
||||
"""
|
||||
sys.stdout.write(prompt)
|
||||
sys.stdout.flush()
|
||||
return sys.stdin.readline().rstrip('\n')
|
||||
|
||||
|
||||
def call_silently(*args, **kwargs):
|
||||
"""
|
||||
Like subprocess.call(), but redirects stdout and stderr to /dev/null.
|
||||
@@ -86,3 +103,22 @@ def get_build_version():
|
||||
|
||||
with open(filename) as fh:
|
||||
return fh.read().strip()
|
||||
|
||||
|
||||
def is_docker_for_mac_installed():
|
||||
return is_mac() and os.path.isdir('/Applications/Docker.app')
|
||||
|
||||
|
||||
def generate_user_agent():
|
||||
parts = [
|
||||
"docker-compose/{}".format(compose.__version__),
|
||||
"docker-py/{}".format(docker.__version__),
|
||||
]
|
||||
try:
|
||||
p_system = platform.system()
|
||||
p_release = platform.release()
|
||||
except IOError:
|
||||
pass
|
||||
else:
|
||||
parts.append("{}/{}".format(p_system, p_release))
|
||||
return " ".join(parts)
|
||||
|
||||
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
|
||||
|
||||
import functools
|
||||
import logging
|
||||
import operator
|
||||
import ntpath
|
||||
import os
|
||||
import string
|
||||
import sys
|
||||
@@ -37,6 +37,7 @@ from .validation import validate_against_config_schema
|
||||
from .validation import validate_config_section
|
||||
from .validation import validate_depends_on
|
||||
from .validation import validate_extends_file_path
|
||||
from .validation import validate_links
|
||||
from .validation import validate_network_mode
|
||||
from .validation import validate_service_constraints
|
||||
from .validation import validate_top_level_object
|
||||
@@ -580,6 +581,7 @@ def validate_service(service_config, service_names, version):
|
||||
validate_ulimits(service_config)
|
||||
validate_network_mode(service_config, service_names)
|
||||
validate_depends_on(service_config, service_names)
|
||||
validate_links(service_config, service_names)
|
||||
|
||||
if not service_dict.get('image') and has_uppercase(service_name):
|
||||
raise ConfigurationError(
|
||||
@@ -726,7 +728,7 @@ class MergeDict(dict):
|
||||
|
||||
merged = parse_sequence_func(self.base.get(field, []))
|
||||
merged.update(parse_sequence_func(self.override.get(field, [])))
|
||||
self[field] = [item.repr() for item in merged.values()]
|
||||
self[field] = [item.repr() for item in sorted(merged.values())]
|
||||
|
||||
def merge_scalar(self, field):
|
||||
if self.needs_merge(field):
|
||||
@@ -746,13 +748,10 @@ def merge_service_dicts(base, override, version):
|
||||
md.merge_field(field, merge_path_mappings)
|
||||
|
||||
for field in [
|
||||
'depends_on',
|
||||
'expose',
|
||||
'external_links',
|
||||
'ports',
|
||||
'volumes_from',
|
||||
'ports', 'cap_add', 'cap_drop', 'expose', 'external_links',
|
||||
'security_opt', 'volumes_from', 'depends_on',
|
||||
]:
|
||||
md.merge_field(field, operator.add, default=[])
|
||||
md.merge_field(field, merge_unique_items_lists, default=[])
|
||||
|
||||
for field in ['dns', 'dns_search', 'env_file', 'tmpfs']:
|
||||
md.merge_field(field, merge_list_or_string)
|
||||
@@ -768,6 +767,10 @@ def merge_service_dicts(base, override, version):
|
||||
return dict(md)
|
||||
|
||||
|
||||
def merge_unique_items_lists(base, override):
|
||||
return sorted(set().union(base, override))
|
||||
|
||||
|
||||
def merge_build(output, base, override):
|
||||
def to_dict(service):
|
||||
build_config = service.get('build', {})
|
||||
@@ -928,7 +931,7 @@ def dict_from_path_mappings(path_mappings):
|
||||
|
||||
|
||||
def path_mappings_from_dict(d):
|
||||
return [join_path_mapping(v) for v in d.items()]
|
||||
return [join_path_mapping(v) for v in sorted(d.items())]
|
||||
|
||||
|
||||
def split_path_mapping(volume_path):
|
||||
@@ -937,12 +940,13 @@ def split_path_mapping(volume_path):
|
||||
path. Using splitdrive so windows absolute paths won't cause issues with
|
||||
splitting on ':'.
|
||||
"""
|
||||
# splitdrive has limitations when it comes to relative paths, so when it's
|
||||
# relative, handle special case to set the drive to ''
|
||||
if volume_path.startswith('.') or volume_path.startswith('~'):
|
||||
# splitdrive is very naive, so handle special cases where we can be sure
|
||||
# the first character is not a drive.
|
||||
if (volume_path.startswith('.') or volume_path.startswith('~') or
|
||||
volume_path.startswith('/')):
|
||||
drive, volume_config = '', volume_path
|
||||
else:
|
||||
drive, volume_config = os.path.splitdrive(volume_path)
|
||||
drive, volume_config = ntpath.splitdrive(volume_path)
|
||||
|
||||
if ':' in volume_config:
|
||||
(host, container) = volume_config.split(':', 1)
|
||||
|
||||
@@ -28,6 +28,8 @@ def env_vars_from_file(filename):
|
||||
"""
|
||||
if not os.path.exists(filename):
|
||||
raise ConfigurationError("Couldn't find env file: %s" % filename)
|
||||
elif not os.path.isfile(filename):
|
||||
raise ConfigurationError("%s is not a file." % (filename))
|
||||
env = {}
|
||||
for line in codecs.open(filename, 'r', 'utf-8'):
|
||||
line = line.strip()
|
||||
@@ -58,6 +60,18 @@ class Environment(dict):
|
||||
instance.update(os.environ)
|
||||
return instance
|
||||
|
||||
@classmethod
|
||||
def from_command_line(cls, parsed_env_opts):
|
||||
result = cls()
|
||||
for k, v in parsed_env_opts.items():
|
||||
# Values from the command line take priority, unless they're unset
|
||||
# in which case they take the value from the system's environment
|
||||
if v is None and k in os.environ:
|
||||
result[k] = os.environ[k]
|
||||
else:
|
||||
result[k] = v
|
||||
return result
|
||||
|
||||
def __getitem__(self, key):
|
||||
try:
|
||||
return super(Environment, self).__getitem__(key)
|
||||
|
||||
@@ -3,10 +3,11 @@ from __future__ import unicode_literals
|
||||
|
||||
|
||||
VERSION_EXPLANATION = (
|
||||
'Either specify a version of "2" (or "2.0") and place your service '
|
||||
'definitions under the `services` key, or omit the `version` key and place '
|
||||
'your service definitions at the root of the file to use version 1.\n'
|
||||
'For more on the Compose file format versions, see '
|
||||
'You might be seeing this error because you\'re using the wrong Compose '
|
||||
'file version. Either specify a version of "2" (or "2.0") and place your '
|
||||
'service definitions under the `services` key, or omit the `version` key '
|
||||
'and place your service definitions at the root of the file to use '
|
||||
'version 1.\nFor more on the Compose file format versions, see '
|
||||
'https://docs.docker.com/compose/compose-file/')
|
||||
|
||||
|
||||
|
||||
@@ -5,6 +5,8 @@ import six
|
||||
import yaml
|
||||
|
||||
from compose.config import types
|
||||
from compose.config.config import V1
|
||||
from compose.config.config import V2_0
|
||||
|
||||
|
||||
def serialize_config_type(dumper, data):
|
||||
@@ -16,15 +18,43 @@ yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
|
||||
yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
|
||||
|
||||
|
||||
def serialize_config(config):
|
||||
output = {
|
||||
'version': config.version,
|
||||
'services': {service.pop('name'): service for service in config.services},
|
||||
'networks': config.networks,
|
||||
def denormalize_config(config):
|
||||
denormalized_services = [
|
||||
denormalize_service_dict(service_dict, config.version)
|
||||
for service_dict in config.services
|
||||
]
|
||||
services = {
|
||||
service_dict.pop('name'): service_dict
|
||||
for service_dict in denormalized_services
|
||||
}
|
||||
networks = config.networks.copy()
|
||||
for net_name, net_conf in networks.items():
|
||||
if 'external_name' in net_conf:
|
||||
del net_conf['external_name']
|
||||
|
||||
return {
|
||||
'version': V2_0,
|
||||
'services': services,
|
||||
'networks': networks,
|
||||
'volumes': config.volumes,
|
||||
}
|
||||
|
||||
|
||||
def serialize_config(config):
|
||||
return yaml.safe_dump(
|
||||
output,
|
||||
denormalize_config(config),
|
||||
default_flow_style=False,
|
||||
indent=2,
|
||||
width=80)
|
||||
|
||||
|
||||
def denormalize_service_dict(service_dict, version):
|
||||
service_dict = service_dict.copy()
|
||||
|
||||
if 'restart' in service_dict:
|
||||
service_dict['restart'] = types.serialize_restart_spec(service_dict['restart'])
|
||||
|
||||
if version == V1 and 'network_mode' not in service_dict:
|
||||
service_dict['network_mode'] = 'bridge'
|
||||
|
||||
return service_dict
|
||||
|
||||
@@ -7,6 +7,8 @@ from __future__ import unicode_literals
|
||||
import os
|
||||
from collections import namedtuple
|
||||
|
||||
import six
|
||||
|
||||
from compose.config.config import V1
|
||||
from compose.config.errors import ConfigurationError
|
||||
from compose.const import IS_WINDOWS_PLATFORM
|
||||
@@ -89,6 +91,13 @@ def parse_restart_spec(restart_config):
|
||||
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
|
||||
|
||||
|
||||
def serialize_restart_spec(restart_spec):
|
||||
parts = [restart_spec['Name']]
|
||||
if restart_spec['MaximumRetryCount']:
|
||||
parts.append(six.text_type(restart_spec['MaximumRetryCount']))
|
||||
return ':'.join(parts)
|
||||
|
||||
|
||||
def parse_extra_hosts(extra_hosts_config):
|
||||
if not extra_hosts_config:
|
||||
return {}
|
||||
|
||||
@@ -171,6 +171,14 @@ def validate_network_mode(service_config, service_names):
|
||||
"is undefined.".format(s=service_config, dep=dependency))
|
||||
|
||||
|
||||
def validate_links(service_config, service_names):
|
||||
for link in service_config.config.get('links', []):
|
||||
if link.split(':')[0] not in service_names:
|
||||
raise ConfigurationError(
|
||||
"Service '{s.name}' has a link to service '{link}' which is "
|
||||
"undefined.".format(s=service_config, link=link))
|
||||
|
||||
|
||||
def validate_depends_on(service_config, service_names):
|
||||
for dependency in service_config.config.get('depends_on', []):
|
||||
if dependency not in service_names:
|
||||
@@ -211,7 +219,7 @@ def handle_error_for_schema_with_id(error, path):
|
||||
return get_unsupported_config_msg(path, invalid_config_key)
|
||||
|
||||
if not error.path:
|
||||
return '{}\n{}'.format(error.message, VERSION_EXPLANATION)
|
||||
return '{}\n\n{}'.format(error.message, VERSION_EXPLANATION)
|
||||
|
||||
|
||||
def handle_generic_error(error, path):
|
||||
@@ -408,6 +416,6 @@ def handle_errors(errors, format_error_func, filename):
|
||||
|
||||
error_msg = '\n'.join(format_error_func(error) for error in errors)
|
||||
raise ConfigurationError(
|
||||
"Validation failed{file_msg}, reason(s):\n{error_msg}".format(
|
||||
file_msg=" in file '{}'".format(filename) if filename else "",
|
||||
"The Compose file{file_msg} is invalid because:\n{error_msg}".format(
|
||||
file_msg=" '{}'".format(filename) if filename else "",
|
||||
error_msg=error_msg))
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
DEFAULT_TIMEOUT = 10
|
||||
HTTP_TIMEOUT = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
|
||||
HTTP_TIMEOUT = 60
|
||||
IMAGE_EVENTS = ['delete', 'import', 'pull', 'push', 'tag', 'untag']
|
||||
IS_WINDOWS_PLATFORM = (sys.platform == "win32")
|
||||
LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
|
||||
|
||||
7
compose/errors.py
Normal file
7
compose/errors.py
Normal file
@@ -0,0 +1,7 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
|
||||
class OperationFailedError(Exception):
|
||||
def __init__(self, reason):
|
||||
self.msg = reason
|
||||
@@ -12,6 +12,7 @@ from six.moves.queue import Empty
|
||||
from six.moves.queue import Queue
|
||||
|
||||
from compose.cli.signals import ShutdownException
|
||||
from compose.errors import OperationFailedError
|
||||
from compose.utils import get_output_stream
|
||||
|
||||
|
||||
@@ -47,6 +48,9 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None):
|
||||
elif isinstance(exception, APIError):
|
||||
errors[get_name(obj)] = exception.explanation
|
||||
writer.write(get_name(obj), 'error')
|
||||
elif isinstance(exception, OperationFailedError):
|
||||
errors[get_name(obj)] = exception.msg
|
||||
writer.write(get_name(obj), 'error')
|
||||
elif isinstance(exception, UpstreamError):
|
||||
writer.write(get_name(obj), 'error')
|
||||
else:
|
||||
@@ -59,7 +63,7 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None):
|
||||
if error_to_reraise:
|
||||
raise error_to_reraise
|
||||
|
||||
return results
|
||||
return results, errors
|
||||
|
||||
|
||||
def _no_deps(x):
|
||||
|
||||
@@ -91,3 +91,22 @@ def print_output_event(event, stream, is_terminal):
|
||||
stream.write("%s%s" % (event['stream'], terminator))
|
||||
else:
|
||||
stream.write("%s%s\n" % (status, terminator))
|
||||
|
||||
|
||||
def get_digest_from_pull(events):
|
||||
for event in events:
|
||||
status = event.get('status')
|
||||
if not status or 'Digest' not in status:
|
||||
continue
|
||||
|
||||
_, digest = status.split(':', 1)
|
||||
return digest.strip()
|
||||
return None
|
||||
|
||||
|
||||
def get_digest_from_push(events):
|
||||
for event in events:
|
||||
digest = event.get('aux', {}).get('Digest')
|
||||
if digest:
|
||||
return digest
|
||||
return None
|
||||
|
||||
@@ -342,7 +342,10 @@ class Project(object):
|
||||
filters={'label': self.labels()},
|
||||
decode=True
|
||||
):
|
||||
if event['status'] in IMAGE_EVENTS:
|
||||
# The first part of this condition is a guard against some events
|
||||
# broadcasted by swarm that don't have a status field.
|
||||
# See https://github.com/docker/compose/issues/3316
|
||||
if 'status' not in event or event['status'] in IMAGE_EVENTS:
|
||||
# We don't receive any image events because labels aren't applied
|
||||
# to images
|
||||
continue
|
||||
@@ -366,6 +369,8 @@ class Project(object):
|
||||
detached=False,
|
||||
remove_orphans=False):
|
||||
|
||||
warn_for_swarm_mode(self.client)
|
||||
|
||||
self.initialize()
|
||||
self.find_orphan_containers(remove_orphans)
|
||||
|
||||
@@ -387,13 +392,18 @@ class Project(object):
|
||||
def get_deps(service):
|
||||
return {self.get_service(dep) for dep in service.get_dependency_names()}
|
||||
|
||||
results = parallel.parallel_execute(
|
||||
results, errors = parallel.parallel_execute(
|
||||
services,
|
||||
do,
|
||||
operator.attrgetter('name'),
|
||||
None,
|
||||
get_deps
|
||||
)
|
||||
if errors:
|
||||
raise ProjectError(
|
||||
'Encountered errors while bringing up the project.'
|
||||
)
|
||||
|
||||
return [
|
||||
container
|
||||
for svc_containers in results
|
||||
@@ -432,6 +442,10 @@ class Project(object):
|
||||
for service in self.get_services(service_names, include_deps=False):
|
||||
service.pull(ignore_pull_failures)
|
||||
|
||||
def push(self, service_names=None, ignore_push_failures=False):
|
||||
for service in self.get_services(service_names, include_deps=False):
|
||||
service.push(ignore_push_failures)
|
||||
|
||||
def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
|
||||
return list(filter(None, [
|
||||
Container.from_ps(self.client, container)
|
||||
@@ -521,6 +535,20 @@ def get_volumes_from(project, service_dict):
|
||||
return [build_volume_from(vf) for vf in volumes_from]
|
||||
|
||||
|
||||
def warn_for_swarm_mode(client):
|
||||
info = client.info()
|
||||
if info.get('Swarm', {}).get('LocalNodeState') == 'active':
|
||||
log.warn(
|
||||
"The Docker Engine you're using is running in swarm mode.\n\n"
|
||||
"Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
|
||||
"All containers will be scheduled on the current node.\n\n"
|
||||
"To deploy your application across the swarm, "
|
||||
"use the bundle feature of the Docker experimental build.\n\n"
|
||||
"More info:\n"
|
||||
"https://docs.docker.com/compose/bundles\n"
|
||||
)
|
||||
|
||||
|
||||
class NoSuchService(Exception):
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
@@ -528,3 +556,8 @@ class NoSuchService(Exception):
|
||||
|
||||
def __str__(self):
|
||||
return self.msg
|
||||
|
||||
|
||||
class ProjectError(Exception):
|
||||
def __init__(self, msg):
|
||||
self.msg = msg
|
||||
|
||||
@@ -15,6 +15,7 @@ from docker.utils.ports import build_port_bindings
|
||||
from docker.utils.ports import split_port
|
||||
|
||||
from . import __version__
|
||||
from . import progress_stream
|
||||
from .config import DOCKER_CONFIG_KEYS
|
||||
from .config import merge_environment
|
||||
from .config.types import VolumeSpec
|
||||
@@ -26,6 +27,7 @@ from .const import LABEL_PROJECT
|
||||
from .const import LABEL_SERVICE
|
||||
from .const import LABEL_VERSION
|
||||
from .container import Container
|
||||
from .errors import OperationFailedError
|
||||
from .parallel import parallel_execute
|
||||
from .parallel import parallel_start
|
||||
from .progress_stream import stream_output
|
||||
@@ -179,7 +181,7 @@ class Service(object):
|
||||
'Remove the custom name to scale the service.'
|
||||
% (self.name, self.custom_container_name))
|
||||
|
||||
if self.specifies_host_port():
|
||||
if self.specifies_host_port() and desired_num > 1:
|
||||
log.warn('The "%s" service specifies a port on the host. If multiple containers '
|
||||
'for this service are created on a single host, the port will clash.'
|
||||
% self.name)
|
||||
@@ -276,7 +278,11 @@ class Service(object):
|
||||
if 'name' in container_options and not quiet:
|
||||
log.info("Creating %s" % container_options['name'])
|
||||
|
||||
return Container.create(self.client, **container_options)
|
||||
try:
|
||||
return Container.create(self.client, **container_options)
|
||||
except APIError as ex:
|
||||
raise OperationFailedError("Cannot create container for service %s: %s" %
|
||||
(self.name, ex.explanation))
|
||||
|
||||
def ensure_image_exists(self, do_build=BuildAction.none):
|
||||
if self.can_be_built() and do_build == BuildAction.force:
|
||||
@@ -446,27 +452,30 @@ class Service(object):
|
||||
|
||||
def start_container(self, container):
|
||||
self.connect_container_to_networks(container)
|
||||
container.start()
|
||||
try:
|
||||
container.start()
|
||||
except APIError as ex:
|
||||
raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
|
||||
return container
|
||||
|
||||
def connect_container_to_networks(self, container):
|
||||
connected_networks = container.get('NetworkSettings.Networks')
|
||||
|
||||
for network, netdefs in self.networks.items():
|
||||
aliases = netdefs.get('aliases', [])
|
||||
ipv4_address = netdefs.get('ipv4_address', None)
|
||||
ipv6_address = netdefs.get('ipv6_address', None)
|
||||
if network in connected_networks:
|
||||
if short_id_alias_exists(container, network):
|
||||
continue
|
||||
|
||||
self.client.disconnect_container_from_network(
|
||||
container.id, network)
|
||||
container.id,
|
||||
network)
|
||||
|
||||
self.client.connect_container_to_network(
|
||||
container.id, network,
|
||||
aliases=list(self._get_aliases(container).union(aliases)),
|
||||
ipv4_address=ipv4_address,
|
||||
ipv6_address=ipv6_address,
|
||||
links=self._get_links(False)
|
||||
)
|
||||
aliases=self._get_aliases(netdefs, container),
|
||||
ipv4_address=netdefs.get('ipv4_address', None),
|
||||
ipv6_address=netdefs.get('ipv6_address', None),
|
||||
links=self._get_links(False))
|
||||
|
||||
def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT):
|
||||
for c in self.duplicate_containers():
|
||||
@@ -533,11 +542,32 @@ class Service(object):
|
||||
numbers = [c.number for c in containers]
|
||||
return 1 if not numbers else max(numbers) + 1
|
||||
|
||||
def _get_aliases(self, container):
|
||||
if container.labels.get(LABEL_ONE_OFF) == "True":
|
||||
return set()
|
||||
def _get_aliases(self, network, container=None):
|
||||
if container and container.labels.get(LABEL_ONE_OFF) == "True":
|
||||
return []
|
||||
|
||||
return {self.name, container.short_id}
|
||||
return list(
|
||||
{self.name} |
|
||||
({container.short_id} if container else set()) |
|
||||
set(network.get('aliases', ()))
|
||||
)
|
||||
|
||||
def build_default_networking_config(self):
|
||||
if not self.networks:
|
||||
return {}
|
||||
|
||||
network = self.networks[self.network_mode.id]
|
||||
endpoint = {
|
||||
'Aliases': self._get_aliases(network),
|
||||
'IPAMConfig': {},
|
||||
}
|
||||
|
||||
if network.get('ipv4_address'):
|
||||
endpoint['IPAMConfig']['IPv4Address'] = network.get('ipv4_address')
|
||||
if network.get('ipv6_address'):
|
||||
endpoint['IPAMConfig']['IPv6Address'] = network.get('ipv6_address')
|
||||
|
||||
return {"EndpointsConfig": {self.network_mode.id: endpoint}}
|
||||
|
||||
def _get_links(self, link_to_self):
|
||||
links = {}
|
||||
@@ -633,6 +663,10 @@ class Service(object):
|
||||
override_options,
|
||||
one_off=one_off)
|
||||
|
||||
networking_config = self.build_default_networking_config()
|
||||
if networking_config:
|
||||
container_options['networking_config'] = networking_config
|
||||
|
||||
container_options['environment'] = format_environment(
|
||||
container_options['environment'])
|
||||
return container_options
|
||||
@@ -781,20 +815,41 @@ class Service(object):
|
||||
repo, tag, separator = parse_repository_tag(self.options['image'])
|
||||
tag = tag or 'latest'
|
||||
log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
|
||||
output = self.client.pull(
|
||||
repo,
|
||||
tag=tag,
|
||||
stream=True,
|
||||
)
|
||||
output = self.client.pull(repo, tag=tag, stream=True)
|
||||
|
||||
try:
|
||||
stream_output(output, sys.stdout)
|
||||
return progress_stream.get_digest_from_pull(
|
||||
stream_output(output, sys.stdout))
|
||||
except StreamOutputError as e:
|
||||
if not ignore_pull_failures:
|
||||
raise
|
||||
else:
|
||||
log.error(six.text_type(e))
|
||||
|
||||
def push(self, ignore_push_failures=False):
|
||||
if 'image' not in self.options or 'build' not in self.options:
|
||||
return
|
||||
|
||||
repo, tag, separator = parse_repository_tag(self.options['image'])
|
||||
tag = tag or 'latest'
|
||||
log.info('Pushing %s (%s%s%s)...' % (self.name, repo, separator, tag))
|
||||
output = self.client.push(repo, tag=tag, stream=True)
|
||||
|
||||
try:
|
||||
return progress_stream.get_digest_from_push(
|
||||
stream_output(output, sys.stdout))
|
||||
except StreamOutputError as e:
|
||||
if not ignore_push_failures:
|
||||
raise
|
||||
else:
|
||||
log.error(six.text_type(e))
|
||||
|
||||
|
||||
def short_id_alias_exists(container, network):
|
||||
aliases = container.get(
|
||||
'NetworkSettings.Networks.{net}.Aliases'.format(net=network)) or ()
|
||||
return container.short_id in aliases
|
||||
|
||||
|
||||
class NetworkMode(object):
|
||||
"""A `standard` network mode (ex: host, bridge)"""
|
||||
|
||||
@@ -95,4 +95,4 @@ def microseconds_from_time_nano(time_nano):
|
||||
|
||||
|
||||
def build_string_dict(source_dict):
|
||||
return dict((k, str(v)) for k, v in source_dict.items())
|
||||
return dict((k, str(v if v is not None else '')) for k, v in source_dict.items())
|
||||
|
||||
@@ -109,6 +109,18 @@ _docker_compose_build() {
|
||||
}
|
||||
|
||||
|
||||
_docker_compose_bundle() {
|
||||
case "$prev" in
|
||||
--output|-o)
|
||||
_filedir
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
COMPREPLY=( $( compgen -W "--fetch-digests --help --output -o" -- "$cur" ) )
|
||||
}
|
||||
|
||||
|
||||
_docker_compose_config() {
|
||||
COMPREPLY=( $( compgen -W "--help --quiet -q --services" -- "$cur" ) )
|
||||
}
|
||||
@@ -304,6 +316,18 @@ _docker_compose_pull() {
|
||||
}
|
||||
|
||||
|
||||
_docker_compose_push() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--help --ignore-push-failures" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker_compose_services_all
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
|
||||
_docker_compose_restart() {
|
||||
case "$prev" in
|
||||
--timeout|-t)
|
||||
@@ -325,7 +349,7 @@ _docker_compose_restart() {
|
||||
_docker_compose_rm() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--all -a --force -f --help -v" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--force -f --help -v" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker_compose_services_stopped
|
||||
@@ -455,6 +479,7 @@ _docker_compose() {
|
||||
|
||||
local commands=(
|
||||
build
|
||||
bundle
|
||||
config
|
||||
create
|
||||
down
|
||||
@@ -467,6 +492,7 @@ _docker_compose() {
|
||||
port
|
||||
ps
|
||||
pull
|
||||
push
|
||||
restart
|
||||
rm
|
||||
run
|
||||
|
||||
@@ -19,52 +19,49 @@
|
||||
# * @felixr docker zsh completion script : https://github.com/felixr/docker-zsh-completion
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
# For compatibility reasons, Compose and therefore its completion supports several
|
||||
# stack compositon files as listed here, in descending priority.
|
||||
# Support for these filenames might be dropped in some future version.
|
||||
__docker-compose_compose_file() {
|
||||
local file
|
||||
for file in docker-compose.y{,a}ml ; do
|
||||
[ -e $file ] && {
|
||||
echo $file
|
||||
return
|
||||
}
|
||||
done
|
||||
echo docker-compose.yml
|
||||
__docker-compose_q() {
|
||||
docker-compose 2>/dev/null $compose_options "$@"
|
||||
}
|
||||
|
||||
# Extracts all service names from docker-compose.yml.
|
||||
___docker-compose_all_services_in_compose_file() {
|
||||
# All services defined in docker-compose.yml
|
||||
__docker-compose_all_services_in_compose_file() {
|
||||
local already_selected
|
||||
local -a services
|
||||
already_selected=$(echo $words | tr " " "|")
|
||||
awk -F: '/^[a-zA-Z0-9]/{print $1}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null | grep -Ev "$already_selected"
|
||||
__docker-compose_q config --services \
|
||||
| grep -Ev "^(${already_selected})$"
|
||||
}
|
||||
|
||||
# All services, even those without an existing container
|
||||
__docker-compose_services_all() {
|
||||
[[ $PREFIX = -* ]] && return 1
|
||||
integer ret=1
|
||||
services=$(___docker-compose_all_services_in_compose_file)
|
||||
services=$(__docker-compose_all_services_in_compose_file)
|
||||
_alternative "args:services:($services)" && ret=0
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
# All services that have an entry with the given key in their docker-compose.yml section
|
||||
___docker-compose_services_with_key() {
|
||||
__docker-compose_services_with_key() {
|
||||
local already_selected
|
||||
local -a buildable
|
||||
already_selected=$(echo $words | tr " " "|")
|
||||
# flatten sections to one line, then filter lines containing the key and return section name.
|
||||
awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null | awk -F: -v key=": +$1:" '$0 ~ key {print $1}' 2>/dev/null | grep -Ev "$already_selected"
|
||||
__docker-compose_q config \
|
||||
| sed -n -e '/^services:/,/^[^ ]/p' \
|
||||
| sed -n 's/^ //p' \
|
||||
| awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' \
|
||||
| grep " \+$1:" \
|
||||
| cut -d: -f1 \
|
||||
| grep -Ev "^(${already_selected})$"
|
||||
}
|
||||
|
||||
# All services that are defined by a Dockerfile reference
|
||||
__docker-compose_services_from_build() {
|
||||
[[ $PREFIX = -* ]] && return 1
|
||||
integer ret=1
|
||||
buildable=$(___docker-compose_services_with_key build)
|
||||
buildable=$(__docker-compose_services_with_key build)
|
||||
_alternative "args:buildable services:($buildable)" && ret=0
|
||||
|
||||
return ret
|
||||
@@ -74,7 +71,7 @@ __docker-compose_services_from_build() {
|
||||
__docker-compose_services_from_image() {
|
||||
[[ $PREFIX = -* ]] && return 1
|
||||
integer ret=1
|
||||
pullable=$(___docker-compose_services_with_key image)
|
||||
pullable=$(__docker-compose_services_with_key image)
|
||||
_alternative "args:pullable services:($pullable)" && ret=0
|
||||
|
||||
return ret
|
||||
@@ -96,7 +93,7 @@ __docker-compose_get_services() {
|
||||
shift
|
||||
[[ $kind =~ (stopped|all) ]] && args=($args -a)
|
||||
|
||||
lines=(${(f)"$(_call_program commands docker ps $args)"})
|
||||
lines=(${(f)"$(_call_program commands docker $docker_options ps $args)"})
|
||||
services=(${(f)"$(_call_program commands docker-compose 2>/dev/null $compose_options ps -q)"})
|
||||
|
||||
# Parse header line to find columns
|
||||
@@ -185,7 +182,17 @@ __docker-compose_commands() {
|
||||
}
|
||||
|
||||
__docker-compose_subcommand() {
|
||||
local opts_help='(: -)--help[Print usage]'
|
||||
local opts_help opts_force_recreate opts_no_recreate opts_no_build opts_remove_orphans opts_timeout opts_no_color opts_no_deps
|
||||
|
||||
opts_help='(: -)--help[Print usage]'
|
||||
opts_force_recreate="(--no-recreate)--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]"
|
||||
opts_no_recreate="(--force-recreate)--no-recreate[If containers already exist, don't recreate them. Incompatible with --force-recreate.]"
|
||||
opts_no_build="(--build)--no-build[Don't build an image, even if it's missing.]"
|
||||
opts_remove_orphans="--remove-orphans[Remove containers for services not defined in the Compose file]"
|
||||
opts_timeout=('(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: ")
|
||||
opts_no_color='--no-color[Produce monochrome output.]'
|
||||
opts_no_deps="--no-deps[Don't start linked services.]"
|
||||
|
||||
integer ret=1
|
||||
|
||||
case "$words[1]" in
|
||||
@@ -193,10 +200,15 @@ __docker-compose_subcommand() {
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'--force-rm[Always remove intermediate containers.]' \
|
||||
'--no-cache[Do not use cache when building the image]' \
|
||||
'--no-cache[Do not use cache when building the image.]' \
|
||||
'--pull[Always attempt to pull a newer version of the image.]' \
|
||||
'*:services:__docker-compose_services_from_build' && ret=0
|
||||
;;
|
||||
(bundle)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'(--output -o)'{--output,-o}'[Path to write the bundle file to. Defaults to "<project name>.dab".]:file:_files' && ret=0
|
||||
;;
|
||||
(config)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
@@ -206,21 +218,23 @@ __docker-compose_subcommand() {
|
||||
(create)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
"(--no-recreate --no-build)--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]" \
|
||||
"(--force-recreate)--no-build[If containers already exist, don't recreate them. Incompatible with --force-recreate.]" \
|
||||
"(--force-recreate)--no-recreate[Don't build an image, even if it's missing]" \
|
||||
$opts_force_recreate \
|
||||
$opts_no_recreate \
|
||||
$opts_no_build \
|
||||
"(--no-build)--build[Build images before creating containers.]" \
|
||||
'*:services:__docker-compose_services_all' && ret=0
|
||||
;;
|
||||
(down)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
"--rmi[Remove images, type may be one of: 'all' to remove all images, or 'local' to remove only images that don't have an custom name set by the 'image' field]:type:(all local)" \
|
||||
'(-v --volumes)'{-v,--volumes}"[Remove data volumes]" && ret=0
|
||||
"--rmi[Remove images. Type must be one of: 'all': Remove all images used by any service. 'local': Remove only images that don't have a custom tag set by the \`image\` field.]:type:(all local)" \
|
||||
'(-v --volumes)'{-v,--volumes}"[Remove named volumes declared in the \`volumes\` section of the Compose file and anonymous volumes attached to containers.]" \
|
||||
$opts_remove_orphans && ret=0
|
||||
;;
|
||||
(events)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'--json[Output events as a stream of json objects.]' \
|
||||
'--json[Output events as a stream of json objects]' \
|
||||
'*:services:__docker-compose_services_all' && ret=0
|
||||
;;
|
||||
(exec)
|
||||
@@ -230,7 +244,7 @@ __docker-compose_subcommand() {
|
||||
'--privileged[Give extended privileges to the process.]' \
|
||||
'--user=[Run the command as this user.]:username:_users' \
|
||||
'-T[Disable pseudo-tty allocation. By default `docker-compose exec` allocates a TTY.]' \
|
||||
'--index=[Index of the container if there are multiple instances of a service (default: 1)]:index: ' \
|
||||
'--index=[Index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \
|
||||
'(-):running services:__docker-compose_runningservices' \
|
||||
'(-):command: _command_names -e' \
|
||||
'*::arguments: _normal' && ret=0
|
||||
@@ -248,7 +262,7 @@ __docker-compose_subcommand() {
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'(-f --follow)'{-f,--follow}'[Follow log output]' \
|
||||
'--no-color[Produce monochrome output.]' \
|
||||
$opts_no_color \
|
||||
'--tail=[Number of lines to show from the end of the logs for each container.]:number of lines: ' \
|
||||
'(-t --timestamps)'{-t,--timestamps}'[Show timestamps]' \
|
||||
'*:services:__docker-compose_services_all' && ret=0
|
||||
@@ -261,8 +275,8 @@ __docker-compose_subcommand() {
|
||||
(port)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'--protocol=-[tcp or udap (defaults to tcp)]:protocol:(tcp udp)' \
|
||||
'--index=-[index of the container if there are mutiple instances of a service (defaults to 1)]:index: ' \
|
||||
'--protocol=[tcp or udp \[default: tcp\]]:protocol:(tcp udp)' \
|
||||
'--index=[index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \
|
||||
'1:running services:__docker-compose_runningservices' \
|
||||
'2:port:_ports' && ret=0
|
||||
;;
|
||||
@@ -278,12 +292,17 @@ __docker-compose_subcommand() {
|
||||
'--ignore-pull-failures[Pull what it can and ignores images with pull failures.]' \
|
||||
'*:services:__docker-compose_services_from_image' && ret=0
|
||||
;;
|
||||
(push)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'--ignore-push-failures[Push what it can and ignores images with push failures.]' \
|
||||
'*:services:__docker-compose_services' && ret=0
|
||||
;;
|
||||
(rm)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'(-a --all)'{-a,--all}"[Also remove one-off containers]" \
|
||||
'(-f --force)'{-f,--force}"[Don't ask to confirm removal]" \
|
||||
'-v[Remove volumes associated with containers]' \
|
||||
'-v[Remove any anonymous volumes attached to containers]' \
|
||||
'*:stopped services:__docker-compose_stoppedservices' && ret=0
|
||||
;;
|
||||
(run)
|
||||
@@ -292,14 +311,14 @@ __docker-compose_subcommand() {
|
||||
'-d[Detached mode: Run container in the background, print new container name.]' \
|
||||
'*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
|
||||
'--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \
|
||||
'--name[Assign a name to the container]:name: ' \
|
||||
"--no-deps[Don't start linked services.]" \
|
||||
'(-p --publish)'{-p,--publish=-}"[Run command with manually mapped container's port(s) to the host.]" \
|
||||
'--name=[Assign a name to the container]:name: ' \
|
||||
$opts_no_deps \
|
||||
'(-p --publish)'{-p,--publish=}"[Publish a container's port(s) to the host]" \
|
||||
'--rm[Remove container after run. Ignored in detached mode.]' \
|
||||
"--service-ports[Run command with the service's ports enabled and mapped to the host.]" \
|
||||
'-T[Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.]' \
|
||||
'(-u --user)'{-u,--user=-}'[Run as specified username or uid]:username or uid:_users' \
|
||||
'(-w --workdir)'{-w=,--workdir=}'[Working directory inside the container]:workdir: ' \
|
||||
'(-u --user)'{-u,--user=}'[Run as specified username or uid]:username or uid:_users' \
|
||||
'(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \
|
||||
'(-):services:__docker-compose_services' \
|
||||
'(-):command: _command_names -e' \
|
||||
'*::arguments: _normal' && ret=0
|
||||
@@ -307,7 +326,7 @@ __docker-compose_subcommand() {
|
||||
(scale)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \
|
||||
$opts_timeout \
|
||||
'*:running services:__docker-compose_runningservices' && ret=0
|
||||
;;
|
||||
(start)
|
||||
@@ -318,7 +337,7 @@ __docker-compose_subcommand() {
|
||||
(stop|restart)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \
|
||||
$opts_timeout \
|
||||
'*:running services:__docker-compose_runningservices' && ret=0
|
||||
;;
|
||||
(unpause)
|
||||
@@ -329,15 +348,16 @@ __docker-compose_subcommand() {
|
||||
(up)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names.]' \
|
||||
'--build[Build images before starting containers.]' \
|
||||
'--no-color[Produce monochrome output.]' \
|
||||
"--no-deps[Don't start linked services.]" \
|
||||
"--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]" \
|
||||
"--no-recreate[If containers already exist, don't recreate them.]" \
|
||||
"--no-build[Don't build an image, even if it's missing]" \
|
||||
'(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names. Incompatible with --abort-on-container-exit.]' \
|
||||
$opts_no_color \
|
||||
$opts_no_deps \
|
||||
$opts_force_recreate \
|
||||
$opts_no_recreate \
|
||||
$opts_no_build \
|
||||
"(--no-build)--build[Build images before starting containers.]" \
|
||||
"(-d)--abort-on-container-exit[Stops all containers if any container was stopped. Incompatible with -d.]" \
|
||||
'(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \
|
||||
'(-t --timeout)'{-t,--timeout}"[Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)]:seconds: " \
|
||||
$opts_remove_orphans \
|
||||
'*:services:__docker-compose_services_all' && ret=0
|
||||
;;
|
||||
(version)
|
||||
@@ -367,16 +387,57 @@ _docker-compose() {
|
||||
|
||||
_arguments -C \
|
||||
'(- :)'{-h,--help}'[Get help]' \
|
||||
'--verbose[Show more output]' \
|
||||
'(- :)'{-v,--version}'[Print version and exit]' \
|
||||
'(-f --file)'{-f,--file}'[Specify an alternate docker-compose file (default: docker-compose.yml)]:file:_files -g "*.yml"' \
|
||||
'(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
|
||||
'--verbose[Show more output]' \
|
||||
'(- :)'{-v,--version}'[Print version and exit]' \
|
||||
'(-H --host)'{-H,--host}'[Daemon socket to connect to]:host:' \
|
||||
'--tls[Use TLS; implied by --tlsverify]' \
|
||||
'--tlscacert=[Trust certs signed only by this CA]:ca path:' \
|
||||
'--tlscert=[Path to TLS certificate file]:client cert path:' \
|
||||
'--tlskey=[Path to TLS key file]:tls key path:' \
|
||||
'--tlsverify[Use TLS and verify the remote]' \
|
||||
"--skip-hostname-check[Don't check the daemon's hostname against the name specified in the client certificate (for example if your docker host is an IP address)]" \
|
||||
'(-): :->command' \
|
||||
'(-)*:: :->option-or-argument' && ret=0
|
||||
|
||||
local compose_file=${opt_args[-f]}${opt_args[--file]}
|
||||
local compose_project=${opt_args[-p]}${opt_args[--project-name]}
|
||||
local compose_options="${compose_file:+--file $compose_file} ${compose_project:+--project-name $compose_project}"
|
||||
local -a relevant_compose_flags relevant_docker_flags compose_options docker_options
|
||||
|
||||
relevant_compose_flags=(
|
||||
"--file" "-f"
|
||||
"--host" "-H"
|
||||
"--project-name" "-p"
|
||||
"--tls"
|
||||
"--tlscacert"
|
||||
"--tlscert"
|
||||
"--tlskey"
|
||||
"--tlsverify"
|
||||
"--skip-hostname-check"
|
||||
)
|
||||
|
||||
relevant_docker_flags=(
|
||||
"--host" "-H"
|
||||
"--tls"
|
||||
"--tlscacert"
|
||||
"--tlscert"
|
||||
"--tlskey"
|
||||
"--tlsverify"
|
||||
)
|
||||
|
||||
for k in "${(@k)opt_args}"; do
|
||||
if [[ -n "${relevant_docker_flags[(r)$k]}" ]]; then
|
||||
docker_options+=$k
|
||||
if [[ -n "$opt_args[$k]" ]]; then
|
||||
docker_options+=$opt_args[$k]
|
||||
fi
|
||||
fi
|
||||
if [[ -n "${relevant_compose_flags[(r)$k]}" ]]; then
|
||||
compose_options+=$k
|
||||
if [[ -n "$opt_args[$k]" ]]; then
|
||||
compose_options+=$opt_args[$k]
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
case $state in
|
||||
(command)
|
||||
|
||||
@@ -1,18 +1,8 @@
|
||||
FROM docs/base:latest
|
||||
MAINTAINER Mary Anthony <mary@docker.com> (@moxiegirl)
|
||||
|
||||
RUN svn checkout https://github.com/docker/docker/trunk/docs /docs/content/engine
|
||||
RUN svn checkout https://github.com/docker/swarm/trunk/docs /docs/content/swarm
|
||||
RUN svn checkout https://github.com/docker/machine/trunk/docs /docs/content/machine
|
||||
RUN svn checkout https://github.com/docker/distribution/trunk/docs /docs/content/registry
|
||||
RUN svn checkout https://github.com/docker/notary/trunk/docs /docs/content/notary
|
||||
RUN svn checkout https://github.com/docker/kitematic/trunk/docs /docs/content/kitematic
|
||||
RUN svn checkout https://github.com/docker/toolbox/trunk/docs /docs/content/toolbox
|
||||
RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content/project
|
||||
|
||||
FROM docs/base:oss
|
||||
MAINTAINER Docker Docs <docs@docker.com>
|
||||
|
||||
ENV PROJECT=compose
|
||||
# To get the git info for this repo
|
||||
COPY . /src
|
||||
|
||||
RUN rm -rf /docs/content/$PROJECT/
|
||||
COPY . /docs/content/$PROJECT/
|
||||
|
||||
@@ -1,17 +1,4 @@
|
||||
.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate
|
||||
|
||||
# env vars passed through directly to Docker's build scripts
|
||||
# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
|
||||
# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these
|
||||
DOCKER_ENVS := \
|
||||
-e BUILDFLAGS \
|
||||
-e DOCKER_CLIENTONLY \
|
||||
-e DOCKER_EXECDRIVER \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TESTDIRS \
|
||||
-e TESTFLAGS \
|
||||
-e TIMEOUT
|
||||
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
|
||||
.PHONY: all default docs docs-build docs-shell shell test
|
||||
|
||||
# to allow `make DOCSDIR=1 docs-shell` (to create a bind mount in docs)
|
||||
DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR):/docs/content/compose)
|
||||
@@ -25,9 +12,8 @@ HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER
|
||||
HUGO_BIND_IP=0.0.0.0
|
||||
|
||||
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
|
||||
DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
|
||||
DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH))
|
||||
|
||||
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
|
||||
DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
|
||||
|
||||
DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE
|
||||
|
||||
@@ -42,14 +28,11 @@ docs: docs-build
|
||||
docs-draft: docs-build
|
||||
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)
|
||||
|
||||
|
||||
docs-shell: docs-build
|
||||
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
|
||||
|
||||
test: docs-build
|
||||
$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)"
|
||||
|
||||
docs-build:
|
||||
# ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files
|
||||
# echo "$(GIT_BRANCH)" > GIT_BRANCH
|
||||
# echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET
|
||||
# echo "$(GITCOMMIT)" > GITCOMMIT
|
||||
docker build -t "$(DOCKER_DOCS_IMAGE)" .
|
||||
|
||||
200
docs/bundles.md
Normal file
200
docs/bundles.md
Normal file
@@ -0,0 +1,200 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Docker Stacks and Distributed Application Bundles"
|
||||
description = "Description of Docker and Compose's experimental support for application bundles"
|
||||
keywords = ["documentation, docs, docker, compose, bundles, stacks"]
|
||||
advisory = "experimental"
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Docker Stacks and Distributed Application Bundles (experimental)
|
||||
|
||||
> **Note**: This is a copy of the [Docker Stacks and Distributed Application
|
||||
> Bundles](https://github.com/docker/docker/blob/v1.12.0-rc4/experimental/docker-stacks-and-bundles.md)
|
||||
> document in the [docker/docker repo](https://github.com/docker/docker).
|
||||
|
||||
## Overview
|
||||
|
||||
Docker Stacks and Distributed Application Bundles are experimental features
|
||||
introduced in Docker 1.12 and Docker Compose 1.8, alongside the concept of
|
||||
swarm mode, and Nodes and Services in the Engine API.
|
||||
|
||||
A Dockerfile can be built into an image, and containers can be created from
|
||||
that image. Similarly, a docker-compose.yml can be built into a **distributed
|
||||
application bundle**, and **stacks** can be created from that bundle. In that
|
||||
sense, the bundle is a multi-services distributable image format.
|
||||
|
||||
As of Docker 1.12 and Compose 1.8, the features are experimental. Neither
|
||||
Docker Engine nor the Docker Registry support distribution of bundles.
|
||||
|
||||
## Producing a bundle
|
||||
|
||||
The easiest way to produce a bundle is to generate it using `docker-compose`
|
||||
from an existing `docker-compose.yml`. Of course, that's just *one* possible way
|
||||
to proceed, in the same way that `docker build` isn't the only way to produce a
|
||||
Docker image.
|
||||
|
||||
From `docker-compose`:
|
||||
|
||||
```bash
|
||||
$ docker-compose bundle
|
||||
WARNING: Unsupported key 'network_mode' in services.nsqd - ignoring
|
||||
WARNING: Unsupported key 'links' in services.nsqd - ignoring
|
||||
WARNING: Unsupported key 'volumes' in services.nsqd - ignoring
|
||||
[...]
|
||||
Wrote bundle to vossibility-stack.dab
|
||||
```
|
||||
|
||||
## Creating a stack from a bundle
|
||||
|
||||
A stack is created using the `docker deploy` command:
|
||||
|
||||
```bash
|
||||
# docker deploy --help
|
||||
|
||||
Usage: docker deploy [OPTIONS] STACK
|
||||
|
||||
Create and update a stack
|
||||
|
||||
Options:
|
||||
--file string Path to a Distributed Application Bundle file (Default: STACK.dab)
|
||||
--help Print usage
|
||||
--with-registry-auth Send registry authentication details to Swarm agents
|
||||
```
|
||||
|
||||
Let's deploy the stack created before:
|
||||
|
||||
```bash
|
||||
# docker deploy vossibility-stack
|
||||
Loading bundle from vossibility-stack.dab
|
||||
Creating service vossibility-stack_elasticsearch
|
||||
Creating service vossibility-stack_kibana
|
||||
Creating service vossibility-stack_logstash
|
||||
Creating service vossibility-stack_lookupd
|
||||
Creating service vossibility-stack_nsqd
|
||||
Creating service vossibility-stack_vossibility-collector
|
||||
```
|
||||
|
||||
We can verify that services were correctly created:
|
||||
|
||||
```bash
|
||||
# docker service ls
|
||||
ID NAME REPLICAS IMAGE
|
||||
COMMAND
|
||||
29bv0vnlm903 vossibility-stack_lookupd 1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 /nsqlookupd
|
||||
4awt47624qwh vossibility-stack_nsqd 1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 /nsqd --data-path=/data --lookupd-tcp-address=lookupd:4160
|
||||
4tjx9biia6fs vossibility-stack_elasticsearch 1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa
|
||||
7563uuzr9eys vossibility-stack_kibana 1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03
|
||||
9gc5m4met4he vossibility-stack_logstash 1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe logstash -f /etc/logstash/conf.d/logstash.conf
|
||||
axqh55ipl40h vossibility-stack_vossibility-collector 1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba --config /config/config.toml --debug
|
||||
```
|
||||
|
||||
## Managing stacks
|
||||
|
||||
Stacks are managed using the `docker stack` command:
|
||||
|
||||
```bash
|
||||
# docker stack --help
|
||||
|
||||
Usage: docker stack COMMAND
|
||||
|
||||
Manage Docker stacks
|
||||
|
||||
Options:
|
||||
--help Print usage
|
||||
|
||||
Commands:
|
||||
config Print the stack configuration
|
||||
deploy Create and update a stack
|
||||
rm Remove the stack
|
||||
services List the services in the stack
|
||||
tasks List the tasks in the stack
|
||||
|
||||
Run 'docker stack COMMAND --help' for more information on a command.
|
||||
```
|
||||
|
||||
## Bundle file format
|
||||
|
||||
Distributed application bundles are described in a JSON format. When bundles
|
||||
are persisted as files, the file extension is `.dab`.
|
||||
|
||||
A bundle has two top-level fields: `version` and `services`. The version used
|
||||
by Docker 1.12 tools is `0.1`.
|
||||
|
||||
`services` in the bundle are the services that comprise the app. They
|
||||
correspond to the new `Service` object introduced in the 1.12 Docker Engine API.
|
||||
|
||||
A service has the following fields:
|
||||
|
||||
<dl>
|
||||
<dt>
|
||||
Image (required) <code>string</code>
|
||||
</dt>
|
||||
<dd>
|
||||
The image that the service will run. Docker images should be referenced
|
||||
with full content hash to fully specify the deployment artifact for the
|
||||
service. Example:
|
||||
<code>postgres@sha256:e0a230a9f5b4e1b8b03bb3e8cf7322b0e42b7838c5c87f4545edb48f5eb8f077</code>
|
||||
</dd>
|
||||
<dt>
|
||||
Command <code>[]string</code>
|
||||
</dt>
|
||||
<dd>
|
||||
Command to run in service containers.
|
||||
</dd>
|
||||
<dt>
|
||||
Args <code>[]string</code>
|
||||
</dt>
|
||||
<dd>
|
||||
Arguments passed to the service containers.
|
||||
</dd>
|
||||
<dt>
|
||||
Env <code>[]string</code>
|
||||
</dt>
|
||||
<dd>
|
||||
Environment variables.
|
||||
</dd>
|
||||
<dt>
|
||||
Labels <code>map[string]string</code>
|
||||
</dt>
|
||||
<dd>
|
||||
Labels used for setting meta data on services.
|
||||
</dd>
|
||||
<dt>
|
||||
Ports <code>[]Port</code>
|
||||
</dt>
|
||||
<dd>
|
||||
Service ports (composed of <code>Port</code> (<code>int</code>) and
|
||||
<code>Protocol</code> (<code>string</code>). A service description can
|
||||
only specify the container port to be exposed. These ports can be
|
||||
mapped on runtime hosts at the operator's discretion.
|
||||
</dd>
|
||||
|
||||
<dt>
|
||||
WorkingDir <code>string</code>
|
||||
</dt>
|
||||
<dd>
|
||||
Working directory inside the service containers.
|
||||
</dd>
|
||||
|
||||
<dt>
|
||||
User <code>string</code>
|
||||
</dt>
|
||||
<dd>
|
||||
Username or UID (format: <code><name|uid>[:<group|gid>]</code>).
|
||||
</dd>
|
||||
|
||||
<dt>
|
||||
Networks <code>[]string</code>
|
||||
</dt>
|
||||
<dd>
|
||||
Networks that the service containers should be connected to. An entity
|
||||
deploying a bundle should create networks as needed.
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
> **Note:** Some configuration options are not yet supported in the DAB format,
|
||||
> including volume mounts.
|
||||
@@ -59,13 +59,13 @@ optionally [dockerfile](#dockerfile) and [args](#args).
|
||||
args:
|
||||
buildno: 1
|
||||
|
||||
If you specify `image` as well as `build`, then Compose tags the built image
|
||||
with the tag specified in `image`:
|
||||
If you specify `image` as well as `build`, then Compose names the built image
|
||||
with the `webapp` and optional `tag` specified in `image`:
|
||||
|
||||
build: ./dir
|
||||
image: webapp
|
||||
image: webapp:tag
|
||||
|
||||
This will result in an image tagged `webapp`, built from `./dir`.
|
||||
This will result in an image named `webapp` and tagged `tag`, built from `./dir`.
|
||||
|
||||
> **Note**: In the [version 1 file format](#version-1), `build` is different in
|
||||
> two ways:
|
||||
@@ -115,22 +115,41 @@ specified.
|
||||
|
||||
> [Version 2 file format](#version-2) only.
|
||||
|
||||
Add build arguments. You can use either an array or a dictionary. Any
|
||||
boolean values; true, false, yes, no, need to be enclosed in quotes to ensure
|
||||
they are not converted to True or False by the YML parser.
|
||||
Add build arguments, which are environment variables accessible only during the
|
||||
build process.
|
||||
|
||||
Build arguments with only a key are resolved to their environment value on the
|
||||
machine Compose is running on.
|
||||
First, specify the arguments in your Dockerfile:
|
||||
|
||||
ARG buildno
|
||||
ARG password
|
||||
|
||||
RUN echo "Build number: $buildno"
|
||||
RUN script-requiring-password.sh "$password"
|
||||
|
||||
Then specify the arguments under the `build` key. You can pass either a mapping
|
||||
or a list:
|
||||
|
||||
build:
|
||||
context: .
|
||||
args:
|
||||
buildno: 1
|
||||
user: someuser
|
||||
password: secret
|
||||
|
||||
build:
|
||||
context: .
|
||||
args:
|
||||
- buildno=1
|
||||
- user=someuser
|
||||
- password=secret
|
||||
|
||||
You can omit the value when specifying a build argument, in which case its value
|
||||
at build time is the value in the environment where Compose is running.
|
||||
|
||||
args:
|
||||
- buildno
|
||||
- password
|
||||
|
||||
> **Note**: YAML boolean values (`true`, `false`, `yes`, `no`, `on`, `off`) must
|
||||
> be enclosed in quotes, so that the parser interprets them as strings.
|
||||
|
||||
### cap_add, cap_drop
|
||||
|
||||
@@ -274,6 +293,11 @@ beginning with `#` (i.e. comments) are ignored, as are blank lines.
|
||||
# Set Rails/Rack environment
|
||||
RACK_ENV=development
|
||||
|
||||
> **Note:** If your service specifies a [build](#build) option, variables
|
||||
> defined in environment files will _not_ be automatically visible during the
|
||||
> build. Use the [args](#args) sub-option of `build` to define build-time
|
||||
> environment variables.
|
||||
|
||||
### environment
|
||||
|
||||
Add environment variables. You can use either an array or a dictionary. Any
|
||||
@@ -293,6 +317,11 @@ machine Compose is running on, which can be helpful for secret or host-specific
|
||||
- SHOW=true
|
||||
- SESSION_SECRET
|
||||
|
||||
> **Note:** If your service specifies a [build](#build) option, variables
|
||||
> defined in `environment` will _not_ be automatically visible during the
|
||||
> build. Use the [args](#args) sub-option of `build` to define build-time
|
||||
> environment variables.
|
||||
|
||||
### expose
|
||||
|
||||
Expose ports without publishing them to the host machine - they'll only be
|
||||
@@ -502,9 +531,11 @@ the special form `service:[service name]`.
|
||||
Networks to join, referencing entries under the
|
||||
[top-level `networks` key](#network-configuration-reference).
|
||||
|
||||
networks:
|
||||
- some-network
|
||||
- other-network
|
||||
services:
|
||||
some-service:
|
||||
networks:
|
||||
- some-network
|
||||
- other-network
|
||||
|
||||
#### aliases
|
||||
|
||||
@@ -516,14 +547,16 @@ Since `aliases` is network-scoped, the same service can have different aliases o
|
||||
|
||||
The general format is shown here.
|
||||
|
||||
networks:
|
||||
some-network:
|
||||
aliases:
|
||||
- alias1
|
||||
- alias3
|
||||
other-network:
|
||||
aliases:
|
||||
- alias2
|
||||
services:
|
||||
some-service:
|
||||
networks:
|
||||
some-network:
|
||||
aliases:
|
||||
- alias1
|
||||
- alias3
|
||||
other-network:
|
||||
aliases:
|
||||
- alias2
|
||||
|
||||
In the example below, three services are provided (`web`, `worker`, and `db`), along with two networks (`new` and `legacy`). The `db` service is reachable at the hostname `db` or `database` on the `new` network, and at `db` or `mysql` on the `legacy` network.
|
||||
|
||||
@@ -1079,7 +1112,7 @@ It's more complicated if you're using particular configuration features:
|
||||
data: {}
|
||||
|
||||
By default, Compose creates a volume whose name is prefixed with your
|
||||
project name. If you want it to just be called `data`, declared it as
|
||||
project name. If you want it to just be called `data`, declare it as
|
||||
external:
|
||||
|
||||
volumes:
|
||||
@@ -1089,21 +1122,24 @@ It's more complicated if you're using particular configuration features:
|
||||
## Variable substitution
|
||||
|
||||
Your configuration options can contain environment variables. Compose uses the
|
||||
variable values from the shell environment in which `docker-compose` is run. For
|
||||
example, suppose the shell contains `POSTGRES_VERSION=9.3` and you supply this
|
||||
configuration:
|
||||
variable values from the shell environment in which `docker-compose` is run.
|
||||
For example, suppose the shell contains `EXTERNAL_PORT=8000` and you supply
|
||||
this configuration:
|
||||
|
||||
db:
|
||||
image: "postgres:${POSTGRES_VERSION}"
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "${EXTERNAL_PORT}:5000"
|
||||
|
||||
When you run `docker-compose up` with this configuration, Compose looks for the
|
||||
`POSTGRES_VERSION` environment variable in the shell and substitutes its value
|
||||
in. For this example, Compose resolves the `image` to `postgres:9.3` before
|
||||
running the configuration.
|
||||
When you run `docker-compose up` with this configuration, Compose looks for
|
||||
the `EXTERNAL_PORT` environment variable in the shell and substitutes its
|
||||
value in. In this example, Compose resolves the port mapping to `"8000:5000"`
|
||||
before creating the `web` container.
|
||||
|
||||
If an environment variable is not set, Compose substitutes with an empty
|
||||
string. In the example above, if `POSTGRES_VERSION` is not set, the value for
|
||||
the `image` option is `postgres:`.
|
||||
string. In the example above, if `EXTERNAL_PORT` is not set, the value for the
|
||||
port mapping is `:5000` (which is of course an invalid port mapping, and will
|
||||
result in an error when attempting to create the container).
|
||||
|
||||
Both `$VARIABLE` and `${VARIABLE}` syntax are supported. Extended shell-style
|
||||
features, such as `${VARIABLE-default}` and `${VARIABLE/foo/bar}`, are not
|
||||
|
||||
@@ -15,7 +15,7 @@ weight=4
|
||||
This quick-start guide demonstrates how to use Docker Compose to set up and run a simple Django/PostgreSQL app. Before starting, you'll need to have
|
||||
[Compose installed](install.md).
|
||||
|
||||
## Define the project components
|
||||
### Define the project components
|
||||
|
||||
For this project, you need to create a Dockerfile, a Python dependencies file,
|
||||
and a `docker-compose.yml` file.
|
||||
@@ -29,8 +29,8 @@ and a `docker-compose.yml` file.
|
||||
The Dockerfile defines an application's image content via one or more build
|
||||
commands that configure that image. Once built, you can run the image in a
|
||||
container. For more information on `Dockerfiles`, see the [Docker user
|
||||
guide](https://docs.docker.com/engine/userguide/dockerimages/#building-an-image-from-a-dockerfile)
|
||||
and the [Dockerfile reference](https://docs.docker.com/engine/reference/builder/).
|
||||
guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile)
|
||||
and the [Dockerfile reference](/engine/reference/builder.md).
|
||||
|
||||
3. Add the following content to the `Dockerfile`.
|
||||
|
||||
@@ -89,7 +89,7 @@ and a `docker-compose.yml` file.
|
||||
|
||||
10. Save and close the `docker-compose.yml` file.
|
||||
|
||||
## Create a Django project
|
||||
### Create a Django project
|
||||
|
||||
In this step, you create a Django starter project by building the image from the build context defined in the previous procedure.
|
||||
|
||||
@@ -137,7 +137,7 @@ In this step, you create a Django started project by building the image from the
|
||||
-rw-r--r-- 1 user staff 16 Feb 13 23:01 requirements.txt
|
||||
|
||||
|
||||
## Connect the database
|
||||
### Connect the database
|
||||
|
||||
In this section, you set up the database connection for Django.
|
||||
|
||||
|
||||
@@ -13,8 +13,8 @@ weight=10
|
||||
# Environment file
|
||||
|
||||
Compose supports declaring default environment variables in an environment
|
||||
file named `.env` and placed in the same folder as your
|
||||
[compose file](compose-file.md).
|
||||
file named `.env` placed in the folder where the `docker-compose` command is executed
|
||||
*(current working directory)*.
|
||||
|
||||
Compose expects each line in an env file to be in `VAR=VAL` format. Lines
|
||||
beginning with `#` (i.e. comments) are ignored, as are blank lines.
|
||||
|
||||
107
docs/environment-variables.md
Normal file
107
docs/environment-variables.md
Normal file
@@ -0,0 +1,107 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Environment variables in Compose"
|
||||
description = "How to set, use and manage environment variables in Compose"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, environment, variables, env file"]
|
||||
[menu.main]
|
||||
parent = "workw_compose"
|
||||
weight=10
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Environment variables in Compose
|
||||
|
||||
There are multiple parts of Compose that deal with environment variables in one sense or another. This page should help you find the information you need.
|
||||
|
||||
|
||||
## Substituting environment variables in Compose files
|
||||
|
||||
It's possible to use environment variables in your shell to populate values inside a Compose file:
|
||||
|
||||
web:
|
||||
image: "webapp:${TAG}"
|
||||
|
||||
For more information, see the [Variable substitution](compose-file.md#variable-substitution) section in the Compose file reference.
|
||||
|
||||
|
||||
## Setting environment variables in containers
|
||||
|
||||
You can set environment variables in a service's containers with the ['environment' key](compose-file.md#environment), just like with `docker run -e VARIABLE=VALUE ...`:
|
||||
|
||||
web:
|
||||
environment:
|
||||
- DEBUG=1
|
||||
|
||||
|
||||
## Passing environment variables through to containers
|
||||
|
||||
You can pass environment variables from your shell straight through to a service's containers with the ['environment' key](compose-file.md#environment) by not giving them a value, just like with `docker run -e VARIABLE ...`:
|
||||
|
||||
web:
|
||||
environment:
|
||||
- DEBUG
|
||||
|
||||
The value of the `DEBUG` variable in the container will be taken from the value for the same variable in the shell in which Compose is run.
|
||||
|
||||
|
||||
## The “env_file” configuration option
|
||||
|
||||
You can pass multiple environment variables from an external file through to a service's containers with the ['env_file' option](compose-file.md#env-file), just like with `docker run --env-file=FILE ...`:
|
||||
|
||||
web:
|
||||
env_file:
|
||||
- web-variables.env
|
||||
|
||||
|
||||
## Setting environment variables with 'docker-compose run'
|
||||
|
||||
Just like with `docker run -e`, you can set environment variables on a one-off container with `docker-compose run -e`:
|
||||
|
||||
$ docker-compose run -e DEBUG=1 web python console.py
|
||||
|
||||
You can also pass a variable through from the shell by not giving it a value:
|
||||
|
||||
$ docker-compose run -e DEBUG web python console.py
|
||||
|
||||
The value of the `DEBUG` variable in the container will be taken from the value for the same variable in the shell in which Compose is run.
|
||||
|
||||
|
||||
## The “.env” file
|
||||
|
||||
You can set default values for any environment variables referenced in the Compose file, or used to configure Compose, in an [environment file](env-file.md) named `.env`:
|
||||
|
||||
$ cat .env
|
||||
TAG=v1.5
|
||||
|
||||
$ cat docker-compose.yml
|
||||
version: '2.0'
|
||||
services:
|
||||
web:
|
||||
image: "webapp:${TAG}"
|
||||
|
||||
When you run `docker-compose up`, the `web` service defined above uses the image `webapp:v1.5`. You can verify this with the [config command](reference/config.md), which prints your resolved application config to the terminal:
|
||||
|
||||
$ docker-compose config
|
||||
version: '2.0'
|
||||
services:
|
||||
web:
|
||||
image: 'webapp:v1.5'
|
||||
|
||||
Values in the shell take precedence over those specified in the `.env` file. If you set `TAG` to a different value in your shell, the substitution in `image` uses that instead:
|
||||
|
||||
$ export TAG=v2.0
|
||||
|
||||
$ docker-compose config
|
||||
version: '2.0'
|
||||
services:
|
||||
web:
|
||||
image: 'webapp:v2.0'
|
||||
|
||||
## Configuring Compose using environment variables
|
||||
|
||||
Several environment variables are available for you to configure the Docker Compose command-line behaviour. They begin with `COMPOSE_` or `DOCKER_`, and are documented in [CLI Environment Variables](reference/envvars.md).
|
||||
|
||||
|
||||
## Environment variables created by links
|
||||
|
||||
When using the ['links' option](compose-file.md#links) in a [v1 Compose file](compose-file.md#version-1), environment variables will be created for each link. They are documented in the [Link environment variables reference](link-env-deprecated.md). Please note, however, that these variables are deprecated - you should just use the link alias as a hostname instead.
|
||||
@@ -77,7 +77,7 @@ dependencies the Python application requires, including Python itself.
|
||||
* Install the Python dependencies.
|
||||
* Set the default command for the container to `python app.py`
|
||||
|
||||
For more information on how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/engine/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/).
|
||||
For more information on how to write Dockerfiles, see the [Docker user guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile) and the [Dockerfile reference](/engine/reference/builder.md).
|
||||
|
||||
2. Build the image.
|
||||
|
||||
@@ -137,8 +137,8 @@ The `redis` service uses the latest public [Redis](https://registry.hub.docker.c
|
||||
2. Enter `http://0.0.0.0:5000/` in a browser to see the application running.
|
||||
|
||||
If you're using Docker on Linux natively, then the web app should now be
|
||||
listening on port 5000 on your Docker daemon host. If http://0.0.0.0:5000
|
||||
doesn't resolve, you can also try http://localhost:5000.
|
||||
listening on port 5000 on your Docker daemon host. If `http://0.0.0.0:5000`
|
||||
doesn't resolve, you can also try `http://localhost:5000`.
|
||||
|
||||
If you're using Docker Machine on a Mac, use `docker-machine ip MACHINE_VM` to get
|
||||
the IP address of your Docker host. Then, `open http://MACHINE_VM_IP:5000` in a
|
||||
|
||||
@@ -39,7 +39,7 @@ which the release page specifies, in your terminal.
|
||||
|
||||
The following is an example command illustrating the format:
|
||||
|
||||
curl -L https://github.com/docker/compose/releases/download/1.7.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
|
||||
curl -L https://github.com/docker/compose/releases/download/1.8.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
|
||||
|
||||
If you have problems installing with `curl`, see
|
||||
[Alternative Install Options](#alternative-install-options).
|
||||
@@ -54,7 +54,7 @@ which the release page specifies, in your terminal.
|
||||
7. Test the installation.
|
||||
|
||||
$ docker-compose --version
|
||||
docker-compose version: 1.7.0
|
||||
docker-compose version: 1.8.0
|
||||
|
||||
|
||||
## Alternative install options
|
||||
@@ -77,7 +77,7 @@ to get started.
|
||||
Compose can also be run inside a container, from a small bash script wrapper.
|
||||
To install compose as a container run:
|
||||
|
||||
$ curl -L https://github.com/docker/compose/releases/download/1.7.0/run.sh > /usr/local/bin/docker-compose
|
||||
$ curl -L https://github.com/docker/compose/releases/download/1.8.0/run.sh > /usr/local/bin/docker-compose
|
||||
$ chmod +x /usr/local/bin/docker-compose
|
||||
|
||||
## Master builds
|
||||
|
||||
@@ -16,7 +16,9 @@ weight=89
|
||||
>
|
||||
> Environment variables will only be populated if you're using the [legacy version 1 Compose file format](compose-file.md#versioning).
|
||||
|
||||
Compose uses [Docker links] to expose services' containers to one another. Each linked container injects a set of environment variables, each of which begins with the uppercase name of the container.
|
||||
Compose uses [Docker links](/engine/userguide/networking/default_network/dockerlinks.md)
|
||||
to expose services' containers to one another. Each linked container injects a set of
|
||||
environment variables, each of which begins with the uppercase name of the container.
|
||||
|
||||
To see what environment variables are available to a service, run `docker-compose run SERVICE env`.
|
||||
|
||||
@@ -38,8 +40,6 @@ Protocol (tcp or udp), e.g. `DB_PORT_5432_TCP_PROTO=tcp`
|
||||
<b><i>name</i>\_NAME</b><br>
|
||||
Fully qualified container name, e.g. `DB_1_NAME=/myapp_web_1/myapp_db_1`
|
||||
|
||||
[Docker links]: https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/
|
||||
|
||||
## Related Information
|
||||
|
||||
- [User guide](index.md)
|
||||
|
||||
@@ -159,8 +159,8 @@ and destroy isolated testing environments for your test suite. By defining the f
|
||||
|
||||
Compose has traditionally been focused on development and testing workflows,
|
||||
but with each release we're making progress on more production-oriented features. You can use Compose to deploy to a remote Docker Engine. The Docker Engine may be a single instance provisioned with
|
||||
[Docker Machine](https://docs.docker.com/machine/) or an entire
|
||||
[Docker Swarm](https://docs.docker.com/swarm/) cluster.
|
||||
[Docker Machine](/machine/overview.md) or an entire
|
||||
[Docker Swarm](/swarm/overview.md) cluster.
|
||||
|
||||
For details on using production-oriented features, see
|
||||
[compose in production](production.md) in this documentation.
|
||||
|
||||
@@ -65,7 +65,7 @@ recreating any services which `web` depends on.
|
||||
You can use Compose to deploy an app to a remote Docker host by setting the
|
||||
`DOCKER_HOST`, `DOCKER_TLS_VERIFY`, and `DOCKER_CERT_PATH` environment variables
|
||||
appropriately. For tasks like this,
|
||||
[Docker Machine](/machine/overview) makes managing local and
|
||||
[Docker Machine](/machine/overview.md) makes managing local and
|
||||
remote Docker hosts very easy, and is recommended even if you're not deploying
|
||||
remotely.
|
||||
|
||||
@@ -74,7 +74,7 @@ commands will work with no further configuration.
|
||||
|
||||
### Running Compose on a Swarm cluster
|
||||
|
||||
[Docker Swarm](/swarm/overview), a Docker-native clustering
|
||||
[Docker Swarm](/swarm/overview.md), a Docker-native clustering
|
||||
system, exposes the same API as a single Docker host, which means you can use
|
||||
Compose against a Swarm instance and run your apps across multiple hosts.
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ container. This is done using a file called `Dockerfile`. To begin with, the
|
||||
Dockerfile consists of:
|
||||
|
||||
FROM ruby:2.2.0
|
||||
RUN apt-get update -qq && apt-get install -y build-essential libpq-dev
|
||||
RUN apt-get update -qq && apt-get install -y build-essential libpq-dev nodejs
|
||||
RUN mkdir /myapp
|
||||
WORKDIR /myapp
|
||||
ADD Gemfile /myapp/Gemfile
|
||||
@@ -32,7 +32,7 @@ Dockerfile consists of:
|
||||
|
||||
That'll put your application code inside an image that will build a container
|
||||
with Ruby, Bundler and all your dependencies inside it. For more information on
|
||||
how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/engine/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](https://docs.docker.com/engine/reference/builder/).
|
||||
how to write Dockerfiles, see the [Docker user guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile) and the [Dockerfile reference](/engine/reference/builder.md).
|
||||
|
||||
Next, create a bootstrap `Gemfile` which just loads Rails. It'll be overwritten in a moment by `rails new`.
|
||||
|
||||
@@ -152,7 +152,7 @@ Finally, you need to create the database. In another terminal, run:
|
||||
|
||||
$ docker-compose run web rake db:create
|
||||
|
||||
That's it. Your app should now be running on port 3000 on your Docker daemon. If you're using [Docker Machine](https://docs.docker.com/machine/), then `docker-machine ip MACHINE_VM` returns the Docker host IP address.
|
||||
That's it. Your app should now be running on port 3000 on your Docker daemon. If you're using [Docker Machine](/machine/overview.md), then `docker-machine ip MACHINE_VM` returns the Docker host IP address.
|
||||
|
||||

|
||||
|
||||
|
||||
31
docs/reference/bundle.md
Normal file
31
docs/reference/bundle.md
Normal file
@@ -0,0 +1,31 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "bundle"
|
||||
description = "Create a distributed application bundle from the Compose file."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, bundle"]
|
||||
[menu.main]
|
||||
identifier="bundle.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# bundle
|
||||
|
||||
```
|
||||
Usage: bundle [options]
|
||||
|
||||
Options:
|
||||
--push-images Automatically push images for any services
|
||||
which have a `build` option specified.
|
||||
|
||||
-o, --output PATH Path to write the bundle file to.
|
||||
Defaults to "<project name>.dab".
|
||||
```
|
||||
|
||||
Generate a Distributed Application Bundle (DAB) from the Compose file.
|
||||
|
||||
Images must have digests stored, which requires interaction with a
|
||||
Docker registry. If digests aren't stored for all images, you can fetch
|
||||
them with `docker-compose pull` or `docker-compose push`. To push images
|
||||
automatically when bundling, pass `--push-images`. Only services with
|
||||
a `build` option specified will have their images pushed.
|
||||
@@ -12,17 +12,27 @@ parent = "smn_compose_cli"
|
||||
# down
|
||||
|
||||
```
|
||||
Stop containers and remove containers, networks, volumes, and images
|
||||
created by `up`. Only containers and networks are removed by default.
|
||||
|
||||
Usage: down [options]
|
||||
|
||||
Options:
|
||||
--rmi type Remove images, type may be one of: 'all' to remove
|
||||
all images, or 'local' to remove only images that
|
||||
don't have an custom name set by the `image` field
|
||||
-v, --volumes Remove data volumes
|
||||
|
||||
--rmi type Remove images. Type must be one of:
|
||||
'all': Remove all images used by any service.
|
||||
'local': Remove only images that don't have a custom tag
|
||||
set by the `image` field.
|
||||
-v, --volumes Remove named volumes declared in the `volumes` section
|
||||
of the Compose file and anonymous volumes
|
||||
attached to containers.
|
||||
--remove-orphans Remove containers for services not defined in the
|
||||
Compose file
|
||||
```
|
||||
|
||||
Stops containers and removes containers, networks, volumes, and images
|
||||
created by `up`.
|
||||
|
||||
By default, the only things removed are:
|
||||
|
||||
- Containers for services defined in the Compose file
|
||||
- Networks defined in the `networks` section of the Compose file
|
||||
- The default network, if one is used
|
||||
|
||||
Networks and volumes defined as `external` are never removed.
|
||||
|
||||
@@ -78,6 +78,11 @@ Configures the path to the `ca.pem`, `cert.pem`, and `key.pem` files used for TL
|
||||
Configures the time (in seconds) a request to the Docker daemon is allowed to hang before Compose considers
|
||||
it failed. Defaults to 60 seconds.
|
||||
|
||||
## COMPOSE\_TLS\_VERSION
|
||||
|
||||
Configure which TLS version is used for TLS communication with the `docker`
|
||||
daemon. Defaults to `TLSv1`.
|
||||
Supported values are: `TLSv1`, `TLSv1_1`, `TLSv1_2`.
|
||||
|
||||
## Related Information
|
||||
|
||||
|
||||
21
docs/reference/push.md
Normal file
21
docs/reference/push.md
Normal file
@@ -0,0 +1,21 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "push"
|
||||
description = "Pushes service images."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, push"]
|
||||
[menu.main]
|
||||
identifier="push.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# push
|
||||
|
||||
```
|
||||
Usage: push [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--ignore-push-failures Push what it can and ignores images with push failures.
|
||||
```
|
||||
|
||||
Pushes images for services.
|
||||
@@ -15,14 +15,15 @@ parent = "smn_compose_cli"
|
||||
Usage: rm [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-f, --force Don't ask to confirm removal
|
||||
-v Remove volumes associated with containers
|
||||
-a, --all Also remove one-off containers
|
||||
-f, --force Don't ask to confirm removal
|
||||
-v Remove any anonymous volumes attached to containers
|
||||
-a, --all Also remove one-off containers created by
|
||||
docker-compose run
|
||||
```
|
||||
|
||||
Removes stopped service containers.
|
||||
|
||||
By default, volumes attached to containers will not be removed. You can see all
|
||||
volumes with `docker volume ls`.
|
||||
By default, anonymous volumes attached to containers will not be removed. You
|
||||
can override this with `-v`. To list all volumes, use `docker volume ls`.
|
||||
|
||||
Any data which is not in a volume will be lost.
|
||||
|
||||
@@ -11,7 +11,7 @@ parent="workw_compose"
|
||||
|
||||
# Using Compose with Swarm
|
||||
|
||||
Docker Compose and [Docker Swarm](/swarm/overview) aim to have full integration, meaning
|
||||
Docker Compose and [Docker Swarm](/swarm/overview.md) aim to have full integration, meaning
|
||||
you can point a Compose app at a Swarm cluster and have it all just work as if
|
||||
you were using a single Docker host.
|
||||
|
||||
@@ -30,7 +30,7 @@ format](compose-file.md#versioning) you are using:
|
||||
or a custom driver which supports multi-host networking.
|
||||
|
||||
Read [Get started with multi-host networking](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) to see how to
|
||||
set up a Swarm cluster with [Docker Machine](/machine/overview) and the overlay driver. Once you've got it running, deploying your app to it should be as simple as:
|
||||
set up a Swarm cluster with [Docker Machine](/machine/overview.md) and the overlay driver. Once you've got it running, deploying your app to it should be as simple as:
|
||||
|
||||
$ eval "$(docker-machine env --swarm <name of swarm master machine>)"
|
||||
$ docker-compose up
|
||||
|
||||
@@ -16,13 +16,13 @@ You can use Docker Compose to easily run WordPress in an isolated environment bu
|
||||
with Docker containers. This quick-start guide demonstrates how to use Compose to set up and run WordPress. Before starting, you'll need to have
|
||||
[Compose installed](install.md).
|
||||
|
||||
## Define the project
|
||||
### Define the project
|
||||
|
||||
1. Create an empty project directory.
|
||||
|
||||
You can name the directory something easy for you to remember. This directory is the context for your application image. The directory should only contain resources to build that image.
|
||||
|
||||
This project directory will contain a `Dockerfile`, a `docker-compose.yaml` file, along with a downloaded `wordpress` directory and a custom `wp-config.php`, all of which you will create in the following steps.
|
||||
This project directory will contain a `docker-compose.yaml` file, which is complete in itself for a good starter WordPress project.
|
||||
|
||||
2. Change directories into your project directory.
|
||||
|
||||
@@ -30,109 +30,72 @@ with Docker containers. This quick-start guide demonstrates how to use Compose t
|
||||
|
||||
$ cd my-wordpress/
|
||||
|
||||
3. Create a `Dockerfile`, a file that defines the environment in which your application will run.
|
||||
|
||||
For more information on how to write Dockerfiles, see the [Docker Engine user guide](https://docs.docker.com/engine/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](https://docs.docker.com/engine/reference/builder/).
|
||||
|
||||
In this case, your Dockerfile should include these two lines:
|
||||
|
||||
FROM orchardup/php5
|
||||
ADD . /code
|
||||
|
||||
This tells the Docker Engine daemon how to build an image defining a container that contains PHP and WordPress.
|
||||
|
||||
4. Create a `docker-compose.yml` file that will start your web service and a separate MySQL instance:
|
||||
3. Create a `docker-compose.yml` file that will start your `Wordpress` blog and a separate `MySQL` instance with a volume mount for data persistence:
|
||||
|
||||
version: '2'
|
||||
services:
|
||||
web:
|
||||
build: .
|
||||
command: php -S 0.0.0.0:8000 -t /code/wordpress/
|
||||
ports:
|
||||
- "8000:8000"
|
||||
db:
|
||||
image: mysql:5.7
|
||||
volumes:
|
||||
- "./.data/db:/var/lib/mysql"
|
||||
restart: always
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: wordpress
|
||||
MYSQL_DATABASE: wordpress
|
||||
MYSQL_USER: wordpress
|
||||
MYSQL_PASSWORD: wordpress
|
||||
|
||||
wordpress:
|
||||
depends_on:
|
||||
- db
|
||||
volumes:
|
||||
- .:/code
|
||||
db:
|
||||
image: orchardup/mysql
|
||||
image: wordpress:latest
|
||||
links:
|
||||
- db
|
||||
ports:
|
||||
- "8000:80"
|
||||
restart: always
|
||||
environment:
|
||||
MYSQL_DATABASE: wordpress
|
||||
WORDPRESS_DB_HOST: db:3306
|
||||
WORDPRESS_DB_PASSWORD: wordpress
|
||||
|
||||
5. Download WordPress into the current directory:
|
||||
|
||||
$ curl https://wordpress.org/latest.tar.gz | tar -xvzf -
|
||||
|
||||
This creates a directory called `wordpress` in your project directory.
|
||||
|
||||
6. Create a `wp-config.php` file within the `wordpress` directory.
|
||||
|
||||
A supporting file is needed to get this working. At the top level of the wordpress directory, add a new file called `wp-config.php` as shown. This is the standard WordPress config file with a single change to point the database configuration at the `db` container:
|
||||
|
||||
<?php
|
||||
define('DB_NAME', 'wordpress');
|
||||
define('DB_USER', 'root');
|
||||
define('DB_PASSWORD', '');
|
||||
define('DB_HOST', "db:3306");
|
||||
define('DB_CHARSET', 'utf8');
|
||||
define('DB_COLLATE', '');
|
||||
|
||||
define('AUTH_KEY', 'put your unique phrase here');
|
||||
define('SECURE_AUTH_KEY', 'put your unique phrase here');
|
||||
define('LOGGED_IN_KEY', 'put your unique phrase here');
|
||||
define('NONCE_KEY', 'put your unique phrase here');
|
||||
define('AUTH_SALT', 'put your unique phrase here');
|
||||
define('SECURE_AUTH_SALT', 'put your unique phrase here');
|
||||
define('LOGGED_IN_SALT', 'put your unique phrase here');
|
||||
define('NONCE_SALT', 'put your unique phrase here');
|
||||
|
||||
$table_prefix = 'wp_';
|
||||
define('WPLANG', '');
|
||||
define('WP_DEBUG', false);
|
||||
|
||||
if ( !defined('ABSPATH') )
|
||||
define('ABSPATH', dirname(__FILE__) . '/');
|
||||
|
||||
require_once(ABSPATH . 'wp-settings.php');
|
||||
?>
|
||||
|
||||
7. Verify the contents and structure of your project directory.
|
||||
<!--
|
||||
Dockerfile
|
||||
docker-compose.yaml
|
||||
wordpress/
|
||||
index.php
|
||||
license.txt
|
||||
readme.html
|
||||
wp-activate.php
|
||||
wp-admin/
|
||||
wp-blog-header.php
|
||||
wp-comments-post.php
|
||||
wp-config-sample.php
|
||||
wp-config.php
|
||||
wp-content/
|
||||
wp-cron.php
|
||||
wp-includes/
|
||||
wp-links-opml.php
|
||||
wp-load.php
|
||||
wp-login.php
|
||||
wp-mail.php
|
||||
wp-settings.php
|
||||
wp-signup.php
|
||||
wp-trackback.php
|
||||
xmlrpc.php
|
||||
-->
|
||||
|
||||

|
||||
**NOTE**: The folder `./.data/db` will be automatically created in the project directory
|
||||
alongside the `docker-compose.yml` which will persist any updates made by wordpress to the
|
||||
database.
|
||||
|
||||
### Build the project
|
||||
|
||||
With those four new files in place, run `docker-compose up` from your project directory. This will pull and build the needed images, and then start the web and database containers.
|
||||
Now, run `docker-compose up -d` from your project directory.
|
||||
|
||||
This pulls the needed images, and starts the wordpress and database containers, as shown in the example below.
|
||||
|
||||
$ docker-compose up -d
|
||||
Creating network "my_wordpress_default" with the default driver
|
||||
Pulling db (mysql:5.7)...
|
||||
5.7: Pulling from library/mysql
|
||||
efd26ecc9548: Pull complete
|
||||
a3ed95caeb02: Pull complete
|
||||
...
|
||||
Digest: sha256:34a0aca88e85f2efa5edff1cea77cf5d3147ad93545dbec99cfe705b03c520de
|
||||
Status: Downloaded newer image for mysql:5.7
|
||||
Pulling wordpress (wordpress:latest)...
|
||||
latest: Pulling from library/wordpress
|
||||
efd26ecc9548: Already exists
|
||||
a3ed95caeb02: Pull complete
|
||||
589a9d9a7c64: Pull complete
|
||||
...
|
||||
Digest: sha256:ed28506ae44d5def89075fd5c01456610cd6c64006addfe5210b8c675881aff6
|
||||
Status: Downloaded newer image for wordpress:latest
|
||||
Creating my_wordpress_db_1
|
||||
Creating my_wordpress_wordpress_1
|
||||
|
||||
### Bring up WordPress in a web browser
|
||||
|
||||
If you're using [Docker Machine](https://docs.docker.com/machine/), then `docker-machine ip MACHINE_VM` gives you the machine address and you can open `http://MACHINE_VM_IP:8000` in a browser.
|
||||
|
||||
At this point, WordPress should be running on port `8000` of your Docker Host, and you can complete the "famous five-minute installation" as a WordPress administrator.
|
||||
|
||||
**NOTE**: The WordPress site will not be immediately available on port `8000` because the containers are still being initialized and may take a couple of minutes before the first load.
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
PyYAML==3.11
|
||||
backports.ssl-match-hostname==3.5.0.1; python_version < '3'
|
||||
cached-property==1.2.0
|
||||
docker-py==1.8.0
|
||||
docker-py==1.9.0
|
||||
dockerpty==0.4.1
|
||||
docopt==0.6.1
|
||||
enum34==1.0.4
|
||||
enum34==1.0.4; python_version < '3.4'
|
||||
functools32==3.2.3.post2; python_version < '3.2'
|
||||
ipaddress==1.0.16
|
||||
jsonschema==2.5.1
|
||||
requests==2.7.0
|
||||
six==1.7.3
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
|
||||
set -e
|
||||
|
||||
VERSION="1.7.0"
|
||||
VERSION="1.8.0"
|
||||
IMAGE="docker/compose:$VERSION"
|
||||
|
||||
|
||||
|
||||
@@ -14,9 +14,9 @@ desired_python_version="2.7.9"
|
||||
desired_python_brew_version="2.7.9"
|
||||
python_formula="https://raw.githubusercontent.com/Homebrew/homebrew/1681e193e4d91c9620c4901efd4458d9b6fcda8e/Library/Formula/python.rb"
|
||||
|
||||
desired_openssl_version="1.0.1j"
|
||||
desired_openssl_brew_version="1.0.1j_1"
|
||||
openssl_formula="https://raw.githubusercontent.com/Homebrew/homebrew/62fc2a1a65e83ba9dbb30b2e0a2b7355831c714b/Library/Formula/openssl.rb"
|
||||
desired_openssl_version="1.0.2h"
|
||||
desired_openssl_brew_version="1.0.2h"
|
||||
openssl_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/30d3766453347f6e22b3ed6c74bb926d6def2eb5/Formula/openssl.rb"
|
||||
|
||||
PATH="/usr/local/bin:$PATH"
|
||||
|
||||
|
||||
@@ -28,6 +28,7 @@ from __future__ import unicode_literals
|
||||
import argparse
|
||||
import itertools
|
||||
import operator
|
||||
import sys
|
||||
from collections import namedtuple
|
||||
|
||||
import requests
|
||||
@@ -103,6 +104,14 @@ def get_default(versions):
|
||||
return version
|
||||
|
||||
|
||||
def get_versions(tags):
|
||||
for tag in tags:
|
||||
try:
|
||||
yield Version.parse(tag['name'])
|
||||
except ValueError:
|
||||
print("Skipping invalid tag: {name}".format(**tag), file=sys.stderr)
|
||||
|
||||
|
||||
def get_github_releases(project):
|
||||
"""Query the Github API for a list of version tags and return them in
|
||||
sorted order.
|
||||
@@ -112,7 +121,7 @@ def get_github_releases(project):
|
||||
url = '{}/{}/tags'.format(GITHUB_API, project)
|
||||
response = requests.get(url)
|
||||
response.raise_for_status()
|
||||
versions = [Version.parse(tag['name']) for tag in response.json()]
|
||||
versions = get_versions(response.json())
|
||||
return sorted(versions, reverse=True, key=operator.attrgetter('order'))
|
||||
|
||||
|
||||
|
||||
@@ -6,5 +6,5 @@ if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
|
||||
tox -e py27,py34 -- tests/unit
|
||||
else
|
||||
# TODO: we could also install py34 and test against it
|
||||
python -m tox -e py27 -- tests/unit
|
||||
tox -e py27 -- tests/unit
|
||||
fi
|
||||
|
||||
@@ -5,5 +5,6 @@ set -ex
|
||||
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
|
||||
pip install tox==2.1.1
|
||||
else
|
||||
pip install --user tox==2.1.1
|
||||
sudo pip install --upgrade pip tox==2.1.1 virtualenv
|
||||
pip --version
|
||||
fi
|
||||
|
||||
2
setup.py
2
setup.py
@@ -34,7 +34,7 @@ install_requires = [
|
||||
'requests >= 2.6.1, < 2.8',
|
||||
'texttable >= 0.8.1, < 0.9',
|
||||
'websocket-client >= 0.32.0, < 1.0',
|
||||
'docker-py > 1.7.2, < 2',
|
||||
'docker-py >= 1.9.0, < 2.0',
|
||||
'dockerpty >= 0.4.1, < 0.5',
|
||||
'six >= 1.3.0, < 2',
|
||||
'jsonschema >= 2.5.1, < 3',
|
||||
|
||||
@@ -4,7 +4,6 @@ from __future__ import unicode_literals
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
import shlex
|
||||
import signal
|
||||
import subprocess
|
||||
import time
|
||||
@@ -12,6 +11,7 @@ from collections import Counter
|
||||
from collections import namedtuple
|
||||
from operator import attrgetter
|
||||
|
||||
import py
|
||||
import yaml
|
||||
from docker import errors
|
||||
|
||||
@@ -140,20 +140,23 @@ class CLITestCase(DockerClientTestCase):
|
||||
|
||||
def test_help(self):
|
||||
self.base_dir = 'tests/fixtures/no-composefile'
|
||||
result = self.dispatch(['help', 'up'], returncode=1)
|
||||
assert 'Usage: up [options] [SERVICE...]' in result.stderr
|
||||
result = self.dispatch(['help', 'up'], returncode=0)
|
||||
assert 'Usage: up [options] [SERVICE...]' in result.stdout
|
||||
# Prevent tearDown from trying to create a project
|
||||
self.base_dir = None
|
||||
|
||||
# TODO: this shouldn't be v2-dependent
|
||||
@v2_only()
|
||||
def test_shorthand_host_opt(self):
|
||||
self.dispatch(
|
||||
['-H={0}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
|
||||
'up', '-d'],
|
||||
returncode=0
|
||||
)
|
||||
|
||||
def test_config_list_services(self):
|
||||
self.base_dir = 'tests/fixtures/v2-full'
|
||||
result = self.dispatch(['config', '--services'])
|
||||
assert set(result.stdout.rstrip().split('\n')) == {'web', 'other'}
|
||||
|
||||
# TODO: this shouldn't be v2-dependent
|
||||
@v2_only()
|
||||
def test_config_quiet_with_error(self):
|
||||
self.base_dir = None
|
||||
result = self.dispatch([
|
||||
@@ -162,14 +165,10 @@ class CLITestCase(DockerClientTestCase):
|
||||
], returncode=1)
|
||||
assert "'notaservice' must be a mapping" in result.stderr
|
||||
|
||||
# TODO: this shouldn't be v2-dependent
|
||||
@v2_only()
|
||||
def test_config_quiet(self):
|
||||
self.base_dir = 'tests/fixtures/v2-full'
|
||||
assert self.dispatch(['config', '-q']).stdout == ''
|
||||
|
||||
# TODO: this shouldn't be v2-dependent
|
||||
@v2_only()
|
||||
def test_config_default(self):
|
||||
self.base_dir = 'tests/fixtures/v2-full'
|
||||
result = self.dispatch(['config'])
|
||||
@@ -198,6 +197,72 @@ class CLITestCase(DockerClientTestCase):
|
||||
}
|
||||
assert output == expected
|
||||
|
||||
def test_config_restart(self):
|
||||
self.base_dir = 'tests/fixtures/restart'
|
||||
result = self.dispatch(['config'])
|
||||
assert yaml.load(result.stdout) == {
|
||||
'version': '2.0',
|
||||
'services': {
|
||||
'never': {
|
||||
'image': 'busybox',
|
||||
'restart': 'no',
|
||||
},
|
||||
'always': {
|
||||
'image': 'busybox',
|
||||
'restart': 'always',
|
||||
},
|
||||
'on-failure': {
|
||||
'image': 'busybox',
|
||||
'restart': 'on-failure',
|
||||
},
|
||||
'on-failure-5': {
|
||||
'image': 'busybox',
|
||||
'restart': 'on-failure:5',
|
||||
},
|
||||
},
|
||||
'networks': {},
|
||||
'volumes': {},
|
||||
}
|
||||
|
||||
def test_config_external_network(self):
|
||||
self.base_dir = 'tests/fixtures/networks'
|
||||
result = self.dispatch(['-f', 'external-networks.yml', 'config'])
|
||||
json_result = yaml.load(result.stdout)
|
||||
assert 'networks' in json_result
|
||||
assert json_result['networks'] == {
|
||||
'networks_foo': {
|
||||
'external': True # {'name': 'networks_foo'}
|
||||
},
|
||||
'bar': {
|
||||
'external': {'name': 'networks_bar'}
|
||||
}
|
||||
}
|
||||
|
||||
def test_config_v1(self):
|
||||
self.base_dir = 'tests/fixtures/v1-config'
|
||||
result = self.dispatch(['config'])
|
||||
assert yaml.load(result.stdout) == {
|
||||
'version': '2.0',
|
||||
'services': {
|
||||
'net': {
|
||||
'image': 'busybox',
|
||||
'network_mode': 'bridge',
|
||||
},
|
||||
'volume': {
|
||||
'image': 'busybox',
|
||||
'volumes': ['/data:rw'],
|
||||
'network_mode': 'bridge',
|
||||
},
|
||||
'app': {
|
||||
'image': 'busybox',
|
||||
'volumes_from': ['service:volume:rw'],
|
||||
'network_mode': 'service:net',
|
||||
},
|
||||
},
|
||||
'networks': {},
|
||||
'volumes': {},
|
||||
}
|
||||
|
||||
def test_ps(self):
|
||||
self.project.get_service('simple').create_container()
|
||||
result = self.dispatch(['ps'])
|
||||
@@ -313,6 +378,32 @@ class CLITestCase(DockerClientTestCase):
|
||||
]
|
||||
assert not containers
|
||||
|
||||
def test_bundle_with_digests(self):
|
||||
self.base_dir = 'tests/fixtures/bundle-with-digests/'
|
||||
tmpdir = py.test.ensuretemp('cli_test_bundle')
|
||||
self.addCleanup(tmpdir.remove)
|
||||
filename = str(tmpdir.join('example.dab'))
|
||||
|
||||
self.dispatch(['bundle', '--output', filename])
|
||||
with open(filename, 'r') as fh:
|
||||
bundle = json.load(fh)
|
||||
|
||||
assert bundle == {
|
||||
'Version': '0.1',
|
||||
'Services': {
|
||||
'web': {
|
||||
'Image': ('dockercloud/hello-world@sha256:fe79a2cfbd17eefc3'
|
||||
'44fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d'),
|
||||
'Networks': ['default'],
|
||||
},
|
||||
'redis': {
|
||||
'Image': ('redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d'
|
||||
'374b2b7392de1e7d77be26ef8f7b'),
|
||||
'Networks': ['default'],
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
def test_create(self):
|
||||
self.dispatch(['create'])
|
||||
service = self.project.get_service('simple')
|
||||
@@ -683,9 +774,7 @@ class CLITestCase(DockerClientTestCase):
|
||||
['-f', 'v2-invalid.yml', 'up', '-d'],
|
||||
returncode=1)
|
||||
|
||||
# TODO: fix validation error messages for v2 files
|
||||
# assert "Unsupported config option for service 'web': 'net'" in exc.exconly()
|
||||
assert "Unsupported config option" in result.stderr
|
||||
assert "Unsupported config option for services.bar: 'net'" in result.stderr
|
||||
|
||||
def test_up_with_net_v1(self):
|
||||
self.base_dir = 'tests/fixtures/net-container'
|
||||
@@ -875,16 +964,54 @@ class CLITestCase(DockerClientTestCase):
|
||||
[u'/bin/true'],
|
||||
)
|
||||
|
||||
def test_run_service_with_entrypoint_overridden(self):
|
||||
self.base_dir = 'tests/fixtures/dockerfile_with_entrypoint'
|
||||
name = 'service'
|
||||
self.dispatch(['run', '--entrypoint', '/bin/echo', name, 'helloworld'])
|
||||
service = self.project.get_service(name)
|
||||
container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
self.assertEqual(
|
||||
shlex.split(container.human_readable_command),
|
||||
[u'/bin/echo', u'helloworld'],
|
||||
)
|
||||
def test_run_service_with_dockerfile_entrypoint(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
|
||||
self.dispatch(['run', 'test'])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['printf']
|
||||
assert container.get('Config.Cmd') == ['default', 'args']
|
||||
|
||||
def test_run_service_with_dockerfile_entrypoint_overridden(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
|
||||
self.dispatch(['run', '--entrypoint', 'echo', 'test'])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['echo']
|
||||
assert not container.get('Config.Cmd')
|
||||
|
||||
def test_run_service_with_dockerfile_entrypoint_and_command_overridden(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
|
||||
self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['echo']
|
||||
assert container.get('Config.Cmd') == ['foo']
|
||||
|
||||
def test_run_service_with_compose_file_entrypoint(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-composefile'
|
||||
self.dispatch(['run', 'test'])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['printf']
|
||||
assert container.get('Config.Cmd') == ['default', 'args']
|
||||
|
||||
def test_run_service_with_compose_file_entrypoint_overridden(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-composefile'
|
||||
self.dispatch(['run', '--entrypoint', 'echo', 'test'])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['echo']
|
||||
assert not container.get('Config.Cmd')
|
||||
|
||||
def test_run_service_with_compose_file_entrypoint_and_command_overridden(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-composefile'
|
||||
self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['echo']
|
||||
assert container.get('Config.Cmd') == ['foo']
|
||||
|
||||
def test_run_service_with_compose_file_entrypoint_and_empty_string_command(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-composefile'
|
||||
self.dispatch(['run', '--entrypoint', 'echo', 'test', ''])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['echo']
|
||||
assert container.get('Config.Cmd') == ['']
|
||||
|
||||
def test_run_service_with_user_overridden(self):
|
||||
self.base_dir = 'tests/fixtures/user-composefile'
|
||||
@@ -1072,7 +1199,10 @@ class CLITestCase(DockerClientTestCase):
|
||||
]
|
||||
|
||||
for _, config in networks.items():
|
||||
assert not config['Aliases']
|
||||
# TODO: once we drop support for API <1.24, this can be changed to:
|
||||
# assert config['Aliases'] == [container.short_id]
|
||||
aliases = set(config['Aliases'] or []) - set([container.short_id])
|
||||
assert not aliases
|
||||
|
||||
@v2_only()
|
||||
def test_run_detached_connects_to_network(self):
|
||||
@@ -1089,7 +1219,10 @@ class CLITestCase(DockerClientTestCase):
|
||||
]
|
||||
|
||||
for _, config in networks.items():
|
||||
assert not config['Aliases']
|
||||
# TODO: once we drop support for API <1.24, this can be changed to:
|
||||
# assert config['Aliases'] == [container.short_id]
|
||||
aliases = set(config['Aliases'] or []) - set([container.short_id])
|
||||
assert not aliases
|
||||
|
||||
assert self.lookup(container, 'app')
|
||||
assert self.lookup(container, 'db')
|
||||
@@ -1120,6 +1253,18 @@ class CLITestCase(DockerClientTestCase):
|
||||
'simplecomposefile_simple_run_1',
|
||||
'exited'))
|
||||
|
||||
@mock.patch.dict(os.environ)
|
||||
def test_run_env_values_from_system(self):
|
||||
os.environ['FOO'] = 'bar'
|
||||
os.environ['BAR'] = 'baz'
|
||||
|
||||
self.dispatch(['run', '-e', 'FOO', 'simple', 'true'], None)
|
||||
|
||||
container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
|
||||
environment = container.get('Config.Env')
|
||||
assert 'FOO=bar' in environment
|
||||
assert 'BAR=baz' not in environment
|
||||
|
||||
def test_rm(self):
|
||||
service = self.project.get_service('simple')
|
||||
service.create_container()
|
||||
@@ -1143,8 +1288,6 @@ class CLITestCase(DockerClientTestCase):
|
||||
self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
|
||||
self.dispatch(['rm', '-f'], None)
|
||||
self.assertEqual(len(service.containers(stopped=True)), 0)
|
||||
self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
|
||||
self.dispatch(['rm', '-f', '-a'], None)
|
||||
self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
|
||||
|
||||
service.create_container(one_off=False)
|
||||
@@ -1257,13 +1400,14 @@ class CLITestCase(DockerClientTestCase):
|
||||
'logscomposefile_another_1',
|
||||
'exited'))
|
||||
|
||||
# sleep for a short period to allow the tailing thread to receive the
|
||||
# event. This is not great, but there isn't an easy way to do this
|
||||
# without being able to stream stdout from the process.
|
||||
time.sleep(0.5)
|
||||
os.kill(proc.pid, signal.SIGINT)
|
||||
result = wait_on_process(proc, returncode=1)
|
||||
self.dispatch(['kill', 'simple'])
|
||||
|
||||
result = wait_on_process(proc)
|
||||
|
||||
assert 'hello' in result.stdout
|
||||
assert 'test' in result.stdout
|
||||
assert 'logscomposefile_another_1 exited with code 0' in result.stdout
|
||||
assert 'logscomposefile_simple_1 exited with code 137' in result.stdout
|
||||
|
||||
def test_logs_default(self):
|
||||
self.base_dir = 'tests/fixtures/logs-composefile'
|
||||
@@ -1425,6 +1569,17 @@ class CLITestCase(DockerClientTestCase):
|
||||
assert Counter(e['action'] for e in lines) == {'create': 2, 'start': 2}
|
||||
|
||||
def test_events_human_readable(self):
|
||||
|
||||
def has_timestamp(string):
|
||||
str_iso_date, str_iso_time, container_info = string.split(' ', 2)
|
||||
try:
|
||||
return isinstance(datetime.datetime.strptime(
|
||||
'%s %s' % (str_iso_date, str_iso_time),
|
||||
'%Y-%m-%d %H:%M:%S.%f'),
|
||||
datetime.datetime)
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
events_proc = start_process(self.base_dir, ['events'])
|
||||
self.dispatch(['up', '-d', 'simple'])
|
||||
wait_on_condition(ContainerCountCondition(self.project, 1))
|
||||
@@ -1441,7 +1596,8 @@ class CLITestCase(DockerClientTestCase):
|
||||
|
||||
assert expected_template.format('create', container.id) in lines[0]
|
||||
assert expected_template.format('start', container.id) in lines[1]
|
||||
assert lines[0].startswith(datetime.date.today().isoformat())
|
||||
|
||||
assert has_timestamp(lines[0])
|
||||
|
||||
def test_env_file_relative_to_compose_file(self):
|
||||
config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml')
|
||||
|
||||
9
tests/fixtures/bundle-with-digests/docker-compose.yml
vendored
Normal file
9
tests/fixtures/bundle-with-digests/docker-compose.yml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
|
||||
version: '2.0'
|
||||
|
||||
services:
|
||||
web:
|
||||
image: dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d
|
||||
|
||||
redis:
|
||||
image: redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d374b2b7392de1e7d77be26ef8f7b
|
||||
@@ -1,2 +0,0 @@
|
||||
service:
|
||||
build: .
|
||||
6
tests/fixtures/entrypoint-composefile/docker-compose.yml
vendored
Normal file
6
tests/fixtures/entrypoint-composefile/docker-compose.yml
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
version: "2"
|
||||
services:
|
||||
test:
|
||||
image: busybox
|
||||
entrypoint: printf
|
||||
command: default args
|
||||
@@ -1,3 +1,4 @@
|
||||
FROM busybox:latest
|
||||
LABEL com.docker.compose.test_image=true
|
||||
ENTRYPOINT echo "From prebuilt entrypoint"
|
||||
ENTRYPOINT ["printf"]
|
||||
CMD ["default", "args"]
|
||||
4
tests/fixtures/entrypoint-dockerfile/docker-compose.yml
vendored
Normal file
4
tests/fixtures/entrypoint-dockerfile/docker-compose.yml
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
version: "2"
|
||||
services:
|
||||
test:
|
||||
build: .
|
||||
2
tests/fixtures/extends/invalid-links.yml
vendored
2
tests/fixtures/extends/invalid-links.yml
vendored
@@ -1,3 +1,5 @@
|
||||
mydb:
|
||||
build: '.'
|
||||
myweb:
|
||||
build: '.'
|
||||
extends:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
simple:
|
||||
image: busybox:latest
|
||||
command: sh -c "echo hello && sleep 200"
|
||||
command: sh -c "echo hello && tail -f /dev/null"
|
||||
another:
|
||||
image: busybox:latest
|
||||
command: sh -c "echo test"
|
||||
|
||||
14
tests/fixtures/restart/docker-compose.yml
vendored
Normal file
14
tests/fixtures/restart/docker-compose.yml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
version: "2"
|
||||
services:
|
||||
never:
|
||||
image: busybox
|
||||
restart: "no"
|
||||
always:
|
||||
image: busybox
|
||||
restart: always
|
||||
on-failure:
|
||||
image: busybox
|
||||
restart: on-failure
|
||||
on-failure-5:
|
||||
image: busybox
|
||||
restart: "on-failure:5"
|
||||
10
tests/fixtures/v1-config/docker-compose.yml
vendored
Normal file
10
tests/fixtures/v1-config/docker-compose.yml
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
net:
|
||||
image: busybox
|
||||
volume:
|
||||
image: busybox
|
||||
volumes:
|
||||
- /data
|
||||
app:
|
||||
image: busybox
|
||||
net: "container:net"
|
||||
volumes_from: ["volume"]
|
||||
@@ -19,6 +19,7 @@ from compose.const import LABEL_PROJECT
|
||||
from compose.const import LABEL_SERVICE
|
||||
from compose.container import Container
|
||||
from compose.project import Project
|
||||
from compose.project import ProjectError
|
||||
from compose.service import ConvergenceStrategy
|
||||
from tests.integration.testcases import v2_only
|
||||
|
||||
@@ -565,7 +566,11 @@ class ProjectTest(DockerClientTestCase):
|
||||
'name': 'web',
|
||||
'image': 'busybox:latest',
|
||||
'command': 'top',
|
||||
'networks': {'foo': None, 'bar': None, 'baz': None},
|
||||
'networks': {
|
||||
'foo': None,
|
||||
'bar': None,
|
||||
'baz': {'aliases': ['extra']},
|
||||
},
|
||||
}],
|
||||
volumes={},
|
||||
networks={
|
||||
@@ -581,15 +586,23 @@ class ProjectTest(DockerClientTestCase):
|
||||
config_data=config_data,
|
||||
)
|
||||
project.up()
|
||||
self.assertEqual(len(project.containers()), 1)
|
||||
|
||||
containers = project.containers()
|
||||
assert len(containers) == 1
|
||||
container, = containers
|
||||
|
||||
for net_name in ['foo', 'bar', 'baz']:
|
||||
full_net_name = 'composetest_{}'.format(net_name)
|
||||
network_data = self.client.inspect_network(full_net_name)
|
||||
self.assertEqual(network_data['Name'], full_net_name)
|
||||
assert network_data['Name'] == full_net_name
|
||||
|
||||
aliases_key = 'NetworkSettings.Networks.{net}.Aliases'
|
||||
assert 'web' in container.get(aliases_key.format(net='composetest_foo'))
|
||||
assert 'web' in container.get(aliases_key.format(net='composetest_baz'))
|
||||
assert 'extra' in container.get(aliases_key.format(net='composetest_baz'))
|
||||
|
||||
foo_data = self.client.inspect_network('composetest_foo')
|
||||
self.assertEqual(foo_data['Driver'], 'bridge')
|
||||
assert foo_data['Driver'] == 'bridge'
|
||||
|
||||
@v2_only()
|
||||
def test_up_with_ipam_config(self):
|
||||
@@ -740,7 +753,8 @@ class ProjectTest(DockerClientTestCase):
|
||||
config_data=config_data,
|
||||
)
|
||||
|
||||
assert len(project.up()) == 0
|
||||
with self.assertRaises(ProjectError):
|
||||
project.up()
|
||||
|
||||
@v2_only()
|
||||
def test_project_up_volumes(self):
|
||||
@@ -820,6 +834,42 @@ class ProjectTest(DockerClientTestCase):
|
||||
self.assertTrue(log_config)
|
||||
self.assertEqual(log_config.get('Type'), 'none')
|
||||
|
||||
@v2_only()
|
||||
def test_project_up_port_mappings_with_multiple_files(self):
|
||||
base_file = config.ConfigFile(
|
||||
'base.yml',
|
||||
{
|
||||
'version': V2_0,
|
||||
'services': {
|
||||
'simple': {
|
||||
'image': 'busybox:latest',
|
||||
'command': 'top',
|
||||
'ports': ['1234:1234']
|
||||
},
|
||||
},
|
||||
|
||||
})
|
||||
override_file = config.ConfigFile(
|
||||
'override.yml',
|
||||
{
|
||||
'version': V2_0,
|
||||
'services': {
|
||||
'simple': {
|
||||
'ports': ['1234:1234']
|
||||
}
|
||||
}
|
||||
|
||||
})
|
||||
details = config.ConfigDetails('.', [base_file, override_file])
|
||||
|
||||
config_data = config.load(details)
|
||||
project = Project.from_config(
|
||||
name='composetest', config_data=config_data, client=self.client
|
||||
)
|
||||
project.up()
|
||||
containers = project.containers()
|
||||
self.assertEqual(len(containers), 1)
|
||||
|
||||
@v2_only()
|
||||
def test_initialize_volumes(self):
|
||||
vol_name = '{0:x}'.format(random.getrandbits(32))
|
||||
|
||||
@@ -397,7 +397,7 @@ class ServiceTest(DockerClientTestCase):
|
||||
|
||||
assert not mock_log.warn.called
|
||||
assert (
|
||||
[mount['Destination'] for mount in new_container.get('Mounts')],
|
||||
[mount['Destination'] for mount in new_container.get('Mounts')] ==
|
||||
['/data']
|
||||
)
|
||||
assert new_container.get_mount('/data')['Source'] != host_path
|
||||
@@ -738,7 +738,10 @@ class ServiceTest(DockerClientTestCase):
|
||||
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
self.assertTrue(service.containers()[0].is_running)
|
||||
self.assertIn("ERROR: for composetest_web_2 Boom", mock_stderr.getvalue())
|
||||
self.assertIn(
|
||||
"ERROR: for composetest_web_2 Cannot create container for service web: Boom",
|
||||
mock_stderr.getvalue()
|
||||
)
|
||||
|
||||
def test_scale_with_unexpected_exception(self):
|
||||
"""Test that when scaling if the API returns an error, that is not of type
|
||||
|
||||
218
tests/unit/bundle_test.py
Normal file
218
tests/unit/bundle_test.py
Normal file
@@ -0,0 +1,218 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import docker
|
||||
import mock
|
||||
import pytest
|
||||
|
||||
from compose import bundle
|
||||
from compose import service
|
||||
from compose.cli.errors import UserError
|
||||
from compose.config.config import Config
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_service():
|
||||
return mock.create_autospec(
|
||||
service.Service,
|
||||
client=mock.create_autospec(docker.Client),
|
||||
options={})
|
||||
|
||||
|
||||
def test_get_image_digest_exists(mock_service):
|
||||
mock_service.options['image'] = 'abcd'
|
||||
mock_service.image.return_value = {'RepoDigests': ['digest1']}
|
||||
digest = bundle.get_image_digest(mock_service)
|
||||
assert digest == 'digest1'
|
||||
|
||||
|
||||
def test_get_image_digest_image_uses_digest(mock_service):
|
||||
mock_service.options['image'] = image_id = 'redis@sha256:digest'
|
||||
|
||||
digest = bundle.get_image_digest(mock_service)
|
||||
assert digest == image_id
|
||||
assert not mock_service.image.called
|
||||
|
||||
|
||||
def test_get_image_digest_no_image(mock_service):
|
||||
with pytest.raises(UserError) as exc:
|
||||
bundle.get_image_digest(service.Service(name='theservice'))
|
||||
|
||||
assert "doesn't define an image tag" in exc.exconly()
|
||||
|
||||
|
||||
def test_push_image_with_saved_digest(mock_service):
|
||||
mock_service.options['build'] = '.'
|
||||
mock_service.options['image'] = image_id = 'abcd'
|
||||
mock_service.push.return_value = expected = 'sha256:thedigest'
|
||||
mock_service.image.return_value = {'RepoDigests': ['digest1']}
|
||||
|
||||
digest = bundle.push_image(mock_service)
|
||||
assert digest == image_id + '@' + expected
|
||||
|
||||
mock_service.push.assert_called_once_with()
|
||||
assert not mock_service.client.push.called
|
||||
|
||||
|
||||
def test_push_image(mock_service):
|
||||
mock_service.options['build'] = '.'
|
||||
mock_service.options['image'] = image_id = 'abcd'
|
||||
mock_service.push.return_value = expected = 'sha256:thedigest'
|
||||
mock_service.image.return_value = {'RepoDigests': []}
|
||||
|
||||
digest = bundle.push_image(mock_service)
|
||||
assert digest == image_id + '@' + expected
|
||||
|
||||
mock_service.push.assert_called_once_with()
|
||||
mock_service.client.pull.assert_called_once_with(digest)
|
||||
|
||||
|
||||
def test_to_bundle():
|
||||
image_digests = {'a': 'aaaa', 'b': 'bbbb'}
|
||||
services = [
|
||||
{'name': 'a', 'build': '.', },
|
||||
{'name': 'b', 'build': './b'},
|
||||
]
|
||||
config = Config(
|
||||
version=2,
|
||||
services=services,
|
||||
volumes={'special': {}},
|
||||
networks={'extra': {}})
|
||||
|
||||
with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
|
||||
output = bundle.to_bundle(config, image_digests)
|
||||
|
||||
assert mock_log.mock_calls == [
|
||||
mock.call("Unsupported top level key 'networks' - ignoring"),
|
||||
mock.call("Unsupported top level key 'volumes' - ignoring"),
|
||||
]
|
||||
|
||||
assert output == {
|
||||
'Version': '0.1',
|
||||
'Services': {
|
||||
'a': {'Image': 'aaaa', 'Networks': ['default']},
|
||||
'b': {'Image': 'bbbb', 'Networks': ['default']},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def test_convert_service_to_bundle():
|
||||
name = 'theservice'
|
||||
image_digest = 'thedigest'
|
||||
service_dict = {
|
||||
'ports': ['80'],
|
||||
'expose': ['1234'],
|
||||
'networks': {'extra': {}},
|
||||
'command': 'foo',
|
||||
'entrypoint': 'entry',
|
||||
'environment': {'BAZ': 'ENV'},
|
||||
'build': '.',
|
||||
'working_dir': '/tmp',
|
||||
'user': 'root',
|
||||
'labels': {'FOO': 'LABEL'},
|
||||
'privileged': True,
|
||||
}
|
||||
|
||||
with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
|
||||
config = bundle.convert_service_to_bundle(name, service_dict, image_digest)
|
||||
|
||||
mock_log.assert_called_once_with(
|
||||
"Unsupported key 'privileged' in services.theservice - ignoring")
|
||||
|
||||
assert config == {
|
||||
'Image': image_digest,
|
||||
'Ports': [
|
||||
{'Protocol': 'tcp', 'Port': 80},
|
||||
{'Protocol': 'tcp', 'Port': 1234},
|
||||
],
|
||||
'Networks': ['extra'],
|
||||
'Command': ['entry', 'foo'],
|
||||
'Env': ['BAZ=ENV'],
|
||||
'WorkingDir': '/tmp',
|
||||
'User': 'root',
|
||||
'Labels': {'FOO': 'LABEL'},
|
||||
}
|
||||
|
||||
|
||||
def test_set_command_and_args_none():
|
||||
config = {}
|
||||
bundle.set_command_and_args(config, [], [])
|
||||
assert config == {}
|
||||
|
||||
|
||||
def test_set_command_and_args_from_command():
|
||||
config = {}
|
||||
bundle.set_command_and_args(config, [], "echo ok")
|
||||
assert config == {'Args': ['echo', 'ok']}
|
||||
|
||||
|
||||
def test_set_command_and_args_from_entrypoint():
|
||||
config = {}
|
||||
bundle.set_command_and_args(config, "echo entry", [])
|
||||
assert config == {'Command': ['echo', 'entry']}
|
||||
|
||||
|
||||
def test_set_command_and_args_from_both():
|
||||
config = {}
|
||||
bundle.set_command_and_args(config, "echo entry", ["extra", "arg"])
|
||||
assert config == {'Command': ['echo', 'entry', "extra", "arg"]}
|
||||
|
||||
|
||||
def test_make_service_networks_default():
|
||||
name = 'theservice'
|
||||
service_dict = {}
|
||||
|
||||
with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
|
||||
networks = bundle.make_service_networks(name, service_dict)
|
||||
|
||||
assert not mock_log.called
|
||||
assert networks == ['default']
|
||||
|
||||
|
||||
def test_make_service_networks():
|
||||
name = 'theservice'
|
||||
service_dict = {
|
||||
'networks': {
|
||||
'foo': {
|
||||
'aliases': ['one', 'two'],
|
||||
},
|
||||
'bar': {}
|
||||
},
|
||||
}
|
||||
|
||||
with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
|
||||
networks = bundle.make_service_networks(name, service_dict)
|
||||
|
||||
mock_log.assert_called_once_with(
|
||||
"Unsupported key 'aliases' in services.theservice.networks.foo - ignoring")
|
||||
assert sorted(networks) == sorted(service_dict['networks'])
|
||||
|
||||
|
||||
def test_make_port_specs():
|
||||
service_dict = {
|
||||
'expose': ['80', '500/udp'],
|
||||
'ports': [
|
||||
'400:80',
|
||||
'222',
|
||||
'127.0.0.1:8001:8001',
|
||||
'127.0.0.1:5000-5001:3000-3001'],
|
||||
}
|
||||
port_specs = bundle.make_port_specs(service_dict)
|
||||
assert port_specs == [
|
||||
{'Protocol': 'tcp', 'Port': 80},
|
||||
{'Protocol': 'tcp', 'Port': 222},
|
||||
{'Protocol': 'tcp', 'Port': 8001},
|
||||
{'Protocol': 'tcp', 'Port': 3000},
|
||||
{'Protocol': 'tcp', 'Port': 3001},
|
||||
{'Protocol': 'udp', 'Port': 500},
|
||||
]
|
||||
|
||||
|
||||
def test_make_port_spec_with_protocol():
|
||||
port_spec = bundle.make_port_spec("5000/udp")
|
||||
assert port_spec == {'Protocol': 'udp', 'Port': 5000}
|
||||
|
||||
|
||||
def test_make_port_spec_default_protocol():
|
||||
port_spec = bundle.make_port_spec("50000")
|
||||
assert port_spec == {'Protocol': 'tcp', 'Port': 50000}
|
||||
@@ -2,10 +2,12 @@ from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import os
|
||||
import ssl
|
||||
|
||||
import pytest
|
||||
|
||||
from compose.cli.command import get_config_path_from_options
|
||||
from compose.cli.command import get_tls_version
|
||||
from compose.config.environment import Environment
|
||||
from compose.const import IS_WINDOWS_PLATFORM
|
||||
from tests import mock
|
||||
@@ -46,3 +48,21 @@ class TestGetConfigPathFromOptions(object):
|
||||
def test_no_path(self):
|
||||
environment = Environment.from_env_file('.')
|
||||
assert not get_config_path_from_options('.', {}, environment)
|
||||
|
||||
|
||||
class TestGetTlsVersion(object):
|
||||
def test_get_tls_version_default(self):
|
||||
environment = {}
|
||||
assert get_tls_version(environment) is None
|
||||
|
||||
@pytest.mark.skipif(not hasattr(ssl, 'PROTOCOL_TLSv1_2'), reason='TLS v1.2 unsupported')
|
||||
def test_get_tls_version_upgrade(self):
|
||||
environment = {'COMPOSE_TLS_VERSION': 'TLSv1_2'}
|
||||
assert get_tls_version(environment) == ssl.PROTOCOL_TLSv1_2
|
||||
|
||||
def test_get_tls_version_unavailable(self):
|
||||
environment = {'COMPOSE_TLS_VERSION': 'TLSv5_5'}
|
||||
with mock.patch('compose.cli.command.log') as mock_log:
|
||||
tls_version = get_tls_version(environment)
|
||||
mock_log.warn.assert_called_once_with(mock.ANY)
|
||||
assert tls_version is None
|
||||
|
||||
@@ -2,10 +2,13 @@ from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import os
|
||||
import platform
|
||||
|
||||
import docker
|
||||
import pytest
|
||||
|
||||
import compose
|
||||
from compose.cli import errors
|
||||
from compose.cli.docker_client import docker_client
|
||||
from compose.cli.docker_client import tls_config_from_options
|
||||
from tests import mock
|
||||
@@ -19,11 +22,35 @@ class DockerClientTestCase(unittest.TestCase):
|
||||
del os.environ['HOME']
|
||||
docker_client(os.environ)
|
||||
|
||||
@mock.patch.dict(os.environ)
|
||||
def test_docker_client_with_custom_timeout(self):
|
||||
timeout = 300
|
||||
with mock.patch('compose.cli.docker_client.HTTP_TIMEOUT', 300):
|
||||
client = docker_client(os.environ)
|
||||
self.assertEqual(client.timeout, int(timeout))
|
||||
os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
|
||||
client = docker_client(os.environ)
|
||||
assert client.timeout == 123
|
||||
|
||||
@mock.patch.dict(os.environ)
|
||||
def test_custom_timeout_error(self):
|
||||
os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
|
||||
client = docker_client(os.environ)
|
||||
|
||||
with mock.patch('compose.cli.errors.log') as fake_log:
|
||||
with pytest.raises(errors.ConnectionError):
|
||||
with errors.handle_connection_errors(client):
|
||||
raise errors.RequestsConnectionError(
|
||||
errors.ReadTimeoutError(None, None, None))
|
||||
|
||||
assert fake_log.error.call_count == 1
|
||||
assert '123' in fake_log.error.call_args[0][0]
|
||||
|
||||
def test_user_agent(self):
|
||||
client = docker_client(os.environ)
|
||||
expected = "docker-compose/{0} docker-py/{1} {2}/{3}".format(
|
||||
compose.__version__,
|
||||
docker.__version__,
|
||||
platform.system(),
|
||||
platform.release()
|
||||
)
|
||||
self.assertEqual(client.headers['User-Agent'], expected)
|
||||
|
||||
|
||||
class TLSConfigTestCase(unittest.TestCase):
|
||||
|
||||
@@ -5,6 +5,7 @@ from __future__ import unicode_literals
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
from io import StringIO
|
||||
|
||||
import docker
|
||||
import py
|
||||
@@ -83,10 +84,10 @@ class CLITestCase(unittest.TestCase):
|
||||
self.assertTrue(project.services)
|
||||
|
||||
def test_command_help(self):
|
||||
with pytest.raises(SystemExit) as exc:
|
||||
with mock.patch('sys.stdout', new=StringIO()) as fake_stdout:
|
||||
TopLevelCommand.help({'COMMAND': 'up'})
|
||||
|
||||
assert 'Usage: up' in exc.exconly()
|
||||
assert "Usage: up" in fake_stdout.getvalue()
|
||||
|
||||
def test_command_help_nonexistent(self):
|
||||
with pytest.raises(NoSuchCommand):
|
||||
|
||||
@@ -715,7 +715,35 @@ class ConfigTest(unittest.TestCase):
|
||||
).services[0]
|
||||
assert 'args' in service['build']
|
||||
assert 'foo' in service['build']['args']
|
||||
assert service['build']['args']['foo'] == 'None'
|
||||
assert service['build']['args']['foo'] == ''
|
||||
|
||||
# If build argument is None then it will be converted to the empty
|
||||
# string. Make sure that int zero kept as it is, i.e. not converted to
|
||||
# the empty string
|
||||
def test_build_args_check_zero_preserved(self):
|
||||
service = config.load(
|
||||
build_config_details(
|
||||
{
|
||||
'version': '2',
|
||||
'services': {
|
||||
'web': {
|
||||
'build': {
|
||||
'context': '.',
|
||||
'dockerfile': 'Dockerfile-alt',
|
||||
'args': {
|
||||
'foo': 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
'tests/fixtures/extends',
|
||||
'filename.yml'
|
||||
)
|
||||
).services[0]
|
||||
assert 'args' in service['build']
|
||||
assert 'foo' in service['build']['args']
|
||||
assert service['build']['args']['foo'] == '0'
|
||||
|
||||
def test_load_with_multiple_files_mismatched_networks_format(self):
|
||||
base_file = config.ConfigFile(
|
||||
@@ -1360,6 +1388,17 @@ class ConfigTest(unittest.TestCase):
|
||||
config.load(config_details)
|
||||
assert "Service 'one' depends on service 'three'" in exc.exconly()
|
||||
|
||||
def test_linked_service_is_undefined(self):
|
||||
with self.assertRaises(ConfigurationError):
|
||||
config.load(
|
||||
build_config_details({
|
||||
'version': '2',
|
||||
'services': {
|
||||
'web': {'image': 'busybox', 'links': ['db:db']},
|
||||
},
|
||||
})
|
||||
)
|
||||
|
||||
def test_load_dockerfile_without_context(self):
|
||||
config_details = build_config_details({
|
||||
'version': '2',
|
||||
@@ -1901,6 +1940,14 @@ class MergePortsTest(unittest.TestCase, MergeListsTest):
|
||||
base_config = ['10:8000', '9000']
|
||||
override_config = ['20:8000']
|
||||
|
||||
def test_duplicate_port_mappings(self):
|
||||
service_dict = config.merge_service_dicts(
|
||||
{self.config_name: self.base_config},
|
||||
{self.config_name: self.base_config},
|
||||
DEFAULT_VERSION
|
||||
)
|
||||
assert set(service_dict[self.config_name]) == set(self.base_config)
|
||||
|
||||
|
||||
class MergeNetworksTest(unittest.TestCase, MergeListsTest):
|
||||
config_name = 'networks'
|
||||
@@ -2647,15 +2694,28 @@ class ExpandPathTest(unittest.TestCase):
|
||||
|
||||
|
||||
class VolumePathTest(unittest.TestCase):
|
||||
|
||||
@pytest.mark.xfail((not IS_WINDOWS_PLATFORM), reason='does not have a drive')
|
||||
def test_split_path_mapping_with_windows_path(self):
|
||||
host_path = "c:\\Users\\msamblanet\\Documents\\anvil\\connect\\config"
|
||||
windows_volume_path = host_path + ":/opt/connect/config:ro"
|
||||
expected_mapping = ("/opt/connect/config:ro", host_path)
|
||||
|
||||
mapping = config.split_path_mapping(windows_volume_path)
|
||||
self.assertEqual(mapping, expected_mapping)
|
||||
assert mapping == expected_mapping
|
||||
|
||||
def test_split_path_mapping_with_windows_path_in_container(self):
|
||||
host_path = 'c:\\Users\\remilia\\data'
|
||||
container_path = 'c:\\scarletdevil\\data'
|
||||
expected_mapping = (container_path, host_path)
|
||||
|
||||
mapping = config.split_path_mapping('{0}:{1}'.format(host_path, container_path))
|
||||
assert mapping == expected_mapping
|
||||
|
||||
def test_split_path_mapping_with_root_mount(self):
|
||||
host_path = '/'
|
||||
container_path = '/var/hostroot'
|
||||
expected_mapping = (container_path, host_path)
|
||||
mapping = config.split_path_mapping('{0}:{1}'.format(host_path, container_path))
|
||||
assert mapping == expected_mapping
|
||||
|
||||
|
||||
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
|
||||
|
||||
@@ -29,7 +29,7 @@ def get_deps(obj):
|
||||
|
||||
|
||||
def test_parallel_execute():
|
||||
results = parallel_execute(
|
||||
results, errors = parallel_execute(
|
||||
objects=[1, 2, 3, 4, 5],
|
||||
func=lambda x: x * 2,
|
||||
get_name=six.text_type,
|
||||
@@ -37,6 +37,7 @@ def test_parallel_execute():
|
||||
)
|
||||
|
||||
assert sorted(results) == [2, 4, 6, 8, 10]
|
||||
assert errors == {}
|
||||
|
||||
|
||||
def test_parallel_execute_with_deps():
|
||||
|
||||
@@ -65,3 +65,23 @@ class ProgressStreamTestCase(unittest.TestCase):
|
||||
|
||||
events = progress_stream.stream_output(events, output)
|
||||
self.assertTrue(len(output.getvalue()) > 0)
|
||||
|
||||
|
||||
def test_get_digest_from_push():
|
||||
digest = "sha256:abcd"
|
||||
events = [
|
||||
{"status": "..."},
|
||||
{"status": "..."},
|
||||
{"progressDetail": {}, "aux": {"Digest": digest}},
|
||||
]
|
||||
assert progress_stream.get_digest_from_push(events) == digest
|
||||
|
||||
|
||||
def test_get_digest_from_pull():
|
||||
digest = "sha256:abcd"
|
||||
events = [
|
||||
{"status": "..."},
|
||||
{"status": "..."},
|
||||
{"status": "Digest: %s" % digest},
|
||||
]
|
||||
assert progress_stream.get_digest_from_pull(events) == digest
|
||||
|
||||
@@ -510,3 +510,35 @@ class ProjectTest(unittest.TestCase):
|
||||
|
||||
project.down(ImageType.all, True)
|
||||
self.mock_client.remove_image.assert_called_once_with("busybox:latest")
|
||||
|
||||
def test_warning_in_swarm_mode(self):
|
||||
self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
|
||||
project = Project('composetest', [], self.mock_client)
|
||||
|
||||
with mock.patch('compose.project.log') as fake_log:
|
||||
project.up()
|
||||
assert fake_log.warn.call_count == 1
|
||||
|
||||
def test_no_warning_on_stop(self):
|
||||
self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
|
||||
project = Project('composetest', [], self.mock_client)
|
||||
|
||||
with mock.patch('compose.project.log') as fake_log:
|
||||
project.stop()
|
||||
assert fake_log.warn.call_count == 0
|
||||
|
||||
def test_no_warning_in_normal_mode(self):
|
||||
self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'inactive'}}
|
||||
project = Project('composetest', [], self.mock_client)
|
||||
|
||||
with mock.patch('compose.project.log') as fake_log:
|
||||
project.up()
|
||||
assert fake_log.warn.call_count == 0
|
||||
|
||||
def test_no_warning_with_no_swarm_info(self):
|
||||
self.mock_client.info.return_value = {}
|
||||
project = Project('composetest', [], self.mock_client)
|
||||
|
||||
with mock.patch('compose.project.log') as fake_log:
|
||||
project.up()
|
||||
assert fake_log.warn.call_count == 0
|
||||
|
||||
@@ -642,6 +642,55 @@ class ServiceTest(unittest.TestCase):
|
||||
service = Service('foo', project='testing')
|
||||
assert service.image_name == 'testing_foo'
|
||||
|
||||
@mock.patch('compose.service.log', autospec=True)
|
||||
def test_only_log_warning_when_host_ports_clash(self, mock_log):
|
||||
self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
|
||||
name = 'foo'
|
||||
service = Service(
|
||||
name,
|
||||
client=self.mock_client,
|
||||
ports=["8080:80"])
|
||||
|
||||
service.scale(0)
|
||||
self.assertFalse(mock_log.warn.called)
|
||||
|
||||
service.scale(1)
|
||||
self.assertFalse(mock_log.warn.called)
|
||||
|
||||
service.scale(2)
|
||||
mock_log.warn.assert_called_once_with(
|
||||
'The "{}" service specifies a port on the host. If multiple containers '
|
||||
'for this service are created on a single host, the port will clash.'.format(name))
|
||||
|
||||
|
||||
class TestServiceNetwork(object):
|
||||
|
||||
def test_connect_container_to_networks_short_aliase_exists(self):
|
||||
mock_client = mock.create_autospec(docker.Client)
|
||||
service = Service(
|
||||
'db',
|
||||
mock_client,
|
||||
'myproject',
|
||||
image='foo',
|
||||
networks={'project_default': {}})
|
||||
container = Container(
|
||||
None,
|
||||
{
|
||||
'Id': 'abcdef',
|
||||
'NetworkSettings': {
|
||||
'Networks': {
|
||||
'project_default': {
|
||||
'Aliases': ['analias', 'abcdef'],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
True)
|
||||
service.connect_container_to_networks(container)
|
||||
|
||||
assert not mock_client.disconnect_container_from_network.call_count
|
||||
assert not mock_client.connect_container_to_network.call_count
|
||||
|
||||
|
||||
def sort_by_name(dictionary_list):
|
||||
return sorted(dictionary_list, key=lambda k: k['name'])
|
||||
|
||||
Reference in New Issue
Block a user