Mirror of https://github.com/docker/compose.git (synced 2026-02-14 04:29:29 +08:00)
Compare commits: 1.22.0 ... 1.24.1-pat (282 commits)
(Commit table: 282 rows of commit SHAs, from 3fbb9fe51e at the top to 47584a37c9 at the bottom. The Author, Date, and message columns are empty in the mirrored page, so the individual rows are omitted here.)
.circleci/config.yml:

@@ -2,7 +2,7 @@ version: 2
 jobs:
   test:
     macos:
-      xcode: "8.3.3"
+      xcode: "9.4.1"
     steps:
       - checkout
       - run:
@@ -10,33 +10,32 @@ jobs:
           command: ./script/setup/osx
       - run:
           name: install tox
-          command: sudo pip install --upgrade tox==2.1.1
+          command: sudo pip install --upgrade tox==2.1.1 virtualenv==16.2.0
       - run:
           name: unit tests
-          command: tox -e py27,py36 -- tests/unit
+          command: tox -e py27,py36,py37 -- tests/unit

   build-osx-binary:
     macos:
-      xcode: "8.3.3"
+      xcode: "9.4.1"
     steps:
       - checkout
       - run:
           name: upgrade python tools
-          command: sudo pip install --upgrade pip virtualenv
+          command: sudo pip install --upgrade pip virtualenv==16.2.0
       - run:
           name: setup script
-          command: ./script/setup/osx
+          command: DEPLOYMENT_TARGET=10.11 ./script/setup/osx
       - run:
           name: build script
           command: ./script/build/osx
       - store_artifacts:
           path: dist/docker-compose-Darwin-x86_64
           destination: docker-compose-Darwin-x86_64
-      - deploy:
-          name: Deploy binary to bintray
-          command: |
-            OS_NAME=Darwin PKG_NAME=osx ./script/circle/bintray-deploy.sh
+      # - deploy:
+      #     name: Deploy binary to bintray
+      #     command: |
+      #       OS_NAME=Darwin PKG_NAME=osx ./script/circle/bintray-deploy.sh

   build-linux-binary:
     machine:
@@ -54,28 +53,6 @@ jobs:
         command: |
           OS_NAME=Linux PKG_NAME=linux ./script/circle/bintray-deploy.sh

-  trigger-osx-binary-deploy:
-    # We use a separate repo to build OSX binaries meant for distribution
-    # with support for OSSX 10.11 (xcode 7). This job triggers a build on
-    # that repo.
-    docker:
-      - image: alpine:3.6
-
-    steps:
-      - run:
-          name: install curl
-          command: apk update && apk add curl
-
-      - run:
-          name: API trigger
-          command: |
-            curl -X POST -H "Content-Type: application/json" -d "{\
-              \"build_parameters\": {\
-                \"COMPOSE_BRANCH\": \"${CIRCLE_BRANCH}\"\
-              }\
-            }" https://circleci.com/api/v1.1/project/github/docker/compose-osx-release?circle-token=${OSX_RELEASE_TOKEN} \
-            > /dev/null

 workflows:
   version: 2
@@ -84,9 +61,3 @@ workflows:
     - test
     - build-linux-binary
     - build-osx-binary
-    - trigger-osx-binary-deploy:
-        filters:
-          branches:
-            only:
-              - master
-              - /bump-.*/
.dockerignore:

@@ -1,11 +1,13 @@
 *.egg-info
 .coverage
 .git
+.github
 .tox
 build
+binaries
 coverage-html
 docs/_site
-venv
+*venv
 .tox
+**/__pycache__
 *.pyc
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 60 lines):

@@ -0,0 +1,60 @@
+---
+name: Bug report
+about: Report a bug encountered while using docker-compose
+
+---
+
+<!--
+Welcome to the docker-compose issue tracker! Before creating an issue, please heed the following:
+
+1. This tracker should only be used to report bugs and request features / enhancements to docker-compose
+   - For questions and general support, use https://forums.docker.com
+   - For documentation issues, use https://github.com/docker/docker.github.io
+   - For issues with the `docker stack` commands and the version 3 of the Compose file, use
+     https://github.com/docker/cli
+2. Use the search function before creating a new issue. Duplicates will be closed and directed to
+   the original discussion.
+3. When making a bug report, make sure you provide all required information. The easier it is for
+   maintainers to reproduce, the faster it'll be fixed.
+-->
+
+## Description of the issue
+
+## Context information (for bug reports)
+
+**Output of `docker-compose version`**
+```
+(paste here)
+```
+
+**Output of `docker version`**
+```
+(paste here)
+```
+
+**Output of `docker-compose config`**
+(Make sure to add the relevant `-f` and other flags)
+```
+(paste here)
+```
+
+
+## Steps to reproduce the issue
+
+1.
+2.
+3.
+
+### Observed result
+
+### Expected result
+
+### Stacktrace / full error message
+
+```
+(paste here)
+```
+
+## Additional information
+
+OS version / distribution, `docker-compose` install method, etc.
.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 29 lines):

@@ -0,0 +1,29 @@
+---
+name: Feature request
+about: Suggest an idea to improve Compose
+
+---
+
+<!--
+Welcome to the docker-compose issue tracker! Before creating an issue, please heed the following:
+
+1. This tracker should only be used to report bugs and request features / enhancements to docker-compose
+   - For questions and general support, use https://forums.docker.com
+   - For documentation issues, use https://github.com/docker/docker.github.io
+   - For issues with the `docker stack` commands and the version 3 of the Compose file, use
+     https://github.com/docker/cli
+2. Use the search function before creating a new issue. Duplicates will be closed and directed to
+   the original discussion.
+-->
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
.github/ISSUE_TEMPLATE/question-about-using-compose.md (vendored, new file, 9 lines):

@@ -0,0 +1,9 @@
+---
+name: Question about using Compose
+about: This is not the appropriate channel
+
+---
+
+Please post on our forums: https://forums.docker.com for questions about using `docker-compose`.
+
+Posts that are not a bug report or a feature/enhancement request will not be addressed on this issue tracker.
.gitignore (vendored):

@@ -1,15 +1,18 @@
 *.egg-info
 *.pyc
+*.swo
+*.swp
+.cache
 .coverage*
+.DS_Store
 .idea
+
+/.tox
+/binaries
 /build
+/compose/GITSHA
 /coverage-html
 /dist
 /docs/_site
-/venv
-README.rst
-compose/GITSHA
-*.swo
-*.swp
-.DS_Store
-.cache
+/README.rst
+/*venv
.pre-commit-config.yaml:

@@ -14,7 +14,7 @@
   - id: requirements-txt-fixer
   - id: trailing-whitespace
 - repo: git://github.com/asottile/reorder_python_imports
-  sha: v0.3.5
+  sha: v1.3.4
   hooks:
   - id: reorder-python-imports
     language_version: 'python2.7'
CHANGELOG.md (169 lines changed):

@@ -1,6 +1,169 @@
 Change log
 ==========

+1.24.0 (2019-03-22)
+-------------------
+
+### Features
+
+- Added support for connecting to the Docker Engine using the `ssh` protocol.
+
+- Added a `--all` flag to `docker-compose ps` to include stopped one-off containers
+  in the command's output.
+
+- Add bash completion for `ps --all|-a`
+
+- Support for credential_spec
+
+- Add `--parallel` to `docker build`'s options in `bash` and `zsh` completion
+
+### Bugfixes
+
+- Fixed a bug where some valid credential helpers weren't properly handled by Compose
+  when attempting to pull images from private registries.
+
+- Fixed an issue where the output of `docker-compose start` before containers were created
+  was misleading
+
+- To match the Docker CLI behavior and to avoid confusing issues, Compose will no longer
+  accept whitespace in variable names sourced from environment files.
+
+- Compose will now report a configuration error if a service attempts to declare
+  duplicate mount points in the volumes section.
+
+- Fixed an issue with the containerized version of Compose that prevented users from
+  writing to stdin during interactive sessions started by `run` or `exec`.
+
+- One-off containers started by `run` no longer adopt the restart policy of the service,
+  and are instead set to never restart.
+
+- Fixed an issue that caused some container events to not appear in the output of
+  the `docker-compose events` command.
+
+- Missing images will no longer stop the execution of `docker-compose down` commands
+  (a warning will be displayed instead).
+
+- Force `virtualenv` version for macOS CI
+
+- Fix merging of compose files when network has `None` config
+
+- Fix `CTRL+C` issues by enabling `bootloader_ignore_signals` in `pyinstaller`
+
+- Bump `docker-py` version to `3.7.2` to fix SSH and proxy config issues
+
+- Fix release script and some typos on release documentation
+
+1.23.2 (2018-11-28)
+-------------------
+
+### Bugfixes
+
+- Reverted a 1.23.0 change that appended random strings to container names
+  created by `docker-compose up`, causing addressability issues.
+  Note: Containers created by `docker-compose run` will continue to use
+  randomly generated names to avoid collisions during parallel runs.
+
+- Fixed an issue where some `dockerfile` paths would fail unexpectedly when
+  attempting to build on Windows.
+
+- Fixed a bug where build context URLs would fail to build on Windows.
+
+- Fixed a bug that caused `run` and `exec` commands to fail for some otherwise
+  accepted values of the `--host` parameter.
+
+- Fixed an issue where overrides for the `storage_opt` and `isolation` keys in
+  service definitions weren't properly applied.
+
+- Fixed a bug where some invalid Compose files would raise an uncaught
+  exception during validation.
+
+1.23.1 (2018-11-01)
+-------------------
+
+### Bugfixes
+
+- Fixed a bug where working with containers created with a previous (< 1.23.0)
+  version of Compose would cause unexpected crashes
+
+- Fixed an issue where the behavior of the `--project-directory` flag would
+  vary depending on which subcommand was being used.
+
+1.23.0 (2018-10-30)
+-------------------
+
+### Important note
+
+The default naming scheme for containers created by Compose in this version
+has changed from `<project>_<service>_<index>` to
+`<project>_<service>_<index>_<slug>`, where `<slug>` is a randomly-generated
+hexadecimal string. Please make sure to update scripts relying on the old
+naming scheme accordingly before upgrading.
+
+### Features
+
+- Logs for containers restarting after a crash will now appear in the output
+  of the `up` and `logs` commands.
+
+- Added `--hash` option to the `docker-compose config` command, allowing users
+  to print a hash string for each service's configuration to facilitate rolling
+  updates.
+
+- Added `--parallel` flag to the `docker-compose build` command, allowing
+  Compose to build up to 5 images simultaneously.
+
+- Output for the `pull` command now reports status / progress even when pulling
+  multiple images in parallel.
+
+- For images with multiple names, Compose will now attempt to match the one
+  present in the service configuration in the output of the `images` command.
+
+### Bugfixes
+
+- Parallel `run` commands for the same service will no longer fail due to name
+  collisions.
+
+- Fixed an issue where paths longer than 260 characters on Windows clients would
+  cause `docker-compose build` to fail.
+
+- Fixed a bug where attempting to mount `/var/run/docker.sock` with
+  Docker Desktop for Windows would result in failure.
+
+- The `--project-directory` option is now used by Compose to determine where to
+  look for the `.env` file.
+
+- `docker-compose build` no longer fails when attempting to pull an image with
+  credentials provided by the gcloud credential helper.
+
+- Fixed the `--exit-code-from` option in `docker-compose up` to always report
+  the actual exit code even when the watched container isn't the cause of the
+  exit.
+
+- Fixed an issue that would prevent recreating a service in some cases where
+  a volume would be mapped to the same mountpoint as a volume declared inside
+  the image's Dockerfile.
+
+- Fixed a bug that caused hash configuration with multiple networks to be
+  inconsistent, causing some services to be unnecessarily restarted.
+
+- Fixed a bug that would cause failures with variable substitution for services
+  with a name containing one or more dot characters
+
+- Fixed a pipe handling issue when using the containerized version of Compose.
+
+- Fixed a bug causing `external: false` entries in the Compose file to be
+  printed as `external: true` in the output of `docker-compose config`
+
+- Fixed a bug where issuing a `docker-compose pull` command on services
+  without a defined image key would cause Compose to crash
+
+- Volumes and binds are now mounted in the order they're declared in the
+  service definition
+
+### Miscellaneous
+
+- The `zsh` completion script has been updated with new options, and no
+  longer suggests container names where service names are expected.
+
 1.22.0 (2018-07-17)
 -------------------

@@ -60,7 +223,7 @@ Change log

 ### Bugfixes

-- Fixed a bug where the ip_range attirbute in IPAM configs was prevented
+- Fixed a bug where the ip_range attribute in IPAM configs was prevented
   from passing validation

 1.21.1 (2018-04-27)
@@ -285,7 +448,7 @@ Change log
   preventing Compose from recovering volume data from previous containers for
   anonymous volumes

-- Added limit for number of simulatenous parallel operations, which should
+- Added limit for number of simultaneous parallel operations, which should
   prevent accidental resource exhaustion of the server. Default is 64 and
   can be configured using the `COMPOSE_PARALLEL_LIMIT` environment variable

@@ -583,7 +746,7 @@ Change log
 ### Bugfixes

 - Volumes specified through the `--volume` flag of `docker-compose run` now
-  complement volumes declared in the service's defintion instead of replacing
+  complement volumes declared in the service's definition instead of replacing
   them

 - Fixed a bug where using multiple Compose files would unset the scale value
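The 1.23.0 "Important note" above describes the new container naming scheme. A minimal Python sketch of the format (the slug generator and its 12-character length here are illustrative stand-ins; Compose uses its own generator and a truncation helper, `truncate_id`, visible in the compose/container.py diff below):

    import uuid

    def example_container_name(project, service, index):
        # <project>_<service>_<index>_<slug>, per the 1.23.0 note above;
        # the real slug comes from Compose's own generator, this is a stand-in
        slug = uuid.uuid4().hex[:12]
        return '{}_{}_{}_{}'.format(project, service, index, slug)

    print(example_container_name('myapp', 'web', 1))  # e.g. myapp_web_1_3c1f7a9b2d4e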
Dockerfile (13 lines changed):

@@ -1,20 +1,14 @@
+FROM docker:18.06.1 as docker
 FROM python:3.6

 RUN set -ex; \
     apt-get update -qq; \
     apt-get install -y \
         locales \
-        curl \
         python-dev \
         git

-RUN curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/x86_64/docker-17.12.0-ce.tgz" && \
-    SHA256=692e1c72937f6214b1038def84463018d8e320c8eaf8530546c84c2f8f9c767d; \
-    echo "${SHA256} dockerbins.tgz" | sha256sum -c - && \
-    tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
-    mv docker /usr/local/bin/docker && \
-    chmod +x /usr/local/bin/docker && \
-    rm dockerbins.tgz
+COPY --from=docker /usr/local/bin/docker /usr/local/bin/docker

 # Python3 requires a valid locale
 RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
@@ -23,6 +17,8 @@ ENV LANG en_US.UTF-8
 RUN useradd -d /home/user -m -s /bin/bash user
 WORKDIR /code/

+# FIXME(chris-crone): virtualenv 16.3.0 breaks build, force 16.2.0 until fixed
+RUN pip install virtualenv==16.2.0
 RUN pip install tox==2.1.1

 ADD requirements.txt /code/
@@ -31,6 +27,7 @@ ADD .pre-commit-config.yaml /code/
 ADD setup.py /code/
 ADD tox.ini /code/
 ADD compose /code/compose/
+ADD README.md /code/
 RUN tox --notest

 ADD . /code/
Dockerfile.armhf:

@@ -1,55 +1,21 @@
-FROM armhf/debian:wheezy
+FROM python:3.6

 RUN set -ex; \
     apt-get update -qq; \
     apt-get install -y \
         locales \
-        gcc \
-        make \
-        zlib1g \
-        zlib1g-dev \
-        libssl-dev \
-        git \
-        ca-certificates \
-        curl \
-        libsqlite3-dev \
-        libbz2-dev \
-        ; \
-    rm -rf /var/lib/apt/lists/*
+        python-dev \
+        git

 RUN curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/armhf/docker-17.12.0-ce.tgz" && \
     SHA256=f8de6378dad825b9fd5c3c2f949e791d22f918623c27a72c84fd6975a0e5d0a2; \
     echo "${SHA256} dockerbins.tgz" | sha256sum -c - && \
     tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
     mv docker /usr/local/bin/docker && \
     chmod +x /usr/local/bin/docker && \
     rm dockerbins.tgz

-# Build Python 2.7.13 from source
-RUN set -ex; \
-    curl -L https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz | tar -xz; \
-    cd Python-2.7.13; \
-    ./configure --enable-shared; \
-    make; \
-    make install; \
-    cd ..; \
-    rm -rf /Python-2.7.13
-
-# Build python 3.6 from source
-RUN set -ex; \
-    curl -L https://www.python.org/ftp/python/3.6.4/Python-3.6.4.tgz | tar -xz; \
-    cd Python-3.6.4; \
-    ./configure --enable-shared; \
-    make; \
-    make install; \
-    cd ..; \
-    rm -rf /Python-3.6.4
-
-# Make libpython findable
-ENV LD_LIBRARY_PATH /usr/local/lib
-
-# Install pip
-RUN set -ex; \
-    curl -L https://bootstrap.pypa.io/get-pip.py | python

 # Python3 requires a valid locale
 RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
 ENV LANG en_US.UTF-8
@@ -70,4 +36,4 @@ RUN tox --notest
 ADD . /code/
 RUN chown -R user /code/

-ENTRYPOINT ["/code/.tox/py27/bin/docker-compose"]
+ENTRYPOINT ["/code/.tox/py36/bin/docker-compose"]
Dockerfile.run:

@@ -1,23 +1,19 @@
-FROM alpine:3.6
+FROM docker:18.06.1 as docker
+FROM alpine:3.8

-ENV GLIBC 2.27-r0
-ENV DOCKERBINS_SHA 1270dce1bd7e1838d62ae21d2505d87f16efc1d9074645571daaefdfd0c14054
+ENV GLIBC 2.28-r0

 RUN apk update && apk add --no-cache openssl ca-certificates curl libgcc && \
-    curl -fsSL -o /etc/apk/keys/sgerrand.rsa.pub https://raw.githubusercontent.com/sgerrand/alpine-pkg-glibc/master/sgerrand.rsa.pub && \
+    curl -fsSL -o /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub && \
     curl -fsSL -o glibc-$GLIBC.apk https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIBC/glibc-$GLIBC.apk && \
     apk add --no-cache glibc-$GLIBC.apk && \
     ln -s /lib/libz.so.1 /usr/glibc-compat/lib/ && \
     ln -s /lib/libc.musl-x86_64.so.1 /usr/glibc-compat/lib && \
     ln -s /usr/lib/libgcc_s.so.1 /usr/glibc-compat/lib && \
-    curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/x86_64/docker-17.12.1-ce.tgz" && \
-    echo "${DOCKERBINS_SHA} dockerbins.tgz" | sha256sum -c - && \
-    tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
-    mv docker /usr/local/bin/docker && \
-    chmod +x /usr/local/bin/docker && \
-    rm dockerbins.tgz /etc/apk/keys/sgerrand.rsa.pub glibc-$GLIBC.apk && \
+    rm /etc/apk/keys/sgerrand.rsa.pub glibc-$GLIBC.apk && \
     apk del curl

+COPY --from=docker /usr/local/bin/docker /usr/local/bin/docker
 COPY dist/docker-compose-Linux-x86_64 /usr/local/bin/docker-compose

 ENTRYPOINT ["docker-compose"]
Jenkinsfile (vendored, 3 lines changed):

@@ -74,10 +74,11 @@ buildImage()
 def testMatrix = [failFast: true]
 def docker_versions = get_versions(2)

-for (int i = 0 ;i < docker_versions.length ; i++) {
+for (int i = 0; i < docker_versions.length; i++) {
     def dockerVersion = docker_versions[i]
     testMatrix["${dockerVersion}_py27"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py27"])
     testMatrix["${dockerVersion}_py36"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py36"])
+    testMatrix["${dockerVersion}_py37"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py37"])
 }

 parallel(testMatrix)
MANIFEST.in:

@@ -4,8 +4,7 @@ include requirements.txt
 include requirements-dev.txt
 include tox.ini
 include *.md
-exclude README.md
-include README.rst
+include README.md
 include compose/config/*.json
 include compose/GITSHA
 recursive-include contrib/completion *
README.md:

@@ -35,7 +35,7 @@ A `docker-compose.yml` looks like this:
       image: redis

 For more information about the Compose file, see the
-[Compose file reference](https://github.com/docker/docker.github.io/blob/master/compose/compose-file/compose-versioning.md)
+[Compose file reference](https://github.com/docker/docker.github.io/blob/master/compose/compose-file/compose-versioning.md).

 Compose has commands for managing the whole lifecycle of your application:

@@ -48,9 +48,8 @@ Installation and documentation
 ------------------------------

 - Full documentation is available on [Docker's website](https://docs.docker.com/compose/).
-- If you have any questions, you can talk in real-time with other developers in the #docker-compose IRC channel on Freenode. [Click here to join using IRCCloud.](https://www.irccloud.com/invite?hostname=irc.freenode.net&channel=%23docker-compose)
-- Code repository for Compose is on [GitHub](https://github.com/docker/compose)
-- If you find any problems please fill out an [issue](https://github.com/docker/compose/issues/new)
+- Code repository for Compose is on [GitHub](https://github.com/docker/compose).
+- If you find any problems please fill out an [issue](https://github.com/docker/compose/issues/new/choose). Thank you!

 Contributing
 ------------
appveyor.yml:

@@ -10,7 +10,7 @@ install:
 build: false

 test_script:
-  - "tox -e py27,py36 -- tests/unit"
+  - "tox -e py27,py36,py37 -- tests/unit"
   - ps: ".\\script\\build\\windows.ps1"

 artifacts:
compose/__init__.py:

@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals

-__version__ = '1.22.0'
+__version__ = '1.24.0'
compose/cli/command.py:

@@ -23,7 +23,8 @@ log = logging.getLogger(__name__)


 def project_from_options(project_dir, options):
-    environment = Environment.from_env_file(project_dir)
+    override_dir = options.get('--project-directory')
+    environment = Environment.from_env_file(override_dir or project_dir)
     set_parallel_limit(environment)

     host = options.get('--host')
@@ -37,7 +38,7 @@ def project_from_options(project_dir, options):
         host=host,
         tls_config=tls_config_from_options(options, environment),
         environment=environment,
-        override_dir=options.get('--project-directory'),
+        override_dir=override_dir,
         compatibility=options.get('--compatibility'),
     )

@@ -59,12 +60,13 @@ def set_parallel_limit(environment):


 def get_config_from_options(base_dir, options):
-    environment = Environment.from_env_file(base_dir)
+    override_dir = options.get('--project-directory')
+    environment = Environment.from_env_file(override_dir or base_dir)
     config_path = get_config_path_from_options(
         base_dir, options, environment
     )
     return config.load(
-        config.find(base_dir, config_path, environment),
+        config.find(base_dir, config_path, environment, override_dir),
         options.get('--compatibility')
     )
compose/cli/errors.py:

@@ -54,7 +54,7 @@ def handle_connection_errors(client):
     except APIError as e:
         log_api_error(e, client.api_version)
         raise ConnectionError()
-    except (ReadTimeout, socket.timeout) as e:
+    except (ReadTimeout, socket.timeout):
         log_timeout_error(client.timeout)
         raise ConnectionError()
     except Exception as e:
@@ -67,7 +67,9 @@ def handle_connection_errors(client):


 def log_windows_pipe_error(exc):
-    if exc.winerror == 232:  # https://github.com/docker/compose/issues/5005
+    if exc.winerror == 2:
+        log.error("Couldn't connect to Docker daemon. You might need to start Docker for Windows.")
+    elif exc.winerror == 232:  # https://github.com/docker/compose/issues/5005
         log.error(
             "The current Compose file version is not compatible with your engine version. "
             "Please upgrade your Compose file to a more recent version, or set "
compose/cli/log_printer.py:

@@ -210,10 +210,15 @@ def start_producer_thread(thread_args):


 def watch_events(thread_map, event_stream, presenters, thread_args):
+    crashed_containers = set()
     for event in event_stream:
         if event['action'] == 'stop':
             thread_map.pop(event['id'], None)

+        if event['action'] == 'die':
+            thread_map.pop(event['id'], None)
+            crashed_containers.add(event['id'])
+
         if event['action'] != 'start':
             continue

@@ -223,10 +228,16 @@ def watch_events(thread_map, event_stream, presenters, thread_args):
             # Container was stopped and started, we need a new thread
             thread_map.pop(event['id'], None)

+        # Container crashed so we should reattach to it
+        if event['id'] in crashed_containers:
+            event['container'].attach_log_stream()
+            crashed_containers.remove(event['id'])
+
         thread_map[event['id']] = build_thread(
             event['container'],
             next(presenters),
-            *thread_args)
+            *thread_args
+        )


 def consume_queue(queue, cascade_stop):
compose/cli/main.py:

@@ -206,8 +206,8 @@ class TopLevelCommand(object):
                                 name specified in the client certificate
       --project-directory PATH  Specify an alternate working directory
                                 (default: the path of the Compose file)
-      --compatibility           If set, Compose will attempt to convert deploy
-                                keys in v3 files to their non-Swarm equivalent
+      --compatibility           If set, Compose will attempt to convert keys
+                                in v3 files to their non-Swarm equivalent

     Commands:
       build              Build or rebuild services
@@ -238,11 +238,14 @@ class TopLevelCommand(object):
       version            Show the Docker-Compose version information
     """

-    def __init__(self, project, project_dir='.', options=None):
+    def __init__(self, project, options=None):
         self.project = project
-        self.project_dir = '.'
         self.toplevel_options = options or {}

+    @property
+    def project_dir(self):
+        return self.toplevel_options.get('--project-directory') or '.'
+
     def build(self, options):
         """
         Build or rebuild services.
@@ -260,6 +263,7 @@ class TopLevelCommand(object):
             --pull                  Always attempt to pull a newer version of the image.
             -m, --memory MEM        Sets memory limit for the build container.
             --build-arg key=val     Set build-time variables for services.
+            --parallel              Build images in parallel.
         """
         service_names = options['SERVICE']
         build_args = options.get('--build-arg', None)
@@ -280,6 +284,7 @@ class TopLevelCommand(object):
             memory=options.get('--memory'),
             build_args=build_args,
             gzip=options.get('--compress', False),
+            parallel_build=options.get('--parallel', False),
         )

     def bundle(self, options):
@@ -301,7 +306,7 @@ class TopLevelCommand(object):
             -o, --output PATH          Path to write the bundle file to.
                                        Defaults to "<project name>.dab".
         """
-        compose_config = get_config_from_options(self.project_dir, self.toplevel_options)
+        compose_config = get_config_from_options('.', self.toplevel_options)

         output = options["--output"]
         if not output:
@@ -326,10 +331,12 @@ class TopLevelCommand(object):
                                      anything.
             --services               Print the service names, one per line.
             --volumes                Print the volume names, one per line.
+            --hash="*"               Print the service config hash, one per line.
+                                     Set "service1,service2" for a list of specified services
+                                     or use the wildcard symbol to display all services
         """

-        compose_config = get_config_from_options(self.project_dir, self.toplevel_options)
+        compose_config = get_config_from_options('.', self.toplevel_options)
         image_digests = None

         if options['--resolve-image-digests']:
@@ -348,6 +355,15 @@ class TopLevelCommand(object):
             print('\n'.join(volume for volume in compose_config.volumes))
             return

+        if options['--hash'] is not None:
+            h = options['--hash']
+            self.project = project_from_options('.', self.toplevel_options)
+            services = [svc for svc in options['--hash'].split(',')] if h != '*' else None
+            with errors.handle_connection_errors(self.project.client):
+                for service in self.project.get_services(services):
+                    print('{} {}'.format(service.name, service.config_hash))
+            return
+
         print(serialize_config(compose_config, image_digests))

     def create(self, options):
@@ -552,31 +568,43 @@ class TopLevelCommand(object):
         if options['--quiet']:
             for image in set(c.image for c in containers):
                 print(image.split(':')[1])
-        else:
-            headers = [
-                'Container',
-                'Repository',
-                'Tag',
-                'Image Id',
-                'Size'
-            ]
-            rows = []
-            for container in containers:
-                image_config = container.image_config
-                repo_tags = (
-                    image_config['RepoTags'][0].rsplit(':', 1) if image_config['RepoTags']
-                    else ('<none>', '<none>')
-                )
-                image_id = image_config['Id'].split(':')[1][:12]
-                size = human_readable_file_size(image_config['Size'])
-                rows.append([
-                    container.name,
-                    repo_tags[0],
-                    repo_tags[1],
-                    image_id,
-                    size
-                ])
-            print(Formatter().table(headers, rows))
+            return
+
+        def add_default_tag(img_name):
+            if ':' not in img_name.split('/')[-1]:
+                return '{}:latest'.format(img_name)
+            return img_name
+
+        headers = [
+            'Container',
+            'Repository',
+            'Tag',
+            'Image Id',
+            'Size'
+        ]
+        rows = []
+        for container in containers:
+            image_config = container.image_config
+            service = self.project.get_service(container.service)
+            index = 0
+            img_name = add_default_tag(service.image_name)
+            if img_name in image_config['RepoTags']:
+                index = image_config['RepoTags'].index(img_name)
+            repo_tags = (
+                image_config['RepoTags'][index].rsplit(':', 1) if image_config['RepoTags']
+                else ('<none>', '<none>')
+            )
+
+            image_id = image_config['Id'].split(':')[1][:12]
+            size = human_readable_file_size(image_config['Size'])
+            rows.append([
+                container.name,
+                repo_tags[0],
+                repo_tags[1],
+                image_id,
+                size
+            ])
+        print(Formatter().table(headers, rows))

     def kill(self, options):
         """
@@ -666,6 +694,7 @@ class TopLevelCommand(object):
             -q, --quiet          Only display IDs
             --services           Display services
             --filter KEY=VAL     Filter services by a property
+            -a, --all            Show all stopped containers (including those created by the run command)
         """
         if options['--quiet'] and options['--services']:
             raise UserError('--quiet and --services cannot be combined')
@@ -678,10 +707,14 @@ class TopLevelCommand(object):
             print('\n'.join(service.name for service in services))
             return

-        containers = sorted(
-            self.project.containers(service_names=options['SERVICE'], stopped=True) +
-            self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
-            key=attrgetter('name'))
+        if options['--all']:
+            containers = sorted(self.project.containers(service_names=options['SERVICE'],
+                                                        one_off=OneOffFilter.include, stopped=True))
+        else:
+            containers = sorted(
+                self.project.containers(service_names=options['SERVICE'], stopped=True) +
+                self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
+                key=attrgetter('name'))

         if options['--quiet']:
             for container in containers:
@@ -839,7 +872,7 @@ class TopLevelCommand(object):
         else:
             command = service.options.get('command')

-        container_options = build_container_options(options, detach, command)
+        container_options = build_one_off_container_options(options, detach, command)
         run_one_off_container(
             container_options, self.project, service, options,
             self.toplevel_options, self.project_dir
@@ -1085,12 +1118,15 @@ class TopLevelCommand(object):
         )

         self.project.stop(service_names=service_names, timeout=timeout)
+        if exit_value_from:
+            exit_code = compute_service_exit_code(exit_value_from, attached_containers)
+
         sys.exit(exit_code)

     @classmethod
     def version(cls, options):
         """
-        Show version informations
+        Show version information

         Usage: version [--short]

@@ -1103,33 +1139,33 @@ class TopLevelCommand(object):
     print(get_version_info('full'))


+def compute_service_exit_code(exit_value_from, attached_containers):
+    candidates = list(filter(
+        lambda c: c.service == exit_value_from,
+        attached_containers))
+    if not candidates:
+        log.error(
+            'No containers matching the spec "{0}" '
+            'were run.'.format(exit_value_from)
+        )
+        return 2
+    if len(candidates) > 1:
+        exit_values = filter(
+            lambda e: e != 0,
+            [c.inspect()['State']['ExitCode'] for c in candidates]
+        )
+
+        return exit_values[0]
+    return candidates[0].inspect()['State']['ExitCode']
+
+
 def compute_exit_code(exit_value_from, attached_containers, cascade_starter, all_containers):
     exit_code = 0
-    if exit_value_from:
-        candidates = list(filter(
-            lambda c: c.service == exit_value_from,
-            attached_containers))
-        if not candidates:
-            log.error(
-                'No containers matching the spec "{0}" '
-                'were run.'.format(exit_value_from)
-            )
-            exit_code = 2
-        elif len(candidates) > 1:
-            exit_values = filter(
-                lambda e: e != 0,
-                [c.inspect()['State']['ExitCode'] for c in candidates]
-            )
-
-            exit_code = exit_values[0]
-        else:
-            exit_code = candidates[0].inspect()['State']['ExitCode']
-    else:
-        for e in all_containers:
-            if (not e.is_running and cascade_starter == e.name):
-                if not e.exit_code == 0:
-                    exit_code = e.exit_code
-                break
+    for e in all_containers:
+        if (not e.is_running and cascade_starter == e.name):
+            if not e.exit_code == 0:
+                exit_code = e.exit_code
+            break

     return exit_code

@@ -1231,7 +1267,7 @@ def build_action_from_opts(options):
     return BuildAction.none


-def build_container_options(options, detach, command):
+def build_one_off_container_options(options, detach, command):
     container_options = {
         'command': command,
         'tty': not (detach or options['-T'] or not sys.stdin.isatty()),
@@ -1252,8 +1288,8 @@
             [""] if options['--entrypoint'] == '' else options['--entrypoint']
         )

-    if options['--rm']:
-        container_options['restart'] = None
+    # Ensure that run command remains one-off (issue #6302)
+    container_options['restart'] = None

     if options['--user']:
         container_options['user'] = options.get('--user')
@@ -1421,7 +1457,9 @@ def call_docker(args, dockeropts):
     if verify:
         tls_options.append('--tlsverify')
     if host:
-        tls_options.extend(['--host', host.lstrip('=')])
+        tls_options.extend(
+            ['--host', re.sub(r'^https?://', 'tcp://', host.lstrip('='))]
+        )

     args = [executable_path] + tls_options + args
     log.debug(" ".join(map(pipes.quote, args)))
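The `add_default_tag` helper introduced in the `images` hunk above only appends `:latest` when the last path segment of the image name has no tag, which keeps registry ports intact. A standalone run of the helper exactly as it appears in the diff:

    def add_default_tag(img_name):
        if ':' not in img_name.split('/')[-1]:
            return '{}:latest'.format(img_name)
        return img_name

    print(add_default_tag('redis'))               # redis:latest
    print(add_default_tag('redis:alpine'))        # already tagged, unchanged
    print(add_default_tag('localhost:5000/app'))  # localhost:5000/app:latest
                                                  # (the port colon is not mistaken for a tag)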
compose/config/__init__.py:

@@ -6,6 +6,7 @@ from . import environment
 from .config import ConfigurationError
 from .config import DOCKER_CONFIG_KEYS
 from .config import find
+from .config import is_url
 from .config import load
 from .config import merge_environment
 from .config import merge_labels
compose/config/config.py:

@@ -8,6 +8,7 @@ import os
 import string
 import sys
 from collections import namedtuple
+from operator import attrgetter

 import six
 import yaml
@@ -50,6 +51,7 @@ from .validation import match_named_volumes
 from .validation import validate_against_config_schema
 from .validation import validate_config_section
 from .validation import validate_cpu
+from .validation import validate_credential_spec
 from .validation import validate_depends_on
 from .validation import validate_extends_file_path
 from .validation import validate_healthcheck
@@ -91,6 +93,7 @@ DOCKER_CONFIG_KEYS = [
     'healthcheck',
     'image',
     'ipc',
+    'isolation',
     'labels',
     'links',
     'mac_address',
@@ -367,7 +370,6 @@ def check_swarm_only_config(service_dicts, compatibility=False):
         )
     if not compatibility:
         check_swarm_only_key(service_dicts, 'deploy')
-        check_swarm_only_key(service_dicts, 'credential_spec')
         check_swarm_only_key(service_dicts, 'configs')


@@ -704,6 +706,7 @@ def validate_service(service_config, service_names, config_file):
     validate_depends_on(service_config, service_names)
     validate_links(service_config, service_names)
     validate_healthcheck(service_config)
+    validate_credential_spec(service_config)

     if not service_dict.get('image') and has_uppercase(service_name):
         raise ConfigurationError(
@@ -834,6 +837,17 @@ def finalize_service_volumes(service_dict, environment):
             finalized_volumes.append(MountSpec.parse(v, normalize, win_host))
         else:
             finalized_volumes.append(VolumeSpec.parse(v, normalize, win_host))

+    duplicate_mounts = []
+    mounts = [v.as_volume_spec() if isinstance(v, MountSpec) else v for v in finalized_volumes]
+    for mount in mounts:
+        if list(map(attrgetter('internal'), mounts)).count(mount.internal) > 1:
+            duplicate_mounts.append(mount.repr())
+
+    if duplicate_mounts:
+        raise ConfigurationError("Duplicate mount points: [%s]" % (
+            ', '.join(duplicate_mounts)))
+
     service_dict['volumes'] = finalized_volumes

     return service_dict
@@ -881,6 +895,7 @@ def finalize_service(service_config, service_names, version, environment, compat
     normalize_build(service_dict, service_config.working_dir, environment)

     if compatibility:
+        service_dict = translate_credential_spec_to_security_opt(service_dict)
         service_dict, ignored_keys = translate_deploy_keys_to_container_config(
             service_dict
         )
@@ -917,6 +932,25 @@ def convert_restart_policy(name):
     raise ConfigurationError('Invalid restart policy "{}"'.format(name))


+def convert_credential_spec_to_security_opt(credential_spec):
+    if 'file' in credential_spec:
+        return 'file://{file}'.format(file=credential_spec['file'])
+    return 'registry://{registry}'.format(registry=credential_spec['registry'])
+
+
+def translate_credential_spec_to_security_opt(service_dict):
+    result = []
+
+    if 'credential_spec' in service_dict:
+        spec = convert_credential_spec_to_security_opt(service_dict['credential_spec'])
+        result.append('credentialspec={spec}'.format(spec=spec))
+
+    if result:
+        service_dict['security_opt'] = result
+
+    return service_dict
+
+
 def translate_deploy_keys_to_container_config(service_dict):
     if 'credential_spec' in service_dict:
         del service_dict['credential_spec']
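In compatibility mode, the two functions added above rewrite a service's `credential_spec` into a `security_opt` entry. A standalone run of the conversion, copied from the hunk, with a made-up file name for illustration:

    def convert_credential_spec_to_security_opt(credential_spec):
        if 'file' in credential_spec:
            return 'file://{file}'.format(file=credential_spec['file'])
        return 'registry://{registry}'.format(registry=credential_spec['registry'])

    # a file-based spec becomes a file:// security option...
    spec = convert_credential_spec_to_security_opt({'file': 'my-cred-spec.json'})
    print('credentialspec={}'.format(spec))  # credentialspec=file://my-cred-spec.json

    # ...and a registry-based spec becomes registry://
    spec = convert_credential_spec_to_security_opt({'registry': 'my-cred-spec'})
    print('credentialspec={}'.format(spec))  # credentialspec=registry://my-cred-spec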
@@ -1039,15 +1073,16 @@ def merge_service_dicts(base, override, version):
     md.merge_mapping('environment', parse_environment)
     md.merge_mapping('labels', parse_labels)
     md.merge_mapping('ulimits', parse_flat_dict)
-    md.merge_mapping('networks', parse_networks)
     md.merge_mapping('sysctls', parse_sysctls)
     md.merge_mapping('depends_on', parse_depends_on)
+    md.merge_mapping('storage_opt', parse_flat_dict)
     md.merge_sequence('links', ServiceLink.parse)
     md.merge_sequence('secrets', types.ServiceSecret.parse)
     md.merge_sequence('configs', types.ServiceConfig.parse)
     md.merge_sequence('security_opt', types.SecurityOpt.parse)
     md.merge_mapping('extra_hosts', parse_extra_hosts)

+    md.merge_field('networks', merge_networks, default={})
     for field in ['volumes', 'devices']:
         md.merge_field(field, merge_path_mappings)

@@ -1152,6 +1187,22 @@ def merge_deploy(base, override):
     return dict(md)


+def merge_networks(base, override):
+    merged_networks = {}
+    all_network_names = set(base) | set(override)
+    base = {k: {} for k in base} if isinstance(base, list) else base
+    override = {k: {} for k in override} if isinstance(override, list) else override
+    for network_name in all_network_names:
+        md = MergeDict(base.get(network_name) or {}, override.get(network_name) or {})
+        md.merge_field('aliases', merge_unique_items_lists, [])
+        md.merge_field('link_local_ips', merge_unique_items_lists, [])
+        md.merge_scalar('priority')
+        md.merge_scalar('ipv4_address')
+        md.merge_scalar('ipv6_address')
+        merged_networks[network_name] = dict(md)
+    return merged_networks
+
+
 def merge_reservations(base, override):
     md = MergeDict(base, override)
     md.merge_scalar('cpus')

@@ -1281,7 +1332,7 @@ def resolve_volume_paths(working_dir, service_dict):

 def resolve_volume_path(working_dir, volume):
     if isinstance(volume, dict):
-        if volume.get('source', '').startswith('.') and volume['type'] == 'bind':
+        if volume.get('source', '').startswith(('.', '~')) and volume['type'] == 'bind':
             volume['source'] = expand_path(working_dir, volume['source'])
         return volume
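The new `merge_networks` above accepts either the list form or the mapping form of a service's `networks` key and deep-merges per-network settings (this is the fix for merging Compose files where a network has a `None` config, per the 1.24.0 change log). A simplified sketch of the behaviour using plain dicts in place of Compose's `MergeDict`, so list-handling is faithful but list fields such as `aliases` are not deduplicated:

    def merge_networks_simplified(base, override):
        # list form ['front'] is treated as {'front': {}}, as in the hunk above
        base = {k: {} for k in base} if isinstance(base, list) else base
        override = {k: {} for k in override} if isinstance(override, list) else override
        merged = {}
        for name in set(base) | set(override):
            conf = dict(base.get(name) or {})   # 'or {}' also tolerates None configs
            conf.update(override.get(name) or {})
            merged[name] = conf
        return merged

    print(merge_networks_simplified(['front'], {'front': {'ipv4_address': '172.16.0.10'}}))
    # {'front': {'ipv4_address': '172.16.0.10'}}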
compose/config/environment.py:

@@ -5,11 +5,13 @@ import codecs
 import contextlib
 import logging
 import os
+import re

 import six

 from ..const import IS_WINDOWS_PLATFORM
 from .errors import ConfigurationError
+from .errors import EnvFileNotFound

 log = logging.getLogger(__name__)

@@ -17,10 +19,16 @@ log = logging.getLogger(__name__)
 def split_env(env):
     if isinstance(env, six.binary_type):
         env = env.decode('utf-8', 'replace')
+    key = value = None
     if '=' in env:
-        return env.split('=', 1)
+        key, value = env.split('=', 1)
     else:
-        return env, None
+        key = env
+    if re.search(r'\s', key):
+        raise ConfigurationError(
+            "environment variable name '{}' may not contains whitespace.".format(key)
+        )
+    return key, value


 def env_vars_from_file(filename):
@@ -28,16 +36,19 @@ def env_vars_from_file(filename):
     Read in a line delimited file of environment variables.
     """
     if not os.path.exists(filename):
-        raise ConfigurationError("Couldn't find env file: %s" % filename)
+        raise EnvFileNotFound("Couldn't find env file: {}".format(filename))
     elif not os.path.isfile(filename):
-        raise ConfigurationError("%s is not a file." % (filename))
+        raise EnvFileNotFound("{} is not a file.".format(filename))
     env = {}
     with contextlib.closing(codecs.open(filename, 'r', 'utf-8-sig')) as fileobj:
         for line in fileobj:
             line = line.strip()
             if line and not line.startswith('#'):
-                k, v = split_env(line)
-                env[k] = v
+                try:
+                    k, v = split_env(line)
+                    env[k] = v
+                except ConfigurationError as e:
+                    raise ConfigurationError('In file {}: {}'.format(filename, e.msg))
     return env


@@ -55,9 +66,10 @@ class Environment(dict):
             env_file_path = os.path.join(base_dir, '.env')
             try:
                 return cls(env_vars_from_file(env_file_path))
-            except ConfigurationError:
+            except EnvFileNotFound:
                 pass
             return result

         instance = _initialize()
         instance.update(os.environ)
         return instance
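The reworked `split_env` above is what enforces the 1.24.0 rule that variable names sourced from environment files may not contain whitespace. A standalone copy (with `six` removed, since the sample inputs are unicode already) showing both the accept and reject paths:

    import re

    class ConfigurationError(Exception):
        pass

    def split_env(env):
        key = value = None
        if '=' in env:
            key, value = env.split('=', 1)
        else:
            key = env
        if re.search(r'\s', key):
            raise ConfigurationError(
                "environment variable name '{}' may not contains whitespace.".format(key))
        return key, value

    print(split_env('POSTGRES_USER=admin'))  # ('POSTGRES_USER', 'admin')
    print(split_env('DEBUG'))                # ('DEBUG', None)
    try:
        split_env('MY VAR=oops')
    except ConfigurationError as e:
        print(e)                             # rejected: the name contains whitespace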
compose/config/errors.py:

@@ -19,6 +19,10 @@ class ConfigurationError(Exception):
         return self.msg


+class EnvFileNotFound(ConfigurationError):
+    pass
+
+
 class DependencyError(ConfigurationError):
     pass
compose/config/interpolation.py:

@@ -48,7 +48,7 @@ def interpolate_environment_variables(version, config, section, environment):


 def get_config_path(config_key, section, name):
-    return '{}.{}.{}'.format(section, name, config_key)
+    return '{}/{}/{}'.format(section, name, config_key)


 def interpolate_value(name, config_key, value, section, interpolator):
@@ -75,7 +75,7 @@ def interpolate_value(name, config_key, value, section, interpolator):

 def recursive_interpolate(obj, interpolator, config_path):
     def append(config_path, key):
-        return '{}.{}'.format(config_path, key)
+        return '{}/{}'.format(config_path, key)

     if isinstance(obj, six.string_types):
         return converter.convert(config_path, interpolator.interpolate(obj))
@@ -160,12 +160,12 @@ class UnsetRequiredSubstitution(Exception):
         self.err = custom_err_msg


-PATH_JOKER = '[^.]+'
+PATH_JOKER = '[^/]+'
 FULL_JOKER = '.+'


 def re_path(*args):
-    return re.compile('^{}$'.format('\.'.join(args)))
+    return re.compile('^{}$'.format('/'.join(args)))


 def re_path_basic(section, name):
@@ -288,7 +288,7 @@ class ConversionMap(object):
         except ValueError as e:
             raise ConfigurationError(
                 'Error while attempting to convert {} to appropriate type: {}'.format(
-                    path, e
+                    path.replace('/', '.'), e
                 )
             )
         return value
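These interpolation hunks swap the internal config-path separator from '.' to '/'. A plausible reading of why, given the 1.23.0 bugfix above about variable substitution failing for services "with a name containing one or more dot characters": a dotted service name made '.'-separated paths ambiguous, while '/' is not a legal service-name character (see `VALID_NAME_CHARS` in the validation.py diff below). A standalone illustration of `get_config_path` and `PATH_JOKER` from the hunk:

    import re

    def get_config_path(config_key, section, name):
        return '{}/{}/{}'.format(section, name, config_key)

    PATH_JOKER = '[^/]+'  # one path segment; dots inside the segment are fine

    path = get_config_path('ports', 'services', 'web.v2')
    print(path)  # services/web.v2/ports

    # the joker matches the dotted service name as a single segment
    print(bool(re.match('^services/{}/ports$'.format(PATH_JOKER), path)))  # True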
compose/config/serialize.py:

@@ -78,7 +78,7 @@ def denormalize_config(config, image_digests=None):
             config.version >= V3_0 and config.version < v3_introduced_name_key(key)):
             del conf['name']
         elif 'external' in conf:
-            conf['external'] = True
+            conf['external'] = bool(conf['external'])

         if 'attachable' in conf and config.version < V3_2:
             # For compatibility mode, this option is invalid in v2

compose/config/types.py:

@@ -125,7 +125,7 @@ def parse_extra_hosts(extra_hosts_config):


 def normalize_path_for_engine(path):
-    """Windows paths, c:\my\path\shiny, need to be changed to be compatible with
+    """Windows paths, c:\\my\\path\\shiny, need to be changed to be compatible with
     the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
     """
     drive, tail = splitdrive(path)
@@ -136,6 +136,20 @@ def normalize_path_for_engine(path):
     return path.replace('\\', '/')


+def normpath(path, win_host=False):
+    """ Custom path normalizer that handles Compose-specific edge cases like
+    UNIX paths on Windows hosts and vice-versa. """
+
+    sysnorm = ntpath.normpath if win_host else os.path.normpath
+    # If a path looks like a UNIX absolute path on Windows, it probably is;
+    # we'll need to revert the backslashes to forward slashes after normalization
+    flip_slashes = path.startswith('/') and IS_WINDOWS_PLATFORM
+    path = sysnorm(path)
+    if flip_slashes:
+        path = path.replace('\\', '/')
+    return path
+
+
 class MountSpec(object):
     options_map = {
         'volume': {
@@ -152,12 +166,11 @@ class MountSpec(object):

     @classmethod
     def parse(cls, mount_dict, normalize=False, win_host=False):
-        normpath = ntpath.normpath if win_host else os.path.normpath
         if mount_dict.get('source'):
             if mount_dict['type'] == 'tmpfs':
                 raise ConfigurationError('tmpfs mounts can not specify a source')

-            mount_dict['source'] = normpath(mount_dict['source'])
+            mount_dict['source'] = normpath(mount_dict['source'], win_host)
             if normalize:
                 mount_dict['source'] = normalize_path_for_engine(mount_dict['source'])

@@ -247,7 +260,7 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
         else:
             external = parts[0]
             parts = separate_next_section(parts[1])
-            external = ntpath.normpath(external)
+            external = normpath(external, True)
         internal = parts[0]
         if len(parts) > 1:
             if ':' in parts[1]:
@@ -41,15 +41,15 @@ DOCKER_CONFIG_HINTS = {
 }


-VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]'
+VALID_NAME_CHARS = r'[a-zA-Z0-9\._\-]'
 VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$'

 VALID_IPV4_SEG = r'(\d{1,2}|1\d{2}|2[0-4]\d|25[0-5])'
-VALID_IPV4_ADDR = "({IPV4_SEG}\.){{3}}{IPV4_SEG}".format(IPV4_SEG=VALID_IPV4_SEG)
-VALID_REGEX_IPV4_CIDR = "^{IPV4_ADDR}/(\d|[1-2]\d|3[0-2])$".format(IPV4_ADDR=VALID_IPV4_ADDR)
+VALID_IPV4_ADDR = r"({IPV4_SEG}\.){{3}}{IPV4_SEG}".format(IPV4_SEG=VALID_IPV4_SEG)
+VALID_REGEX_IPV4_CIDR = r"^{IPV4_ADDR}/(\d|[1-2]\d|3[0-2])$".format(IPV4_ADDR=VALID_IPV4_ADDR)

 VALID_IPV6_SEG = r'[0-9a-fA-F]{1,4}'
-VALID_REGEX_IPV6_CIDR = "".join("""
+VALID_REGEX_IPV6_CIDR = "".join(r"""
 ^
 (
     (({IPV6_SEG}:){{7}}{IPV6_SEG})|
@@ -240,6 +240,18 @@ def validate_depends_on(service_config, service_names):
         )


+def validate_credential_spec(service_config):
+    credential_spec = service_config.config.get('credential_spec')
+    if not credential_spec:
+        return
+
+    if 'registry' not in credential_spec and 'file' not in credential_spec:
+        raise ConfigurationError(
+            "Service '{s.name}' is missing 'credential_spec.file' or "
+            "credential_spec.registry'".format(s=service_config)
+        )
+
+
 def get_unsupported_config_msg(path, error_key):
     msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
     if error_key in DOCKER_CONFIG_HINTS:
@@ -330,7 +342,10 @@ def handle_generic_error(error, path):


 def parse_key_from_error_msg(error):
-    return error.message.split("'")[1]
+    try:
+        return error.message.split("'")[1]
+    except IndexError:
+        return error.message.split('(')[1].split(' ')[0].strip("'")


 def path_string(path):
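The fallback branch exists because validation error messages come in more than one shape. A runnable illustration (both sample messages are ours, chosen so each branch fires):

    def parse_key_from_error_msg(message):
        try:
            return message.split("'")[1]
        except IndexError:
            return message.split('(')[1].split(' ')[0].strip("'")

    print(parse_key_from_error_msg("'image' is a required property"))
    # -> image
    print(parse_key_from_error_msg("Additional properties are not allowed (imagee was unexpected)"))
    # -> imagee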
@@ -7,7 +7,6 @@ from .version import ComposeVersion

 DEFAULT_TIMEOUT = 10
 HTTP_TIMEOUT = 60
-IMAGE_EVENTS = ['delete', 'import', 'load', 'pull', 'push', 'save', 'tag', 'untag']
 IS_WINDOWS_PLATFORM = (sys.platform == "win32")
 LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
 LABEL_ONE_OFF = 'com.docker.compose.oneoff'
@@ -15,12 +14,14 @@ LABEL_PROJECT = 'com.docker.compose.project'
 LABEL_SERVICE = 'com.docker.compose.service'
 LABEL_NETWORK = 'com.docker.compose.network'
 LABEL_VERSION = 'com.docker.compose.version'
+LABEL_SLUG = 'com.docker.compose.slug'
 LABEL_VOLUME = 'com.docker.compose.volume'
 LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
 NANOCPUS_SCALE = 1000000000
 PARALLEL_LIMIT = 64

 SECRETS_PATH = '/run/secrets'
+WINDOWS_LONGPATH_PREFIX = '\\\\?\\'

 COMPOSEFILE_V1 = ComposeVersion('1')
 COMPOSEFILE_V2_0 = ComposeVersion('2.0')
@@ -7,9 +7,12 @@ import six
 from docker.errors import ImageNotFound

 from .const import LABEL_CONTAINER_NUMBER
+from .const import LABEL_ONE_OFF
 from .const import LABEL_PROJECT
 from .const import LABEL_SERVICE
+from .const import LABEL_SLUG
 from .const import LABEL_VERSION
+from .utils import truncate_id
 from .version import ComposeVersion
@@ -80,18 +83,36 @@ class Container(object):
     @property
     def name_without_project(self):
         if self.name.startswith('{0}_{1}'.format(self.project, self.service)):
-            return '{0}_{1}'.format(self.service, self.number)
+            return '{0}_{1}'.format(self.service, self.number if self.number is not None else self.slug)
         else:
             return self.name

     @property
     def number(self):
+        if self.one_off:
+            # One-off containers are no longer assigned numbers and use slugs instead.
+            return None
+
         number = self.labels.get(LABEL_CONTAINER_NUMBER)
         if not number:
             raise ValueError("Container {0} does not have a {1} label".format(
                 self.short_id, LABEL_CONTAINER_NUMBER))
         return int(number)

+    @property
+    def slug(self):
+        if not self.full_slug:
+            return None
+        return truncate_id(self.full_slug)
+
+    @property
+    def full_slug(self):
+        return self.labels.get(LABEL_SLUG)
+
+    @property
+    def one_off(self):
+        return self.labels.get(LABEL_ONE_OFF) == 'True'
+
     @property
     def ports(self):
         self.inspect_if_not_inspected()
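A toy reading of the label scheme these properties implement; the label keys match compose/const.py, truncate_id is copied from compose/utils.py, and the label dicts are made up:

    def truncate_id(value):
        if ':' in value:
            value = value[value.index(':') + 1:]
        return value[:12] if len(value) > 12 else value

    def describe(labels):
        if labels.get('com.docker.compose.oneoff') == 'True':
            return 'slug=' + truncate_id(labels['com.docker.compose.slug'])
        return 'number=' + labels['com.docker.compose.container-number']

    print(describe({'com.docker.compose.container-number': '1'}))
    # -> number=1
    print(describe({'com.docker.compose.oneoff': 'True',
                    'com.docker.compose.slug': '776d0dcc8f18d7e0a5e0c5b67e84d04a'}))
    # -> slug=776d0dcc8f18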
@@ -323,7 +323,12 @@ def get_networks(service_dict, network_definitions):
             'Service "{}" uses an undefined network "{}"'
             .format(service_dict['name'], name))

-    return OrderedDict(sorted(
-        networks.items(),
-        key=lambda t: t[1].get('priority') or 0, reverse=True
-    ))
+    if any([v.get('priority') for v in networks.values()]):
+        return OrderedDict(sorted(
+            networks.items(),
+            key=lambda t: t[1].get('priority') or 0, reverse=True
+        ))
+    else:
+        # Ensure Compose will pick a consistent primary network if no
+        # priority is set
+        return OrderedDict(sorted(networks.items(), key=lambda t: t[0]))
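The new else-branch makes the primary network deterministic when no priorities are set. A standalone check with made-up network definitions:

    from collections import OrderedDict

    networks = {'frontend': {}, 'backend': {}}
    if any(v.get('priority') for v in networks.values()):
        ordered = OrderedDict(sorted(
            networks.items(),
            key=lambda t: t[1].get('priority') or 0, reverse=True))
    else:
        ordered = OrderedDict(sorted(networks.items(), key=lambda t: t[0]))

    assert list(ordered) == ['backend', 'frontend']  # same on every run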
@@ -43,14 +43,17 @@ class GlobalLimit(object):
         cls.global_limiter = Semaphore(value)


-def parallel_execute_watch(events, writer, errors, results, msg, get_name):
+def parallel_execute_watch(events, writer, errors, results, msg, get_name, fail_check):
     """ Watch events from a parallel execution, update status and fill errors and results.
         Returns exception to re-raise.
     """
     error_to_reraise = None
     for obj, result, exception in events:
         if exception is None:
-            writer.write(msg, get_name(obj), 'done', green)
+            if fail_check is not None and fail_check(obj):
+                writer.write(msg, get_name(obj), 'failed', red)
+            else:
+                writer.write(msg, get_name(obj), 'done', green)
             results.append(result)
         elif isinstance(exception, ImageNotFound):
             # This is to bubble up ImageNotFound exceptions to the client so we
@@ -72,12 +75,14 @@ def parallel_execute_watch(events, writer, errors, results, msg, get_name):
     return error_to_reraise


-def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
+def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, fail_check=None):
     """Runs func on objects in parallel while ensuring that func is
     ran on object only after it is ran on all its dependencies.

     get_deps called on object must return a collection with its dependencies.
     get_name called on object must return its name.
+    fail_check is an additional failure check for cases that should display as a failure
+    in the CLI logs, but don't raise an exception (such as attempting to start 0 containers)
     """
     objects = list(objects)
     stream = get_output_stream(sys.stderr)
@@ -96,7 +101,9 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):

     errors = {}
     results = []
-    error_to_reraise = parallel_execute_watch(events, writer, errors, results, msg, get_name)
+    error_to_reraise = parallel_execute_watch(
+        events, writer, errors, results, msg, get_name, fail_check
+    )

     for obj_name, error in errors.items():
         stream.write("\nERROR: for {} {}\n".format(obj_name, error))
@@ -313,6 +320,13 @@ class ParallelStreamWriter(object):
         self._write_ansi(msg, obj_index, color_func(status))


+def get_stream_writer():
+    instance = ParallelStreamWriter.instance
+    if instance is None:
+        raise RuntimeError('ParallelStreamWriter has not yet been instantiated')
+    return instance
+
+
 def parallel_operation(containers, operation, options, message):
     parallel_execute(
         containers,
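How fail_check changes reporting can be modelled without any Docker machinery. A toy version of the watch loop (not compose itself): a successful result is shown as 'failed' when fail_check(obj) is truthy, even though nothing raised:

    def watch(events, fail_check=None):
        for obj, result, exception in events:
            if exception is None:
                if fail_check is not None and fail_check(obj):
                    print(obj, 'failed')
                else:
                    print(obj, 'done')

    watch([('web', None, None), ('db', None, None)],
          fail_check=lambda name: name == 'db')
    # web done
    # db failed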
@@ -19,12 +19,11 @@ def write_to_stream(s, stream):
 def stream_output(output, stream):
     is_terminal = hasattr(stream, 'isatty') and stream.isatty()
     stream = utils.get_output_stream(stream)
-    all_events = []
     lines = {}
     diff = 0

     for event in utils.json_stream(output):
-        all_events.append(event)
+        yield event
         is_progress_event = 'progress' in event or 'progressDetail' in event

         if not is_progress_event:
@@ -57,8 +56,6 @@ def stream_output(output, stream):

         stream.flush()

-    return all_events
-

 def print_output_event(event, stream, is_terminal):
     if 'errorDetail' in event:
@@ -101,14 +98,14 @@ def print_output_event(event, stream, is_terminal):


 def get_digest_from_pull(events):
+    digest = None
     for event in events:
         status = event.get('status')
         if not status or 'Digest' not in status:
             continue
-
-        _, digest = status.split(':', 1)
-        return digest.strip()
-    return None
+        else:
+            digest = status.split(':', 1)[1].strip()
+    return digest


 def get_digest_from_push(events):
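Since stream_output is now a generator, get_digest_from_pull scans the whole event stream instead of returning on the first match. The rewritten body, run against made-up pull events:

    def get_digest_from_pull(events):
        digest = None
        for event in events:
            status = event.get('status')
            if not status or 'Digest' not in status:
                continue
            else:
                digest = status.split(':', 1)[1].strip()
        return digest

    events = [{'status': 'Pulling from library/redis'},
              {'status': 'Digest: sha256:0123abcd'},
              {'status': 'Status: Image is up to date'}]
    assert get_digest_from_pull(events) == 'sha256:0123abcd'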
@@ -10,13 +10,13 @@ from functools import reduce
 import enum
 import six
 from docker.errors import APIError
+from docker.utils import version_lt

 from . import parallel
 from .config import ConfigurationError
 from .config.config import V1
 from .config.sort_services import get_container_name_from_network_mode
 from .config.sort_services import get_service_name_from_network_mode
-from .const import IMAGE_EVENTS
 from .const import LABEL_ONE_OFF
 from .const import LABEL_PROJECT
 from .const import LABEL_SERVICE
@@ -29,12 +29,13 @@ from .service import ContainerNetworkMode
 from .service import ContainerPidMode
 from .service import ConvergenceStrategy
 from .service import NetworkMode
+from .service import parse_repository_tag
 from .service import PidMode
 from .service import Service
 from .service import ServiceName
 from .service import ServiceNetworkMode
 from .service import ServicePidMode
 from .utils import microseconds_from_time_nano
+from .utils import truncate_string
 from .volume import ProjectVolumes
@@ -198,25 +199,6 @@ class Project(object):
                 service.remove_duplicate_containers()
         return services

-    def get_scaled_services(self, services, scale_override):
-        """
-        Returns a list of this project's services as scaled ServiceName objects.
-
-        services: a list of Service objects
-        scale_override: a dict with the scale to apply to each service (k: service_name, v: scale)
-        """
-        service_names = []
-        for service in services:
-            if service.name in scale_override:
-                scale = scale_override[service.name]
-            else:
-                scale = service.scale_num
-
-            for i in range(1, scale + 1):
-                service_names.append(ServiceName(self.name, service.name, i))
-
-        return service_names
-
     def get_links(self, service_dict):
         links = []
         if 'links' in service_dict:
@@ -298,6 +280,7 @@ class Project(object):
             operator.attrgetter('name'),
             'Starting',
             get_deps,
+            fail_check=lambda obj: not obj.containers(),
         )

         return containers
@@ -372,13 +355,36 @@ class Project(object):
         return containers

     def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, memory=None,
-              build_args=None, gzip=False):
+              build_args=None, gzip=False, parallel_build=False):
+
+        services = []
         for service in self.get_services(service_names):
             if service.can_be_built():
-                service.build(no_cache, pull, force_rm, memory, build_args, gzip)
+                services.append(service)
             else:
                 log.info('%s uses an image, skipping' % service.name)

+        def build_service(service):
+            service.build(no_cache, pull, force_rm, memory, build_args, gzip)
+
+        if parallel_build:
+            _, errors = parallel.parallel_execute(
+                services,
+                build_service,
+                operator.attrgetter('name'),
+                'Building',
+                limit=5,
+            )
+            if len(errors):
+                combined_errors = '\n'.join([
+                    e.decode('utf-8') if isinstance(e, six.binary_type) else e for e in errors.values()
+                ])
+                raise ProjectError(combined_errors)
+
+        else:
+            for service in services:
+                build_service(service)
+
     def create(
             self,
             service_names=None,
@@ -397,11 +403,13 @@ class Project(object):
             detached=True,
             start=False)

-    def events(self, service_names=None):
+    def _legacy_event_processor(self, service_names):
+        # Only for v1 files or when Compose is forced to use an older API version
         def build_container_event(event, container):
             time = datetime.datetime.fromtimestamp(event['time'])
             time = time.replace(
-                microsecond=microseconds_from_time_nano(event['timeNano']))
+                microsecond=microseconds_from_time_nano(event['timeNano'])
+            )
             return {
                 'time': time,
                 'type': 'container',
@@ -420,17 +428,15 @@ class Project(object):
             filters={'label': self.labels()},
             decode=True
         ):
-            # The first part of this condition is a guard against some events
-            # broadcasted by swarm that don't have a status field.
+            # This is a guard against some events broadcasted by swarm that
+            # don't have a status field.
             # See https://github.com/docker/compose/issues/3316
-            if 'status' not in event or event['status'] in IMAGE_EVENTS:
-                # We don't receive any image events because labels aren't applied
-                # to images
+            if 'status' not in event:
                 continue

             # TODO: get labels from the API v1.22 , see github issue 2618
             try:
-                # this can fail if the container has been removed
+                # this can fail if the container has been removed or if the event
+                # refers to an image
                 container = Container.from_id(self.client, event['id'])
             except APIError:
                 continue
@@ -438,6 +444,56 @@ class Project(object):
                 continue
             yield build_container_event(event, container)

+    def events(self, service_names=None):
+        if version_lt(self.client.api_version, '1.22'):
+            # New, better event API was introduced in 1.22.
+            return self._legacy_event_processor(service_names)
+
+        def build_container_event(event):
+            container_attrs = event['Actor']['Attributes']
+            time = datetime.datetime.fromtimestamp(event['time'])
+            time = time.replace(
+                microsecond=microseconds_from_time_nano(event['timeNano'])
+            )
+
+            container = None
+            try:
+                container = Container.from_id(self.client, event['id'])
+            except APIError:
+                # Container may have been removed (e.g. if this is a destroy event)
+                pass
+
+            return {
+                'time': time,
+                'type': 'container',
+                'action': event['status'],
+                'id': event['Actor']['ID'],
+                'service': container_attrs.get(LABEL_SERVICE),
+                'attributes': dict([
+                    (k, v) for k, v in container_attrs.items()
+                    if not k.startswith('com.docker.compose.')
+                ]),
+                'container': container,
+            }
+
+        def yield_loop(service_names):
+            for event in self.client.events(
+                filters={'label': self.labels()},
+                decode=True
+            ):
+                # TODO: support other event types
+                if event.get('Type') != 'container':
+                    continue
+
+                try:
+                    if event['Actor']['Attributes'][LABEL_SERVICE] not in service_names:
+                        continue
+                except KeyError:
+                    continue
+                yield build_container_event(event)
+
+        return yield_loop(set(service_names) if service_names else self.service_names)
+
     def up(self,
            service_names=None,
            start_deps=True,
@@ -471,7 +527,6 @@ class Project(object):
                 svc.ensure_image_exists(do_build=do_build, silent=silent)
         plans = self._get_convergence_plans(
             services, strategy, always_recreate_deps=always_recreate_deps)
-        scaled_services = self.get_scaled_services(services, scale_override)

         def do(service):
@@ -482,7 +537,6 @@ class Project(object):
                 scale_override=scale_override.get(service.name),
                 rescale=rescale,
                 start=start,
-                project_services=scaled_services,
                 reset_container_image=reset_container_image,
                 renew_anonymous_volumes=renew_anonymous_volumes,
             )
@@ -548,16 +602,35 @@ class Project(object):
     def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False,
              include_deps=False):
         services = self.get_services(service_names, include_deps)
+        msg = not silent and 'Pulling' or None

         if parallel_pull:
             def pull_service(service):
-                service.pull(ignore_pull_failures, True)
+                strm = service.pull(ignore_pull_failures, True, stream=True)
+                if strm is None:  # Attempting to pull service with no `image` key is a no-op
+                    return
+
+                writer = parallel.get_stream_writer()
+
+                for event in strm:
+                    if 'status' not in event:
+                        continue
+                    status = event['status'].lower()
+                    if 'progressDetail' in event:
+                        detail = event['progressDetail']
+                        if 'current' in detail and 'total' in detail:
+                            percentage = float(detail['current']) / float(detail['total'])
+                            status = '{} ({:.1%})'.format(status, percentage)
+
+                    writer.write(
+                        msg, service.name, truncate_string(status), lambda s: s
+                    )

             _, errors = parallel.parallel_execute(
                 services,
                 pull_service,
                 operator.attrgetter('name'),
-                not silent and 'Pulling' or None,
+                msg,
                 limit=5,
             )
             if len(errors):
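The status line built in pull_service relies on Python's percent formatting. A standalone check with a sample docker pull progress event (the event payload is illustrative):

    event = {'status': 'Downloading',
             'progressDetail': {'current': 450, 'total': 1000}}

    status = event['status'].lower()
    detail = event['progressDetail']
    if 'current' in detail and 'total' in detail:
        percentage = float(detail['current']) / float(detail['total'])
        status = '{} ({:.1%})'.format(status, percentage)

    assert status == 'downloading (45.0%)'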
@@ -571,8 +644,15 @@ class Project(object):
                 service.pull(ignore_pull_failures, silent=silent)

     def push(self, service_names=None, ignore_push_failures=False):
+        unique_images = set()
         for service in self.get_services(service_names, include_deps=False):
-            service.push(ignore_push_failures)
+            # Considering <image> and <image:latest> as the same
+            repo, tag, sep = parse_repository_tag(service.image_name)
+            service_image_name = sep.join((repo, tag)) if tag else sep.join((repo, 'latest'))
+
+            if service_image_name not in unique_images:
+                service.push(ignore_push_failures)
+                unique_images.add(service_image_name)

     def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
         ctnrs = list(filter(None, [
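The de-duplication hinges on <image> and <image:latest> normalizing to the same key. A sketch with a simplified stand-in for parse_repository_tag (the real one in compose/service.py also understands digest references):

    def parse_repository_tag(name):
        if ':' in name.rsplit('/', 1)[-1]:
            repo, tag = name.rsplit(':', 1)
            return repo, tag, ':'
        return name, '', ':'

    def normalize(name):
        repo, tag, sep = parse_repository_tag(name)
        return sep.join((repo, tag)) if tag else sep.join((repo, 'latest'))

    assert normalize('redis') == normalize('redis:latest') == 'redis:latest'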
@@ -27,6 +27,7 @@ from . import __version__
 from . import const
 from . import progress_stream
 from .config import DOCKER_CONFIG_KEYS
+from .config import is_url
 from .config import merge_environment
 from .config import merge_labels
 from .config.errors import DependencyError
@@ -40,8 +41,10 @@ from .const import LABEL_CONTAINER_NUMBER
 from .const import LABEL_ONE_OFF
 from .const import LABEL_PROJECT
 from .const import LABEL_SERVICE
+from .const import LABEL_SLUG
 from .const import LABEL_VERSION
 from .const import NANOCPUS_SCALE
+from .const import WINDOWS_LONGPATH_PREFIX
 from .container import Container
 from .errors import HealthCheckFailed
 from .errors import NoHealthCheckConfigured
@@ -49,9 +52,12 @@ from .errors import OperationFailedError
 from .parallel import parallel_execute
 from .progress_stream import stream_output
 from .progress_stream import StreamOutputError
+from .utils import generate_random_id
 from .utils import json_hash
 from .utils import parse_bytes
 from .utils import parse_seconds_float
+from .utils import truncate_id
+from .utils import unique_everseen


 log = logging.getLogger(__name__)
@@ -80,6 +86,7 @@ HOST_CONFIG_KEYS = [
     'group_add',
     'init',
     'ipc',
     'isolation',
+    'read_only',
     'log_driver',
     'log_opt',
@@ -192,7 +199,9 @@ class Service(object):
     def __repr__(self):
         return '<Service: {}>'.format(self.name)

-    def containers(self, stopped=False, one_off=False, filters={}, labels=None):
+    def containers(self, stopped=False, one_off=False, filters=None, labels=None):
+        if filters is None:
+            filters = {}
         filters.update({'label': self.labels(one_off=one_off) + (labels or [])})

         result = list(filter(None, [
@@ -219,7 +228,6 @@ class Service(object):
         """Return a :class:`compose.container.Container` for this service. The
         container must be active, and match `number`.
         """
-
         for container in self.containers(labels=['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]):
             return container
@@ -283,7 +291,7 @@ class Service(object):
             c for c in stopped_containers if self._containers_have_diverged([c])
         ]
         for c in divergent_containers:
-                c.remove()
+            c.remove()

         all_containers = list(set(all_containers) - set(divergent_containers))
@@ -425,74 +433,78 @@ class Service(object):

         return has_diverged

-    def _execute_convergence_create(self, scale, detached, start, project_services=None):
-        i = self._next_container_number()
+    def _execute_convergence_create(self, scale, detached, start):
+
+        i = self._next_container_number()

         def create_and_start(service, n):
             container = service.create_container(number=n, quiet=True)
             if not detached:
                 container.attach_log_stream()
             if start:
                 self.start_container(container)
             return container

         containers, errors = parallel_execute(
-            [ServiceName(self.project, self.name, index) for index in range(i, i + scale)],
+            [
+                ServiceName(self.project, self.name, index)
+                for index in range(i, i + scale)
+            ],
             lambda service_name: create_and_start(self, service_name.number),
             lambda service_name: self.get_container_name(service_name.service, service_name.number),
             "Creating"
         )
         for error in errors.values():
             raise OperationFailedError(error)

         return containers

     def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
                                       renew_anonymous_volumes):
         if scale is not None and len(containers) > scale:
             self._downscale(containers[scale:], timeout)
             containers = containers[:scale]

         def recreate(container):
             return self.recreate_container(
                 container, timeout=timeout, attach_logs=not detached,
                 start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
             )
         containers, errors = parallel_execute(
             containers,
             recreate,
             lambda c: c.name,
             "Recreating",
         )
         for error in errors.values():
             raise OperationFailedError(error)

         if scale is not None and len(containers) < scale:
             containers.extend(self._execute_convergence_create(
                 scale - len(containers), detached, start
             ))
         return containers

     def _execute_convergence_start(self, containers, scale, timeout, detached, start):
         if scale is not None and len(containers) > scale:
             self._downscale(containers[scale:], timeout)
             containers = containers[:scale]
         if start:
             _, errors = parallel_execute(
                 containers,
                 lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
                 lambda c: c.name,
                 "Starting",
             )

             for error in errors.values():
                 raise OperationFailedError(error)

         if scale is not None and len(containers) < scale:
             containers.extend(self._execute_convergence_create(
                 scale - len(containers), detached, start
             ))
         return containers

     def _downscale(self, containers, timeout=None):
         def stop_and_remove(container):
@@ -508,8 +520,8 @@ class Service(object):

     def execute_convergence_plan(self, plan, timeout=None, detached=False,
                                  start=True, scale_override=None,
-                                 rescale=True, project_services=None,
-                                 reset_container_image=False, renew_anonymous_volumes=False):
+                                 rescale=True, reset_container_image=False,
+                                 renew_anonymous_volumes=False):
         (action, containers) = plan
         scale = scale_override if scale_override is not None else self.scale_num
         containers = sorted(containers, key=attrgetter('number'))
@@ -518,7 +530,7 @@ class Service(object):

         if action == 'create':
             return self._execute_convergence_create(
-                scale, detached, start, project_services
+                scale, detached, start
             )

         # The create action needs always needs an initial scale, but otherwise,
@@ -568,7 +580,7 @@ class Service(object):
             container.rename_to_tmp_name()
         new_container = self.create_container(
             previous_container=container if not renew_anonymous_volumes else None,
-            number=container.labels.get(LABEL_CONTAINER_NUMBER),
+            number=container.number,
             quiet=True,
         )
         if attach_logs:
@@ -656,9 +668,15 @@ class Service(object):
         return json_hash(self.config_dict())

     def config_dict(self):
+        def image_id():
+            try:
+                return self.image()['Id']
+            except NoSuchImageError:
+                return None
+
         return {
             'options': self.options,
-            'image_id': self.image()['Id'],
+            'image_id': image_id(),
             'links': self.get_link_names(),
             'net': self.network_mode.id,
             'networks': self.networks,
@@ -717,19 +735,19 @@ class Service(object):
     def get_volumes_from_names(self):
         return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)]

-    # TODO: this would benefit from github.com/docker/docker/pull/14699
-    # to remove the need to inspect every container
     def _next_container_number(self, one_off=False):
+        if one_off:
+            return None
         containers = itertools.chain(
             self._fetch_containers(
                 all=True,
-                filters={'label': self.labels(one_off=one_off)}
+                filters={'label': self.labels(one_off=False)}
             ), self._fetch_containers(
                 all=True,
-                filters={'label': self.labels(one_off=one_off, legacy=True)}
+                filters={'label': self.labels(one_off=False, legacy=True)}
             )
         )
-        numbers = [c.number for c in containers]
+        numbers = [c.number for c in containers if c.number is not None]
         return 1 if not numbers else max(numbers) + 1

     def _fetch_containers(self, **fetch_options):
@@ -807,6 +825,7 @@ class Service(object):
             one_off=False,
             previous_container=None):
         add_config_hash = (not one_off and not override_options)
+        slug = generate_random_id() if one_off else None

         container_options = dict(
             (k, self.options[k])
@@ -815,7 +834,7 @@ class Service(object):
         container_options.update(override_options)

         if not container_options.get('name'):
-            container_options['name'] = self.get_container_name(self.name, number, one_off)
+            container_options['name'] = self.get_container_name(self.name, number, slug)

         container_options.setdefault('detach', True)
@@ -867,7 +886,9 @@ class Service(object):
             container_options.get('labels', {}),
             self.labels(one_off=one_off),
             number,
-            self.config_hash if add_config_hash else None)
+            self.config_hash if add_config_hash else None,
+            slug
+        )

         # Delete options which are only used in HostConfig
         for key in HOST_CONFIG_KEYS:
@@ -924,8 +945,9 @@ class Service(object):
             override_options['mounts'] = override_options.get('mounts') or []
             override_options['mounts'].extend([build_mount(v) for v in secret_volumes])

-        # Remove possible duplicates (see e.g. https://github.com/docker/compose/issues/5885)
-        override_options['binds'] = list(set(binds))
+        # Remove possible duplicates (see e.g. https://github.com/docker/compose/issues/5885).
+        # unique_everseen preserves order. (see https://github.com/docker/compose/issues/6091).
+        override_options['binds'] = list(unique_everseen(binds))
         return container_options, override_options

     def _get_container_host_config(self, override_options, one_off=False):
@@ -1033,12 +1055,7 @@ class Service(object):
         for k, v in self._parse_proxy_config().items():
             build_args.setdefault(k, v)

-        # python2 os.stat() doesn't support unicode on some UNIX, so we
-        # encode it to a bytestring to be safe
-        path = build_opts.get('context')
-        if not six.PY3 and not IS_WINDOWS_PLATFORM:
-            path = path.encode('utf8')
-
+        path = rewrite_build_path(build_opts.get('context'))
         if self.platform and version_lt(self.client.api_version, '1.35'):
             raise OperationFailedError(
                 'Impossible to perform platform-targeted builds for API version < 1.35'
@@ -1068,7 +1085,7 @@ class Service(object):
         )

         try:
-            all_events = stream_output(build_output, sys.stdout)
+            all_events = list(stream_output(build_output, sys.stdout))
         except StreamOutputError as e:
             raise BuildError(self, six.text_type(e))
@@ -1105,12 +1122,12 @@ class Service(object):
     def custom_container_name(self):
         return self.options.get('container_name')

-    def get_container_name(self, service_name, number, one_off=False):
-        if self.custom_container_name and not one_off:
+    def get_container_name(self, service_name, number, slug=None):
+        if self.custom_container_name and slug is None:
             return self.custom_container_name

         container_name = build_container_name(
-            self.project, service_name, number, one_off,
+            self.project, service_name, number, slug,
         )
         ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
         if container_name in ext_links_origins:
@@ -1131,6 +1148,9 @@ class Service(object):
         try:
             self.client.remove_image(self.image_name)
             return True
+        except ImageNotFound:
+            log.warning("Image %s not found.", self.image_name)
+            return False
         except APIError as e:
             log.error("Failed to remove image for service %s: %s", self.name, e)
             return False
@@ -1162,7 +1182,23 @@ class Service(object):

         return any(has_host_port(binding) for binding in self.options.get('ports', []))

-    def pull(self, ignore_pull_failures=False, silent=False):
+    def _do_pull(self, repo, pull_kwargs, silent, ignore_pull_failures):
+        try:
+            output = self.client.pull(repo, **pull_kwargs)
+            if silent:
+                with open(os.devnull, 'w') as devnull:
+                    for event in stream_output(output, devnull):
+                        yield event
+            else:
+                for event in stream_output(output, sys.stdout):
+                    yield event
+        except (StreamOutputError, NotFound) as e:
+            if not ignore_pull_failures:
+                raise
+            else:
+                log.error(six.text_type(e))
+
+    def pull(self, ignore_pull_failures=False, silent=False, stream=False):
         if 'image' not in self.options:
             return
@@ -1179,20 +1215,11 @@ class Service(object):
             raise OperationFailedError(
                 'Impossible to perform platform-targeted pulls for API version < 1.35'
             )
-        try:
-            output = self.client.pull(repo, **kwargs)
-            if silent:
-                with open(os.devnull, 'w') as devnull:
-                    return progress_stream.get_digest_from_pull(
-                        stream_output(output, devnull))
-            else:
-                return progress_stream.get_digest_from_pull(
-                    stream_output(output, sys.stdout))
-        except (StreamOutputError, NotFound) as e:
-            if not ignore_pull_failures:
-                raise
-            else:
-                log.error(six.text_type(e))
+
+        event_stream = self._do_pull(repo, kwargs, silent, ignore_pull_failures)
+        if stream:
+            return event_stream
+        return progress_stream.get_digest_from_pull(event_stream)

     def push(self, ignore_push_failures=False):
         if 'image' not in self.options or 'build' not in self.options:
@@ -1360,11 +1387,13 @@ class ServiceNetworkMode(object):
 # Names


-def build_container_name(project, service, number, one_off=False):
+def build_container_name(project, service, number, slug=None):
     bits = [project.lstrip('-_'), service]
-    if one_off:
-        bits.append('run')
-    return '_'.join(bits + [str(number)])
+    if slug:
+        bits.extend(['run', truncate_id(slug)])
+    else:
+        bits.append(str(number))
+    return '_'.join(bits)


 # Images
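Names produced by the new scheme, using the function body from the hunk plus truncate_id from compose/utils.py (project and service names are made up):

    def truncate_id(value):
        if ':' in value:
            value = value[value.index(':') + 1:]
        return value[:12] if len(value) > 12 else value

    def build_container_name(project, service, number, slug=None):
        bits = [project.lstrip('-_'), service]
        if slug:
            bits.extend(['run', truncate_id(slug)])
        else:
            bits.append(str(number))
        return '_'.join(bits)

    assert build_container_name('myapp', 'web', 1) == 'myapp_web_1'
    assert build_container_name('myapp', 'web', None, slug='0123456789abcdef') == 'myapp_web_run_0123456789ab'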
@@ -1407,7 +1436,7 @@ def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
     """
     affinity = {}

-    volume_bindings = dict(
+    volume_bindings = OrderedDict(
         build_volume_binding(volume)
         for volume in volumes
         if volume.external
@@ -1467,6 +1496,11 @@ def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_o
         if not mount.get('Name'):
             continue

+        # Volume (probably an image volume) is overridden by a mount in the service's config
+        # and would cause a duplicate mountpoint error
+        if volume.internal in [m.target for m in mounts_option]:
+            continue
+
         # Copy existing volume from old container
         volume = volume._replace(external=mount['Name'])
         volumes.append(volume)
@@ -1545,10 +1579,13 @@ def build_mount(mount_spec):
 # Labels


-def build_container_labels(label_options, service_labels, number, config_hash):
+def build_container_labels(label_options, service_labels, number, config_hash, slug):
     labels = dict(label_options or {})
     labels.update(label.split('=', 1) for label in service_labels)
-    labels[LABEL_CONTAINER_NUMBER] = str(number)
+    if number is not None:
+        labels[LABEL_CONTAINER_NUMBER] = str(number)
+    if slug is not None:
+        labels[LABEL_SLUG] = slug
     labels[LABEL_VERSION] = __version__

     if config_hash:
@@ -1637,3 +1674,15 @@ def convert_blkio_config(blkio_config):
         arr.append(dict([(k.capitalize(), v) for k, v in item.items()]))
         result[field] = arr
     return result
+
+
+def rewrite_build_path(path):
+    # python2 os.stat() doesn't support unicode on some UNIX, so we
+    # encode it to a bytestring to be safe
+    if not six.PY3 and not IS_WINDOWS_PLATFORM:
+        path = path.encode('utf8')
+
+    if IS_WINDOWS_PLATFORM and not is_url(path) and not path.startswith(WINDOWS_LONGPATH_PREFIX):
+        path = WINDOWS_LONGPATH_PREFIX + os.path.normpath(path)
+
+    return path
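What the long-path prefix does to a Windows build context, checked with ntpath so the demo runs anywhere (the real code uses os.path.normpath on a Windows host; the path is illustrative):

    import ntpath

    WINDOWS_LONGPATH_PREFIX = '\\\\?\\'

    path = 'C:\\code\\myapp'
    if not path.startswith(WINDOWS_LONGPATH_PREFIX):
        path = WINDOWS_LONGPATH_PREFIX + ntpath.normpath(path)

    assert path == '\\\\?\\C:\\code\\myapp'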
@@ -3,10 +3,10 @@ from __future__ import unicode_literals

 import codecs
 import hashlib
 import json
 import json.decoder
 import logging
 import ntpath
+import random

 import six
 from docker.errors import DockerException
@@ -151,3 +151,37 @@ def unquote_path(s):
     if s[0] == '"' and s[-1] == '"':
         return s[1:-1]
     return s
+
+
+def generate_random_id():
+    while True:
+        val = hex(random.getrandbits(32 * 8))[2:-1]
+        try:
+            int(truncate_id(val))
+            continue
+        except ValueError:
+            return val
+
+
+def truncate_id(value):
+    if ':' in value:
+        value = value[value.index(':') + 1:]
+    if len(value) > 12:
+        return value[:12]
+    return value
+
+
+def unique_everseen(iterable, key=lambda x: x):
+    "List unique elements, preserving order. Remember all elements ever seen."
+    seen = set()
+    for element in iterable:
+        unique_key = key(element)
+        if unique_key not in seen:
+            seen.add(unique_key)
+            yield element
+
+
+def truncate_string(s, max_chars=35):
+    if len(s) > max_chars:
+        return s[:max_chars - 2] + '...'
+    return s
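Quick checks of the new helpers, with bodies copied from the hunk. Unlike list(set(...)), unique_everseen preserves order, which is exactly what the binds fix in service.py needs:

    def unique_everseen(iterable, key=lambda x: x):
        seen = set()
        for element in iterable:
            unique_key = key(element)
            if unique_key not in seen:
                seen.add(unique_key)
                yield element

    def truncate_string(s, max_chars=35):
        if len(s) > max_chars:
            return s[:max_chars - 2] + '...'
        return s

    assert list(unique_everseen(['/a:/a', '/b:/b', '/a:/a'])) == ['/a:/a', '/b:/b']
    assert truncate_string('x' * 40) == 'x' * 33 + '...'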
@@ -114,7 +114,7 @@ _docker_compose_build() {

    case "$cur" in
        -*)
-            COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull" -- "$cur" ) )
+            COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull --parallel" -- "$cur" ) )
            ;;
        *)
            __docker_compose_complete_services --filter source=build
@@ -136,7 +136,18 @@ _docker_compose_bundle() {


 _docker_compose_config() {
-    COMPREPLY=( $( compgen -W "--help --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) )
+    case "$prev" in
+        --hash)
+            if [[ $cur == \\* ]] ; then
+                COMPREPLY=( '\*' )
+            else
+                COMPREPLY=( $(compgen -W "$(__docker_compose_services) \\\* " -- "$cur") )
+            fi
+            return
+            ;;
+    esac
+
+    COMPREPLY=( $( compgen -W "--hash --help --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) )
 }
@@ -350,7 +361,7 @@ _docker_compose_ps() {

    case "$cur" in
        -*)
-            COMPREPLY=( $( compgen -W "--help --quiet -q --services --filter" -- "$cur" ) )
+            COMPREPLY=( $( compgen -W "--all -a --filter --help --quiet -q --services" -- "$cur" ) )
            ;;
        *)
            __docker_compose_complete_services
contrib/completion/zsh/_docker-compose — 167 changes (Normal file → Executable file)
@@ -23,7 +23,7 @@ __docker-compose_all_services_in_compose_file() {
    local already_selected
    local -a services
    already_selected=$(echo $words | tr " " "|")
-    __docker-compose_q config --services \
+    __docker-compose_q ps --services "$@" \
        | grep -Ev "^(${already_selected})$"
 }
@@ -31,125 +31,42 @@ __docker-compose_all_services_in_compose_file() {
 __docker-compose_services_all() {
    [[ $PREFIX = -* ]] && return 1
    integer ret=1
-    services=$(__docker-compose_all_services_in_compose_file)
+    services=$(__docker-compose_all_services_in_compose_file "$@")
    _alternative "args:services:($services)" && ret=0

    return ret
 }

-# All services that have an entry with the given key in their docker-compose.yml section
-__docker-compose_services_with_key() {
-    local already_selected
-    local -a buildable
-    already_selected=$(echo $words | tr " " "|")
-    # flatten sections to one line, then filter lines containing the key and return section name.
-    __docker-compose_q config \
-        | sed -n -e '/^services:/,/^[^ ]/p' \
-        | sed -n 's/^  //p' \
-        | awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' \
-        | grep " \+$1:" \
-        | cut -d: -f1 \
-        | grep -Ev "^(${already_selected})$"
-}
-
 # All services that are defined by a Dockerfile reference
 __docker-compose_services_from_build() {
    [[ $PREFIX = -* ]] && return 1
-    integer ret=1
-    buildable=$(__docker-compose_services_with_key build)
-    _alternative "args:buildable services:($buildable)" && ret=0
-
-    return ret
+    __docker-compose_services_all --filter source=build
 }

 # All services that are defined by an image
 __docker-compose_services_from_image() {
    [[ $PREFIX = -* ]] && return 1
-    integer ret=1
-    pullable=$(__docker-compose_services_with_key image)
-    _alternative "args:pullable services:($pullable)" && ret=0
-
-    return ret
-}
-
-__docker-compose_get_services() {
-    [[ $PREFIX = -* ]] && return 1
-    integer ret=1
-    local kind
-    declare -a running paused stopped lines args services
-
-    docker_status=$(docker ps > /dev/null 2>&1)
-    if [ $? -ne 0 ]; then
-        _message "Error! Docker is not running."
-        return 1
-    fi
-
-    kind=$1
-    shift
-    [[ $kind =~ (stopped|all) ]] && args=($args -a)
-
-    lines=(${(f)"$(_call_program commands docker $docker_options ps --format 'table' $args)"})
-    services=(${(f)"$(_call_program commands docker-compose 2>/dev/null $compose_options ps -q)"})
-
-    # Parse header line to find columns
-    local i=1 j=1 k header=${lines[1]}
-    declare -A begin end
-    while (( j < ${#header} - 1 )); do
-        i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
-        j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 ))
-        k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 ))
-        begin[${header[$i,$((j-1))]}]=$i
-        end[${header[$i,$((j-1))]}]=$k
-    done
-    lines=(${lines[2,-1]})
-
-    # Container ID
-    local line s name
-    local -a names
-    for line in $lines; do
-        if [[ ${services[@]} == *"${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"* ]]; then
-            names=(${(ps:,:)${${line[${begin[NAMES]},-1]}%% *}})
-            for name in $names; do
-                s="${${name%_*}#*_}:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}"
-                s="$s, ${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"
-                s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}"
-                if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then
-                    stopped=($stopped $s)
-                else
-                    if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = *\(Paused\)* ]]; then
-                        paused=($paused $s)
-                    fi
-                    running=($running $s)
-                fi
-            done
-        fi
-    done
-
-    [[ $kind =~ (running|all) ]] && _describe -t services-running "running services" running "$@" && ret=0
-    [[ $kind =~ (paused|all) ]] && _describe -t services-paused "paused services" paused "$@" && ret=0
-    [[ $kind =~ (stopped|all) ]] && _describe -t services-stopped "stopped services" stopped "$@" && ret=0
-
-    return ret
+    __docker-compose_services_all --filter source=image
 }

 __docker-compose_pausedservices() {
    [[ $PREFIX = -* ]] && return 1
-    __docker-compose_get_services paused "$@"
+    __docker-compose_services_all --filter status=paused
 }

 __docker-compose_stoppedservices() {
    [[ $PREFIX = -* ]] && return 1
-    __docker-compose_get_services stopped "$@"
+    __docker-compose_services_all --filter status=stopped
 }

 __docker-compose_runningservices() {
    [[ $PREFIX = -* ]] && return 1
-    __docker-compose_get_services running "$@"
+    __docker-compose_services_all --filter status=running
 }

 __docker-compose_services() {
    [[ $PREFIX = -* ]] && return 1
-    __docker-compose_get_services all "$@"
+    __docker-compose_services_all
 }

 __docker-compose_caching_policy() {
@@ -196,9 +113,11 @@ __docker-compose_subcommand() {
            $opts_help \
            "*--build-arg=[Set build-time variables for one service.]:<varname>=<value>: " \
            '--force-rm[Always remove intermediate containers.]' \
-            '--memory[Memory limit for the build container.]' \
+            '(--memory -m)'{--memory,-m}'[Memory limit for the build container.]' \
            '--no-cache[Do not use cache when building the image.]' \
            '--pull[Always attempt to pull a newer version of the image.]' \
            '--compress[Compress the build context using gzip.]' \
+            '--parallel[Build images in parallel.]' \
            '*:services:__docker-compose_services_from_build' && ret=0
        ;;
        (bundle)
@@ -213,7 +132,8 @@ __docker-compose_subcommand() {
            '(--quiet -q)'{--quiet,-q}"[Only validate the configuration, don't print anything.]" \
            '--resolve-image-digests[Pin image tags to digests.]' \
            '--services[Print the service names, one per line.]' \
-            '--volumes[Print the volume names, one per line.]' && ret=0
+            '--volumes[Print the volume names, one per line.]' \
+            '--hash[Print the service config hash, one per line. Set "service1,service2" for a list of specified services.]' \ && ret=0
        ;;
        (create)
            _arguments \
@@ -222,11 +142,12 @@ __docker-compose_subcommand() {
            $opts_no_recreate \
            $opts_no_build \
            "(--no-build)--build[Build images before creating containers.]" \
-            '*:services:__docker-compose_services_all' && ret=0
+            '*:services:__docker-compose_services' && ret=0
        ;;
        (down)
            _arguments \
                $opts_help \
                $opts_timeout \
+                "--rmi[Remove images. Type must be one of: 'all': Remove all images used by any service. 'local': Remove only images that don't have a custom tag set by the \`image\` field.]:type:(all local)" \
                '(-v --volumes)'{-v,--volumes}"[Remove named volumes declared in the \`volumes\` section of the Compose file and anonymous volumes attached to containers.]" \
                $opts_remove_orphans && ret=0
@@ -235,16 +156,18 @@ __docker-compose_subcommand() {
            _arguments \
                $opts_help \
                '--json[Output events as a stream of json objects]' \
-                '*:services:__docker-compose_services_all' && ret=0
+                '*:services:__docker-compose_services' && ret=0
        ;;
        (exec)
            _arguments \
                $opts_help \
                '-d[Detached mode: Run command in the background.]' \
                '--privileged[Give extended privileges to the process.]' \
                '(-u --user)'{-u,--user=}'[Run the command as this user.]:username:_users' \
                '-T[Disable pseudo-tty allocation. By default `docker-compose exec` allocates a TTY.]' \
                '--index=[Index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \
+                '*'{-e,--env}'[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
+                '(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \
                '(-):running services:__docker-compose_runningservices' \
                '(-):command: _command_names -e' \
                '*::arguments: _normal' && ret=0
@@ -252,12 +175,12 @@ __docker-compose_subcommand() {
        (help)
            _arguments ':subcommand:__docker-compose_commands' && ret=0
        ;;
        (images)
            _arguments \
                $opts_help \
                '-q[Only display IDs]' \
-                '*:services:__docker-compose_services_all' && ret=0
+                '*:services:__docker-compose_services' && ret=0
        ;;
        (kill)
            _arguments \
                $opts_help \
@@ -271,7 +194,7 @@ __docker-compose_subcommand() {
                $opts_no_color \
                '--tail=[Number of lines to show from the end of the logs for each container.]:number of lines: ' \
                '(-t --timestamps)'{-t,--timestamps}'[Show timestamps]' \
-                '*:services:__docker-compose_services_all' && ret=0
+                '*:services:__docker-compose_services' && ret=0
        ;;
        (pause)
            _arguments \
@@ -290,12 +213,16 @@ __docker-compose_subcommand() {
            _arguments \
                $opts_help \
                '-q[Only display IDs]' \
-                '*:services:__docker-compose_services_all' && ret=0
+                '--filter KEY=VAL[Filter services by a property]:<filtername>=<value>:' \
+                '*:services:__docker-compose_services' && ret=0
        ;;
        (pull)
            _arguments \
                $opts_help \
                '--ignore-pull-failures[Pull what it can and ignores images with pull failures.]' \
+                '--no-parallel[Disable parallel pulling]' \
+                '(-q --quiet)'{-q,--quiet}'[Pull without printing progress information]' \
+                '--include-deps[Also pull services declared as dependencies]' \
                '*:services:__docker-compose_services_from_image' && ret=0
        ;;
        (push)
@@ -317,6 +244,7 @@ __docker-compose_subcommand() {
            $opts_no_deps \
            '-d[Detached mode: Run container in the background, print new container name.]' \
            '*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
+            '*'{-l,--label}'[KEY=VAL Add or override a label (can be used multiple times)]:label KEY=VAL: ' \
            '--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \
            '--name=[Assign a name to the container]:name: ' \
            '(-p --publish)'{-p,--publish=}"[Publish a container's port(s) to the host]" \
@@ -326,6 +254,7 @@ __docker-compose_subcommand() {
            '(-u --user)'{-u,--user=}'[Run as specified username or uid]:username or uid:_users' \
            '(-v --volume)*'{-v,--volume=}'[Bind mount a volume]:volume: ' \
            '(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \
+            "--use-aliases[Use the services network aliases in the network(s) the container connects to]" \
            '(-):services:__docker-compose_services' \
            '(-):command: _command_names -e' \
            '*::arguments: _normal' && ret=0
@@ -369,8 +298,10 @@ __docker-compose_subcommand() {
            "(--no-build)--build[Build images before starting containers.]" \
            "(-d)--abort-on-container-exit[Stops all containers if any container was stopped. Incompatible with -d.]" \
            '(-t --timeout)'{-t,--timeout}"[Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)]:seconds: " \
            '--scale[SERVICE=NUM Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.]:service scale SERVICE=NUM: ' \
+            '--exit-code-from=[Return the exit code of the selected service container. Implies --abort-on-container-exit]:service:__docker-compose_services' \
            $opts_remove_orphans \
-            '*:services:__docker-compose_services_all' && ret=0
+            '*:services:__docker-compose_services' && ret=0
        ;;
        (version)
            _arguments \
@@ -409,8 +340,11 @@ _docker-compose() {
        '(- :)'{-h,--help}'[Get help]' \
        '*'{-f,--file}"[${file_description}]:file:_files -g '*.yml'" \
        '(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
-        '--verbose[Show more output]' \
+        "--compatibility[If set, Compose will attempt to convert keys in v3 files to their non-Swarm equivalent]" \
        '(- :)'{-v,--version}'[Print version and exit]' \
+        '--verbose[Show more output]' \
+        '--log-level=[Set log level]:level:(DEBUG INFO WARNING ERROR CRITICAL)' \
+        '--no-ansi[Do not print ANSI control characters]' \
        '(-H --host)'{-H,--host}'[Daemon socket to connect to]:host:' \
        '--tls[Use TLS; implied by --tlsverify]' \
        '--tlscacert=[Trust certs signed only by this CA]:ca path:' \
@@ -421,7 +355,7 @@ _docker-compose() {
        '(-): :->command' \
        '(-)*:: :->option-or-argument' && ret=0

-    local -a relevant_compose_flags relevant_docker_flags compose_options docker_options
+    local -a relevant_compose_flags relevant_compose_repeatable_flags relevant_docker_flags compose_options docker_options

    relevant_compose_flags=(
        "--file" "-f"
@@ -435,6 +369,10 @@ _docker-compose() {
        "--skip-hostname-check"
    )

+    relevant_compose_repeatable_flags=(
+        "--file" "-f"
+    )
+
    relevant_docker_flags=(
        "--host" "-H"
        "--tls"
@@ -452,9 +390,18 @@ _docker-compose() {
            fi
        fi
        if [[ -n "${relevant_compose_flags[(r)$k]}" ]]; then
-            compose_options+=$k
-            if [[ -n "$opt_args[$k]" ]]; then
-                compose_options+=$opt_args[$k]
+            if [[ -n "${relevant_compose_repeatable_flags[(r)$k]}" ]]; then
+                values=("${(@s/:/)opt_args[$k]}")
+                for value in $values
+                do
+                    compose_options+=$k
+                    compose_options+=$value
+                done
+            else
+                compose_options+=$k
+                if [[ -n "$opt_args[$k]" ]]; then
+                    compose_options+=$opt_args[$k]
+                fi
            fi
        fi
    done
@@ -98,4 +98,5 @@ exe = EXE(pyz,
           debug=False,
           strip=None,
           upx=True,
-          console=True)
+          console=True,
+          bootloader_ignore_signals=True)
@@ -1,5 +1,5 @@
 coverage==4.4.2
 flake8==3.5.0
-mock>=1.0.1
-pytest==2.9.2
+mock==2.0.0
+pytest==3.6.3
 pytest-cov==2.5.1
@@ -2,22 +2,23 @@ backports.ssl-match-hostname==3.5.0.1; python_version < '3'
 cached-property==1.3.0
 certifi==2017.4.17
 chardet==3.0.4
-docker==3.4.1
-docker-pycreds==0.3.0
+colorama==0.4.0; sys_platform == 'win32'
+docker==3.7.3
+docker-pycreds==0.4.0
 dockerpty==0.4.1
 docopt==0.6.2
 enum34==1.1.6; python_version < '3.4'
 functools32==3.2.3.post2; python_version < '3.2'
-git+git://github.com/tartley/colorama.git@bd378c725b45eba0b8e5cc091c3ca76a954c92ff; sys_platform == 'win32'
 idna==2.5
 ipaddress==1.0.18
 jsonschema==2.6.0
+paramiko==2.4.2
 pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
-pypiwin32==220; sys_platform == 'win32' and python_version >= '3.6'
+pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
 PySocks==1.6.7
-PyYAML==3.12
-requests==2.18.4
+PyYAML==4.2b1
+requests==2.20.0
 six==1.10.0
 texttable==0.9.1
-urllib3==1.21.1
-websocket-client==0.32.0
+urllib3==1.21.1; python_version == '3.3'
+websocket-client==0.56.0
@@ -5,7 +5,7 @@ set -ex
 ./script/clean

 TAG="docker-compose"
-docker build -t "$TAG" . | tail -n 200
+docker build -t "$TAG" .
 docker run \
     --rm --entrypoint="script/build/linux-entrypoint" \
     -v $(pwd)/dist:/code/dist \
@@ -1,11 +1,11 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
PATH="/usr/local/bin:$PATH"
|
||||
TOOLCHAIN_PATH="$(realpath $(dirname $0)/../../build/toolchain)"
|
||||
|
||||
rm -rf venv
|
||||
|
||||
virtualenv -p /usr/local/bin/python3 venv
|
||||
virtualenv -p ${TOOLCHAIN_PATH}/bin/python3 venv
|
||||
venv/bin/pip install -r requirements.txt
|
||||
venv/bin/pip install -r requirements-build.txt
|
||||
venv/bin/pip install --no-deps .
|
||||
|
||||
@@ -44,7 +44,7 @@ virtualenv .\venv
|
||||
# pip and pyinstaller generate lots of warnings, so we need to ignore them
|
||||
$ErrorActionPreference = "Continue"
|
||||
|
||||
.\venv\Scripts\pip install pypiwin32==220
|
||||
.\venv\Scripts\pip install pypiwin32==223
|
||||
.\venv\Scripts\pip install -r requirements.txt
|
||||
.\venv\Scripts\pip install --no-deps .
|
||||
.\venv\Scripts\pip install -r requirements-build.txt
|
||||
|
||||
@@ -2,6 +2,11 @@
#
# Write the current commit sha to the file GITSHA. This file is included in
# packaging so that `docker-compose version` can include the git sha.
#
set -e
git rev-parse --short HEAD > compose/GITSHA
# sets to 'unknown' and echoes a message if the command is not successful

DOCKER_COMPOSE_GITSHA="$(git rev-parse --short HEAD)"
if [[ "${?}" != "0" ]]; then
    echo "Couldn't get revision of the git repository. Setting to 'unknown' instead"
    DOCKER_COMPOSE_GITSHA="unknown"
fi
echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA
@@ -1,15 +0,0 @@
FROM python:3.6
RUN mkdir -p /src && pip install -U Jinja2==2.10 \
    PyGithub==1.39 \
    pypandoc==1.4 \
    GitPython==2.1.9 \
    requests==2.18.4 \
    twine==1.11.0 && \
    apt-get update && apt-get install -y pandoc

VOLUME /src/script/release
WORKDIR /src
COPY . /src
RUN python setup.py develop
ENTRYPOINT ["python", "script/release/release.py"]
CMD ["--help"]
@@ -9,8 +9,7 @@ The following things are required to bring a release to a successful conclusion

### Local Docker engine (Linux Containers)

The release script runs inside a container and builds images that will be part
of the release.
The release script builds images that will be part of the release.

### Docker Hub account

@@ -20,6 +19,10 @@ following repositories:
- docker/compose
- docker/compose-tests

### Python

The release script is written in Python and requires Python 3.3 at minimum.

### A Github account and Github API token

Your Github account needs to have write access on the `docker/compose` repo.
@@ -37,7 +40,7 @@ This API token should be exposed to the release script through the
### A Bintray account and Bintray API key

Your Bintray account will need to be an admin member of the
[docker-compose organization](https://github.com/settings/tokens).
[docker-compose organization](https://bintray.com/docker-compose).
Additionally, you should generate a personal API key. To do so, click your
username in the top-right hand corner and select "Edit profile" ; on the new
page, select "API key" in the left-side menu.
@@ -53,6 +56,18 @@ Said account needs to be a member of the maintainers group for the
Moreover, the `~/.pypirc` file should exist on your host and contain the
relevant pypi credentials.

The following is a sample `.pypirc` provided as a guideline:

```
[distutils]
index-servers =
    pypi

[pypi]
username = user
password = pass
```

## Start a feature release

A feature release is a release that includes all changes present in the
@@ -114,7 +129,7 @@ assets public), proceed to the "Finalize a release" section of this guide.
Once you're ready to make your release public, you may execute the following
command from the root of the Compose repository:
```
./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEAE_VERSION
./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEASE_VERSION
```

Note that this command will create and publish versioned assets to the public.
@@ -26,12 +26,6 @@ if [ -z "$(command -v jq 2> /dev/null)" ]; then
fi


if [ -z "$(command -v pandoc 2> /dev/null)" ]; then
    >&2 echo "$0 requires http://pandoc.org/"
    >&2 echo "Please install it and make sure it is available on your \$PATH."
    exit 2
fi

API=https://api.github.com/repos
REPO=docker/compose
GITHUB_REPO=git@github.com:$REPO
@@ -59,8 +53,6 @@ docker push docker/compose-tests:latest
docker push docker/compose-tests:$VERSION

echo "Uploading package to PyPI"
pandoc -f markdown -t rst README.md -o README.rst
sed -i -e 's/logo.png?raw=true/https:\/\/github.com\/docker\/compose\/raw\/master\/logo.png?raw=true/' README.rst
./script/build/write-git-sha
python setup.py sdist bdist_wheel
if [ "$(command -v twine 2> /dev/null)" ]; then
@@ -1,6 +1,6 @@
If you're a Mac or Windows user, the best way to install Compose and keep it up-to-date is **[Docker for Mac and Windows](https://www.docker.com/products/docker)**.
If you're a Mac or Windows user, the best way to install Compose and keep it up-to-date is **[Docker Desktop for Mac and Windows](https://www.docker.com/products/docker-desktop)**.

Docker for Mac and Windows will automatically install the latest version of Docker Engine for you.
Docker Desktop will automatically install the latest version of Docker Engine for you.

Alternatively, you can use the usual commands to install or upgrade Compose:

@@ -7,9 +7,7 @@ import os
import shutil
import sys
import time
from distutils.core import run_setup

import pypandoc
from jinja2 import Template
from release.bintray import BintrayAPI
from release.const import BINTRAY_ORG
@@ -17,6 +15,8 @@ from release.const import NAME
from release.const import REPO_ROOT
from release.downloader import BinaryDownloader
from release.images import ImageManager
from release.pypi import check_pypirc
from release.pypi import pypi_upload
from release.repository import delete_assets
from release.repository import get_contributors
from release.repository import Repository
@@ -28,8 +28,6 @@ from release.utils import ScriptError
from release.utils import update_init_py_version
from release.utils import update_run_sh_version
from release.utils import yesno
from requests.exceptions import HTTPError
from twine.commands.upload import main as twine_upload


def create_initial_branch(repository, args):
@@ -60,8 +58,11 @@ def create_bump_commit(repository, release_branch, bintray_user, bintray_org):
    repository.push_branch_to_remote(release_branch)

    bintray_api = BintrayAPI(os.environ['BINTRAY_TOKEN'], bintray_user)
    print('Creating data repository {} on bintray'.format(release_branch.name))
    bintray_api.create_repository(bintray_org, release_branch.name, 'generic')
    if not bintray_api.repository_exists(bintray_org, release_branch.name):
        print('Creating data repository {} on bintray'.format(release_branch.name))
        bintray_api.create_repository(bintray_org, release_branch.name, 'generic')
    else:
        print('Bintray repository {} already exists. Skipping'.format(release_branch.name))

def monitor_pr_status(pr_data):
@@ -74,19 +75,24 @@ def monitor_pr_status(pr_data):
            'pending': 0,
            'success': 0,
            'failure': 0,
            'error': 0,
        }
        for detail in status.statuses:
            if detail.context == 'dco-signed':
                # dco-signed check breaks on merge remote-tracking ; ignore it
                continue
            summary[detail.state] += 1
        print('{pending} pending, {success} successes, {failure} failures'.format(**summary))
        if summary['pending'] == 0 and summary['failure'] == 0 and summary['success'] > 0:
            if detail.state in summary:
                summary[detail.state] += 1
        print(
            '{pending} pending, {success} successes, {failure} failures, '
            '{error} errors'.format(**summary)
        )
        if summary['failure'] > 0 or summary['error'] > 0:
            raise ScriptError('CI failures detected!')
        elif summary['pending'] == 0 and summary['success'] > 0:
            # This check assumes at least 1 non-DCO CI check to avoid race conditions.
            # If testing on a repo without CI, use --skip-ci-check to avoid looping eternally
            return True
        elif summary['failure'] > 0:
            raise ScriptError('CI failures detected!')
        time.sleep(30)
    elif status.state == 'success':
        print('{} successes: all clear!'.format(status.total_count))
@@ -94,12 +100,14 @@ def monitor_pr_status(pr_data):

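The rewritten loop tallies pending, success, failure, and error states, fails fast on any failure or error, and only returns once nothing is pending and at least one check has passed. A minimal sketch of that decision rule, with `statuses` standing in for the GitHub commit-status list (an assumption for illustration):

```
# Minimal sketch of the verdict logic above; 'dco-signed' is skipped as in the diff.
def ci_verdict(statuses):
    summary = {'pending': 0, 'success': 0, 'failure': 0, 'error': 0}
    for state, context in statuses:
        if context == 'dco-signed':
            continue
        if state in summary:
            summary[state] += 1
    if summary['failure'] > 0 or summary['error'] > 0:
        raise RuntimeError('CI failures detected!')
    return summary['pending'] == 0 and summary['success'] > 0

assert ci_verdict([('success', 'ci/test'), ('pending', 'ci/build')]) is False
assert ci_verdict([('success', 'ci/test'), ('failure', 'dco-signed')]) is True
```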
def check_pr_mergeable(pr_data):
    if not pr_data.mergeable:
    if pr_data.mergeable is False:
        # mergeable can also be null, in which case the warning would be a false positive.
        print(
            'WARNING!! PR #{} can not currently be merged. You will need to '
            'resolve the conflicts manually before finalizing the release.'.format(pr_data.number)
        )
    return pr_data.mergeable

    return pr_data.mergeable is True

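The `is False` / `is True` comparisons matter because GitHub reports `mergeable` as a tri-state value: `True`, `False`, or `None` while the merge check is still running. A small sketch of why the old truthiness test misfired:

```
# GitHub's `mergeable` field is tri-state: True / False / None (still computing).
for mergeable in (True, False, None):
    warn_new = mergeable is False   # new behaviour: warn only on a firm "no"
    warn_old = not mergeable        # old behaviour: also warned on None
    print(mergeable, warn_new, warn_old)  # None -> warn_new=False, warn_old=True
```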
def create_release_draft(repository, version, pr_data, files):
@@ -160,24 +168,6 @@ def distclean():
        shutil.rmtree(folder, ignore_errors=True)


def pypi_upload(args):
    print('Uploading to PyPi')
    try:
        twine_upload([
            'dist/docker_compose-{}*.whl'.format(args.release),
            'dist/docker-compose-{}*.tar.gz'.format(args.release)
        ])
    except HTTPError as e:
        if e.response.status_code == 400 and 'File already exists' in e.message:
            if not args.finalize_resume:
                raise ScriptError(
                    'Package already uploaded on PyPi.'
                )
            print('Skipping PyPi upload - package already uploaded')
        else:
            raise ScriptError('Unexpected HTTP error uploading package to PyPi: {}'.format(e))

def resume(args):
    try:
        distclean()
@@ -266,6 +256,7 @@ def start(args):
def finalize(args):
    distclean()
    try:
        check_pypirc()
        repository = Repository(REPO_ROOT, args.repo)
        img_manager = ImageManager(args.release)
        pr_data = repository.find_release_pr(args.release)
@@ -273,7 +264,7 @@ def finalize(args):
            raise ScriptError('No PR found for {}'.format(args.release))
        if not check_pr_mergeable(pr_data):
            raise ScriptError('Can not finalize release with an unmergeable PR')
        if not img_manager.check_images(args.release):
        if not img_manager.check_images():
            raise ScriptError('Missing release image')
        br_name = branch_name(args.release)
        if not repository.branch_exists(br_name):
@@ -284,10 +275,8 @@ def finalize(args):

        repository.checkout_branch(br_name)

        pypandoc.convert_file(
            os.path.join(REPO_ROOT, 'README.md'), 'rst', outputfile=os.path.join(REPO_ROOT, 'README.rst')
        )
        run_setup(os.path.join(REPO_ROOT, 'setup.py'), script_args=['sdist', 'bdist_wheel'])
        os.system('python {setup_script} sdist bdist_wheel'.format(
            setup_script=os.path.join(REPO_ROOT, 'setup.py')))

        merge_status = pr_data.merge()
        if not merge_status.merged and not args.finalize_resume:
@@ -1,27 +1,13 @@
#!/bin/sh

docker image inspect compose/release-tool > /dev/null
if test $? -ne 0; then
    docker build -t compose/release-tool -f $(pwd)/script/release/Dockerfile $(pwd)
if test -d ${VENV_DIR:-./.release-venv}; then
    true
else
    ./script/release/setup-venv.sh
fi

if test -z $GITHUB_TOKEN; then
    echo "GITHUB_TOKEN environment variable must be set"
    exit 1
if test -z "$*"; then
    args="--help"
fi

if test -z $BINTRAY_TOKEN; then
    echo "BINTRAY_TOKEN environment variable must be set"
    exit 1
fi

docker run -e GITHUB_TOKEN=$GITHUB_TOKEN -e BINTRAY_TOKEN=$BINTRAY_TOKEN -e SSH_AUTH_SOCK=$SSH_AUTH_SOCK -it \
    --mount type=bind,source=$(pwd),target=/src \
    --mount type=bind,source=$(pwd)/.git,target=/src/.git \
    --mount type=bind,source=$HOME/.docker,target=/root/.docker \
    --mount type=bind,source=$HOME/.gitconfig,target=/root/.gitconfig \
    --mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock \
    --mount type=bind,source=$HOME/.ssh,target=/root/.ssh \
    --mount type=bind,source=/tmp,target=/tmp \
    -v $HOME/.pypirc:/root/.pypirc \
    compose/release-tool $*
${VENV_DIR:-./.release-venv}/bin/python ./script/release/release.py "$@"
@@ -15,7 +15,7 @@ class BintrayAPI(requests.Session):
        self.base_url = 'https://api.bintray.com/'

    def create_repository(self, subject, repo_name, repo_type='generic'):
        url = '{base}/repos/{subject}/{repo_name}'.format(
        url = '{base}repos/{subject}/{repo_name}'.format(
            base=self.base_url, subject=subject, repo_name=repo_name,
        )
        data = {
@@ -27,10 +27,20 @@ class BintrayAPI(requests.Session):
        }
        return self.post_json(url, data)

    def delete_repository(self, subject, repo_name):
    def repository_exists(self, subject, repo_name):
        url = '{base}/repos/{subject}/{repo_name}'.format(
            base=self.base_url, subject=subject, repo_name=repo_name,
        )
        result = self.get(url)
        if result.status_code == 404:
            return False
        result.raise_for_status()
        return True

    def delete_repository(self, subject, repo_name):
        url = '{base}repos/{subject}/{repo_name}'.format(
            base=self.base_url, subject=subject, repo_name=repo_name,
        )
        return self.delete(url)

    def post_json(self, url, data, **kwargs):

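The new `repository_exists` probe (404 means absent, any other error is re-raised) is what lets the bump-commit step above become idempotent. A sketch of the check-then-create pattern it enables, assuming a BintrayAPI-like object with the two methods shown in the diff:

```
# Sketch only: `api` is assumed to expose repository_exists/create_repository
# as defined in the diff above, so reruns of an aborted release are safe.
def ensure_repository(api, org, name):
    if api.repository_exists(org, name):
        print('Bintray repository {} already exists. Skipping'.format(name))
        return
    print('Creating data repository {} on bintray'.format(name))
    api.create_repository(org, name, 'generic')
```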
@@ -2,6 +2,8 @@ from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals

import base64
import json
import os
import shutil

@@ -15,17 +17,22 @@ class ImageManager(object):
    def __init__(self, version):
        self.docker_client = docker.APIClient(**docker.utils.kwargs_from_env())
        self.version = version
        if 'HUB_CREDENTIALS' in os.environ:
            print('HUB_CREDENTIALS found in environment, issuing login')
            credentials = json.loads(base64.urlsafe_b64decode(os.environ['HUB_CREDENTIALS']))
            self.docker_client.login(
                username=credentials['Username'], password=credentials['Password']
            )

    def build_images(self, repository, files):
        print("Building release images...")
        repository.write_git_sha()
        docker_client = docker.APIClient(**docker.utils.kwargs_from_env())
        distdir = os.path.join(REPO_ROOT, 'dist')
        os.makedirs(distdir, exist_ok=True)
        shutil.copy(files['docker-compose-Linux-x86_64'][0], distdir)
        os.chmod(os.path.join(distdir, 'docker-compose-Linux-x86_64'), 0o755)
        print('Building docker/compose image')
        logstream = docker_client.build(
        logstream = self.docker_client.build(
            REPO_ROOT, tag='docker/compose:{}'.format(self.version), dockerfile='Dockerfile.run',
            decode=True
        )
@@ -36,7 +43,7 @@ class ImageManager(object):
            print(chunk['stream'], end='')

        print('Building test image (for UCP e2e)')
        logstream = docker_client.build(
        logstream = self.docker_client.build(
            REPO_ROOT, tag='docker-compose-tests:tmp', decode=True
        )
        for chunk in logstream:
@@ -45,13 +52,15 @@ class ImageManager(object):
            if 'stream' in chunk:
                print(chunk['stream'], end='')

        container = docker_client.create_container(
        container = self.docker_client.create_container(
            'docker-compose-tests:tmp', entrypoint='tox'
        )
        docker_client.commit(container, 'docker/compose-tests', 'latest')
        docker_client.tag('docker/compose-tests:latest', 'docker/compose-tests:{}'.format(self.version))
        docker_client.remove_container(container, force=True)
        docker_client.remove_image('docker-compose-tests:tmp', force=True)
        self.docker_client.commit(container, 'docker/compose-tests', 'latest')
        self.docker_client.tag(
            'docker/compose-tests:latest', 'docker/compose-tests:{}'.format(self.version)
        )
        self.docker_client.remove_container(container, force=True)
        self.docker_client.remove_image('docker-compose-tests:tmp', force=True)

    @property
    def image_names(self):
@@ -61,23 +70,19 @@ class ImageManager(object):
            'docker/compose:{}'.format(self.version)
        ]

    def check_images(self, version):
        docker_client = docker.APIClient(**docker.utils.kwargs_from_env())

    def check_images(self):
        for name in self.image_names:
            try:
                docker_client.inspect_image(name)
                self.docker_client.inspect_image(name)
            except docker.errors.ImageNotFound:
                print('Expected image {} was not found'.format(name))
                return False
        return True

    def push_images(self):
        docker_client = docker.APIClient(**docker.utils.kwargs_from_env())

        for name in self.image_names:
            print('Pushing {} to Docker Hub'.format(name))
            logstream = docker_client.push(name, stream=True, decode=True)
            logstream = self.docker_client.push(name, stream=True, decode=True)
            for chunk in logstream:
                if 'status' in chunk:
                    print(chunk['status'])
script/release/release/pypi.py (new file, 44 lines)
@@ -0,0 +1,44 @@
from __future__ import absolute_import
from __future__ import unicode_literals

from configparser import Error
from requests.exceptions import HTTPError
from twine.commands.upload import main as twine_upload
from twine.utils import get_config

from .utils import ScriptError


def pypi_upload(args):
    print('Uploading to PyPi')
    try:
        rel = args.release.replace('-rc', 'rc')
        twine_upload([
            'dist/docker_compose-{}*.whl'.format(rel),
            'dist/docker-compose-{}*.tar.gz'.format(rel)
        ])
    except HTTPError as e:
        if e.response.status_code == 400 and 'File already exists' in str(e):
            if not args.finalize_resume:
                raise ScriptError(
                    'Package already uploaded on PyPi.'
                )
            print('Skipping PyPi upload - package already uploaded')
        else:
            raise ScriptError('Unexpected HTTP error uploading package to PyPi: {}'.format(e))


def check_pypirc():
    try:
        config = get_config()
    except Error as e:
        raise ScriptError('Failed to parse .pypirc file: {}'.format(e))

    if config is None:
        raise ScriptError('Failed to parse .pypirc file')

    if 'pypi' not in config:
        raise ScriptError('Missing [pypi] section in .pypirc file')

    if not (config['pypi'].get('username') and config['pypi'].get('password')):
        raise ScriptError('Missing login/password pair for pypi repo')

@@ -219,6 +219,8 @@ def get_contributors(pr_data):
    commits = pr_data.get_commits()
    authors = {}
    for commit in commits:
        if not commit.author:
            continue
        author = commit.author.login
        authors[author] = authors.get(author, 0) + 1
    return [x[0] for x in sorted(list(authors.items()), key=lambda x: x[1])]

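The `.replace('-rc', 'rc')` in the new `pypi_upload` mirrors PEP 440 normalization: setuptools writes pre-release versions without a separator into `dist/` filenames, even when the release tag carries a dash. A quick illustration:

```
# Tag "1.24.0-rc1" produces dist files named with the PEP 440 form "1.24.0rc1",
# so the glob must be built from the normalized string.
release = '1.24.0-rc1'
rel = release.replace('-rc', 'rc')
print('dist/docker_compose-{}*.whl'.format(rel))  # dist/docker_compose-1.24.0rc1*.whl
```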
script/release/setup-venv.sh (new executable file, 47 lines)
@@ -0,0 +1,47 @@
#!/bin/bash

debian_based() { test -f /etc/debian_version; }

if test -z $VENV_DIR; then
    VENV_DIR=./.release-venv
fi

if test -z $PYTHONBIN; then
    PYTHONBIN=$(which python3)
    if test -z $PYTHONBIN; then
        PYTHONBIN=$(which python)
    fi
fi

VERSION=$($PYTHONBIN -c "import sys; print('{}.{}'.format(*sys.version_info[0:2]))")
if test $(echo $VERSION | cut -d. -f1) -lt 3; then
    echo "Python 3.3 or above is required"
fi

if test $(echo $VERSION | cut -d. -f2) -lt 3; then
    echo "Python 3.3 or above is required"
fi

# Debian / Ubuntu workaround:
# https://askubuntu.com/questions/879437/ensurepip-is-disabled-in-debian-ubuntu-for-the-system-python
if debian_based; then
    VENV_FLAGS="$VENV_FLAGS --without-pip"
fi

$PYTHONBIN -m venv $VENV_DIR $VENV_FLAGS

VENV_PYTHONBIN=$VENV_DIR/bin/python

if debian_based; then
    curl https://bootstrap.pypa.io/get-pip.py -o $VENV_DIR/get-pip.py
    $VENV_PYTHONBIN $VENV_DIR/get-pip.py
fi

$VENV_PYTHONBIN -m pip install -U Jinja2==2.10 \
    PyGithub==1.39 \
    GitPython==2.1.9 \
    requests==2.18.4 \
    setuptools==40.6.2 \
    twine==1.11.0

$VENV_PYTHONBIN setup.py develop
@@ -15,7 +15,7 @@

set -e

VERSION="1.22.0"
VERSION="1.24.0"
IMAGE="docker/compose:$VERSION"


@@ -47,11 +47,17 @@ if [ -n "$HOME" ]; then
fi

# Only allocate tty if we detect one
if [ -t 1 ]; then
    DOCKER_RUN_OPTIONS="-t"
if [ -t 0 -a -t 1 ]; then
    DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -t"
fi
if [ -t 0 ]; then
    DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -i"

# Always set -i to support piped and terminal input in run/exec
DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -i"


# Handle userns security
if [ ! -z "$(docker info 2>/dev/null | grep userns)" ]; then
    DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS --userns=host"
fi

exec docker run --rm $DOCKER_RUN_OPTIONS $DOCKER_ADDR $COMPOSE_OPTIONS $VOLUMES -w "$(pwd)" $IMAGE "$@"

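The tty change above allocates a pseudo-TTY only when both stdin and stdout are terminals, while `-i` is now set unconditionally so piped input keeps working. A sketch of the same logic in Python (illustrative only, not part of the repo):

```
# Sketch of the tty decision in run.sh: -t needs an interactive session on both
# ends; -i is always safe and supports `echo x | docker-compose run ...`.
import sys

opts = ['-i']
if sys.stdin.isatty() and sys.stdout.isatty():
    opts.append('-t')
print(' '.join(opts))
```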
script/setup/osx (123 lines changed)
@@ -1,43 +1,104 @@
#!/bin/bash
#!/usr/bin/env bash

set -ex

python_version() {
    python -V 2>&1
}
. $(dirname $0)/osx_helpers.sh

python3_version() {
    python3 -V 2>&1
}
DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET:-"$(macos_version)"}
SDK_FETCH=
if ! [ ${DEPLOYMENT_TARGET} == "$(macos_version)" ]; then
    SDK_FETCH=1
    # SDK URL from https://github.com/docker/golang-cross/blob/master/osx-cross.sh
    SDK_URL=https://s3.dockerproject.org/darwin/v2/MacOSX${DEPLOYMENT_TARGET}.sdk.tar.xz
    SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62
fi

openssl_version() {
    python -c "import ssl; print ssl.OPENSSL_VERSION"
}
OPENSSL_VERSION=1.1.0j
OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz
OPENSSL_SHA1=dcad1efbacd9a4ed67d4514470af12bbe2a1d60a

desired_python3_version="3.6.4"
desired_python3_brew_version="3.6.4_2"
python3_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/b4e69a9a592232fa5a82741f6acecffc2f1d198d/Formula/python3.rb"
PYTHON_VERSION=3.6.8
PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz
PYTHON_SHA1=09fcc4edaef0915b4dedbfb462f1cd15f82d3a6f

PATH="/usr/local/bin:$PATH"

if !(which brew); then
#
# Install prerequisites.
#
if ! [ -x "$(command -v brew)" ]; then
    ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi

brew update > /dev/null

if !(python3_version | grep "$desired_python3_version"); then
    if brew list | grep python3; then
        brew unlink python3
    fi

    brew install "$python3_formula"
    brew switch python3 "$desired_python3_brew_version"
if ! [ -x "$(command -v grealpath)" ]; then
    brew update > /dev/null
    brew install coreutils
fi

echo "*** Using $(python3_version) ; $(python_version)"
echo "*** Using $(openssl_version)"

if !(which virtualenv); then
if ! [ -x "$(command -v python3)" ]; then
    brew update > /dev/null
    brew install python3
fi
if ! [ -x "$(command -v virtualenv)" ]; then
    pip install virtualenv
fi

#
# Create toolchain directory.
#
BUILD_PATH="$(grealpath $(dirname $0)/../../build)"
mkdir -p ${BUILD_PATH}
TOOLCHAIN_PATH="${BUILD_PATH}/toolchain"
mkdir -p ${TOOLCHAIN_PATH}

#
# Set macOS SDK.
#
if [ ${SDK_FETCH} ]; then
    SDK_PATH=${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk
    fetch_tarball ${SDK_URL} ${SDK_PATH} ${SDK_SHA1}
else
    SDK_PATH="$(xcode-select --print-path)/Platforms/MacOSX.platform/Developer/SDKs/MacOSX${DEPLOYMENT_TARGET}.sdk"
fi

#
# Build OpenSSL.
#
OPENSSL_SRC_PATH=${TOOLCHAIN_PATH}/openssl-${OPENSSL_VERSION}
if ! [ -f ${TOOLCHAIN_PATH}/bin/openssl ]; then
    rm -rf ${OPENSSL_SRC_PATH}
    fetch_tarball ${OPENSSL_URL} ${OPENSSL_SRC_PATH} ${OPENSSL_SHA1}
    (
        cd ${OPENSSL_SRC_PATH}
        export MACOSX_DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET}
        export SDKROOT=${SDK_PATH}
        ./Configure darwin64-x86_64-cc --prefix=${TOOLCHAIN_PATH}
        make install_sw install_dev
    )
fi

#
# Build Python.
#
PYTHON_SRC_PATH=${TOOLCHAIN_PATH}/Python-${PYTHON_VERSION}
if ! [ -f ${TOOLCHAIN_PATH}/bin/python3 ]; then
    rm -rf ${PYTHON_SRC_PATH}
    fetch_tarball ${PYTHON_URL} ${PYTHON_SRC_PATH} ${PYTHON_SHA1}
    (
        cd ${PYTHON_SRC_PATH}
        ./configure --prefix=${TOOLCHAIN_PATH} \
            --enable-ipv6 --without-ensurepip --with-dtrace --without-gcc \
            --datarootdir=${TOOLCHAIN_PATH}/share \
            --datadir=${TOOLCHAIN_PATH}/share \
            --enable-framework=${TOOLCHAIN_PATH}/Frameworks \
            MACOSX_DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET} \
            CFLAGS="-isysroot ${SDK_PATH} -I${TOOLCHAIN_PATH}/include" \
            CPPFLAGS="-I${SDK_PATH}/usr/include -I${TOOLCHAIN_PATH}include" \
            LDFLAGS="-isysroot ${SDK_PATH} -L ${TOOLCHAIN_PATH}/lib"
        make -j 4
        make install PYTHONAPPSDIR=${TOOLCHAIN_PATH}
        make frameworkinstallextras PYTHONAPPSDIR=${TOOLCHAIN_PATH}/share
    )
fi

echo ""
echo "*** Targeting macOS: ${DEPLOYMENT_TARGET}"
echo "*** Using SDK ${SDK_PATH}"
echo "*** Using $(python3_version ${TOOLCHAIN_PATH})"
echo "*** Using $(openssl_version ${TOOLCHAIN_PATH})"

script/setup/osx_helpers.sh (new file, 41 lines)
@@ -0,0 +1,41 @@
#!/usr/bin/env bash

# Check file's ($1) SHA1 ($2).
check_sha1() {
    echo -n "$2 *$1" | shasum -c -
}

# Download URL ($1) to path ($2).
download() {
    curl -L $1 -o $2
}

# Extract tarball ($1) in folder ($2).
extract() {
    tar xf $1 -C $2
}

# Download URL ($1), check SHA1 ($3), and extract utility ($2).
fetch_tarball() {
    url=$1
    tarball=$2.tarball
    sha1=$3
    download $url $tarball
    check_sha1 $tarball $sha1
    extract $tarball $(dirname $tarball)
}

# Version of Python at toolchain path ($1).
python3_version() {
    $1/bin/python3 -V 2>&1
}

# Version of OpenSSL used by toolchain ($1) Python.
openssl_version() {
    $1/bin/python3 -c "import ssl; print(ssl.OPENSSL_VERSION)"
}

# System macOS version.
macos_version() {
    sw_vers -productVersion | cut -f1,2 -d'.'
}
@@ -5,7 +5,7 @@ set -ex

TAG="docker-compose:$(git rev-parse --short HEAD)"

# By default use the Dockerfile, but can be overriden to use an alternative file
# By default use the Dockerfile, but can be overridden to use an alternative file
# e.g DOCKERFILE=Dockerfile.armhf script/test/default
DOCKERFILE="${DOCKERFILE:-Dockerfile}"

@@ -36,23 +36,24 @@ import requests

GITHUB_API = 'https://api.github.com/repos'

STAGES = ['tp', 'beta', 'rc']

class Version(namedtuple('_Version', 'major minor patch rc edition')):

class Version(namedtuple('_Version', 'major minor patch stage edition')):

    @classmethod
    def parse(cls, version):
        edition = None
        version = version.lstrip('v')
        version, _, rc = version.partition('-')
        if rc:
            if 'rc' not in rc:
                edition = rc
                rc = None
            elif '-' in rc:
                edition, rc = rc.split('-')

        version, _, stage = version.partition('-')
        if stage:
            if not any(marker in stage for marker in STAGES):
                edition = stage
                stage = None
            elif '-' in stage:
                edition, stage = stage.split('-')
        major, minor, patch = version.split('.', 3)
        return cls(major, minor, patch, rc, edition)
        return cls(major, minor, patch, stage, edition)

    @property
    def major_minor(self):
@@ -63,14 +64,22 @@ class Version(namedtuple('_Version', 'major minor patch rc edition')):
        """Return a representation that allows this object to be sorted
        correctly with the default comparator.
        """
        # rc releases should appear before official releases
        rc = (0, self.rc) if self.rc else (1, )
        return (int(self.major), int(self.minor), int(self.patch)) + rc
        # non-GA releases should appear before GA releases
        # Order: tp -> beta -> rc -> GA
        if self.stage:
            for st in STAGES:
                if st in self.stage:
                    stage = (STAGES.index(st), self.stage)
                    break
        else:
            stage = (len(STAGES),)

        return (int(self.major), int(self.minor), int(self.patch)) + stage

    def __str__(self):
        rc = '-{}'.format(self.rc) if self.rc else ''
        stage = '-{}'.format(self.stage) if self.stage else ''
        edition = '-{}'.format(self.edition) if self.edition else ''
        return '.'.join(map(str, self[:3])) + edition + rc
        return '.'.join(map(str, self[:3])) + edition + stage


BLACKLIST = [  # List of versions known to be broken and should not be used
@@ -113,9 +122,9 @@ def get_latest_versions(versions, num=1):


def get_default(versions):
    """Return a :class:`Version` for the latest non-rc version."""
    """Return a :class:`Version` for the latest GA version."""
    for version in versions:
        if not version.rc:
        if not version.stage:
            return version


@@ -123,8 +132,9 @@ def get_versions(tags):
    for tag in tags:
        try:
            v = Version.parse(tag['name'])
            if v not in BLACKLIST:
                yield v
            if v in BLACKLIST:
                continue
            yield v
        except ValueError:
            print("Skipping invalid tag: {name}".format(**tag), file=sys.stderr)

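The new `sort_key` generalizes the rc-only ordering: any pre-release stage sorts before GA, and stages order as `tp < beta < rc`. A standalone sketch of the same comparator, with simplified tuples standing in for `Version` instances:

```
# Sketch of the stage ordering above: tp < beta < rc < GA for the same patch.
STAGES = ['tp', 'beta', 'rc']

def sort_key(patch, stage):
    for i, st in enumerate(STAGES):
        if stage and st in stage:
            return (patch, i, stage)
    return (patch, len(STAGES), '')  # GA sorts last (i.e. newest)

tags = [(0, 'rc1'), (0, None), (0, 'beta2'), (0, 'tp1')]
print(sorted(tags, key=lambda t: sort_key(*t)))
# [(0, 'tp1'), (0, 'beta2'), (0, 'rc1'), (0, None)]
```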
setup.py (26 lines changed)
@@ -32,11 +32,11 @@ def find_version(*file_paths):
install_requires = [
    'cached-property >= 1.2.0, < 2',
    'docopt >= 0.6.1, < 0.7',
    'PyYAML >= 3.10, < 4',
    'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.19',
    'PyYAML >= 3.10, < 4.3',
    'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.21',
    'texttable >= 0.9.0, < 0.10',
    'websocket-client >= 0.32.0, < 1.0',
    'docker >= 3.4.1, < 4.0',
    'docker[ssh] >= 3.7.0, < 4.0',
    'dockerpty >= 0.4.1, < 0.5',
    'six >= 1.3.0, < 2',
    'jsonschema >= 2.5.1, < 3',
@@ -55,7 +55,7 @@ extras_require = {
    ':python_version < "3.4"': ['enum34 >= 1.0.4, < 2'],
    ':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5'],
    ':python_version < "3.3"': ['ipaddress >= 1.0.16'],
    ':sys_platform == "win32"': ['colorama >= 0.3.9, < 0.4'],
    ':sys_platform == "win32"': ['colorama >= 0.4, < 0.5'],
    'socks': ['PySocks >= 1.5.6, != 1.5.7, < 2'],
}

@@ -77,19 +77,26 @@ setup(
    name='docker-compose',
    version=find_version("compose", "__init__.py"),
    description='Multi-container orchestration for Docker',
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
    url='https://www.docker.com/',
    project_urls={
        'Documentation': 'https://docs.docker.com/compose/overview',
        'Changelog': 'https://github.com/docker/compose/blob/release/CHANGELOG.md',
        'Source': 'https://github.com/docker/compose',
        'Tracker': 'https://github.com/docker/compose/issues',
    },
    author='Docker, Inc.',
    license='Apache License 2.0',
    packages=find_packages(exclude=['tests.*', 'tests']),
    include_package_data=True,
    test_suite='nose.collector',
    install_requires=install_requires,
    extras_require=extras_require,
    tests_require=tests_require,
    entry_points="""
    [console_scripts]
    docker-compose=compose.cli.main:main
    """,
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
    entry_points={
        'console_scripts': ['docker-compose=compose.cli.main:main'],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
@@ -100,5 +107,6 @@ setup(
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)

@@ -4,7 +4,6 @@ from __future__ import unicode_literals

import datetime
import json
import os
import os.path
import re
import signal
@@ -41,7 +40,7 @@ ProcessResult = namedtuple('ProcessResult', 'stdout stderr')


BUILD_CACHE_TEXT = 'Using cache'
BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:latest'
BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:1.27.2'


def start_process(base_dir, options):
@@ -99,7 +98,14 @@ class ContainerStateCondition(object):

    def __call__(self):
        try:
            container = self.client.inspect_container(self.name)
            if self.name.endswith('*'):
                ctnrs = self.client.containers(all=True, filters={'name': self.name[:-1]})
                if len(ctnrs) > 0:
                    container = self.client.inspect_container(ctnrs[0]['Id'])
                else:
                    return False
            else:
                container = self.client.inspect_container(self.name)
            return container['State']['Status'] == self.status
        except errors.APIError:
            return False
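The trailing-`*` convention added to `ContainerStateCondition` switches the check from an exact inspect-by-name to a name-filter search, so the tests keep working now that one-off container names end in a random slug rather than a fixed `_1` suffix. A simplified sketch of the matching rule (illustrative; the real Docker name filter matches substrings rather than strict prefixes):

```
# Simplified model of the wildcard handling above.
def matches(condition_name, container_name):
    if condition_name.endswith('*'):
        return container_name.startswith(condition_name[:-1])
    return container_name == condition_name

assert matches('volume_test_run_*', 'volume_test_run_a1b2c3')
assert not matches('volume_test_run_1', 'volume_test_run_a1b2c3')
```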
@@ -222,6 +228,16 @@ class CLITestCase(DockerClientTestCase):
        self.base_dir = 'tests/fixtures/v2-full'
        assert self.dispatch(['config', '--quiet']).stdout == ''

    def test_config_with_hash_option(self):
        self.base_dir = 'tests/fixtures/v2-full'
        result = self.dispatch(['config', '--hash=*'])
        for service in self.project.get_services():
            assert '{} {}\n'.format(service.name, service.config_hash) in result.stdout

        svc = self.project.get_service('other')
        result = self.dispatch(['config', '--hash=other'])
        assert result.stdout == '{} {}\n'.format(svc.name, svc.config_hash)

    def test_config_default(self):
        self.base_dir = 'tests/fixtures/v2-full'
        result = self.dispatch(['config'])
@@ -293,6 +309,36 @@ class CLITestCase(DockerClientTestCase):
            }
        }

    def test_config_with_dot_env(self):
        self.base_dir = 'tests/fixtures/default-env-file'
        result = self.dispatch(['config'])
        json_result = yaml.load(result.stdout)
        assert json_result == {
            'services': {
                'web': {
                    'command': 'true',
                    'image': 'alpine:latest',
                    'ports': ['5643/tcp', '9999/tcp']
                }
            },
            'version': '2.4'
        }

    def test_config_with_dot_env_and_override_dir(self):
        self.base_dir = 'tests/fixtures/default-env-file'
        result = self.dispatch(['--project-directory', 'alt/', 'config'])
        json_result = yaml.load(result.stdout)
        assert json_result == {
            'services': {
                'web': {
                    'command': 'echo uwu',
                    'image': 'alpine:3.4',
                    'ports': ['3341/tcp', '4449/tcp']
                }
            },
            'version': '2.4'
        }

    def test_config_external_volume_v2(self):
        self.base_dir = 'tests/fixtures/volumes'
        result = self.dispatch(['-f', 'external-volumes-v2.yml', 'config'])
@@ -552,10 +598,20 @@ class CLITestCase(DockerClientTestCase):
        assert 'with_build' in running.stdout
        assert 'with_image' in running.stdout

    def test_ps_all(self):
        self.project.get_service('simple').create_container(one_off='blahblah')
        result = self.dispatch(['ps'])
        assert 'simple-composefile_simple_run_' not in result.stdout

        result2 = self.dispatch(['ps', '--all'])
        assert 'simple-composefile_simple_run_' in result2.stdout

    def test_pull(self):
        result = self.dispatch(['pull'])
        assert 'Pulling simple' in result.stderr
        assert 'Pulling another' in result.stderr
        assert 'done' in result.stderr
        assert 'failed' not in result.stderr

    def test_pull_with_digest(self):
        result = self.dispatch(['-f', 'digest.yml', 'pull', '--no-parallel'])
@@ -602,15 +658,15 @@ class CLITestCase(DockerClientTestCase):
        self.base_dir = 'tests/fixtures/links-composefile'
        result = self.dispatch(['pull', '--no-parallel', 'web'])
        assert sorted(result.stderr.split('\n'))[1:] == [
            'Pulling web (busybox:latest)...',
            'Pulling web (busybox:1.27.2)...',
        ]

    def test_pull_with_include_deps(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        result = self.dispatch(['pull', '--no-parallel', '--include-deps', 'web'])
        assert sorted(result.stderr.split('\n'))[1:] == [
            'Pulling db (busybox:latest)...',
            'Pulling web (busybox:latest)...',
            'Pulling db (busybox:1.27.2)...',
            'Pulling web (busybox:1.27.2)...',
        ]

    def test_build_plain(self):
@@ -773,6 +829,13 @@ class CLITestCase(DockerClientTestCase):

        assert 'does not exist, is not accessible, or is not a valid URL' in result.stderr

    def test_build_parallel(self):
        self.base_dir = 'tests/fixtures/build-multiple-composefile'
        result = self.dispatch(['build', '--parallel'])
        assert 'Successfully tagged build-multiple-composefile_a:latest' in result.stdout
        assert 'Successfully tagged build-multiple-composefile_b:latest' in result.stdout
        assert 'Successfully built' in result.stdout

    def test_create(self):
        self.dispatch(['create'])
        service = self.project.get_service('simple')
@@ -911,11 +974,11 @@ class CLITestCase(DockerClientTestCase):
        result = self.dispatch(['down', '--rmi=local', '--volumes'])
        assert 'Stopping v2-full_web_1' in result.stderr
        assert 'Stopping v2-full_other_1' in result.stderr
        assert 'Stopping v2-full_web_run_2' in result.stderr
        assert 'Stopping v2-full_web_run_' in result.stderr
        assert 'Removing v2-full_web_1' in result.stderr
        assert 'Removing v2-full_other_1' in result.stderr
        assert 'Removing v2-full_web_run_1' in result.stderr
        assert 'Removing v2-full_web_run_2' in result.stderr
        assert 'Removing v2-full_web_run_' in result.stderr
        assert 'Removing v2-full_web_run_' in result.stderr
        assert 'Removing volume v2-full_data' in result.stderr
        assert 'Removing image v2-full_web' in result.stderr
        assert 'Removing image busybox' not in result.stderr
@@ -972,11 +1035,15 @@ class CLITestCase(DockerClientTestCase):
    def test_up_attached(self):
        self.base_dir = 'tests/fixtures/echo-services'
        result = self.dispatch(['up', '--no-color'])
        simple_name = self.project.get_service('simple').containers(stopped=True)[0].name_without_project
        another_name = self.project.get_service('another').containers(
            stopped=True
        )[0].name_without_project

        assert 'simple_1 | simple' in result.stdout
        assert 'another_1 | another' in result.stdout
        assert 'simple_1 exited with code 0' in result.stdout
        assert 'another_1 exited with code 0' in result.stdout
        assert '{} | simple'.format(simple_name) in result.stdout
        assert '{} | another'.format(another_name) in result.stdout
        assert '{} exited with code 0'.format(simple_name) in result.stdout
        assert '{} exited with code 0'.format(another_name) in result.stdout

    @v2_only()
    def test_up(self):
@@ -1680,11 +1747,12 @@ class CLITestCase(DockerClientTestCase):
    def test_run_rm(self):
        self.base_dir = 'tests/fixtures/volume'
        proc = start_process(self.base_dir, ['run', '--rm', 'test'])
        service = self.project.get_service('test')
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'volume_test_run_1',
            'running'))
        service = self.project.get_service('test')
            'volume_test_run_*',
            'running')
        )
        containers = service.containers(one_off=OneOffFilter.only)
        assert len(containers) == 1
        mounts = containers[0].get('Mounts')
@@ -2007,39 +2075,39 @@ class CLITestCase(DockerClientTestCase):
        proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'simple-composefile_simple_run_1',
            'simple-composefile_simple_run_*',
            'running'))

        os.kill(proc.pid, signal.SIGINT)
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'simple-composefile_simple_run_1',
            'simple-composefile_simple_run_*',
            'exited'))

    def test_run_handles_sigterm(self):
        proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'simple-composefile_simple_run_1',
            'simple-composefile_simple_run_*',
            'running'))

        os.kill(proc.pid, signal.SIGTERM)
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'simple-composefile_simple_run_1',
            'simple-composefile_simple_run_*',
            'exited'))

    def test_run_handles_sighup(self):
        proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'simple-composefile_simple_run_1',
            'simple-composefile_simple_run_*',
            'running'))

        os.kill(proc.pid, signal.SIGHUP)
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'simple-composefile_simple_run_1',
            'simple-composefile_simple_run_*',
            'exited'))

    @mock.patch.dict(os.environ)
@@ -2162,6 +2230,7 @@ class CLITestCase(DockerClientTestCase):

    def test_start_no_containers(self):
        result = self.dispatch(['start'], returncode=1)
        assert 'failed' in result.stderr
        assert 'No containers to start' in result.stderr

    @v2_only()
@@ -2239,19 +2308,44 @@ class CLITestCase(DockerClientTestCase):
        proc = start_process(self.base_dir, ['logs', '-f'])

        self.dispatch(['up', '-d', 'another'])
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'logs-composefile_another_1',
            'exited'))
        another_name = self.project.get_service('another').get_container().name_without_project
        wait_on_condition(
            ContainerStateCondition(
                self.project.client,
                'logs-composefile_another_*',
                'exited'
            )
        )

        simple_name = self.project.get_service('simple').get_container().name_without_project
        self.dispatch(['kill', 'simple'])

        result = wait_on_process(proc)

        assert 'hello' in result.stdout
        assert 'test' in result.stdout
        assert 'logs-composefile_another_1 exited with code 0' in result.stdout
        assert 'logs-composefile_simple_1 exited with code 137' in result.stdout
        assert '{} exited with code 0'.format(another_name) in result.stdout
        assert '{} exited with code 137'.format(simple_name) in result.stdout

    def test_logs_follow_logs_from_restarted_containers(self):
        self.base_dir = 'tests/fixtures/logs-restart-composefile'
        proc = start_process(self.base_dir, ['up'])

        wait_on_condition(
            ContainerStateCondition(
                self.project.client,
                'logs-restart-composefile_another_*',
                'exited'
            )
        )
        self.dispatch(['kill', 'simple'])

        result = wait_on_process(proc)

        assert result.stdout.count(
            r'logs-restart-composefile_another_1 exited with code 1'
        ) == 3
        assert result.stdout.count('world') == 3

    def test_logs_default(self):
        self.base_dir = 'tests/fixtures/logs-composefile'
@@ -2276,17 +2370,17 @@ class CLITestCase(DockerClientTestCase):
        self.dispatch(['up', '-d'])

        result = self.dispatch(['logs', '-f', '-t'])
        assert re.search('(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})', result.stdout)
        assert re.search(r'(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})', result.stdout)

    def test_logs_tail(self):
        self.base_dir = 'tests/fixtures/logs-tail-composefile'
        self.dispatch(['up'])

        result = self.dispatch(['logs', '--tail', '2'])
        assert 'c\n' in result.stdout
        assert 'd\n' in result.stdout
        assert 'a\n' not in result.stdout
        assert 'b\n' not in result.stdout
        assert 'y\n' in result.stdout
        assert 'z\n' in result.stdout
        assert 'w\n' not in result.stdout
        assert 'x\n' not in result.stdout

    def test_kill(self):
        self.dispatch(['up', '-d'], None)
@@ -2460,9 +2554,9 @@ class CLITestCase(DockerClientTestCase):
            result = self.dispatch(['port', '--index=' + str(index), 'simple', str(number)])
            return result.stdout.rstrip()

        assert get_port(3000) == containers[0].get_local_port(3000)
        assert get_port(3000, index=1) == containers[0].get_local_port(3000)
        assert get_port(3000, index=2) == containers[1].get_local_port(3000)
        assert get_port(3000) in (containers[0].get_local_port(3000), containers[1].get_local_port(3000))
        assert get_port(3000, index=containers[0].number) == containers[0].get_local_port(3000)
        assert get_port(3000, index=containers[1].number) == containers[1].get_local_port(3000)
        assert get_port(3002) == ""

    def test_events_json(self):
@@ -2498,7 +2592,7 @@ class CLITestCase(DockerClientTestCase):

        container, = self.project.containers()
        expected_template = ' container {} {}'
        expected_meta_info = ['image=busybox:latest', 'name=simple-composefile_simple_1']
        expected_meta_info = ['image=busybox:1.27.2', 'name=simple-composefile_simple_']

        assert expected_template.format('create', container.id) in lines[0]
        assert expected_template.format('start', container.id) in lines[1]
@@ -2580,8 +2674,11 @@ class CLITestCase(DockerClientTestCase):

        assert len(containers) == 2
        web = containers[1]
        db_name = containers[0].name_without_project

        assert set(get_links(web)) == set(['db', 'mydb_1', 'extends_mydb_1'])
        assert set(get_links(web)) == set(
            ['db', db_name, 'extends_{}'.format(db_name)]
        )

        expected_env = set([
            "FOO=1",
@@ -2614,17 +2711,27 @@ class CLITestCase(DockerClientTestCase):
        self.base_dir = 'tests/fixtures/exit-code-from'
        proc = start_process(
            self.base_dir,
            ['up', '--abort-on-container-exit', '--exit-code-from', 'another'])
            ['up', '--abort-on-container-exit', '--exit-code-from', 'another']
        )

        result = wait_on_process(proc, returncode=1)

        assert 'exit-code-from_another_1 exited with code 1' in result.stdout

    def test_exit_code_from_signal_stop(self):
        self.base_dir = 'tests/fixtures/exit-code-from'
        proc = start_process(
            self.base_dir,
            ['up', '--abort-on-container-exit', '--exit-code-from', 'simple']
        )
        result = wait_on_process(proc, returncode=137)  # SIGKILL
        name = self.project.get_service('another').containers(stopped=True)[0].name_without_project
        assert '{} exited with code 1'.format(name) in result.stdout

    def test_images(self):
        self.project.get_service('simple').create_container()
        result = self.dispatch(['images'])
        assert 'busybox' in result.stdout
        assert 'simple-composefile_simple_1' in result.stdout
        assert 'simple-composefile_simple_' in result.stdout

    def test_images_default_composefile(self):
        self.base_dir = 'tests/fixtures/multiple-composefiles'
@@ -2672,3 +2779,13 @@ class CLITestCase(DockerClientTestCase):
        with pytest.raises(DuplicateOverrideFileFound):
            get_project(self.base_dir, [])
        self.base_dir = None

    def test_images_use_service_tag(self):
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/images-service-tag'
        self.dispatch(['up', '-d', '--build'])
        result = self.dispatch(['images'])

        assert re.search(r'foo1.+test[ \t]+dev', result.stdout) is not None
        assert re.search(r'foo2.+test[ \t]+prod', result.stdout) is not None
        assert re.search(r'foo3.+_foo3[ \t]+latest', result.stdout) is not None

tests/fixtures/build-multiple-composefile/a/Dockerfile (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@

FROM busybox:latest
RUN echo a
CMD top

tests/fixtures/build-multiple-composefile/b/Dockerfile (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@

FROM busybox:latest
RUN echo b
CMD top

tests/fixtures/build-multiple-composefile/docker-compose.yml (vendored, new file, 8 lines)
@@ -0,0 +1,8 @@

version: "2"

services:
  a:
    build: ./a
  b:
    build: ./b

tests/fixtures/default-env-file/alt/.env (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
IMAGE=alpine:3.4
COMMAND=echo uwu
PORT1=3341
PORT2=4449

@@ -1,4 +1,6 @@
web:
version: '2.4'
services:
  web:
    image: ${IMAGE}
    command: ${COMMAND}
    ports:

@@ -2,7 +2,7 @@ version: "2.2"

services:
  service:
    image: busybox:latest
    image: busybox:1.27.2
    command: top

    environment:

tests/fixtures/images-service-tag/Dockerfile (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
FROM busybox:latest
RUN touch /foo

tests/fixtures/images-service-tag/docker-compose.yml (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
version: "2.4"
services:
  foo1:
    build: .
    image: test:dev
  foo2:
    build: .
    image: test:prod
  foo3:
    build: .

@@ -1,11 +1,11 @@
db:
  image: busybox:latest
  image: busybox:1.27.2
  command: top
web:
  image: busybox:latest
  image: busybox:1.27.2
  command: top
  links:
    - db:db
console:
  image: busybox:latest
  image: busybox:1.27.2
  command: top

tests/fixtures/logs-restart-composefile/docker-compose.yml (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
simple:
  image: busybox:latest
  command: sh -c "echo hello && tail -f /dev/null"
another:
  image: busybox:latest
  command: sh -c "sleep 0.5 && echo world && /bin/false"
  restart: "on-failure:2"

@@ -1,3 +1,3 @@
simple:
  image: busybox:latest
  command: sh -c "echo a && echo b && echo c && echo d"
  command: sh -c "echo w && echo x && echo y && echo z"

tests/fixtures/networks/docker-compose.yml (vendored, 6 lines changed)
@@ -2,17 +2,17 @@ version: "2"

services:
  web:
    image: busybox
    image: alpine:3.7
    command: top
    networks: ["front"]
  app:
    image: busybox
    image: alpine:3.7
    command: top
    networks: ["front", "back"]
    links:
      - "db:database"
  db:
    image: busybox
    image: alpine:3.7
    command: top
    networks: ["back"]


@@ -1,5 +1,5 @@
simple:
  image: busybox:latest
  image: busybox:1.27.2
  command: top
another:
  image: busybox:latest

tests/fixtures/simple-dockerfile/Dockerfile (vendored, 2 lines changed)
@@ -1,3 +1,3 @@
FROM busybox:latest
FROM busybox:1.27.2
LABEL com.docker.compose.test_image=true
CMD echo "success"

tests/fixtures/v2-simple/docker-compose.yml (vendored, 4 lines changed)
@@ -1,8 +1,8 @@
version: "2"
services:
  simple:
    image: busybox:latest
    image: busybox:1.27.2
    command: top
  another:
    image: busybox:latest
    image: busybox:1.27.2
    command: top

@@ -90,7 +90,8 @@ class ProjectTest(DockerClientTestCase):
|
||||
project.up()
|
||||
|
||||
containers = project.containers(['web'])
|
||||
assert [c.name for c in containers] == ['composetest_web_1']
|
||||
assert len(containers) == 1
|
||||
assert containers[0].name.startswith('composetest_web_')
|
||||
|
||||
def test_containers_with_extra_service(self):
|
||||
web = self.create_service('web')
|
||||
@@ -104,6 +105,23 @@ class ProjectTest(DockerClientTestCase):
         project = Project('composetest', [web, db], self.client)
         assert set(project.containers(stopped=True)) == set([web_1, db_1])
 
+    def test_parallel_pull_with_no_image(self):
+        config_data = build_config(
+            version=V2_3,
+            services=[{
+                'name': 'web',
+                'build': {'context': '.'},
+            }],
+        )
+
+        project = Project.from_config(
+            name='composetest',
+            config_data=config_data,
+            client=self.client
+        )
+
+        project.pull(parallel_pull=True)
+
     def test_volumes_from_service(self):
         project = Project.from_config(
             name='composetest',
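The new test pulls a project in which one service has only a build context and nothing to pull. One plausible reading, sketched below with hypothetical names, is that a parallel pull must simply skip image-less services rather than fail:

    def pull_all(services, pull_one):
        # Sketch: skip build-only services (no pullable image) instead of erroring.
        for service in services:
            if not service.get('image'):
                continue
            pull_one(service['image'])

    pulled = []
    pull_all([{'name': 'web', 'build': {'context': '.'}}], pulled.append)
    assert pulled == []  # nothing to pull, and no exception either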
@@ -431,7 +449,7 @@ class ProjectTest(DockerClientTestCase):
         project.up(strategy=ConvergenceStrategy.always)
         assert len(project.containers()) == 2
 
-        db_container = [c for c in project.containers() if 'db' in c.name][0]
+        db_container = [c for c in project.containers() if c.service == 'db'][0]
         assert db_container.id != old_db_id
         assert db_container.get('Volumes./etc') == db_volume_path
@@ -451,7 +469,7 @@ class ProjectTest(DockerClientTestCase):
         project.up(strategy=ConvergenceStrategy.always)
         assert len(project.containers()) == 2
 
-        db_container = [c for c in project.containers() if 'db' in c.name][0]
+        db_container = [c for c in project.containers() if c.service == 'db'][0]
         assert db_container.id != old_db_id
         assert db_container.get_mount('/etc')['Source'] == db_volume_path
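Substring matching on names is fragile once names carry slugs: 'db' in c.name would also match a hypothetical dbadmin service or a slug that happens to contain "db". The tests therefore filter on the service attribute, which Compose derives from a container label. A plain-Python illustration with faked container objects:

    from collections import namedtuple

    FakeContainer = namedtuple('FakeContainer', 'name service')
    containers = [
        FakeContainer('composetest_dbadmin_1_ab12cd34ef56', 'dbadmin'),
        FakeContainer('composetest_db_1_0123456789ab', 'db'),
    ]

    by_substring = [c for c in containers if 'db' in c.name]
    by_service = [c for c in containers if c.service == 'db']
    assert len(by_substring) == 2   # false positive on 'dbadmin'
    assert len(by_service) == 1     # exact match on the service label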
@@ -464,14 +482,14 @@ class ProjectTest(DockerClientTestCase):
 
         project.up(['db'])
         assert len(project.containers()) == 1
-        old_db_id = project.containers()[0].id
+        container, = project.containers()
+        old_db_id = container.id
+        db_volume_path = container.get_mount('/var/db')['Source']
 
         project.up(strategy=ConvergenceStrategy.never)
         assert len(project.containers()) == 2
 
-        db_container = [c for c in project.containers() if 'db' in c.name][0]
+        db_container = [c for c in project.containers() if c.name == container.name][0]
         assert db_container.id == old_db_id
         assert db_container.get_mount('/var/db')['Source'] == db_volume_path
@@ -498,7 +516,7 @@ class ProjectTest(DockerClientTestCase):
         assert len(new_containers) == 2
         assert [c.is_running for c in new_containers] == [True, True]
 
-        db_container = [c for c in new_containers if 'db' in c.name][0]
+        db_container = [c for c in new_containers if c.service == 'db'][0]
         assert db_container.id == old_db_id
         assert db_container.get_mount('/var/db')['Source'] == db_volume_path
@@ -1944,7 +1962,7 @@ class ProjectTest(DockerClientTestCase):
 
         containers = project.containers(stopped=True)
         assert len(containers) == 1
-        assert containers[0].name == 'underscoretest_svc1_1'
+        assert containers[0].name.startswith('underscoretest_svc1_')
         assert containers[0].project == '_underscoretest'
 
         full_vol_name = 'underscoretest_foo'
@@ -1965,7 +1983,7 @@ class ProjectTest(DockerClientTestCase):
 
         containers = project2.containers(stopped=True)
         assert len(containers) == 1
-        assert containers[0].name == 'dashtest_svc1_1'
+        assert containers[0].name.startswith('dashtest_svc1_')
         assert containers[0].project == '-dashtest'
 
         full_vol_name = 'dashtest_foo'
@@ -67,7 +67,7 @@ class ServiceTest(DockerClientTestCase):
         create_and_start_container(foo)
 
         assert len(foo.containers()) == 1
-        assert foo.containers()[0].name == 'composetest_foo_1'
+        assert foo.containers()[0].name.startswith('composetest_foo_')
         assert len(bar.containers()) == 0
 
         create_and_start_container(bar)
@@ -77,8 +77,8 @@ class ServiceTest(DockerClientTestCase):
         assert len(bar.containers()) == 2
 
         names = [c.name for c in bar.containers()]
-        assert 'composetest_bar_1' in names
-        assert 'composetest_bar_2' in names
+        assert len(names) == 2
+        assert all(name.startswith('composetest_bar_') for name in names)
 
     def test_containers_one_off(self):
         db = self.create_service('db')
@@ -89,18 +89,18 @@ class ServiceTest(DockerClientTestCase):
     def test_project_is_added_to_container_name(self):
         service = self.create_service('web')
         create_and_start_container(service)
-        assert service.containers()[0].name == 'composetest_web_1'
+        assert service.containers()[0].name.startswith('composetest_web_')
 
     def test_create_container_with_one_off(self):
         db = self.create_service('db')
         container = db.create_container(one_off=True)
-        assert container.name == 'composetest_db_run_1'
+        assert container.name.startswith('composetest_db_run_')
 
     def test_create_container_with_one_off_when_existing_container_is_running(self):
         db = self.create_service('db')
         db.start()
         container = db.create_container(one_off=True)
-        assert container.name == 'composetest_db_run_1'
+        assert container.name.startswith('composetest_db_run_')
 
     def test_create_container_with_unspecified_volume(self):
         service = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
@@ -424,6 +424,22 @@ class ServiceTest(DockerClientTestCase):
         new_container = service.recreate_container(old_container)
         assert new_container.get_mount('/data')['Source'] == volume_path
 
+    def test_recreate_volume_to_mount(self):
+        # https://github.com/docker/compose/issues/6280
+        service = Service(
+            project='composetest',
+            name='db',
+            client=self.client,
+            build={'context': 'tests/fixtures/dockerfile-with-volume'},
+            volumes=[MountSpec.parse({
+                'type': 'volume',
+                'target': '/data',
+            })]
+        )
+        old_container = create_and_start_container(service)
+        new_container = service.recreate_container(old_container)
+        assert new_container.get_mount('/data')['Source']
+
     def test_duplicate_volume_trailing_slash(self):
         """
         When an image specifies a volume, and the Compose file specifies a host path
@@ -489,7 +505,7 @@ class ServiceTest(DockerClientTestCase):
         assert old_container.get('Config.Entrypoint') == ['top']
         assert old_container.get('Config.Cmd') == ['-d', '1']
         assert 'FOO=1' in old_container.get('Config.Env')
-        assert old_container.name == 'composetest_db_1'
+        assert old_container.name.startswith('composetest_db_')
         service.start_container(old_container)
         old_container.inspect()  # reload volume data
         volume_path = old_container.get_mount('/etc')['Source']
@@ -503,7 +519,7 @@ class ServiceTest(DockerClientTestCase):
         assert new_container.get('Config.Entrypoint') == ['top']
         assert new_container.get('Config.Cmd') == ['-d', '1']
         assert 'FOO=2' in new_container.get('Config.Env')
-        assert new_container.name == 'composetest_db_1'
+        assert new_container.name.startswith('composetest_db_')
         assert new_container.get_mount('/etc')['Source'] == volume_path
         if not is_cluster(self.client):
             assert (
@@ -836,13 +852,13 @@ class ServiceTest(DockerClientTestCase):
         db = self.create_service('db')
         web = self.create_service('web', links=[(db, None)])
 
-        create_and_start_container(db)
-        create_and_start_container(db)
+        db1 = create_and_start_container(db)
+        db2 = create_and_start_container(db)
         create_and_start_container(web)
 
         assert set(get_links(web.containers()[0])) == set([
-            'composetest_db_1', 'db_1',
-            'composetest_db_2', 'db_2',
+            db1.name, db1.name_without_project,
+            db2.name, db2.name_without_project,
             'db'
         ])
 
@@ -851,30 +867,33 @@ class ServiceTest(DockerClientTestCase):
         db = self.create_service('db')
         web = self.create_service('web', links=[(db, 'custom_link_name')])
 
-        create_and_start_container(db)
-        create_and_start_container(db)
+        db1 = create_and_start_container(db)
+        db2 = create_and_start_container(db)
         create_and_start_container(web)
 
         assert set(get_links(web.containers()[0])) == set([
-            'composetest_db_1', 'db_1',
-            'composetest_db_2', 'db_2',
+            db1.name, db1.name_without_project,
+            db2.name, db2.name_without_project,
             'custom_link_name'
         ])
 
     @no_cluster('No legacy links support in Swarm')
     def test_start_container_with_external_links(self):
         db = self.create_service('db')
-        web = self.create_service('web', external_links=['composetest_db_1',
-                                                         'composetest_db_2',
-                                                         'composetest_db_3:db_3'])
+        db_ctnrs = [create_and_start_container(db) for _ in range(3)]
+        web = self.create_service(
+            'web', external_links=[
+                db_ctnrs[0].name,
+                db_ctnrs[1].name,
+                '{}:db_3'.format(db_ctnrs[2].name)
+            ]
+        )
 
-        for _ in range(3):
-            create_and_start_container(db)
         create_and_start_container(web)
 
         assert set(get_links(web.containers()[0])) == set([
-            'composetest_db_1',
-            'composetest_db_2',
+            db_ctnrs[0].name,
+            db_ctnrs[1].name,
             'db_3'
         ])
 
@@ -892,14 +911,14 @@ class ServiceTest(DockerClientTestCase):
     def test_start_one_off_container_creates_links_to_its_own_service(self):
         db = self.create_service('db')
 
-        create_and_start_container(db)
-        create_and_start_container(db)
+        db1 = create_and_start_container(db)
+        db2 = create_and_start_container(db)
 
         c = create_and_start_container(db, one_off=OneOffFilter.only)
 
         assert set(get_links(c)) == set([
-            'composetest_db_1', 'db_1',
-            'composetest_db_2', 'db_2',
+            db1.name, db1.name_without_project,
+            db2.name, db2.name_without_project,
            'db'
         ])
 
@@ -1249,17 +1268,15 @@ class ServiceTest(DockerClientTestCase):
         test that those containers are restarted and not removed/recreated.
         """
         service = self.create_service('web')
-        service.create_container(number=1)
-        service.create_container(number=2)
+        next_number = service._next_container_number()
+        valid_numbers = [next_number, next_number + 1]
+        service.create_container(number=next_number)
+        service.create_container(number=next_number + 1)
 
         ParallelStreamWriter.instance = None
         with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
             service.scale(2)
         for container in service.containers():
             assert container.is_running
-            assert container.number in [1, 2]
+            assert container.number in valid_numbers
 
         captured_output = mock_stderr.getvalue()
         assert 'Creating' not in captured_output
@@ -1310,10 +1327,8 @@ class ServiceTest(DockerClientTestCase):
 
         assert len(service.containers()) == 1
         assert service.containers()[0].is_running
-        assert (
-            "ERROR: for composetest_web_2 Cannot create container for service"
-            " web: Boom" in mock_stderr.getvalue()
-        )
+        assert "ERROR: for composetest_web_" in mock_stderr.getvalue()
+        assert "Cannot create container for service web: Boom" in mock_stderr.getvalue()
 
     def test_scale_with_unexpected_exception(self):
         """Test that when scaling if the API returns an error, that is not of type
@@ -1580,16 +1595,17 @@ class ServiceTest(DockerClientTestCase):
         }
 
         compose_labels = {
+            LABEL_CONTAINER_NUMBER: '1',
             LABEL_ONE_OFF: 'False',
             LABEL_PROJECT: 'composetest',
             LABEL_SERVICE: 'web',
             LABEL_VERSION: __version__,
-            LABEL_CONTAINER_NUMBER: '1'
         }
         expected = dict(labels_dict, **compose_labels)
 
         service = self.create_service('web', labels=labels_dict)
-        labels = create_and_start_container(service).labels.items()
+        ctnr = create_and_start_container(service)
+        labels = ctnr.labels.items()
         for pair in expected.items():
             assert pair in labels
 
@@ -1655,7 +1671,7 @@ class ServiceTest(DockerClientTestCase):
     def test_duplicate_containers(self):
         service = self.create_service('web')
 
-        options = service._get_container_create_options({}, 1)
+        options = service._get_container_create_options({}, service._next_container_number())
         original = Container.create(service.client, **options)
 
         assert set(service.containers(stopped=True)) == set([original])
 
@@ -55,8 +55,8 @@ class BasicProjectTest(ProjectTestCase):
 
     def test_partial_change(self):
         old_containers = self.run_up(self.cfg)
-        old_db = [c for c in old_containers if c.name_without_project == 'db_1'][0]
-        old_web = [c for c in old_containers if c.name_without_project == 'web_1'][0]
+        old_db = [c for c in old_containers if c.name_without_project.startswith('db_')][0]
+        old_web = [c for c in old_containers if c.name_without_project.startswith('web_')][0]
 
         self.cfg['web']['command'] = '/bin/true'
 
@@ -71,7 +71,7 @@ class BasicProjectTest(ProjectTestCase):
 
         created = list(new_containers - old_containers)
         assert len(created) == 1
-        assert created[0].name_without_project == 'web_1'
+        assert created[0].name_without_project == old_web.name_without_project
         assert created[0].get('Config.Cmd') == ['/bin/true']
 
     def test_all_change(self):
@@ -114,7 +114,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
 
     def test_up(self):
         containers = self.run_up(self.cfg)
-        assert set(c.name_without_project for c in containers) == set(['db_1', 'web_1', 'nginx_1'])
+        assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
 
     def test_change_leaf(self):
         old_containers = self.run_up(self.cfg)
@@ -122,7 +122,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['nginx']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg)
 
-        assert set(c.name_without_project for c in new_containers - old_containers) == set(['nginx_1'])
+        assert set(c.service for c in new_containers - old_containers) == set(['nginx'])
 
     def test_change_middle(self):
         old_containers = self.run_up(self.cfg)
@@ -130,7 +130,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['web']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg)
 
-        assert set(c.name_without_project for c in new_containers - old_containers) == set(['web_1'])
+        assert set(c.service for c in new_containers - old_containers) == set(['web'])
 
     def test_change_middle_always_recreate_deps(self):
         old_containers = self.run_up(self.cfg, always_recreate_deps=True)
@@ -138,8 +138,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['web']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg, always_recreate_deps=True)
 
-        assert set(c.name_without_project
-                   for c in new_containers - old_containers) == {'web_1', 'nginx_1'}
+        assert set(c.service for c in new_containers - old_containers) == {'web', 'nginx'}
 
     def test_change_root(self):
         old_containers = self.run_up(self.cfg)
@@ -147,7 +146,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['db']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg)
 
-        assert set(c.name_without_project for c in new_containers - old_containers) == set(['db_1'])
+        assert set(c.service for c in new_containers - old_containers) == set(['db'])
 
     def test_change_root_always_recreate_deps(self):
         old_containers = self.run_up(self.cfg, always_recreate_deps=True)
@@ -155,8 +154,9 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['db']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg, always_recreate_deps=True)
 
-        assert set(c.name_without_project
-                   for c in new_containers - old_containers) == {'db_1', 'web_1', 'nginx_1'}
+        assert set(c.service for c in new_containers - old_containers) == {
+            'db', 'web', 'nginx'
+        }
 
     def test_change_root_no_recreate(self):
         old_containers = self.run_up(self.cfg)
@@ -195,9 +195,18 @@ class ProjectWithDependenciesTest(ProjectTestCase):
 
         web, = [c for c in containers if c.service == 'web']
         nginx, = [c for c in containers if c.service == 'nginx']
+        db, = [c for c in containers if c.service == 'db']
 
-        assert set(get_links(web)) == {'composetest_db_1', 'db', 'db_1'}
-        assert set(get_links(nginx)) == {'composetest_web_1', 'web', 'web_1'}
+        assert set(get_links(web)) == {
+            'composetest_db_1',
+            'db',
+            'db_1',
+        }
+        assert set(get_links(nginx)) == {
+            'composetest_web_1',
+            'web',
+            'web_1',
+        }
 
 
 class ServiceStateTest(DockerClientTestCase):
 
@@ -139,7 +139,9 @@ class DockerClientTestCase(unittest.TestCase):
     def check_build(self, *args, **kwargs):
         kwargs.setdefault('rm', True)
         build_output = self.client.build(*args, **kwargs)
-        stream_output(build_output, open('/dev/null', 'w'))
+        with open(os.devnull, 'w') as devnull:
+            for event in stream_output(build_output, devnull):
+                pass
 
     def require_api_version(self, minimum):
         api_version = self.client.version()['ApiVersion']
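The replacement loop suggests stream_output now yields events lazily instead of returning a list; callers that only want its side effect (writing progress to a stream) still have to iterate to drain it. A generic illustration of that pattern, not Compose's actual implementation:

    def stream_events(events, write):
        # Generator: nothing is written until the caller iterates.
        for event in events:
            write(str(event))
            yield event

    written = []
    gen = stream_events([1, 2, 3], written.append)
    assert written == []            # not consumed yet, no output produced
    assert list(gen) == [1, 2, 3]
    assert written == ['1', '2', '3']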
@@ -193,7 +193,7 @@ class TestConsumeQueue(object):
         queue.put(item)
 
         generator = consume_queue(queue, True)
-        assert next(generator) is 'foobar-1'
+        assert next(generator) == 'foobar-1'
 
     def test_item_is_none_when_timeout_is_hit(self):
         queue = Queue()
 
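The one-character fix above is worth dwelling on: `is` compares object identity, and two equal strings are not guaranteed to be the same object, so the old assertion only passed when CPython happened to intern the literal. For example:

    a = 'foobar-1'
    b = '-'.join(['foobar', '1'])
    assert a == b   # always true: same characters
    # Whether 'a is b' holds is implementation-dependent; strings built at
    # runtime are usually distinct objects, so identity checks on strings
    # are a latent bug.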
@@ -155,6 +155,14 @@ class TestCallDocker(object):
             'docker', '--host', 'tcp://mydocker.net:2333', 'ps'
         ]
 
+    def test_with_http_host(self):
+        with mock.patch('subprocess.call') as fake_call:
+            call_docker(['ps'], {'--host': 'http://mydocker.net:2333'})
+
+        assert fake_call.call_args[0][0] == [
+            'docker', '--host', 'tcp://mydocker.net:2333', 'ps',
+        ]
+
     def test_with_host_option_shorthand_equal(self):
         with mock.patch('subprocess.call') as fake_call:
             call_docker(['ps'], {'--host': '=tcp://mydocker.net:2333'})
 
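The added test expects call_docker to normalize an http:// host URL into the tcp:// scheme the docker CLI accepts. A hedged sketch of that normalization; the real implementation may differ:

    def normalize_docker_host(host):
        # Sketch: the docker CLI speaks tcp://, so rewrite an http:// prefix.
        if host.startswith('http://'):
            return 'tcp://' + host[len('http://'):]
        return host

    assert normalize_docker_host('http://mydocker.net:2333') == 'tcp://mydocker.net:2333'
    assert normalize_docker_host('tcp://mydocker.net:2333') == 'tcp://mydocker.net:2333'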
@@ -171,7 +171,10 @@ class CLITestCase(unittest.TestCase):
             '--workdir': None,
         })
 
-        assert mock_client.create_host_config.call_args[1]['restart_policy']['Name'] == 'always'
+        # NOTE: The "run" command is supposed to be a one-off tool; therefore restart policy "no"
+        # (the default) is enforced despite explicit wish for "always" in the project
+        # configuration file
+        assert not mock_client.create_host_config.call_args[1].get('restart_policy')
 
         command = TopLevelCommand(project)
         command.run({
@@ -8,6 +8,7 @@ import os
 import shutil
 import tempfile
 from operator import itemgetter
+from random import shuffle
 
 import py
 import pytest
@@ -42,7 +43,7 @@ from tests import unittest
 DEFAULT_VERSION = V2_0
 
 
-def make_service_dict(name, service_dict, working_dir, filename=None):
+def make_service_dict(name, service_dict, working_dir='.', filename=None):
     """Test helper function to construct a ServiceExtendsResolver
     """
     resolver = config.ServiceExtendsResolver(
@@ -612,6 +613,19 @@ class ConfigTest(unittest.TestCase):
             excinfo.exconly()
         )
 
+    def test_config_integer_service_property_raise_validation_error(self):
+        with pytest.raises(ConfigurationError) as excinfo:
+            config.load(
+                build_config_details({
+                    'version': '2.1',
+                    'services': {'foobar': {'image': 'busybox', 1234: 'hah'}}
+                }, 'working_dir', 'filename.yml')
+            )
+
+        assert (
+            "Unsupported config option for services.foobar: '1234'" in excinfo.exconly()
+        )
+
     def test_config_invalid_service_name_raise_validation_error(self):
         with pytest.raises(ConfigurationError) as excinfo:
             config.load(
@@ -1071,8 +1085,43 @@ class ConfigTest(unittest.TestCase):
         details = config.ConfigDetails('.', [base_file, override_file])
         web_service = config.load(details).services[0]
         assert web_service['networks'] == {
-            'foobar': {'aliases': ['foo', 'bar']},
-            'baz': None
+            'foobar': {'aliases': ['bar', 'foo']},
+            'baz': {}
         }
 
+    def test_load_with_multiple_files_mismatched_networks_format_inverse_order(self):
+        base_file = config.ConfigFile(
+            'override.yaml',
+            {
+                'version': '2',
+                'services': {
+                    'web': {
+                        'networks': ['baz']
+                    }
+                }
+            }
+        )
+        override_file = config.ConfigFile(
+            'base.yaml',
+            {
+                'version': '2',
+                'services': {
+                    'web': {
+                        'image': 'example/web',
+                        'networks': {
+                            'foobar': {'aliases': ['foo', 'bar']}
+                        }
+                    }
+                },
+                'networks': {'foobar': {}, 'baz': {}}
+            }
+        )
+
+        details = config.ConfigDetails('.', [base_file, override_file])
+        web_service = config.load(details).services[0]
+        assert web_service['networks'] == {
+            'foobar': {'aliases': ['bar', 'foo']},
+            'baz': {}
+        }
+
     def test_load_with_multiple_files_v2(self):
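Note the expected aliases come back as ['bar', 'foo'] even though the file lists ['foo', 'bar'], and a bare network entry normalizes to {} rather than None: merged alias lists are evidently returned in sorted order for determinism. In miniature, under that assumption:

    def merge_aliases(base, override):
        # Sketch: union of both alias lists, returned sorted for determinism.
        return sorted(set(base) | set(override))

    assert merge_aliases(['foo', 'bar'], []) == ['bar', 'foo']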
@@ -1291,7 +1340,7 @@ class ConfigTest(unittest.TestCase):
         assert tmpfs_mount.target == '/tmpfs'
         assert not tmpfs_mount.is_named_volume
 
-        assert host_mount.source == '/abc'
+        assert host_mount.source == os.path.normpath('/abc')
         assert host_mount.target == '/xyz'
         assert not host_mount.is_named_volume
 
@@ -1322,6 +1371,32 @@ class ConfigTest(unittest.TestCase):
         assert mount.type == 'bind'
         assert mount.source == expected_source
 
+    def test_load_bind_mount_relative_path_with_tilde(self):
+        base_file = config.ConfigFile(
+            'base.yaml', {
+                'version': '3.4',
+                'services': {
+                    'web': {
+                        'image': 'busybox:latest',
+                        'volumes': [
+                            {'type': 'bind', 'source': '~/web', 'target': '/web'},
+                        ],
+                    },
+                },
+            },
+        )
+
+        details = config.ConfigDetails('.', [base_file])
+        config_data = config.load(details)
+        mount = config_data.services[0].get('volumes')[0]
+        assert mount.target == '/web'
+        assert mount.type == 'bind'
+        assert (
+            not mount.source.startswith('~') and mount.source.endswith(
+                '{}web'.format(os.path.sep)
+            )
+        )
+
     def test_config_invalid_ipam_config(self):
         with pytest.raises(ConfigurationError) as excinfo:
             config.load(
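The tilde test relies on standard home-directory expansion, which in Python is os.path.expanduser; the assertion only checks that the ~ is gone and the path still ends in <sep>web, which keeps the test portable across platforms:

    import os.path

    source = os.path.expanduser('~/web')
    assert not source.startswith('~')
    assert source.endswith('{}web'.format(os.path.sep))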
@@ -2643,6 +2718,45 @@ class ConfigTest(unittest.TestCase):
             ['c 7:128 rwm', 'x 3:244 rw', 'f 0:128 n']
         )
 
+    def test_merge_isolation(self):
+        base = {
+            'image': 'bar',
+            'isolation': 'default',
+        }
+
+        override = {
+            'isolation': 'hyperv',
+        }
+
+        actual = config.merge_service_dicts(base, override, V2_3)
+        assert actual == {
+            'image': 'bar',
+            'isolation': 'hyperv',
+        }
+
+    def test_merge_storage_opt(self):
+        base = {
+            'image': 'bar',
+            'storage_opt': {
+                'size': '1G',
+                'readonly': 'false',
+            }
+        }
+
+        override = {
+            'storage_opt': {
+                'size': '2G',
+                'encryption': 'aes',
+            }
+        }
+
+        actual = config.merge_service_dicts(base, override, V2_3)
+        assert actual['storage_opt'] == {
+            'size': '2G',
+            'readonly': 'false',
+            'encryption': 'aes',
+        }
+
     def test_external_volume_config(self):
         config_details = build_config_details({
             'version': '2',
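Both new tests pin down the same rule: when two service definitions are merged, a scalar option is replaced by the override while a mapping option is merged key by key. A pure-Python sketch of just that rule (Compose's merge_service_dicts handles many more cases):

    def merge_service_option(base, override):
        # Sketch: mappings merge per key, scalars are simply overridden.
        if isinstance(base, dict) and isinstance(override, dict):
            merged = dict(base)
            merged.update(override)
            return merged
        return override

    assert merge_service_option('default', 'hyperv') == 'hyperv'
    assert merge_service_option(
        {'size': '1G', 'readonly': 'false'},
        {'size': '2G', 'encryption': 'aes'},
    ) == {'size': '2G', 'readonly': 'false', 'encryption': 'aes'}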
@@ -2992,6 +3106,41 @@ class ConfigTest(unittest.TestCase):
         )
         config.load(config_details)
 
+    def test_config_duplicate_mount_points(self):
+        config1 = build_config_details(
+            {
+                'version': '3.5',
+                'services': {
+                    'web': {
+                        'image': 'busybox',
+                        'volumes': ['/tmp/foo:/tmp/foo', '/tmp/foo:/tmp/foo:rw']
+                    }
+                }
+            }
+        )
+
+        config2 = build_config_details(
+            {
+                'version': '3.5',
+                'services': {
+                    'web': {
+                        'image': 'busybox',
+                        'volumes': ['/x:/y', '/z:/y']
+                    }
+                }
+            }
+        )
+
+        with self.assertRaises(ConfigurationError) as e:
+            config.load(config1)
+        self.assertEquals(str(e.exception), 'Duplicate mount points: [%s]' % (
+            ', '.join(['/tmp/foo:/tmp/foo:rw'] * 2)))
+
+        with self.assertRaises(ConfigurationError) as e:
+            config.load(config2)
+        self.assertEquals(str(e.exception), 'Duplicate mount points: [%s]' % (
+            ', '.join(['/x:/y:rw', '/z:/y:rw'])))
+
 
 class NetworkModeTest(unittest.TestCase):
 
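The expected error messages show that volume strings are normalized (a default :rw mode is appended) before duplicate container paths are detected. A sketch of that check under those assumptions:

    def find_duplicate_mounts(volumes):
        # Sketch: normalize to host:container:mode, then group by container path.
        normalized = [v if v.count(':') == 2 else v + ':rw' for v in volumes]
        seen, duplicates = {}, []
        for vol in normalized:
            target = vol.split(':')[1]
            if target in seen:
                duplicates.extend([seen[target], vol])
            seen[target] = vol
        return duplicates

    assert find_duplicate_mounts(['/x:/y', '/z:/y']) == ['/x:/y:rw', '/z:/y:rw']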
@@ -3444,6 +3593,9 @@ class InterpolationTest(unittest.TestCase):
                     'reservations': {'memory': '100M'},
                 },
             },
+            'credential_spec': {
+                'file': 'spec.json'
+            },
         },
     },
 })
@@ -3461,7 +3613,8 @@ class InterpolationTest(unittest.TestCase):
         'mem_limit': '300M',
         'mem_reservation': '100M',
         'cpus': 0.7,
-        'name': 'foo'
+        'name': 'foo',
+        'security_opt': ['credentialspec=file://spec.json'],
     }
 
 @mock.patch.dict(os.environ)
@@ -3536,6 +3689,13 @@ class VolumeConfigTest(unittest.TestCase):
         ).services[0]
         assert d['volumes'] == [VolumeSpec.parse('/host/path:/container/path')]
 
+    @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
+    def test_volumes_order_is_preserved(self):
+        volumes = ['/{0}:/{0}'.format(i) for i in range(0, 6)]
+        shuffle(volumes)
+        cfg = make_service_dict('foo', {'build': '.', 'volumes': volumes})
+        assert cfg['volumes'] == volumes
+
     @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
     @mock.patch.dict(os.environ)
     def test_volume_binding_with_home(self):
@@ -3757,8 +3917,95 @@ class MergePortsTest(unittest.TestCase, MergeListsTest):
 
 class MergeNetworksTest(unittest.TestCase, MergeListsTest):
     config_name = 'networks'
-    base_config = ['frontend', 'backend']
-    override_config = ['monitoring']
+    base_config = {'default': {'aliases': ['foo.bar', 'foo.baz']}}
+    override_config = {'default': {'ipv4_address': '123.234.123.234'}}
+
+    def test_no_network_overrides(self):
+        service_dict = config.merge_service_dicts(
+            {self.config_name: self.base_config},
+            {self.config_name: self.override_config},
+            DEFAULT_VERSION)
+        assert service_dict[self.config_name] == {
+            'default': {
+                'aliases': ['foo.bar', 'foo.baz'],
+                'ipv4_address': '123.234.123.234'
+            }
+        }
+
+    def test_network_has_none_value(self):
+        service_dict = config.merge_service_dicts(
+            {self.config_name: {
+                'default': None
+            }},
+            {self.config_name: {
+                'default': {
+                    'aliases': []
+                }
+            }},
+            DEFAULT_VERSION)
+
+        assert service_dict[self.config_name] == {
+            'default': {
+                'aliases': []
+            }
+        }
+
+    def test_all_properties(self):
+        service_dict = config.merge_service_dicts(
+            {self.config_name: {
+                'default': {
+                    'aliases': ['foo.bar', 'foo.baz'],
+                    'link_local_ips': ['192.168.1.10', '192.168.1.11'],
+                    'ipv4_address': '111.111.111.111',
+                    'ipv6_address': 'FE80:CD00:0000:0CDE:1257:0000:211E:729C-first'
+                }
+            }},
+            {self.config_name: {
+                'default': {
+                    'aliases': ['foo.baz', 'foo.baz2'],
+                    'link_local_ips': ['192.168.1.11', '192.168.1.12'],
+                    'ipv4_address': '123.234.123.234',
+                    'ipv6_address': 'FE80:CD00:0000:0CDE:1257:0000:211E:729C-second'
+                }
+            }},
+            DEFAULT_VERSION)
+
+        assert service_dict[self.config_name] == {
+            'default': {
+                'aliases': ['foo.bar', 'foo.baz', 'foo.baz2'],
+                'link_local_ips': ['192.168.1.10', '192.168.1.11', '192.168.1.12'],
+                'ipv4_address': '123.234.123.234',
+                'ipv6_address': 'FE80:CD00:0000:0CDE:1257:0000:211E:729C-second'
+            }
+        }
+
+    def test_no_network_name_overrides(self):
+        service_dict = config.merge_service_dicts(
+            {
+                self.config_name: {
+                    'default': {
+                        'aliases': ['foo.bar', 'foo.baz'],
+                        'ipv4_address': '123.234.123.234'
+                    }
+                }
+            },
+            {
+                self.config_name: {
+                    'another_network': {
+                        'ipv4_address': '123.234.123.234'
+                    }
+                }
+            },
+            DEFAULT_VERSION)
+        assert service_dict[self.config_name] == {
+            'default': {
+                'aliases': ['foo.bar', 'foo.baz'],
+                'ipv4_address': '123.234.123.234'
+            },
+            'another_network': {
+                'ipv4_address': '123.234.123.234'
+            }
+        }
 
 
 class MergeStringsOrListsTest(unittest.TestCase):
@@ -5096,3 +5343,19 @@ class SerializeTest(unittest.TestCase):
         serialized_config = yaml.load(serialize_config(config_dict))
         serialized_service = serialized_config['services']['web']
         assert serialized_service['command'] == 'echo 十六夜 咲夜'
+
+    def test_serialize_external_false(self):
+        cfg = {
+            'version': '3.4',
+            'volumes': {
+                'test': {
+                    'name': 'test-false',
+                    'external': False
+                }
+            }
+        }
+
+        config_dict = config.load(build_config_details(cfg))
+        serialized_config = yaml.load(serialize_config(config_dict))
+        serialized_volume = serialized_config['volumes']['test']
+        assert serialized_volume['external'] is False
@@ -9,6 +9,7 @@ import pytest
 
 from compose.config.environment import env_vars_from_file
 from compose.config.environment import Environment
+from compose.config.errors import ConfigurationError
 from tests import unittest
 
@@ -52,3 +53,12 @@ class EnvironmentTest(unittest.TestCase):
         assert env_vars_from_file(str(tmpdir.join('bom.env'))) == {
             'PARK_BOM': '박봄'
         }
+
+    def test_env_vars_from_file_whitespace(self):
+        tmpdir = pytest.ensuretemp('env_file')
+        self.addCleanup(tmpdir.remove)
+        with codecs.open('{}/whitespace.env'.format(str(tmpdir)), 'w', encoding='utf-8') as f:
+            f.write('WHITESPACE =yes\n')
+        with pytest.raises(ConfigurationError) as exc:
+            env_vars_from_file(str(tmpdir.join('whitespace.env')))
+        assert 'environment variable' in exc.exconly()
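The new test rejects a key with whitespace before the equals sign. Compose's exact parser aside, the rule being enforced is roughly that the variable name must contain no surrounding whitespace; a sketch:

    import re

    def parse_env_line(line):
        # Sketch: reject whitespace around the variable name, as the test expects.
        match = re.match(r'^([^\s=]+)=(.*)$', line.rstrip('\n'))
        if match is None:
            raise ValueError(
                'Invalid environment variable in line: {!r}'.format(line))
        return match.group(1), match.group(2)

    assert parse_env_line('FOO=bar\n') == ('FOO', 'bar')
    try:
        parse_env_line('WHITESPACE =yes\n')
    except ValueError:
        pass
    else:
        raise AssertionError('whitespace before = should be rejected')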
@@ -332,6 +332,37 @@ def test_interpolate_environment_external_resource_convert_types(mock_env):
     assert value == expected
 
 
+def test_interpolate_service_name_uses_dot(mock_env):
+    entry = {
+        'service.1': {
+            'image': 'busybox',
+            'ulimits': {
+                'nproc': '${POSINT}',
+                'nofile': {
+                    'soft': '${POSINT}',
+                    'hard': '${DEFAULT:-40000}'
+                },
+            },
+        }
+    }
+
+    expected = {
+        'service.1': {
+            'image': 'busybox',
+            'ulimits': {
+                'nproc': 50,
+                'nofile': {
+                    'soft': 50,
+                    'hard': 40000
+                },
+            },
+        }
+    }
+
+    value = interpolate_environment_variables(V3_4, entry, 'service', mock_env)
+    assert value == expected
+
+
 def test_escaped_interpolation(defaults_interpolator):
     assert defaults_interpolator('$${foo}') == '${foo}'
 
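The expected values exercise two shell-style forms: ${POSINT} resolves from the environment and ${DEFAULT:-40000} falls back when the variable is unset or empty. A minimal interpolator for just these two forms (Compose's real one also handles escaping, errors, and type conversion):

    import re

    def interpolate(value, env):
        # Sketch: supports ${VAR} and ${VAR:-default} only.
        def repl(match):
            name, default = match.group(1), match.group(2)
            if name in env and env[name]:
                return env[name]
            return default if default is not None else ''
        return re.sub(r'\$\{(\w+)(?::-([^}]*))?\}', repl, value)

    env = {'POSINT': '50'}
    assert interpolate('${POSINT}', env) == '50'
    assert interpolate('${DEFAULT:-40000}', env) == '40000'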
@@ -5,6 +5,8 @@ import docker
 
 from .. import mock
 from .. import unittest
+from compose.const import LABEL_ONE_OFF
+from compose.const import LABEL_SLUG
 from compose.container import Container
 from compose.container import get_container_name
 
@@ -30,7 +32,7 @@ class ContainerTest(unittest.TestCase):
                 "Labels": {
                     "com.docker.compose.project": "composetest",
                     "com.docker.compose.service": "web",
-                    "com.docker.compose.container-number": 7,
+                    "com.docker.compose.container-number": "7",
                 },
             }
         }
@@ -95,6 +97,15 @@ class ContainerTest(unittest.TestCase):
         container = Container(None, self.container_dict, has_been_inspected=True)
         assert container.name_without_project == "custom_name_of_container"
 
+    def test_name_without_project_one_off(self):
+        self.container_dict['Name'] = "/composetest_web_092cd63296f"
+        self.container_dict['Config']['Labels'][LABEL_SLUG] = (
+            "092cd63296fdc446ad432d3905dd1fcbe12a2ba6b52"
+        )
+        self.container_dict['Config']['Labels'][LABEL_ONE_OFF] = 'True'
+        container = Container(None, self.container_dict, has_been_inspected=True)
+        assert container.name_without_project == 'web_092cd63296fd'
+
     def test_inspect_if_not_inspected(self):
         mock_client = mock.create_autospec(docker.APIClient)
         container = Container(mock_client, dict(Id="the_id"))
 
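The fixture stores the full slug in a label but expects only the first 12 characters in the displayed name, mirroring how Docker abbreviates IDs. Under that assumption, with the test's own literals:

    full_slug = '092cd63296fdc446ad432d3905dd1fcbe12a2ba6b52'
    truncated = full_slug[:12]
    assert truncated == '092cd63296fd'
    assert 'web_{}'.format(truncated) == 'web_092cd63296fd'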
@@ -21,7 +21,7 @@ class ProgressStreamTestCase(unittest.TestCase):
             b'31019763, "start": 1413653874, "total": 62763875}, '
             b'"progress": "..."}',
         ]
-        events = progress_stream.stream_output(output, StringIO())
+        events = list(progress_stream.stream_output(output, StringIO()))
         assert len(events) == 1
 
     def test_stream_output_div_zero(self):
@@ -30,7 +30,7 @@ class ProgressStreamTestCase(unittest.TestCase):
             b'0, "start": 1413653874, "total": 0}, '
             b'"progress": "..."}',
         ]
-        events = progress_stream.stream_output(output, StringIO())
+        events = list(progress_stream.stream_output(output, StringIO()))
         assert len(events) == 1
 
     def test_stream_output_null_total(self):
@@ -39,7 +39,7 @@ class ProgressStreamTestCase(unittest.TestCase):
             b'0, "start": 1413653874, "total": null}, '
             b'"progress": "..."}',
         ]
-        events = progress_stream.stream_output(output, StringIO())
+        events = list(progress_stream.stream_output(output, StringIO()))
         assert len(events) == 1
 
     def test_stream_output_progress_event_tty(self):
@@ -52,7 +52,7 @@ class ProgressStreamTestCase(unittest.TestCase):
                 return True
 
         output = TTYStringIO()
-        events = progress_stream.stream_output(events, output)
+        events = list(progress_stream.stream_output(events, output))
         assert len(output.getvalue()) > 0
 
     def test_stream_output_progress_event_no_tty(self):
@@ -61,7 +61,7 @@ class ProgressStreamTestCase(unittest.TestCase):
         ]
         output = StringIO()
 
-        events = progress_stream.stream_output(events, output)
+        events = list(progress_stream.stream_output(events, output))
         assert len(output.getvalue()) == 0
 
     def test_stream_output_no_progress_event_no_tty(self):
@@ -70,7 +70,7 @@ class ProgressStreamTestCase(unittest.TestCase):
         ]
         output = StringIO()
 
-        events = progress_stream.stream_output(events, output)
+        events = list(progress_stream.stream_output(events, output))
         assert len(output.getvalue()) > 0
 
     def test_mismatched_encoding_stream_write(self):
@@ -97,22 +97,24 @@ class ProgressStreamTestCase(unittest.TestCase):
         tf.seek(0)
         assert tf.read() == '???'
 
-    def test_get_digest_from_push(self):
-        digest = "sha256:abcd"
-        events = [
-            {"status": "..."},
-            {"status": "..."},
-            {"progressDetail": {}, "aux": {"Digest": digest}},
-        ]
-        assert progress_stream.get_digest_from_push(events) == digest
 
-    def test_get_digest_from_pull(self):
-        events = list()
-        assert progress_stream.get_digest_from_pull(events) is None
+def test_get_digest_from_push():
+    digest = "sha256:abcd"
+    events = [
+        {"status": "..."},
+        {"status": "..."},
+        {"progressDetail": {}, "aux": {"Digest": digest}},
+    ]
+    assert progress_stream.get_digest_from_push(events) == digest
+
+
+def test_get_digest_from_pull():
+    digest = "sha256:abcd"
+    events = [
+        {"status": "..."},
+        {"status": "..."},
+        {"status": "Digest: %s" % digest},
+    ]
+    assert progress_stream.get_digest_from_pull(events) == digest
+
+    digest = "sha256:abcd"
+    events = [
+        {"status": "..."},
+        {"status": "..."},
+        {"status": "Digest: %s" % digest},
+        {"status": "..."},
+    ]
+    assert progress_stream.get_digest_from_pull(events) == digest
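get_digest_from_pull evidently scans pull status lines for a "Digest:" prefix; the extra trailing event in the second block checks that later noise does not clobber the digest already found. A sketch consistent with those expectations, not necessarily the real implementation:

    def get_digest_from_pull(events):
        # Sketch: return the digest announced in a 'Digest: sha256:...' status.
        digest = None
        for event in events:
            status = event.get('status', '')
            if status.startswith('Digest: '):
                digest = status[len('Digest: '):]
        return digest

    assert get_digest_from_pull([]) is None
    assert get_digest_from_pull([
        {"status": "..."},
        {"status": "Digest: sha256:abcd"},
        {"status": "..."},
    ]) == 'sha256:abcd'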
@@ -254,9 +254,10 @@ class ProjectTest(unittest.TestCase):
             [container_ids[0] + ':rw']
         )
 
-    def test_events(self):
+    def test_events_legacy(self):
         services = [Service(name='web'), Service(name='db')]
         project = Project('test', services, self.mock_client)
+        self.mock_client.api_version = '1.21'
         self.mock_client.events.return_value = iter([
             {
                 'status': 'create',
@@ -362,6 +363,175 @@ class ProjectTest(unittest.TestCase):
             },
         ]
 
+    def test_events(self):
+        services = [Service(name='web'), Service(name='db')]
+        project = Project('test', services, self.mock_client)
+        self.mock_client.api_version = '1.35'
+        self.mock_client.events.return_value = iter([
+            {
+                'status': 'create',
+                'from': 'example/image',
+                'Type': 'container',
+                'Actor': {
+                    'ID': 'abcde',
+                    'Attributes': {
+                        'com.docker.compose.project': 'test',
+                        'com.docker.compose.service': 'web',
+                        'image': 'example/image',
+                        'name': 'test_web_1',
+                    }
+                },
+                'id': 'abcde',
+                'time': 1420092061,
+                'timeNano': 14200920610000002000,
+            },
+            {
+                'status': 'attach',
+                'from': 'example/image',
+                'Type': 'container',
+                'Actor': {
+                    'ID': 'abcde',
+                    'Attributes': {
+                        'com.docker.compose.project': 'test',
+                        'com.docker.compose.service': 'web',
+                        'image': 'example/image',
+                        'name': 'test_web_1',
+                    }
+                },
+                'id': 'abcde',
+                'time': 1420092061,
+                'timeNano': 14200920610000003000,
+            },
+            {
+                'status': 'create',
+                'from': 'example/other',
+                'Type': 'container',
+                'Actor': {
+                    'ID': 'bdbdbd',
+                    'Attributes': {
+                        'image': 'example/other',
+                        'name': 'shrewd_einstein',
+                    }
+                },
+                'id': 'bdbdbd',
+                'time': 1420092061,
+                'timeNano': 14200920610000005000,
+            },
+            {
+                'status': 'create',
+                'from': 'example/db',
+                'Type': 'container',
+                'Actor': {
+                    'ID': 'ababa',
+                    'Attributes': {
+                        'com.docker.compose.project': 'test',
+                        'com.docker.compose.service': 'db',
+                        'image': 'example/db',
+                        'name': 'test_db_1',
+                    }
+                },
+                'id': 'ababa',
+                'time': 1420092061,
+                'timeNano': 14200920610000004000,
+            },
+            {
+                'status': 'destroy',
+                'from': 'example/db',
+                'Type': 'container',
+                'Actor': {
+                    'ID': 'eeeee',
+                    'Attributes': {
+                        'com.docker.compose.project': 'test',
+                        'com.docker.compose.service': 'db',
+                        'image': 'example/db',
+                        'name': 'test_db_1',
+                    }
+                },
+                'id': 'eeeee',
+                'time': 1420092061,
+                'timeNano': 14200920610000004000,
+            },
+        ])
+
+        def dt_with_microseconds(dt, us):
+            return datetime.datetime.fromtimestamp(dt).replace(microsecond=us)
+
+        def get_container(cid):
+            if cid == 'eeeee':
+                raise NotFound(None, None, "oops")
+            if cid == 'abcde':
+                name = 'web'
+                labels = {LABEL_SERVICE: name}
+            elif cid == 'ababa':
+                name = 'db'
+                labels = {LABEL_SERVICE: name}
+            else:
+                labels = {}
+                name = ''
+            return {
+                'Id': cid,
+                'Config': {'Labels': labels},
+                'Name': '/project_%s_1' % name,
+            }
+
+        self.mock_client.inspect_container.side_effect = get_container
+
+        events = project.events()
+
+        events_list = list(events)
+        # Assert the return value is a generator
+        assert not list(events)
+        assert events_list == [
+            {
+                'type': 'container',
+                'service': 'web',
+                'action': 'create',
+                'id': 'abcde',
+                'attributes': {
+                    'name': 'test_web_1',
+                    'image': 'example/image',
+                },
+                'time': dt_with_microseconds(1420092061, 2),
+                'container': Container(None, get_container('abcde')),
+            },
+            {
+                'type': 'container',
+                'service': 'web',
+                'action': 'attach',
+                'id': 'abcde',
+                'attributes': {
+                    'name': 'test_web_1',
+                    'image': 'example/image',
+                },
+                'time': dt_with_microseconds(1420092061, 3),
+                'container': Container(None, get_container('abcde')),
+            },
+            {
+                'type': 'container',
+                'service': 'db',
+                'action': 'create',
+                'id': 'ababa',
+                'attributes': {
+                    'name': 'test_db_1',
+                    'image': 'example/db',
+                },
+                'time': dt_with_microseconds(1420092061, 4),
+                'container': Container(None, get_container('ababa')),
+            },
+            {
+                'type': 'container',
+                'service': 'db',
+                'action': 'destroy',
+                'id': 'eeeee',
+                'attributes': {
+                    'name': 'test_db_1',
+                    'image': 'example/db',
+                },
+                'time': dt_with_microseconds(1420092061, 4),
+                'container': None,
+            },
+        ]
+
     def test_net_unset(self):
         project = Project.from_config(
             name='test',
@@ -620,3 +790,23 @@ class ProjectTest(unittest.TestCase):
         self.mock_client.pull.side_effect = OperationFailedError(b'pull error')
         with pytest.raises(ProjectError):
             project.pull(parallel_pull=True)
+
+    def test_avoid_multiple_push(self):
+        service_config_latest = {'image': 'busybox:latest', 'build': '.'}
+        service_config_default = {'image': 'busybox', 'build': '.'}
+        service_config_sha = {
+            'image': 'busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d',
+            'build': '.'
+        }
+        svc1 = Service('busy1', **service_config_latest)
+        svc1_1 = Service('busy11', **service_config_latest)
+        svc2 = Service('busy2', **service_config_default)
+        svc2_1 = Service('busy21', **service_config_default)
+        svc3 = Service('busy3', **service_config_sha)
+        svc3_1 = Service('busy31', **service_config_sha)
+        project = Project(
+            'composetest', [svc1, svc1_1, svc2, svc2_1, svc3, svc3_1], self.mock_client
+        )
+        with mock.patch('compose.service.Service.push') as fake_push:
+            project.push()
+            assert fake_push.call_count == 2
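One plausible reading of the asserted call count of 2: pushes are de-duplicated by image name and digest-pinned images are skipped entirely, so the two busybox:latest services share one push, the two busybox services share another, and the sha256-pinned pair is never pushed. A sketch of that bookkeeping under those assumptions:

    DIGEST = ('busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773'
              'de84002feea2d4eb006b52004ee8502d')

    def plan_pushes(images):
        # Sketch: skip digest-pinned images (pull-only), push each name once.
        to_push = []
        for image in images:
            if '@sha256:' in image:
                continue
            if image not in to_push:
                to_push.append(image)
        return to_push

    images = ['busybox:latest', 'busybox:latest', 'busybox', 'busybox',
              DIGEST, DIGEST]
    assert len(plan_pushes(images)) == 2  # matches fake_push.call_count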
@@ -5,6 +5,7 @@ import docker
 import pytest
 from docker.constants import DEFAULT_DOCKER_API_VERSION
 from docker.errors import APIError
+from docker.errors import ImageNotFound
 from docker.errors import NotFound
 
 from .. import mock
@@ -21,6 +22,7 @@ from compose.const import LABEL_ONE_OFF
 from compose.const import LABEL_PROJECT
 from compose.const import LABEL_SERVICE
 from compose.const import SECRETS_PATH
+from compose.const import WINDOWS_LONGPATH_PREFIX
 from compose.container import Container
 from compose.errors import OperationFailedError
 from compose.parallel import ParallelStreamWriter
@@ -38,6 +40,7 @@ from compose.service import NeedsBuildError
 from compose.service import NetworkMode
 from compose.service import NoSuchImageError
 from compose.service import parse_repository_tag
+from compose.service import rewrite_build_path
 from compose.service import Service
 from compose.service import ServiceNetworkMode
 from compose.service import warn_on_masked_volume
@@ -317,13 +320,14 @@ class ServiceTest(unittest.TestCase):
         self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
         prev_container = mock.Mock(
             id='ababab',
-            image_config={'ContainerConfig': {}})
+            image_config={'ContainerConfig': {}}
+        )
+        prev_container.full_slug = 'abcdefff1234'
         prev_container.get.return_value = None
 
         opts = service._get_container_create_options(
-            {},
-            1,
-            previous_container=prev_container)
+            {}, 1, previous_container=prev_container
+        )
 
         assert service.options['labels'] == labels
         assert service.options['environment'] == environment
@@ -355,11 +359,13 @@ class ServiceTest(unittest.TestCase):
         }.get(key, None)
 
         prev_container.get.side_effect = container_get
+        prev_container.full_slug = 'abcdefff1234'
 
         opts = service._get_container_create_options(
             {},
             1,
-            previous_container=prev_container)
+            previous_container=prev_container
+        )
 
         assert opts['environment'] == ['affinity:container==ababab']
 
@@ -370,6 +376,7 @@ class ServiceTest(unittest.TestCase):
             id='ababab',
             image_config={'ContainerConfig': {}})
         prev_container.get.return_value = None
+        prev_container.full_slug = 'abcdefff1234'
 
         opts = service._get_container_create_options(
             {},
@@ -386,7 +393,7 @@ class ServiceTest(unittest.TestCase):
 
     @mock.patch('compose.service.Container', autospec=True)
     def test_get_container(self, mock_container_class):
-        container_dict = dict(Name='default_foo_2')
+        container_dict = dict(Name='default_foo_2_bdfa3ed91e2c')
         self.mock_client.containers.return_value = [container_dict]
         service = Service('foo', image='foo', client=self.mock_client)
 
@@ -463,6 +470,7 @@ class ServiceTest(unittest.TestCase):
     @mock.patch('compose.service.Container', autospec=True)
     def test_recreate_container(self, _):
         mock_container = mock.create_autospec(Container)
+        mock_container.full_slug = 'abcdefff1234'
         service = Service('foo', client=self.mock_client, image='someimage')
         service.image = lambda: {'Id': 'abc123'}
         new_container = service.recreate_container(mock_container)
@@ -476,6 +484,7 @@ class ServiceTest(unittest.TestCase):
     @mock.patch('compose.service.Container', autospec=True)
     def test_recreate_container_with_timeout(self, _):
         mock_container = mock.create_autospec(Container)
+        mock_container.full_slug = 'abcdefff1234'
         self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
         service = Service('foo', client=self.mock_client, image='someimage')
         service.recreate_container(mock_container, timeout=1)
@@ -701,17 +710,19 @@ class ServiceTest(unittest.TestCase):
             image='example.com/foo',
             client=self.mock_client,
             network_mode=NetworkMode('bridge'),
-            networks={'bridge': {}},
+            networks={'bridge': {}, 'net2': {}},
             links=[(Service('one', client=self.mock_client), 'one')],
-            volumes_from=[VolumeFromSpec(Service('two', client=self.mock_client), 'rw', 'service')]
+            volumes_from=[VolumeFromSpec(Service('two', client=self.mock_client), 'rw', 'service')],
+            volumes=[VolumeSpec('/ext', '/int', 'ro')],
+            build={'context': 'some/random/path'},
         )
         config_hash = service.config_hash
 
         for api_version in set(API_VERSIONS.values()):
             self.mock_client.api_version = api_version
-            assert service._get_container_create_options({}, 1)['labels'][LABEL_CONFIG_HASH] == (
-                config_hash
-            )
+            assert service._get_container_create_options(
+                {}, 1
+            )['labels'][LABEL_CONFIG_HASH] == config_hash
 
     def test_remove_image_none(self):
         web = Service('web', image='example', client=self.mock_client)
@@ -745,6 +756,13 @@ class ServiceTest(unittest.TestCase):
         mock_log.error.assert_called_once_with(
             "Failed to remove image for service %s: %s", web.name, error)
 
+    def test_remove_non_existing_image(self):
+        self.mock_client.remove_image.side_effect = ImageNotFound('image not found')
+        web = Service('web', image='example', client=self.mock_client)
+        with mock.patch('compose.service.log', autospec=True) as mock_log:
+            assert not web.remove_image(ImageType.all)
+        mock_log.warning.assert_called_once_with("Image %s not found.", web.image_name)
+
     def test_specifies_host_port_with_no_ports(self):
         service = Service(
             'foo',
'foo',
|
||||
@@ -1029,6 +1047,23 @@ class ServiceTest(unittest.TestCase):
|
||||
assert len(override_opts['binds']) == 1
|
||||
assert override_opts['binds'][0] == 'vol:/data:rw'
|
||||
|
||||
def test_volumes_order_is_preserved(self):
|
||||
service = Service('foo', client=self.mock_client)
|
||||
volumes = [
|
||||
VolumeSpec.parse(cfg) for cfg in [
|
||||
'/v{0}:/v{0}:rw'.format(i) for i in range(6)
|
||||
]
|
||||
]
|
||||
ctnr_opts, override_opts = service._build_container_volume_options(
|
||||
previous_container=None,
|
||||
container_options={
|
||||
'volumes': volumes,
|
||||
'environment': {},
|
||||
},
|
||||
override_options={},
|
||||
)
|
||||
assert override_opts['binds'] == [vol.repr() for vol in volumes]
|
||||
|
||||
|
||||
class TestServiceNetwork(unittest.TestCase):
|
||||
def setUp(self):
|
||||
@@ -1461,3 +1496,28 @@ class ServiceSecretTest(unittest.TestCase):
 
         assert volumes[0].source == secret1['file']
         assert volumes[0].target == '{}/{}'.format(SECRETS_PATH, secret1['secret'].source)
+
+
+class RewriteBuildPathTest(unittest.TestCase):
+    @mock.patch('compose.service.IS_WINDOWS_PLATFORM', True)
+    def test_rewrite_url_no_prefix(self):
+        urls = [
+            'http://test.com',
+            'https://test.com',
+            'git://test.com',
+            'github.com/test/test',
+            'git@test.com',
+        ]
+        for u in urls:
+            assert rewrite_build_path(u) == u
+
+    @mock.patch('compose.service.IS_WINDOWS_PLATFORM', True)
+    def test_rewrite_windows_path(self):
+        assert rewrite_build_path('C:\\context') == WINDOWS_LONGPATH_PREFIX + 'C:\\context'
+        assert rewrite_build_path(
+            rewrite_build_path('C:\\context')
+        ) == rewrite_build_path('C:\\context')
+
+    @mock.patch('compose.service.IS_WINDOWS_PLATFORM', False)
+    def test_rewrite_unix_path(self):
+        assert rewrite_build_path('/context') == '/context'
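test_rewrite_windows_path checks two things: the prefix is applied to drive-letter paths, and applying the rewrite twice changes nothing (idempotence). Assuming the conventional Windows extended-length prefix \\?\, a sketch:

    WINDOWS_LONGPATH_PREFIX = '\\\\?\\'  # assumption: the conventional \\?\ prefix

    def rewrite_build_path_sketch(path):
        # Prefix drive-letter paths; leave URLs and already-prefixed paths alone.
        if path.startswith(WINDOWS_LONGPATH_PREFIX) or ':\\' not in path[:3]:
            return path
        return WINDOWS_LONGPATH_PREFIX + path

    p = rewrite_build_path_sketch('C:\\context')
    assert p == WINDOWS_LONGPATH_PREFIX + 'C:\\context'
    assert rewrite_build_path_sketch(p) == p   # idempotent
    assert rewrite_build_path_sketch('http://test.com') == 'http://test.com'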
@@ -68,3 +68,11 @@ class TestParseBytes(object):
         assert utils.parse_bytes(123) == 123
         assert utils.parse_bytes('foobar') is None
         assert utils.parse_bytes('123') == 123
+
+
+class TestMoreItertools(object):
+    def test_unique_everseen(self):
+        unique = utils.unique_everseen
+        assert list(unique([2, 1, 2, 1])) == [2, 1]
+        assert list(unique([2, 1, 2, 1], hash)) == [2, 1]
+        assert list(unique([2, 1, 2, 1], lambda x: 'key_%s' % x)) == [2, 1]
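unique_everseen keeps the first occurrence of each key while preserving input order, the classic itertools recipe. Compose's version may differ in detail, but the behavior the test pins down is:

    def unique_everseen(iterable, key=lambda x: x):
        # Classic recipe: yield items whose key has not been seen before.
        seen = set()
        for item in iterable:
            k = key(item)
            if k not in seen:
                seen.add(k)
                yield item

    assert list(unique_everseen([2, 1, 2, 1])) == [2, 1]
    assert list(unique_everseen([2, 1, 2, 1], hash)) == [2, 1]
    assert list(unique_everseen([2, 1, 2, 1], lambda x: 'key_%s' % x)) == [2, 1]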