Compare commits


88 Commits

Author SHA1 Message Date
Ulysses Souza
8a1c60f64e Bump version to 1.25.5
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-04-10 09:30:41 +02:00
Ulysses Souza
750e521632 Update changelog
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-04-09 18:17:42 +02:00
Ulysses Souza
4afe60f271 Bump version to 1.25.5-rc1
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-04-09 14:24:01 +02:00
Ulysses Souza
286cc1c3be Bump OPENSSL to 1.1.1f
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-04-09 14:24:01 +02:00
Anca Iordache
41d229ebc4 Fix v3.8 schema support for binaries
Signed-off-by: Anca Iordache <anca.iordache@docker.com>
(cherry picked from commit 98abe07646)
2020-04-08 17:16:25 +02:00
Anca Iordache
15bb3eeb39 add warning when max_replicas_per_node limits scale
Signed-off-by: Anca Iordache <anca.iordache@docker.com>
(cherry picked from commit 79fe7ca997)
2020-04-08 17:16:19 +02:00
Anca Iordache
5d34f12f07 set min engine version needed for v38 schema support
Signed-off-by: Anca Iordache <anca.iordache@docker.com>
(cherry picked from commit 02d8e9ee14)
2020-04-08 17:16:13 +02:00
Anca Iordache
82873d2b93 update api version for 3.8
Signed-off-by: Anca Iordache <anca.iordache@docker.com>
(cherry picked from commit d9b0fabd9b)
2020-04-08 17:16:08 +02:00
Anca Iordache
0ab351d71e test update - remove 'placement' from unsupported fields
Signed-off-by: Anca Iordache <anca.iordache@docker.com>
(cherry picked from commit 09c80ce49b)
2020-04-08 17:16:01 +02:00
Anca Iordache
fe8326619f Add v3.8 schema support
- service scale bounded by 'max_replicas_per_node' field

Signed-off-by: Anca Iordache <anca.iordache@docker.com>
(cherry picked from commit 391e5a6bc2)
2020-04-08 17:15:38 +02:00
Ulysses Souza
8d51620a78 Bump version to 1.25.4
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-02-03 09:53:27 +01:00
Ulysses Souza
36a5d4d401 Bump version to 1.25.4-rc2
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-01-31 10:53:59 +01:00
Ulysses Souza
f2a4f31a6d Remove None entries on execute command
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-01-31 10:52:51 +01:00
Ulysses Souza
c1a5734cad Bump version to 1.25.4-rc1
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-01-30 16:05:12 +01:00
Ulysses Souza
a32696ee1c Force MacOS SDK version to "10.11"
This is because the new CI machines are on 10.14

Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-01-30 15:56:25 +01:00
Ulysses Souza
d4d1b42bea Update version to 1.25.3
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-01-23 14:25:58 +01:00
Nicolas De Loof
ef1a9e9423 Force sha256 file to be ASCII encoded
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-23 14:17:58 +01:00
Ulysses Souza
f2d8c610f1 Enforce Python37 in the creation of virtualenv
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-01-23 10:39:31 +01:00
Ulysses Souza
698e2846a8 Update version to 1.25.2
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-01-20 17:37:52 +01:00
Ulysses Souza
be4b7b559d Update release version for 1.25.2-rc2
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-01-20 15:25:13 +01:00
Ulysses Souza
ebc56c5ade Merge pull request #7133 from docker/jenkins
Automate release process
2020-01-20 15:23:59 +01:00
Ulysses Souza
6286beb321 Merge pull request #7152 from docker/bump-pyinstaller
Bump pyinstaller to 3.6
2020-01-17 18:33:24 +01:00
Ulysses Souza
f0e5926ea7 Merge pull request #6950 from benthorner/master
Add "--attach-dependencies" to command "up" for attaching to dependencies
2020-01-16 18:10:25 +01:00
Ben Thorner
a6b602d086 Support attaching to dependencies on up
When using the 'up' command, only services listed as arguments are
attached to, which can be very different from the 'no argument' case
if a service has many and deep dependencies:

   - It's not clear when dependencies have failed to start. Have to run
'compose ps' separately to find out.
   - It's not clear when dependencies are erroring. Have to run 'compose
logs' separately to find out.

With a simple setup, it's possible to work around these issues by
using the 'up' command without arguments. But when there are lots of
'top-level' services, with common dependencies, in a single config,
using 'up' without arguments isn't practical due to resource limits
and the sheer volume of output from other services.

This introduces a new '--attach-dependencies' flag to optionally attach
dependent containers as part of the 'up' command. This makes their logs
visible in the output, alongside the listed services. It also means we
benefit from the '--abort-on-container-exit' behaviour when dependencies
fail to start, giving more visibility of the failure.

Signed-off-by: Ben Thorner <ben.thorner@digital.cabinet-office.gov.uk>
2020-01-16 13:41:54 +00:00
Ulysses Souza
387f5e4c96 Bump pyinstaller to 3.6
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-01-16 13:46:47 +01:00
yukihira1992
53d00f7677 Refactored mutable default values.
Signed-off-by: yukihira1992 <ykhr0130@gmail.com>
2020-01-15 11:16:24 +01:00
Christopher Crone
a2cdffeeee Bump Linux dependencies
- Alpine 3.10.3
- Debian Stretch 20191118
- Python 3.7.5

Signed-off-by: Christopher Crone <christopher.crone@docker.com>
2020-01-15 10:54:57 +01:00
Christopher Crone
a92a8eb508 Bump macOS dependencies
- Python 3.7.5
- OpenSSL 1.1.1d

Signed-off-by: Christopher Crone <christopher.crone@docker.com>
2020-01-15 10:54:57 +01:00
Ulysses Souza
d1ef7c41aa Merge pull request #7140 from docker/dependabot/pip/cached-property-1.5.1
Bump cached-property from 1.3.0 to 1.5.1
2020-01-14 19:50:31 +01:00
Ulysses Souza
78dc92246f Merge pull request #7142 from docker/dependabot/pip/ipaddress-1.0.23
Bump ipaddress from 1.0.18 to 1.0.23
2020-01-14 18:51:15 +01:00
dependabot-preview[bot]
dafece4ae5 Bump cached-property from 1.3.0 to 1.5.1
Bumps [cached-property](https://github.com/pydanny/cached-property) from 1.3.0 to 1.5.1.
- [Release notes](https://github.com/pydanny/cached-property/releases)
- [Changelog](https://github.com/pydanny/cached-property/blob/master/HISTORY.rst)
- [Commits](https://github.com/pydanny/cached-property/compare/1.3.0...1.5.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-14 17:26:05 +00:00
Ulysses Souza
f15e54ab1b Merge pull request #7090 from docker/dependabot/pip/urllib3-1.25.7
Bump urllib3 from 1.24.2 to 1.25.7
2020-01-14 18:25:49 +01:00
Ulysses Souza
0e36e9f3eb Merge pull request #7141 from docker/dependabot/pip/coverage-5.0.3
Bump coverage from 4.5.4 to 5.0.3
2020-01-14 18:25:20 +01:00
Ulysses Souza
71e166e3bd Merge pull request #7139 from docker/dependabot/pip/certifi-2019.11.28
Bump certifi from 2017.4.17 to 2019.11.28
2020-01-14 18:24:58 +01:00
dependabot-preview[bot]
120a7b1b06 Bump ipaddress from 1.0.18 to 1.0.23
Bumps [ipaddress](https://github.com/phihag/ipaddress) from 1.0.18 to 1.0.23.
- [Release notes](https://github.com/phihag/ipaddress/releases)
- [Commits](https://github.com/phihag/ipaddress/compare/v1.0.18...v1.0.23)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-14 17:23:07 +00:00
Ulysses Souza
4b332453db Merge pull request #7144 from docker/dependabot/pip/paramiko-2.7.1
Bump paramiko from 2.6.0 to 2.7.1
2020-01-14 18:21:39 +01:00
Ulysses Souza
d87e19c14b Merge pull request #7027 from kiniou/compatibility-option-from-env
Allow compatibility option with `COMPOSE_COMPATIBILITY` environment variable
2020-01-13 19:28:48 +01:00
Kevin Roy
093cc2c089 Allow setting compatibility options from environment
Signed-off-by: Kevin Roy <kiniou@gmail.com>
2020-01-13 14:53:03 +01:00
dependabot-preview[bot]
661afb4003 Bump paramiko from 2.6.0 to 2.7.1
Bumps [paramiko](https://github.com/paramiko/paramiko) from 2.6.0 to 2.7.1.
- [Release notes](https://github.com/paramiko/paramiko/releases)
- [Changelog](https://github.com/paramiko/paramiko/blob/master/NEWS)
- [Commits](https://github.com/paramiko/paramiko/compare/2.6.0...2.7.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-13 13:34:32 +00:00
dependabot-preview[bot]
2cdd2f626b Bump coverage from 4.5.4 to 5.0.3
Bumps [coverage](https://github.com/nedbat/coveragepy) from 4.5.4 to 5.0.3.
- [Release notes](https://github.com/nedbat/coveragepy/releases)
- [Changelog](https://github.com/nedbat/coveragepy/blob/master/CHANGES.rst)
- [Commits](https://github.com/nedbat/coveragepy/compare/coverage-4.5.4...coverage-5.0.3)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-13 13:33:13 +00:00
dependabot-preview[bot]
707a340304 Bump certifi from 2017.4.17 to 2019.11.28
Bumps [certifi](https://github.com/certifi/python-certifi) from 2017.4.17 to 2019.11.28.
- [Release notes](https://github.com/certifi/python-certifi/releases)
- [Commits](https://github.com/certifi/python-certifi/compare/2017.04.17...2019.11.28)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-13 13:32:31 +00:00
Ulysses Souza
f1cfd93c8f Merge pull request #7096 from docker/dependabot/pip/pytest-5.3.2
Bump pytest from 3.6.3 to 5.3.2
2020-01-10 17:47:09 +01:00
dependabot-preview[bot]
3ea84fd9bc Bump pytest from 3.6.3 to 5.3.2
Bumps [pytest](https://github.com/pytest-dev/pytest) from 3.6.3 to 5.3.2.
- [Release notes](https://github.com/pytest-dev/pytest/releases)
- [Changelog](https://github.com/pytest-dev/pytest/blob/master/CHANGELOG.rst)
- [Commits](https://github.com/pytest-dev/pytest/compare/3.6.3...5.3.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-10 16:10:59 +00:00
Ulysses Souza
2cb1b4bd5b Merge pull request #6873 from frenzymadness/pytest_compatibility
Bump Pytest and refactor for compatibility with the new version
2020-01-10 17:09:33 +01:00
Lumir Balhar
a436fb953c Remove indentation from test YAML
Signed-off-by: Lumir Balhar <lbalhar@redhat.com>
2020-01-10 08:41:11 +01:00
Nicolas De Loof
b2e9b83d46 update public CI so we run tests on same combinations of python+docker
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-09 16:15:34 +01:00
Nicolas De Loof
7ca5973a71 run release on tag by Jenkinsfile
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-09 16:11:03 +01:00
Ulysses Souza
26f1aeff15 Merge pull request #7054 from docker/dependabot/pip/jsonschema-3.2.0
Bump jsonschema from 3.0.1 to 3.2.0
2020-01-09 15:30:05 +01:00
Sergey Fursov
c818bfc62c support PyYAML up to 5.x version
Signed-off-by: Sergey Fursov <geyser85@gmail.com>
2020-01-09 12:30:51 +01:00
Lumir Balhar
73cc89c15f Use stdlib modules instead of deprecated pytest fixtures
Signed-off-by: Lumír Balhar <lbalhar@redhat.com>
2020-01-09 07:01:44 +01:00
Lumir Balhar
60458c8ae7 Implement custom context manager for changing CWD
Signed-off-by: Lumír Balhar <lbalhar@redhat.com>
2020-01-09 06:55:28 +01:00
Lumir Balhar
fb14f41ddb Move to the latest pytest versions for Python 2 and 3
Signed-off-by: Lumír Balhar <lbalhar@redhat.com>
2020-01-09 06:55:28 +01:00
Sebastiaan van Stijn
33eeef41ab Remove "bundle" subcommand and support for DAB files
Deploying stacks using the "Docker Application Bundle" (`.dab`) file
format was introduced as an experimental feature in Docker 1.13 /
17.03, but superseded by support for Docker Compose files in the CLI.

With no development being done on this feature, and no active use of the file
format, support for the DAB file format and the top-level `docker deploy` command
(hidden by default in 19.03), will be removed from the CLI, in favour of
`docker stack deploy` using compose files.

This patch removes the `docker-compose bundle` subcommand from Docker Compose,
which was used to convert compose files into DAB files (and given the above,
will no longer be needed).

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2020-01-08 16:42:49 +01:00
dependabot-preview[bot]
4ace98acbe Bump jsonschema from 3.0.1 to 3.2.0
Bumps [jsonschema](https://github.com/Julian/jsonschema) from 3.0.1 to 3.2.0.
- [Release notes](https://github.com/Julian/jsonschema/releases)
- [Changelog](https://github.com/Julian/jsonschema/blob/master/CHANGELOG.rst)
- [Commits](https://github.com/Julian/jsonschema/compare/v3.0.1...v3.2.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-08 15:01:58 +00:00
Ulysses Souza
36790fc0e8 Merge pull request #7058 from docker/dependabot/pip/idna-2.8
Bump idna from 2.5 to 2.8
2020-01-08 16:00:45 +01:00
Ulysses Souza
23d663a84e Merge pull request #7059 from docker/dependabot/pip/pysocks-1.7.1
Bump pysocks from 1.6.7 to 1.7.1
2020-01-08 15:58:00 +01:00
dependabot-preview[bot]
81e3566ebd Bump urllib3 from 1.24.2 to 1.25.7
Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.24.2 to 1.25.7.
- [Release notes](https://github.com/urllib3/urllib3/releases)
- [Changelog](https://github.com/urllib3/urllib3/blob/master/CHANGES.rst)
- [Commits](https://github.com/urllib3/urllib3/compare/1.24.2...1.25.7)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-08 14:42:12 +00:00
Ulysses Souza
bd0ec191bd Merge pull request #7092 from docker/dependabot/pip/colorama-0.4.3
Bump colorama from 0.4.0 to 0.4.3
2020-01-08 15:41:40 +01:00
Ulysses Souza
d27ecf694c Merge pull request #7111 from docker/dependabot/pip/websocket-client-0.57.0
Bump websocket-client from 0.32.0 to 0.57.0
2020-01-08 15:40:54 +01:00
Ulysses Souza
bc90b7badf Merge pull request #7077 from docker/dependabot/pip/ddt-1.2.2
Bump ddt from 1.2.0 to 1.2.2
2020-01-08 15:35:38 +01:00
Ulysses Souza
702dd9406c Merge pull request #7093 from ulyssessouza/warn-invalid-version
Validate version format on formats 2+
2020-01-08 10:47:01 +01:00
Ulysses Souza
704ee56553 Merge pull request #7122 from ndeloof/shutil
Don't adjust output on terminal width when piped into another command
2020-01-08 10:46:03 +01:00
dependabot-preview[bot]
f2f6b30350 Bump websocket-client from 0.32.0 to 0.57.0
Bumps [websocket-client](https://github.com/websocket-client/websocket-client) from 0.32.0 to 0.57.0.
- [Release notes](https://github.com/websocket-client/websocket-client/releases)
- [Changelog](https://github.com/websocket-client/websocket-client/blob/master/ChangeLog)
- [Commits](https://github.com/websocket-client/websocket-client/compare/v0.32.0...v0.57.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-08 09:17:12 +00:00
dependabot-preview[bot]
75c45c27df Bump pysocks from 1.6.7 to 1.7.1
Bumps [pysocks](https://github.com/Anorov/PySocks) from 1.6.7 to 1.7.1.
- [Release notes](https://github.com/Anorov/PySocks/releases)
- [Changelog](https://github.com/Anorov/PySocks/blob/master/CHANGELOG.md)
- [Commits](https://github.com/Anorov/PySocks/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-08 09:17:04 +00:00
Nicolas De Loof
31396786ba publish package on PyPI
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
d6c13b69c3 compute sha256sum
Windows nodes don't have openssl installed :'(

Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
0e826efee5 attempt to fix windows build
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
1af3852277 Generate changelog
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
9c6db546e8 Remove Circle-CI and AppVeyor config files
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
417d72ea3d Compute checksum
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
bdb11849b1 Use .Jenkinsfile extension for IDE support
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
da55677154 Release pipeline
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
7be66baaa7 TAG and BUILD_TAG are obsolete
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
6b0acc9ecb tests don't run in parallel
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
8859ab0d66 Use gotemplate formater to extract specific data
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
9478725a70 Fix tested docker releases in Pipeline
This allows Engine team to trigger a compose build by pushing a PR
changing the `dockerVersions` variable to test Release Candidates

Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
2955f48468 Get docker versions using a plain command line
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
644c55c4f7 Use declarative syntax when possible
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
912d90832c Use a simple script to get docker-ce releases
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
c5c287db5c We don't use FOSSA anymore
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
dd889b990b Prepare drop of python 2.x support
see https://github.com/docker/compose/issues/6890

Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 10:15:48 +01:00
Nicolas De Loof
3df4ba1544 Assume infinite terminal width when not running in a terminal
Close https://github.com/docker/compose/issues/7119

Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
2020-01-08 08:40:18 +01:00
ulyssessouza
7f49bbb998 Validate version format on formats 2+
Signed-off-by: ulyssessouza <ulyssessouza@gmail.com>
2020-01-07 18:37:47 +01:00
Ulysses Souza
9f373b0b86 Merge pull request #7128 from docker/post-release-1.25.1
Post release 1.25.1
2020-01-07 17:20:41 +01:00
Ulysses Souza
67cce913a6 Set dev version to 1.26.0dev after releasing 1.25.1
Signed-off-by: Ulysses Souza <ulyssessouza@gmail.com>
2020-01-07 16:44:42 +01:00
dependabot-preview[bot]
025002260b Bump colorama from 0.4.0 to 0.4.3
Bumps [colorama](https://github.com/tartley/colorama) from 0.4.0 to 0.4.3.
- [Release notes](https://github.com/tartley/colorama/releases)
- [Changelog](https://github.com/tartley/colorama/blob/master/CHANGELOG.rst)
- [Commits](https://github.com/tartley/colorama/compare/0.4.0...0.4.3)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-12-09 13:31:00 +00:00
dependabot-preview[bot]
e6e9263260 Bump ddt from 1.2.0 to 1.2.2
Bumps [ddt](https://github.com/datadriventests/ddt) from 1.2.0 to 1.2.2.
- [Release notes](https://github.com/datadriventests/ddt/releases)
- [Commits](https://github.com/datadriventests/ddt/compare/1.2.0...1.2.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-12-04 07:10:30 +00:00
dependabot-preview[bot]
101ee1cd62 Bump idna from 2.5 to 2.8
Bumps [idna](https://github.com/kjd/idna) from 2.5 to 2.8.
- [Release notes](https://github.com/kjd/idna/releases)
- [Changelog](https://github.com/kjd/idna/blob/master/HISTORY.rst)
- [Commits](https://github.com/kjd/idna/compare/v2.5...v2.8)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-11-25 13:27:05 +00:00
59 changed files with 2757 additions and 2727 deletions


@@ -1,66 +0,0 @@
version: 2
jobs:
test:
macos:
xcode: "9.4.1"
steps:
- checkout
- run:
name: setup script
command: ./script/setup/osx
- run:
name: install tox
command: sudo pip install --upgrade tox==2.1.1 virtualenv==16.2.0
- run:
name: unit tests
command: tox -e py27,py37 -- tests/unit
build-osx-binary:
macos:
xcode: "9.4.1"
steps:
- checkout
- run:
name: upgrade python tools
command: sudo pip install --upgrade pip virtualenv==16.2.0
- run:
name: setup script
command: DEPLOYMENT_TARGET=10.11 ./script/setup/osx
- run:
name: build script
command: ./script/build/osx
- store_artifacts:
path: dist/docker-compose-Darwin-x86_64
destination: docker-compose-Darwin-x86_64
- store_artifacts:
path: dist/docker-compose-Darwin-x86_64.tgz
destination: docker-compose-Darwin-x86_64.tgz
- deploy:
name: Deploy binary to bintray
command: |
OS_NAME=Darwin PKG_NAME=osx ./script/circle/bintray-deploy.sh
build-linux-binary:
machine:
enabled: true
steps:
- checkout
- run:
name: build Linux binary
command: ./script/build/linux
- store_artifacts:
path: dist/docker-compose-Linux-x86_64
destination: docker-compose-Linux-x86_64
- deploy:
name: Deploy binary to bintray
command: |
OS_NAME=Linux PKG_NAME=linux ./script/circle/bintray-deploy.sh
workflows:
version: 2
all:
jobs:
- test
- build-linux-binary
- build-osx-binary


@@ -11,3 +11,4 @@ docs/_site
 .tox
 **/__pycache__
 *.pyc
+Jenkinsfile


@@ -1,6 +1,68 @@
 Change log
 ==========

+1.25.5 (2020-02-04)
+-------------------
+
+### Features
+
+- Bump OpenSSL from 1.1.1d to 1.1.1f
+- Add 3.8 compose version
+
+1.25.4 (2020-01-23)
+-------------------
+
+### Bugfixes
+
+- Fix CI script to enforce the minimal MacOS version to 10.11
+- Fix docker-compose exec for keys with no value
+
+1.25.3 (2020-01-23)
+-------------------
+
+### Bugfixes
+
+- Fix CI script to enforce the compilation with Python3
+- Fix binary's sha256 in the release page
+
+1.25.2 (2020-01-20)
+-------------------
+
+### Features
+
+- Allow compatibility option with `COMPOSE_COMPATIBILITY` environment variable
+- Bump PyInstaller from 3.5 to 3.6
+- Bump pysocks from 1.6.7 to 1.7.1
+- Bump websocket-client from 0.32.0 to 0.57.0
+- Bump urllib3 from 1.24.2 to 1.25.7
+- Bump jsonschema from 3.0.1 to 3.2.0
+- Bump PyYAML from 4.2b1 to 5.3
+- Bump certifi from 2017.4.17 to 2019.11.28
+- Bump coverage from 4.5.4 to 5.0.3
+- Bump paramiko from 2.6.0 to 2.7.1
+- Bump cached-property from 1.3.0 to 1.5.1
+- Bump minor Linux and MacOSX dependencies
+
+### Bugfixes
+
+- Validate version format on formats 2+
+- Assume infinite terminal width when not running in a terminal
+
 1.25.1 (2020-01-06)
 -------------------


@@ -1,9 +1,9 @@
-ARG DOCKER_VERSION=18.09.7
-ARG PYTHON_VERSION=3.7.4
+ARG DOCKER_VERSION=19.03.5
+ARG PYTHON_VERSION=3.7.5

 ARG BUILD_ALPINE_VERSION=3.10
 ARG BUILD_DEBIAN_VERSION=slim-stretch
-ARG RUNTIME_ALPINE_VERSION=3.10.1
-ARG RUNTIME_DEBIAN_VERSION=stretch-20190812-slim
+ARG RUNTIME_ALPINE_VERSION=3.10.3
+ARG RUNTIME_DEBIAN_VERSION=stretch-20191118-slim

 ARG BUILD_PLATFORM=alpine

Jenkinsfile (vendored, 195 lines changed)

@@ -1,95 +1,112 @@
#!groovy
def buildImage = { String baseImage ->
def image
wrappedNode(label: "ubuntu && amd64 && !zfs", cleanWorkspace: true) {
stage("build image for \"${baseImage}\"") {
checkout(scm)
def imageName = "dockerbuildbot/compose:${baseImage}-${gitCommit()}"
image = docker.image(imageName)
try {
image.pull()
} catch (Exception exc) {
sh """GIT_COMMIT=\$(script/build/write-git-sha) && \\
docker build -t ${imageName} \\
--target build \\
--build-arg BUILD_PLATFORM="${baseImage}" \\
--build-arg GIT_COMMIT="${GIT_COMMIT}" \\
.\\
"""
sh "docker push ${imageName}"
echo "${imageName}"
return imageName
}
}
}
echo "image.id: ${image.id}"
return image.id
}
def get_versions = { String imageId, int number ->
def docker_versions
wrappedNode(label: "ubuntu && amd64 && !zfs") {
def result = sh(script: """docker run --rm \\
--entrypoint=/code/.tox/py27/bin/python \\
${imageId} \\
/code/script/test/versions.py -n ${number} docker/docker-ce recent
""", returnStdout: true
)
docker_versions = result.split()
}
return docker_versions
}
def runTests = { Map settings ->
def dockerVersions = settings.get("dockerVersions", null)
def pythonVersions = settings.get("pythonVersions", null)
def baseImage = settings.get("baseImage", null)
def imageName = settings.get("image", null)
if (!pythonVersions) {
throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py27,py37')`")
}
if (!dockerVersions) {
throw new Exception("Need Docker versions to test. e.g.: `runTests(dockerVersions: 'all')`")
}
{ ->
wrappedNode(label: "ubuntu && amd64 && !zfs", cleanWorkspace: true) {
stage("test python=${pythonVersions} / docker=${dockerVersions} / baseImage=${baseImage}") {
checkout(scm)
def storageDriver = sh(script: 'docker info | awk -F \': \' \'$1 == "Storage Driver" { print $2; exit }\'', returnStdout: true).trim()
echo "Using local system's storage driver: ${storageDriver}"
sh """docker run \\
-t \\
--rm \\
--privileged \\
--volume="\$(pwd)/.git:/code/.git" \\
--volume="/var/run/docker.sock:/var/run/docker.sock" \\
-e "TAG=${imageName}" \\
-e "STORAGE_DRIVER=${storageDriver}" \\
-e "DOCKER_VERSIONS=${dockerVersions}" \\
-e "BUILD_NUMBER=\$BUILD_TAG" \\
-e "PY_TEST_VERSIONS=${pythonVersions}" \\
--entrypoint="script/test/ci" \\
${imageName} \\
--verbose
"""
}
}
}
}
def testMatrix = [failFast: true]
def dockerVersions = ['19.03.5']
def baseImages = ['alpine', 'debian']
def pythonVersions = ['py27', 'py37']
baseImages.each { baseImage ->
def imageName = buildImage(baseImage)
get_versions(imageName, 2).each { dockerVersion ->
pythonVersions.each { pyVersion ->
testMatrix["${baseImage}_${dockerVersion}_${pyVersion}"] = runTests([baseImage: baseImage, image: imageName, dockerVersions: dockerVersion, pythonVersions: pyVersion])
def pythonVersions = ['py37']
pipeline {
agent none
options {
skipDefaultCheckout(true)
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
stages {
stage('Build test images') {
// TODO use declarative 1.5.0 `matrix` once available on CI
parallel {
stage('alpine') {
agent {
label 'ubuntu && amd64 && !zfs'
}
steps {
buildImage('alpine')
}
}
stage('debian') {
agent {
label 'ubuntu && amd64 && !zfs'
}
steps {
buildImage('debian')
}
}
}
}
stage('Test') {
steps {
// TODO use declarative 1.5.0 `matrix` once available on CI
script {
def testMatrix = [:]
baseImages.each { baseImage ->
dockerVersions.each { dockerVersion ->
pythonVersions.each { pythonVersion ->
testMatrix["${baseImage}_${dockerVersion}_${pythonVersion}"] = runTests(dockerVersion, pythonVersion, baseImage)
}
}
}
parallel testMatrix
}
}
}
}
}
}
parallel(testMatrix)
def buildImage(baseImage) {
def scmvar = checkout(scm)
def imageName = "dockerbuildbot/compose:${baseImage}-${scmvar.GIT_COMMIT}"
image = docker.image(imageName)
withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') {
try {
image.pull()
} catch (Exception exc) {
ansiColor('xterm') {
sh """docker build -t ${imageName} \\
--target build \\
--build-arg BUILD_PLATFORM="${baseImage}" \\
--build-arg GIT_COMMIT="${scmvar.GIT_COMMIT}" \\
.\\
"""
sh "docker push ${imageName}"
}
echo "${imageName}"
return imageName
}
}
}
def runTests(dockerVersion, pythonVersion, baseImage) {
return {
stage("python=${pythonVersion} docker=${dockerVersion} ${baseImage}") {
node("ubuntu && amd64 && !zfs") {
def scmvar = checkout(scm)
def imageName = "dockerbuildbot/compose:${baseImage}-${scmvar.GIT_COMMIT}"
def storageDriver = sh(script: "docker info -f \'{{.Driver}}\'", returnStdout: true).trim()
echo "Using local system's storage driver: ${storageDriver}"
withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') {
sh """docker run \\
-t \\
--rm \\
--privileged \\
--volume="\$(pwd)/.git:/code/.git" \\
--volume="/var/run/docker.sock:/var/run/docker.sock" \\
-e "TAG=${imageName}" \\
-e "STORAGE_DRIVER=${storageDriver}" \\
-e "DOCKER_VERSIONS=${dockerVersion}" \\
-e "BUILD_NUMBER=${env.BUILD_NUMBER}" \\
-e "PY_TEST_VERSIONS=${pythonVersion}" \\
--entrypoint="script/test/ci" \\
${imageName} \\
--verbose
"""
}
}
}
}
}

Release.Jenkinsfile (new file, 304 lines added)

@@ -0,0 +1,304 @@
#!groovy
def dockerVersions = ['19.03.5', '18.09.9']
def baseImages = ['alpine', 'debian']
def pythonVersions = ['py37']
pipeline {
agent none
options {
skipDefaultCheckout(true)
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
stages {
stage('Build test images') {
// TODO use declarative 1.5.0 `matrix` once available on CI
parallel {
stage('alpine') {
agent {
label 'linux'
}
steps {
buildImage('alpine')
}
}
stage('debian') {
agent {
label 'linux'
}
steps {
buildImage('debian')
}
}
}
}
stage('Test') {
steps {
// TODO use declarative 1.5.0 `matrix` once available on CI
script {
def testMatrix = [:]
baseImages.each { baseImage ->
dockerVersions.each { dockerVersion ->
pythonVersions.each { pythonVersion ->
testMatrix["${baseImage}_${dockerVersion}_${pythonVersion}"] = runTests(dockerVersion, pythonVersion, baseImage)
}
}
}
parallel testMatrix
}
}
}
stage('Generate Changelog') {
agent {
label 'linux'
}
steps {
checkout scm
withCredentials([string(credentialsId: 'github-compose-release-test-token', variable: 'GITHUB_TOKEN')]) {
sh "./script/release/generate_changelog.sh"
}
archiveArtifacts artifacts: 'CHANGELOG.md'
stash( name: "changelog", includes: 'CHANGELOG.md' )
}
}
stage('Package') {
parallel {
stage('macosx binary') {
agent {
label 'mac-python'
}
environment {
DEPLOYMENT_TARGET="10.11"
}
steps {
checkout scm
sh './script/setup/osx'
sh 'tox -e py37 -- tests/unit'
sh './script/build/osx'
dir ('dist') {
checksum('docker-compose-Darwin-x86_64')
checksum('docker-compose-Darwin-x86_64.tgz')
}
archiveArtifacts artifacts: 'dist/*', fingerprint: true
dir("dist") {
stash name: "bin-darwin"
}
}
}
stage('linux binary') {
agent {
label 'linux'
}
steps {
checkout scm
sh ' ./script/build/linux'
dir ('dist') {
checksum('docker-compose-Linux-x86_64')
}
archiveArtifacts artifacts: 'dist/*', fingerprint: true
dir("dist") {
stash name: "bin-linux"
}
}
}
stage('windows binary') {
agent {
label 'windows-python'
}
environment {
PATH = "$PATH;C:\\Python37;C:\\Python37\\Scripts"
}
steps {
checkout scm
bat 'tox.exe -e py37 -- tests/unit'
powershell '.\\script\\build\\windows.ps1'
dir ('dist') {
checksum('docker-compose-Windows-x86_64.exe')
}
archiveArtifacts artifacts: 'dist/*', fingerprint: true
dir("dist") {
stash name: "bin-win"
}
}
}
stage('alpine image') {
agent {
label 'linux'
}
steps {
buildRuntimeImage('alpine')
}
}
stage('debian image') {
agent {
label 'linux'
}
steps {
buildRuntimeImage('debian')
}
}
}
}
stage('Release') {
when {
buildingTag()
}
parallel {
stage('Pushing images') {
agent {
label 'linux'
}
steps {
pushRuntimeImage('alpine')
pushRuntimeImage('debian')
}
}
stage('Creating Github Release') {
agent {
label 'linux'
}
environment {
GITHUB_TOKEN = credentials('github-release-token')
}
steps {
checkout scm
sh 'mkdir -p dist'
dir("dist") {
unstash "bin-darwin"
unstash "bin-linux"
unstash "bin-win"
unstash "changelog"
sh("""
curl -SfL https://github.com/github/hub/releases/download/v2.13.0/hub-linux-amd64-2.13.0.tgz | tar xzv --wildcards 'hub-*/bin/hub' --strip=2
./hub release create --draft --prerelease=${env.TAG_NAME !=~ /v[0-9\.]+/} \\
-a docker-compose-Darwin-x86_64 \\
-a docker-compose-Darwin-x86_64.sha256 \\
-a docker-compose-Darwin-x86_64.tgz \\
-a docker-compose-Darwin-x86_64.tgz.sha256 \\
-a docker-compose-Linux-x86_64 \\
-a docker-compose-Linux-x86_64.sha256 \\
-a docker-compose-Windows-x86_64.exe \\
-a docker-compose-Windows-x86_64.exe.sha256 \\
-a ../script/run/run.sh \\
-F CHANGELOG.md \${TAG_NAME}
""")
}
}
}
stage('Publishing Python packages') {
agent {
label 'linux'
}
environment {
PYPIRC = credentials('pypirc-docker-dsg-cibot')
}
steps {
checkout scm
sh """
rm -rf build/ dist/
pip install wheel
python setup.py sdist bdist_wheel
pip install twine
~/.local/bin/twine upload --config-file ${PYPIRC} ./dist/docker-compose-*.tar.gz ./dist/docker_compose-*-py2.py3-none-any.whl
"""
}
}
}
}
}
}
def buildImage(baseImage) {
def scmvar = checkout(scm)
def imageName = "dockerbuildbot/compose:${baseImage}-${scmvar.GIT_COMMIT}"
image = docker.image(imageName)
withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') {
try {
image.pull()
} catch (Exception exc) {
ansiColor('xterm') {
sh """docker build -t ${imageName} \\
--target build \\
--build-arg BUILD_PLATFORM="${baseImage}" \\
--build-arg GIT_COMMIT="${scmvar.GIT_COMMIT}" \\
.\\
"""
sh "docker push ${imageName}"
}
echo "${imageName}"
return imageName
}
}
}
def runTests(dockerVersion, pythonVersion, baseImage) {
return {
stage("python=${pythonVersion} docker=${dockerVersion} ${baseImage}") {
node("linux") {
def scmvar = checkout(scm)
def imageName = "dockerbuildbot/compose:${baseImage}-${scmvar.GIT_COMMIT}"
def storageDriver = sh(script: "docker info -f \'{{.Driver}}\'", returnStdout: true).trim()
echo "Using local system's storage driver: ${storageDriver}"
withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') {
sh """docker run \\
-t \\
--rm \\
--privileged \\
--volume="\$(pwd)/.git:/code/.git" \\
--volume="/var/run/docker.sock:/var/run/docker.sock" \\
-e "TAG=${imageName}" \\
-e "STORAGE_DRIVER=${storageDriver}" \\
-e "DOCKER_VERSIONS=${dockerVersion}" \\
-e "BUILD_NUMBER=${env.BUILD_NUMBER}" \\
-e "PY_TEST_VERSIONS=${pythonVersion}" \\
--entrypoint="script/test/ci" \\
${imageName} \\
--verbose
"""
}
}
}
}
}
def buildRuntimeImage(baseImage) {
scmvar = checkout scm
def imageName = "docker/compose:${baseImage}-${env.BRANCH_NAME}"
ansiColor('xterm') {
sh """docker build -t ${imageName} \\
--build-arg BUILD_PLATFORM="${baseImage}" \\
--build-arg GIT_COMMIT="${scmvar.GIT_COMMIT.take(7)}" \\
.
"""
}
sh "mkdir -p dist"
sh "docker save ${imageName} -o dist/docker-compose-${baseImage}.tar"
stash name: "compose-${baseImage}", includes: "dist/docker-compose-${baseImage}.tar"
}
def pushRuntimeImage(baseImage) {
unstash "compose-${baseImage}"
sh "docker load -i dist/docker-compose-${baseImage}.tar"
withDockerRegistry(credentialsId: 'dockerhub-dockerdsgcibot') {
sh "docker push docker/compose:${baseImage}-${env.TAG_NAME}"
if (baseImage == "alpine" && env.TAG_NAME != null) {
sh "docker tag docker/compose:alpine-${env.TAG_NAME} docker/compose:${env.TAG_NAME}"
sh "docker push docker/compose:${env.TAG_NAME}"
}
}
}
def checksum(filepath) {
if (isUnix()) {
sh "openssl sha256 -r -out ${filepath}.sha256 ${filepath}"
} else {
powershell "(Get-FileHash -Path ${filepath} -Algorithm SHA256 | % hash).ToLower() + ' *${filepath}' | Out-File -encoding ascii ${filepath}.sha256"
}
}
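The `checksum` helper writes digests in the `<hex digest> *<filename>` layout produced by `openssl sha256 -r`, and the earlier "Force sha256 file to be ASCII encoded" commit is why the PowerShell branch pins the encoding. A minimal Python sketch of the same output format (the artifact name is only an example):

import hashlib

def write_checksum(filepath):
    # sha256 of the file, written as "<hex digest> *<filepath>" in ASCII,
    # matching `openssl sha256 -r` and the PowerShell branch above
    with open(filepath, 'rb') as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    with open(filepath + '.sha256', 'w', encoding='ascii') as f:
        f.write('{} *{}\n'.format(digest, filepath))

# write_checksum('docker-compose-Linux-x86_64')  # hypothetical artifact name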


@@ -1,24 +0,0 @@
version: '{branch}-{build}'
install:
- "SET PATH=C:\\Python37-x64;C:\\Python37-x64\\Scripts;%PATH%"
- "python --version"
- "pip install tox==2.9.1 virtualenv==16.2.0"
# Build the binary after tests
build: false
test_script:
- "tox -e py27,py37 -- tests/unit"
- ps: ".\\script\\build\\windows.ps1"
artifacts:
- path: .\dist\docker-compose-Windows-x86_64.exe
name: "Compose Windows binary"
deploy:
- provider: Environment
name: master-builds
on:
branch: master


@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals

-__version__ = '1.25.1'
+__version__ = '1.25.5'


@@ -1,275 +0,0 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import logging
import six
from docker.utils import split_command
from docker.utils.ports import split_port
from .cli.errors import UserError
from .config.serialize import denormalize_config
from .network import get_network_defs_for_service
from .service import format_environment
from .service import NoSuchImageError
from .service import parse_repository_tag
log = logging.getLogger(__name__)
SERVICE_KEYS = {
'working_dir': 'WorkingDir',
'user': 'User',
'labels': 'Labels',
}
IGNORED_KEYS = {'build'}
SUPPORTED_KEYS = {
'image',
'ports',
'expose',
'networks',
'command',
'environment',
'entrypoint',
} | set(SERVICE_KEYS)
VERSION = '0.1'
class NeedsPush(Exception):
def __init__(self, image_name):
self.image_name = image_name
class NeedsPull(Exception):
def __init__(self, image_name, service_name):
self.image_name = image_name
self.service_name = service_name
class MissingDigests(Exception):
def __init__(self, needs_push, needs_pull):
self.needs_push = needs_push
self.needs_pull = needs_pull
def serialize_bundle(config, image_digests):
return json.dumps(to_bundle(config, image_digests), indent=2, sort_keys=True)
def get_image_digests(project, allow_push=False):
digests = {}
needs_push = set()
needs_pull = set()
for service in project.services:
try:
digests[service.name] = get_image_digest(
service,
allow_push=allow_push,
)
except NeedsPush as e:
needs_push.add(e.image_name)
except NeedsPull as e:
needs_pull.add(e.service_name)
if needs_push or needs_pull:
raise MissingDigests(needs_push, needs_pull)
return digests
def get_image_digest(service, allow_push=False):
if 'image' not in service.options:
raise UserError(
"Service '{s.name}' doesn't define an image tag. An image name is "
"required to generate a proper image digest for the bundle. Specify "
"an image repo and tag with the 'image' option.".format(s=service))
_, _, separator = parse_repository_tag(service.options['image'])
# Compose file already uses a digest, no lookup required
if separator == '@':
return service.options['image']
digest = get_digest(service)
if digest:
return digest
if 'build' not in service.options:
raise NeedsPull(service.image_name, service.name)
if not allow_push:
raise NeedsPush(service.image_name)
return push_image(service)
def get_digest(service):
digest = None
try:
image = service.image()
# TODO: pick a digest based on the image tag if there are multiple
# digests
if image['RepoDigests']:
digest = image['RepoDigests'][0]
except NoSuchImageError:
try:
# Fetch the image digest from the registry
distribution = service.get_image_registry_data()
if distribution['Descriptor']['digest']:
digest = '{image_name}@{digest}'.format(
image_name=service.image_name,
digest=distribution['Descriptor']['digest']
)
except NoSuchImageError:
raise UserError(
"Digest not found for service '{service}'. "
"Repository does not exist or may require 'docker login'"
.format(service=service.name))
return digest
def push_image(service):
try:
digest = service.push()
except Exception:
log.error(
"Failed to push image for service '{s.name}'. Please use an "
"image tag that can be pushed to a Docker "
"registry.".format(s=service))
raise
if not digest:
raise ValueError("Failed to get digest for %s" % service.name)
repo, _, _ = parse_repository_tag(service.options['image'])
identifier = '{repo}@{digest}'.format(repo=repo, digest=digest)
# only do this if RepoDigests isn't already populated
image = service.image()
if not image['RepoDigests']:
# Pull by digest so that image['RepoDigests'] is populated for next time
# and we don't have to pull/push again
service.client.pull(identifier)
log.info("Stored digest for {}".format(service.image_name))
return identifier
def to_bundle(config, image_digests):
if config.networks:
log.warning("Unsupported top level key 'networks' - ignoring")
if config.volumes:
log.warning("Unsupported top level key 'volumes' - ignoring")
config = denormalize_config(config)
return {
'Version': VERSION,
'Services': {
name: convert_service_to_bundle(
name,
service_dict,
image_digests[name],
)
for name, service_dict in config['services'].items()
},
}
def convert_service_to_bundle(name, service_dict, image_digest):
container_config = {'Image': image_digest}
for key, value in service_dict.items():
if key in IGNORED_KEYS:
continue
if key not in SUPPORTED_KEYS:
log.warning("Unsupported key '{}' in services.{} - ignoring".format(key, name))
continue
if key == 'environment':
container_config['Env'] = format_environment({
envkey: envvalue for envkey, envvalue in value.items()
if envvalue
})
continue
if key in SERVICE_KEYS:
container_config[SERVICE_KEYS[key]] = value
continue
set_command_and_args(
container_config,
service_dict.get('entrypoint', []),
service_dict.get('command', []))
container_config['Networks'] = make_service_networks(name, service_dict)
ports = make_port_specs(service_dict)
if ports:
container_config['Ports'] = ports
return container_config
# See https://github.com/docker/swarmkit/blob/agent/exec/container/container.go#L95
def set_command_and_args(config, entrypoint, command):
if isinstance(entrypoint, six.string_types):
entrypoint = split_command(entrypoint)
if isinstance(command, six.string_types):
command = split_command(command)
if entrypoint:
config['Command'] = entrypoint + command
return
if command:
config['Args'] = command
def make_service_networks(name, service_dict):
networks = []
for network_name, network_def in get_network_defs_for_service(service_dict).items():
for key in network_def.keys():
log.warning(
"Unsupported key '{}' in services.{}.networks.{} - ignoring"
.format(key, name, network_name))
networks.append(network_name)
return networks
def make_port_specs(service_dict):
ports = []
internal_ports = [
internal_port
for port_def in service_dict.get('ports', [])
for internal_port in split_port(port_def)[0]
]
internal_ports += service_dict.get('expose', [])
for internal_port in internal_ports:
spec = make_port_spec(internal_port)
if spec not in ports:
ports.append(spec)
return ports
def make_port_spec(value):
components = six.text_type(value).partition('/')
return {
'Protocol': components[2] or 'tcp',
'Port': int(components[0]),
}
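For the record, the removed `make_port_spec` above simply partitioned a port definition on `/` and defaulted the protocol; its behaviour, sketched with `str` standing in for `six.text_type`:

def make_port_spec(value):
    # "8080/udp" -> protocol "udp"; a bare 80 -> default "tcp"
    port, _, protocol = str(value).partition('/')
    return {'Protocol': protocol or 'tcp', 'Port': int(port)}

print(make_port_spec('8080/udp'), make_port_spec(80))
# {'Protocol': 'udp', 'Port': 8080} {'Protocol': 'tcp', 'Port': 80}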


@@ -40,7 +40,8 @@ SILENT_COMMANDS = {
 }


-def project_from_options(project_dir, options, additional_options={}):
+def project_from_options(project_dir, options, additional_options=None):
+    additional_options = additional_options or {}
     override_dir = options.get('--project-directory')
     environment_file = options.get('--env-file')
     environment = Environment.from_env_file(override_dir or project_dir, environment_file)
@@ -59,7 +60,7 @@ def project_from_options(project_dir, options, additional_options={}):
         tls_config=tls_config_from_options(options, environment),
         environment=environment,
         override_dir=override_dir,
-        compatibility=options.get('--compatibility'),
+        compatibility=compatibility_from_options(project_dir, options, environment),
         interpolate=(not additional_options.get('--no-interpolate')),
         environment_file=environment_file
     )
@@ -81,7 +82,8 @@ def set_parallel_limit(environment):
     parallel.GlobalLimit.set_global_limit(parallel_limit)


-def get_config_from_options(base_dir, options, additional_options={}):
+def get_config_from_options(base_dir, options, additional_options=None):
+    additional_options = additional_options or {}
     override_dir = options.get('--project-directory')
     environment_file = options.get('--env-file')
     environment = Environment.from_env_file(override_dir or base_dir, environment_file)
@@ -90,7 +92,7 @@ def get_config_from_options(base_dir, options, additional_options={}):
     )
     return config.load(
         config.find(base_dir, config_path, environment, override_dir),
-        options.get('--compatibility'),
+        compatibility_from_options(config_path, options, environment),
         not additional_options.get('--no-interpolate')
     )
@@ -198,3 +200,13 @@ def get_project_name(working_dir, project_name=None, environment=None):
         return normalize_name(project)

     return 'default'
+
+
+def compatibility_from_options(working_dir, options=None, environment=None):
+    """Get compose v3 compatibility from --compatibility option
+    or from COMPOSE_COMPATIBILITY environment variable."""
+    compatibility_option = options.get('--compatibility')
+    compatibility_environment = environment.get_boolean('COMPOSE_COMPATIBILITY')
+
+    return compatibility_option or compatibility_environment
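The switch from `additional_options={}` to `additional_options=None` above avoids Python's shared-mutable-default pitfall: a default dict is created once at function definition and reused across calls. A minimal illustration, with throwaway names:

def buggy(options={}):
    # the same dict object is reused on every call
    options['calls'] = options.get('calls', 0) + 1
    return options

def fixed(options=None):
    # a fresh dict per call
    options = options or {}
    options['calls'] = options.get('calls', 0) + 1
    return options

print(buggy())  # {'calls': 1}
print(buggy())  # {'calls': 2} -- state leaked between calls
print(fixed())  # {'calls': 1}
print(fixed())  # {'calls': 1} -- no leak

The same fix appears below for `Project.from_config` and `Service.__init__` (`extra_labels=[]` becoming `extra_labels=None`).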


@@ -17,7 +17,12 @@ else:
 def get_tty_width():
     try:
-        width, _ = get_terminal_size()
+        # get_terminal_size can't determine the size if compose is piped
+        # to another command. But in such case it doesn't make sense to
+        # try format the output by terminal size as this output is consumed
+        # by another command. So let's pretend we have a huge terminal so
+        # output is single-lined
+        width, _ = get_terminal_size(fallback=(999, 0))
         return int(width)
     except OSError:
         return 0
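The fallback tuple is what makes the piped case behave: `shutil.get_terminal_size` returns the fallback when stdout is not attached to a terminal (and `COLUMNS` is unset). A quick way to observe it; the script name is hypothetical:

import shutil

# In a terminal this prints the real width; piped (`python width.py | cat`)
# it prints the 999 fallback, so compose stops wrapping its output.
width, _ = shutil.get_terminal_size(fallback=(999, 0))
print(width)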


@@ -15,14 +15,12 @@ from distutils.spawn import find_executable
 from inspect import getdoc
 from operator import attrgetter

 import docker
 import docker.errors
 import docker.utils

 from . import errors
 from . import signals
 from .. import __version__
-from ..bundle import get_image_digests
-from ..bundle import MissingDigests
-from ..bundle import serialize_bundle
 from ..config import ConfigurationError
 from ..config import parse_environment
 from ..config import parse_labels
@@ -34,6 +32,8 @@ from ..const import COMPOSEFILE_V2_2 as V2_2
 from ..const import IS_WINDOWS_PLATFORM
 from ..errors import StreamParseError
 from ..progress_stream import StreamOutputError
+from ..project import get_image_digests
+from ..project import MissingDigests
 from ..project import NoSuchService
 from ..project import OneOffFilter
 from ..project import ProjectError
@@ -213,7 +213,6 @@ class TopLevelCommand(object):
     Commands:
       build              Build or rebuild services
-      bundle             Generate a Docker bundle from the Compose file
       config             Validate and view the Compose file
       create             Create services
       down               Stop and remove containers, networks, images, and volumes
@@ -304,38 +303,6 @@ class TopLevelCommand(object):
             progress=options.get('--progress'),
         )

-    def bundle(self, options):
-        """
-        Generate a Distributed Application Bundle (DAB) from the Compose file.
-
-        Images must have digests stored, which requires interaction with a
-        Docker registry. If digests aren't stored for all images, you can fetch
-        them with `docker-compose pull` or `docker-compose push`. To push images
-        automatically when bundling, pass `--push-images`. Only services with
-        a `build` option specified will have their images pushed.
-
-        Usage: bundle [options]
-
-        Options:
-            --push-images     Automatically push images for any services
-                              which have a `build` option specified.
-            -o, --output PATH Path to write the bundle file to.
-                              Defaults to "<project name>.dab".
-        """
-        compose_config = get_config_from_options('.', self.toplevel_options)
-
-        output = options["--output"]
-        if not output:
-            output = "{}.dab".format(self.project.name)
-
-        image_digests = image_digests_for_project(self.project, options['--push-images'])
-
-        with open(output, 'w') as f:
-            f.write(serialize_bundle(compose_config, image_digests))
-
-        log.info("Wrote bundle to {}".format(output))
-
     def config(self, options):
         """
         Validate and view the Compose file.
@@ -1045,6 +1012,7 @@ class TopLevelCommand(object):
             --build                    Build images before starting containers.
             --abort-on-container-exit  Stops all containers if any container was
                                        stopped. Incompatible with -d.
+            --attach-dependencies      Attach to dependent containers
             -t, --timeout TIMEOUT      Use this timeout in seconds for container
                                        shutdown when attached or when containers are
                                        already running. (default: 10)
@@ -1066,16 +1034,18 @@
         remove_orphans = options['--remove-orphans']
         detached = options.get('--detach')
         no_start = options.get('--no-start')
+        attach_dependencies = options.get('--attach-dependencies')

-        if detached and (cascade_stop or exit_value_from):
-            raise UserError("--abort-on-container-exit and -d cannot be combined.")
+        if detached and (cascade_stop or exit_value_from or attach_dependencies):
+            raise UserError(
+                "-d cannot be combined with --abort-on-container-exit or --attach-dependencies.")

         ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')

         if ignore_orphans and remove_orphans:
             raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")

-        opts = ['--detach', '--abort-on-container-exit', '--exit-code-from']
+        opts = ['--detach', '--abort-on-container-exit', '--exit-code-from', '--attach-dependencies']
         for excluded in [x for x in opts if options.get(x) and no_start]:
             raise UserError('--no-start and {} cannot be combined.'.format(excluded))
@@ -1120,7 +1090,10 @@
         if detached or no_start:
             return

-        attached_containers = filter_containers_to_service_names(to_attach, service_names)
+        attached_containers = filter_attached_containers(
+            to_attach,
+            service_names,
+            attach_dependencies)

         log_printer = log_printer_from_project(
             self.project,
@@ -1216,12 +1189,10 @@ def timeout_from_opts(options):
     return None if timeout is None else int(timeout)


-def image_digests_for_project(project, allow_push=False):
+def image_digests_for_project(project):
     try:
-        return get_image_digests(
-            project,
-            allow_push=allow_push
-        )
+        return get_image_digests(project)
     except MissingDigests as e:
         def list_images(images):
             return "\n".join("    {}".format(name) for name in sorted(images))
@@ -1427,8 +1398,8 @@ def log_printer_from_project(
         log_args=log_args)


-def filter_containers_to_service_names(containers, service_names):
-    if not service_names:
+def filter_attached_containers(containers, service_names, attach_dependencies=False):
+    if attach_dependencies or not service_names:
         return containers

     return [
@@ -1495,7 +1466,12 @@ def call_docker(args, dockeropts, environment):
     args = [executable_path] + tls_options + args
     log.debug(" ".join(map(pipes.quote, args)))

-    return subprocess.call(args, env=environment)
+    filtered_env = {}
+    for k, v in environment.items():
+        if v is not None:
+            filtered_env[k] = environment[k]
+
+    return subprocess.call(args, env=filtered_env)
def parse_scale_args(options):
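The renamed `filter_attached_containers` is where `--attach-dependencies` takes effect: `attach_dependencies=True` short-circuits the service-name filter, so dependency containers stay attached too. A standalone sketch with a stand-in container type (the filtering detail beyond the lines the diff shows is an assumption):

from collections import namedtuple

Container = namedtuple('Container', 'name service')  # stand-in for compose's Container

def filter_attached_containers(containers, service_names, attach_dependencies=False):
    if attach_dependencies or not service_names:
        return containers  # attach to everything, dependencies included
    return [c for c in containers if c.service in service_names]

db, web = Container('db_1', 'db'), Container('web_1', 'web')
print(filter_attached_containers([db, web], ['web']))        # web_1 only
print(filter_attached_containers([db, web], ['web'], True))  # db_1 and web_1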


@@ -5,6 +5,7 @@ import functools
 import io
 import logging
 import os
+import re
 import string
 import sys
 from collections import namedtuple
@@ -214,6 +215,12 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
                 .format(self.filename, VERSION_EXPLANATION)
             )

+        version_pattern = re.compile(r"^[2-9]+(\.\d+)?$")
+        if not version_pattern.match(version):
+            raise ConfigurationError(
+                'Version "{}" in "{}" is invalid.'
+                .format(version, self.filename))
+
         if version == '2':
             return const.COMPOSEFILE_V2_0
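The new `version_pattern` accepts a major version from 2 upward with an optional numeric minor, which is what "Validate version format on formats 2+" refers to. A quick demonstration:

import re

version_pattern = re.compile(r"^[2-9]+(\.\d+)?$")

for version in ('2', '2.4', '3.8', '1', '3.8.1', 'latest'):
    print(version, bool(version_pattern.match(version)))
# '2', '2.4' and '3.8' pass; '1', '3.8.1' and 'latest' are rejected,
# raising ConfigurationError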
@@ -983,12 +990,17 @@ def translate_deploy_keys_to_container_config(service_dict):
     deploy_dict = service_dict['deploy']
     ignored_keys = [
-        k for k in ['endpoint_mode', 'labels', 'update_config', 'rollback_config', 'placement']
+        k for k in ['endpoint_mode', 'labels', 'update_config', 'rollback_config']
         if k in deploy_dict
     ]

     if 'replicas' in deploy_dict and deploy_dict.get('mode', 'replicated') == 'replicated':
-        service_dict['scale'] = deploy_dict['replicas']
+        scale = deploy_dict.get('replicas', 1)
+        max_replicas = deploy_dict.get('placement', {}).get('max_replicas_per_node', scale)
+        service_dict['scale'] = min(scale, max_replicas)
+        if max_replicas < scale:
+            log.warning("Scale is limited to {} ('max_replicas_per_node' field).".format(
+                max_replicas))

     if 'restart_policy' in deploy_dict:
         service_dict['restart'] = {
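So under `--compatibility` the effective scale becomes `min(replicas, max_replicas_per_node)`, with a warning when the placement limit wins. The arithmetic in isolation:

deploy_dict = {'replicas': 6, 'placement': {'max_replicas_per_node': 2}}

scale = deploy_dict.get('replicas', 1)
max_replicas = deploy_dict.get('placement', {}).get('max_replicas_per_node', scale)
print(min(scale, max_replicas))  # 2 -- scale is limited, and compose warns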

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -41,7 +41,10 @@ COMPOSEFILE_V3_4 = ComposeVersion('3.4')
 COMPOSEFILE_V3_5 = ComposeVersion('3.5')
 COMPOSEFILE_V3_6 = ComposeVersion('3.6')
 COMPOSEFILE_V3_7 = ComposeVersion('3.7')
+COMPOSEFILE_V3_8 = ComposeVersion('3.8')

+# minimum DOCKER ENGINE API version needed to support
+# features for each compose schema version
 API_VERSIONS = {
     COMPOSEFILE_V1: '1.21',
     COMPOSEFILE_V2_0: '1.22',
@@ -57,6 +60,7 @@ API_VERSIONS = {
     COMPOSEFILE_V3_5: '1.30',
     COMPOSEFILE_V3_6: '1.36',
     COMPOSEFILE_V3_7: '1.38',
+    COMPOSEFILE_V3_8: '1.38',
 }

 API_VERSION_TO_ENGINE_VERSION = {
@@ -74,4 +78,5 @@ API_VERSION_TO_ENGINE_VERSION = {
     API_VERSIONS[COMPOSEFILE_V3_5]: '17.06.0',
     API_VERSIONS[COMPOSEFILE_V3_6]: '18.02.0',
     API_VERSIONS[COMPOSEFILE_V3_7]: '18.06.0',
+    API_VERSIONS[COMPOSEFILE_V3_8]: '18.06.0',
 }
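Chaining the two tables gives the minimum engine needed for a schema version, which is how v3.8 support is gated ("set min engine version needed for v38 schema support" above). A sketch with plain strings in place of `ComposeVersion`:

API_VERSIONS = {'3.7': '1.38', '3.8': '1.38'}        # schema -> engine API version
API_VERSION_TO_ENGINE_VERSION = {'1.38': '18.06.0'}  # API version -> engine release

schema = '3.8'
api = API_VERSIONS[schema]
print(schema, '->', api, '->', API_VERSION_TO_ENGINE_VERSION[api])
# 3.8 -> 1.38 -> 18.06.0: compose v3.8 files need Docker Engine 18.06.0 or newer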


@@ -16,6 +16,7 @@ from docker.errors import NotFound
from docker.utils import version_lt
from . import parallel
from .cli.errors import UserError
from .config import ConfigurationError
from .config.config import V1
from .config.sort_services import get_container_name_from_network_mode
@@ -33,6 +34,7 @@ from .service import ContainerNetworkMode
from .service import ContainerPidMode
from .service import ConvergenceStrategy
from .service import NetworkMode
from .service import NoSuchImageError
from .service import parse_repository_tag
from .service import PidMode
from .service import Service
@@ -42,7 +44,6 @@ from .utils import microseconds_from_time_nano
from .utils import truncate_string
from .volume import ProjectVolumes
log = logging.getLogger(__name__)
@@ -86,10 +87,11 @@ class Project(object):
         return labels

     @classmethod
-    def from_config(cls, name, config_data, client, default_platform=None, extra_labels=[]):
+    def from_config(cls, name, config_data, client, default_platform=None, extra_labels=None):
         """
         Construct a Project from a config.Config object.
         """
+        extra_labels = extra_labels or []
         use_networking = (config_data.version and config_data.version != V1)
         networks = build_networks(name, config_data, client)
         project_networks = ProjectNetworks.from_services(
@@ -381,6 +383,7 @@ class Project(object):
def build_service(service):
service.build(no_cache, pull, force_rm, memory, build_args, gzip, rm, silent, cli, progress)
if parallel_build:
_, errors = parallel.parallel_execute(
services,
@@ -844,6 +847,91 @@ def get_secrets(service, service_secrets, secret_defs):
     return secrets


+def get_image_digests(project):
+    digests = {}
+    needs_push = set()
+    needs_pull = set()
+
+    for service in project.services:
+        try:
+            digests[service.name] = get_image_digest(service)
+        except NeedsPush as e:
+            needs_push.add(e.image_name)
+        except NeedsPull as e:
+            needs_pull.add(e.service_name)
+
+    if needs_push or needs_pull:
+        raise MissingDigests(needs_push, needs_pull)
+
+    return digests
+
+
+def get_image_digest(service):
+    if 'image' not in service.options:
+        raise UserError(
+            "Service '{s.name}' doesn't define an image tag. An image name is "
+            "required to generate a proper image digest. Specify an image repo "
+            "and tag with the 'image' option.".format(s=service))
+
+    _, _, separator = parse_repository_tag(service.options['image'])
+    # Compose file already uses a digest, no lookup required
+    if separator == '@':
+        return service.options['image']
+
+    digest = get_digest(service)
+
+    if digest:
+        return digest
+
+    if 'build' not in service.options:
+        raise NeedsPull(service.image_name, service.name)
+
+    raise NeedsPush(service.image_name)
+
+
+def get_digest(service):
+    digest = None
+    try:
+        image = service.image()
+        # TODO: pick a digest based on the image tag if there are multiple
+        # digests
+        if image['RepoDigests']:
+            digest = image['RepoDigests'][0]
+    except NoSuchImageError:
+        try:
+            # Fetch the image digest from the registry
+            distribution = service.get_image_registry_data()
+            if distribution['Descriptor']['digest']:
+                digest = '{image_name}@{digest}'.format(
+                    image_name=service.image_name,
+                    digest=distribution['Descriptor']['digest']
+                )
+        except NoSuchImageError:
+            raise UserError(
+                "Digest not found for service '{service}'. "
+                "Repository does not exist or may require 'docker login'"
+                .format(service=service.name))
+    return digest
+
+
+class MissingDigests(Exception):
+    def __init__(self, needs_push, needs_pull):
+        self.needs_push = needs_push
+        self.needs_pull = needs_pull
+
+
+class NeedsPush(Exception):
+    def __init__(self, image_name):
+        self.image_name = image_name
+
+
+class NeedsPull(Exception):
+    def __init__(self, image_name, service_name):
+        self.image_name = image_name
+        self.service_name = service_name
+
+
 class NoSuchService(Exception):
     def __init__(self, name):
         if isinstance(name, six.binary_type):
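Note how `get_image_digests` keeps iterating after a failure, collecting every service that still needs a push or pull before raising a single `MissingDigests`; the same report-everything-at-once pattern, reduced to dummy data:

class MissingDigests(Exception):
    def __init__(self, needs_push, needs_pull):
        self.needs_push, self.needs_pull = needs_push, needs_pull

def resolve_all(services, known_digests):
    digests, needs_pull = {}, set()
    for name in services:
        if name in known_digests:
            digests[name] = known_digests[name]
        else:
            needs_pull.add(name)  # keep going; report every gap at once
    if needs_pull:
        raise MissingDigests(set(), needs_pull)
    return digests

try:
    resolve_all(['web', 'db'], {'web': 'sha256:abc'})
except MissingDigests as e:
    print('needs pull:', e.needs_pull)  # needs pull: {'db'}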


@@ -185,7 +185,7 @@ class Service(object):
             scale=1,
             pid_mode=None,
             default_platform=None,
-            extra_labels=[],
+            extra_labels=None,
             **options
     ):
         self.name = name
@@ -201,7 +201,7 @@ class Service(object):
         self.scale_num = scale
         self.default_platform = default_platform
         self.options = options
-        self.extra_labels = extra_labels
+        self.extra_labels = extra_labels or []

     def __repr__(self):
         return '<Service: {}>'.format(self.name)


@@ -126,18 +126,6 @@ _docker_compose_build() {
 }


-_docker_compose_bundle() {
-    case "$prev" in
-        --output|-o)
-            _filedir
-            return
-            ;;
-    esac
-
-    COMPREPLY=( $( compgen -W "--push-images --help --output -o" -- "$cur" ) )
-}
-
-
 _docker_compose_config() {
     case "$prev" in
         --hash)
@@ -557,7 +545,7 @@ _docker_compose_up() {
     case "$cur" in
         -*)
-            COMPREPLY=( $( compgen -W "--abort-on-container-exit --always-recreate-deps --build -d --detach --exit-code-from --force-recreate --help --no-build --no-color --no-deps --no-recreate --no-start --renew-anon-volumes -V --remove-orphans --scale --timeout -t" -- "$cur" ) )
+            COMPREPLY=( $( compgen -W "--abort-on-container-exit --always-recreate-deps --attach-dependencies --build -d --detach --exit-code-from --force-recreate --help --no-build --no-color --no-deps --no-recreate --no-start --renew-anon-volumes -V --remove-orphans --scale --timeout -t" -- "$cur" ) )
             ;;
         *)
             __docker_compose_complete_services
@@ -581,7 +569,6 @@ _docker_compose() {
     local commands=(
         build
-        bundle
         config
         create
         down


@@ -121,12 +121,6 @@ __docker-compose_subcommand() {
                 '--parallel[Build images in parallel.]' \
                 '*:services:__docker-compose_services_from_build' && ret=0
             ;;
-        (bundle)
-            _arguments \
-                $opts_help \
-                '--push-images[Automatically push images for any services which have a `build` option specified.]' \
-                '(--output -o)'{--output,-o}'[Path to write the bundle file to. Defaults to "<project name>.dab".]:file:_files' && ret=0
-            ;;
         (config)
             _arguments \
                 $opts_help \
@@ -290,7 +284,7 @@ __docker-compose_subcommand() {
         (up)
             _arguments \
                 $opts_help \
-                '(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names. Incompatible with --abort-on-container-exit.]' \
+                '(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names. Incompatible with --abort-on-container-exit and --attach-dependencies.]' \
                 $opts_no_color \
                 $opts_no_deps \
                 $opts_force_recreate \
@@ -298,6 +292,7 @@ __docker-compose_subcommand() {
                 $opts_no_build \
                 "(--no-build)--build[Build images before starting containers.]" \
                 "(-d)--abort-on-container-exit[Stops all containers if any container was stopped. Incompatible with -d.]" \
+                "(-d)--attach-dependencies[Attach to dependent containers. Incompatible with -d.]" \
                 '(-t --timeout)'{-t,--timeout}"[Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)]:seconds: " \
                 '--scale[SERVICE=NUM Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.]:service scale SERVICE=NUM: ' \
                 '--exit-code-from=[Return the exit code of the selected service container. Implies --abort-on-container-exit]:service:__docker-compose_services' \

View File

@@ -87,6 +87,11 @@ exe = EXE(pyz,
'compose/config/config_schema_v3.7.json',
'DATA'
),
(
'compose/config/config_schema_v3.8.json',
'compose/config/config_schema_v3.8.json',
'DATA'
),
(
'compose/GITSHA',
'compose/GITSHA',

View File

@@ -96,6 +96,11 @@ coll = COLLECT(exe,
'compose/config/config_schema_v3.7.json',
'DATA'
),
(
'compose/config/config_schema_v3.8.json',
'compose/config/config_schema_v3.8.json',
'DATA'
),
(
'compose/GITSHA',
'compose/GITSHA',

View File

@@ -1 +1 @@
pyinstaller==3.5
pyinstaller==3.6

View File

@@ -1,6 +1,7 @@
coverage==4.5.4
ddt==1.2.0
coverage==5.0.3
ddt==1.2.2
flake8==3.7.9
mock==3.0.5
pytest==3.6.3
pytest==5.3.2; python_version >= '3.5'
pytest==4.6.5; python_version < '3.5'
pytest-cov==2.8.1

View File

@@ -1,25 +1,26 @@
backports.shutil_get_terminal_size==1.0.0
backports.ssl-match-hostname==3.5.0.1; python_version < '3'
cached-property==1.3.0
certifi==2017.4.17
cached-property==1.5.1
certifi==2019.11.28
chardet==3.0.4
colorama==0.4.0; sys_platform == 'win32'
colorama==0.4.3; sys_platform == 'win32'
docker==4.1.0
docker-pycreds==0.4.0
dockerpty==0.4.1
docopt==0.6.2
enum34==1.1.6; python_version < '3.4'
functools32==3.2.3.post2; python_version < '3.2'
idna==2.5
ipaddress==1.0.18
jsonschema==3.0.1
paramiko==2.6.0
idna==2.8
ipaddress==1.0.23
jsonschema==3.2.0
paramiko==2.7.1
pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
PySocks==1.6.7
PyYAML==4.2b1
PySocks==1.7.1
PyYAML==5.3
requests==2.22.0
six==1.12.0
subprocess32==3.5.4; python_version < '3.2'
texttable==1.6.2
urllib3==1.24.2; python_version == '3.3'
websocket-client==0.32.0
urllib3==1.25.7; python_version == '3.3'
websocket-client==0.57.0

View File

@@ -1,20 +0,0 @@
pipeline {
agent any
stages {
stage("License Scan") {
agent {
label 'ubuntu-1604-aufs-edge'
}
steps {
withCredentials([
string(credentialsId: 'fossa-api-key', variable: 'FOSSA_API_KEY')
]) {
checkout scm
sh "FOSSA_API_KEY='${FOSSA_API_KEY}' BRANCH_NAME='${env.BRANCH_NAME}' make -f script/fossa.mk fossa-analyze"
sh "FOSSA_API_KEY='${FOSSA_API_KEY}' make -f script/fossa.mk fossa-test"
}
}
}
}
}

View File

@@ -24,7 +24,7 @@ if [ ! -z "${BUILD_BOOTLOADER}" ]; then
git clone --single-branch --branch develop https://github.com/pyinstaller/pyinstaller.git /tmp/pyinstaller
cd /tmp/pyinstaller/bootloader
# Checkout commit corresponding to version in requirements-build
git checkout v3.5
git checkout v3.6
"${VENV}"/bin/python3 ./waf configure --no-lsb all
"${VENV}"/bin/pip3 install ..
cd "${CODE_PATH}"

View File

@@ -6,7 +6,7 @@
#
# http://git-scm.com/download/win
#
# 2. Install Python 3.7.2:
# 2. Install Python 3.7.x:
#
# https://www.python.org/downloads/
#
@@ -39,7 +39,7 @@ if (Test-Path venv) {
Get-ChildItem -Recurse -Include *.pyc | foreach ($_) { Remove-Item $_.FullName }
# Create virtualenv
virtualenv .\venv
virtualenv -p C:\Python37\python.exe .\venv
# pip and pyinstaller generate lots of warnings, so we need to ignore them
$ErrorActionPreference = "Continue"

View File

@@ -1,16 +0,0 @@
# Variables for Fossa
BUILD_ANALYZER?=docker/fossa-analyzer
FOSSA_OPTS?=--option all-tags:true --option allow-unresolved:true
fossa-analyze:
docker run --rm -e FOSSA_API_KEY=$(FOSSA_API_KEY) \
-v $(CURDIR)/$*:/go/src/github.com/docker/compose \
-w /go/src/github.com/docker/compose \
$(BUILD_ANALYZER) analyze ${FOSSA_OPTS} --branch ${BRANCH_NAME}
# This command is used to run the fossa test command
fossa-test:
docker run -i -e FOSSA_API_KEY=$(FOSSA_API_KEY) \
-v $(CURDIR)/$*:/go/src/github.com/docker/compose \
-w /go/src/github.com/docker/compose \
$(BUILD_ANALYZER) test

View File

@@ -1,201 +1,9 @@
# Release HOWTO
This file describes the process of making a public release of `docker-compose`.
Please read it carefully before proceeding!
The release process is fully automated by `Release.Jenkinsfile`.
## Prerequisites
## Usage
The following things are required to bring a release to a successful conclusion:
### Local Docker engine (Linux Containers)
The release script builds images that will be part of the release.
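A quick way to verify this, assuming a standard Docker CLI setup, is to check that the server reports `linux` as its operating system:
```
docker version --format '{{.Server.Os}}'
```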
### Docker Hub account
You should be logged into a Docker Hub account that allows pushing to the
following repositories:
- docker/compose
- docker/compose-tests
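If in doubt about which account is currently active, logging in explicitly before starting does no harm:
```
docker login   # use an account with push access to docker/compose and docker/compose-tests
```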
### Python
The release script is written in Python and requires Python 3.3 at minimum.
### A Github account and Github API token
Your Github account needs to have write access to the `docker/compose` repo.
To generate a Github token, head over to the
[Personal access tokens](https://github.com/settings/tokens) page in your
Github settings and select "Generate new token". Your token should include
(at minimum) the following scopes:
- `repo:status`
- `public_repo`
This API token should be exposed to the release script through the
`GITHUB_TOKEN` environment variable.
### A Bintray account and Bintray API key
Your Bintray account will need to be an admin member of the
[docker-compose organization](https://bintray.com/docker-compose).
Additionally, you should generate a personal API key. To do so, click your
username in the top-right corner and select "Edit profile"; on the new
page, select "API key" in the left-side menu.
This API key should be exposed to the release script through the
`BINTRAY_TOKEN` environment variable.
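For example, both tokens can be exported in the shell that will run the release script (the values below are placeholders):
```
export GITHUB_TOKEN=<your-github-api-token>
export BINTRAY_TOKEN=<your-bintray-api-key>
```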
### A PyPI account
This account needs to be a member of the maintainers group for the
[`docker-compose` project](https://pypi.org/project/docker-compose/).
Moreover, the `~/.pypirc` file should exist on your host and contain the
relevant PyPI credentials.
The following is a sample `.pypirc` provided as a guideline:
```
[distutils]
index-servers =
pypi
[pypi]
username = user
password = pass
```
## Start a feature release
A feature release is a release that includes all changes present in the
`master` branch when initiated. It's typically versioned `X.Y.0-rc1`, where
Y is the minor version of the previous release incremented by one. A series
of one or more Release Candidates (RCs) should be made available to the public
so that potential bugs can be found and squashed.
From the root of the Compose repository, run the following command:
```
./script/release/release.sh -b <BINTRAY_USERNAME> start X.Y.0-rc1
```
After a short initialization period, the script will invite you to edit the
`CHANGELOG.md` file. Do so, taking care to follow the same format as
previous releases. Once done, the script will display a `diff` of the staged
changes for the bump commit. Once you validate these, a bump commit will be
created on the newly created release branch and pushed remotely.
The release tool then waits for the CI to conclude before proceeding.
If failures are reported, the release will be aborted until these are fixed.
Please refer to the "Resume a draft release" section below for more details.
Once all resources have been prepared, the release script will exit with a
message resembling this one:
```
You're almost done! Please verify that everything is in order and you are ready
to make the release public, then run the following command:
./script/release/release.sh -b user finalize X.Y.0-rc1
```
Once you are ready to finalize the release (making binaries and other versioned
assets public), proceed to the "Finalize a release" section of this guide.
## Start a patch release
A patch release is a release that builds off a previous release with discrete
additions. This can be an RC release after RC1 (`X.Y.0-rcZ`, `Z > 1`), a GA release
based off the final RC (`X.Y.0`), or a bugfix release based off a previous
GA release (`X.Y.Z`, `Z > 0`).
From the root of the Compose repository, run the following command:
```
./script/release/release.sh -b <BINTRAY_USERNAME> start --patch=BASE_VERSION RELEASE_VERSION
```
The process of starting a patch release is identical to starting a feature
release except for one difference: at the beginning, the script will ask for
PR numbers you wish to cherry-pick into the release. These numbers should
correspond to existing PRs on the docker/compose repository. Multiple numbers
should be separated by whitespace.
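As an illustration, a hypothetical patch-release session could look like this (the version and PR numbers are made up):
```
$ ./script/release/release.sh -b user start --patch=1.25.4 1.25.5
Detected patch version.
Indicate (space-separated) PR numbers to cherry-pick then press Enter:
1234 1235
```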
Once you are ready to finalize the release (making binaries and other versioned
assets public), proceed to the "Finalize a release" section of this guide.
## Finalize a release
Once you're ready to make your release public, you may execute the following
command from the root of the Compose repository:
```
./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEASE_VERSION
```
Note that this command will create and publish versioned assets to the public.
As a result, it cannot be reverted. The command will perform some basic
sanity checks before doing so, but it is your responsibility to ensure
everything is in order before pushing the button.
After the command exits, you should make sure:
- The `docker/compose:VERSION` image is available on Docker Hub and functional
- The `pip install -U docker-compose==VERSION` command correctly installs the
specified version
- The install command on the Github release page installs the new release
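A minimal sketch for spot-checking the first two items, with `VERSION` standing in for the actual release number:
```
docker run --rm docker/compose:VERSION version
pip install -U docker-compose==VERSION && docker-compose version
```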
## Resume a draft release
"Resuming" a release lets you address the following situations that may occur
before a release is made final:
- Cherry-pick additional PRs to include in the release
- Resume a release that was aborted because of CI failures after they've been
addressed
- Rebuild / redownload assets after manual changes have been made to the
release branch
- etc.
From the root of the Compose repository, run the following command:
```
./script/release/release.sh -b <BINTRAY_USERNAME> resume RELEASE_VERSION
```
The release tool will attempt to determine which steps it has already been
through for the specified release and pick up where it left off. Some steps,
such as building images or downloading binaries, are executed again regardless,
as they are assumed to produce different results.
## Cancel a draft release
If issues snuck into your release branch, it is sometimes easier to start from
scratch. Before a release has been finalized, it is possible to cancel it using
the following command:
```
./script/release/release.sh -b <BINTRAY_USERNAME> cancel RELEASE_VERSION
```
This will remove the release branch for this release (locally and remotely),
close the associated PR, remove the release page draft on Github and delete
the Bintray repository for it, allowing you to start fresh.
## Manual operations
Some common release-related operations are not covered by this tool and should
be handled manually by the operator:
- After any release:
- Announce new release on Slack
- After a GA release:
- Close the release milestone
- Merge back `CHANGELOG.md` changes from the `release` branch into `master`
- Bump the version in `compose/__init__.py` to the *next* minor version
number with `dev` appended. For example, if you just released `1.4.0`,
update it to `1.5.0dev`
- Update compose_version in [github.com/docker/docker.github.io/blob/master/_config.yml](https://github.com/docker/docker.github.io/blob/master/_config.yml) and [github.com/docker/docker.github.io/blob/master/_config_authoring.yml](https://github.com/docker/docker.github.io/blob/master/_config_authoring.yml)
- Update the release note in [github.com/docker/docker.github.io](https://github.com/docker/docker.github.io/blob/master/release-notes/docker-compose.md)
## Advanced options
You can consult the full list of options for the release tool by executing
`./script/release/release.sh --help`.
1. Edit `compose/__init__.py` to set the release version number
1. Commit and tag as `v{major}.{minor}.{patch}`
1. Edit `compose/__init__.py` again to set the next development version number
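A compact sketch of these three steps, with illustrative version numbers:
```
$EDITOR compose/__init__.py    # set __version__ = '1.25.5'
git commit -asm "Bump 1.25.5"
git tag v1.25.5
$EDITOR compose/__init__.py    # set __version__ = '1.26.0dev'
```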

View File

@@ -0,0 +1,42 @@
#!/bin/bash
set -e
set -x
## Usage:
## changelog PREVIOUS_TAG..HEAD
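## Example (hypothetical invocation; the script path and token value are placeholders):
##   GITHUB_TOKEN=<token> TAG_NAME=1.25.5 ./script/release/changelog.sh v1.25.4..HEAD
## With no argument, the range defaults to "<most recent tag>..HEAD"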
# configure refs so we get pull-requests metadata
git config --add remote.origin.fetch +refs/pull/*/head:refs/remotes/origin/pull/*
git fetch origin
RANGE=${1:-"$(git describe --tags --abbrev=0)..HEAD"}
echo "Generate changelog for range ${RANGE}"
echo
pullrequests() {
for commit in $(git log ${RANGE} --format='format:%H'); do
# Get the oldest remotes/origin/pull/* branch that contains this commit, i.e. the one that introduced it
git branch -a --sort=committerdate --contains $commit --list 'origin/pull/*' | head -1 | cut -d'/' -f4
done
}
changes=$(pullrequests | uniq)
echo "pull requests merged within range:"
echo $changes
echo '#Features' > FEATURES.md
echo '#Bugs' > BUGS.md
for pr in $changes; do
curl -fs -H "Authorization: token ${GITHUB_TOKEN}" https://api.github.com/repos/docker/compose/pulls/${pr} -o PR.json
cat PR.json | jq -r ' select( .labels[].name | contains("kind/feature") ) | "- "+.title' >> FEATURES.md
cat PR.json | jq -r ' select( .labels[].name | contains("kind/bug") ) | "- "+.title' >> BUGS.md
done
echo ${TAG_NAME} > CHANGELOG.md
echo >> CHANGELOG.md
cat FEATURES.md >> CHANGELOG.md
echo >> CHANGELOG.md
cat BUGS.md >> CHANGELOG.md

View File

@@ -1,74 +0,0 @@
#!/bin/bash
#
# Create the official release
#
. "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
function usage() {
>&2 cat << EOM
Publish a release by building all artifacts and pushing them.
This script requires that 'git config branch.${BRANCH}.release' is set to the
release version for the release branch.
EOM
exit 1
}
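# Example (hypothetical values): on release branch "bump-1.25.5", the required
# config entry would be set with: git config branch.bump-1.25.5.release 1.25.5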
BRANCH="$(git rev-parse --abbrev-ref HEAD)"
VERSION="$(git config "branch.${BRANCH}.release")" || usage
if [ -z "$(command -v jq 2> /dev/null)" ]; then
>&2 echo "$0 requires https://stedolan.github.io/jq/"
>&2 echo "Please install it and make sure it is available on your \$PATH."
exit 2
fi
API=https://api.github.com/repos
REPO=docker/compose
GITHUB_REPO=git@github.com:$REPO
# Check the build status is green
sha=$(git rev-parse HEAD)
url=$API/$REPO/statuses/$sha
build_status=$(curl -s $url | jq -r '.[0].state')
if [ -n "$SKIP_BUILD_CHECK" ]; then
echo "Skipping build status check..."
elif [[ "$build_status" != "success" ]]; then
>&2 echo "Build status is $build_status, but it should be success."
exit -1
fi
echo "Tagging the release as $VERSION"
git tag $VERSION
git push $GITHUB_REPO $VERSION
echo "Uploading the docker image"
docker push docker/compose:$VERSION
echo "Uploading the compose-tests image"
docker push docker/compose-tests:latest
docker push docker/compose-tests:$VERSION
echo "Uploading package to PyPI"
./script/build/write-git-sha
python setup.py sdist bdist_wheel
if [ "$(command -v twine 2> /dev/null)" ]; then
twine upload ./dist/docker-compose-${VERSION/-/}.tar.gz ./dist/docker_compose-${VERSION/-/}-py2.py3-none-any.whl
else
python setup.py upload
fi
echo "Testing pip package"
deactivate || true
virtualenv venv-test
source venv-test/bin/activate
pip install docker-compose==$VERSION
docker-compose version
deactivate
rm -rf venv-test
echo "Now publish the github release, and test the downloads."
echo "Email maintainers@dockerproject.org and engineering@docker.com about the new release."

View File

@@ -1,38 +0,0 @@
#!/bin/bash
#
# Move the "bump to <version>" commit to the HEAD of the branch
#
. "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
function usage() {
>&2 cat << EOM
Move the "bump to <version>" commit to the HEAD of the branch
This script requires that 'git config branch.${BRANCH}.release' is set to the
release version for the release branch.
EOM
exit 1
}
BRANCH="$(git rev-parse --abbrev-ref HEAD)"
VERSION="$(git config "branch.${BRANCH}.release")" || usage
COMMIT_MSG="Bump $VERSION"
sha="$(git log --grep "$COMMIT_MSG\$" --format="%H")"
if [ -z "$sha" ]; then
>&2 echo "No commit with message \"$COMMIT_MSG\""
exit 2
fi
if [[ "$sha" == "$(git rev-parse HEAD)" ]]; then
>&2 echo "Bump commit already at HEAD"
exit 0
fi
commits=$(git log --format="%H" "$sha..HEAD" | wc -l | xargs echo)
git rebase --onto $sha~1 HEAD~$commits $BRANCH
git cherry-pick $sha

View File

@@ -1,387 +0,0 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import shutil
import sys
import time
from jinja2 import Template
from release.bintray import BintrayAPI
from release.const import BINTRAY_ORG
from release.const import NAME
from release.const import REPO_ROOT
from release.downloader import BinaryDownloader
from release.images import ImageManager
from release.images import is_tag_latest
from release.pypi import check_pypirc
from release.pypi import pypi_upload
from release.repository import delete_assets
from release.repository import get_contributors
from release.repository import Repository
from release.repository import upload_assets
from release.utils import branch_name
from release.utils import compatibility_matrix
from release.utils import read_release_notes_from_changelog
from release.utils import ScriptError
from release.utils import update_init_py_version
from release.utils import update_run_sh_version
from release.utils import yesno
def create_initial_branch(repository, args):
release_branch = repository.create_release_branch(args.release, args.base)
if args.base and args.cherries:
print('Detected patch version.')
cherries = input('Indicate (space-separated) PR numbers to cherry-pick then press Enter:\n')
repository.cherry_pick_prs(release_branch, cherries.split())
return create_bump_commit(repository, release_branch, args.bintray_user, args.bintray_org)
def create_bump_commit(repository, release_branch, bintray_user, bintray_org):
with release_branch.config_reader() as cfg:
release = cfg.get('release')
print('Updating version info in __init__.py and run.sh')
update_run_sh_version(release)
update_init_py_version(release)
input('Please add the release notes to the CHANGELOG.md file, then press Enter to continue.')
proceed = None
while not proceed:
print(repository.diff())
proceed = yesno('Are these changes ok? y/N ', default=False)
if repository.diff():
repository.create_bump_commit(release_branch, release)
repository.push_branch_to_remote(release_branch)
bintray_api = BintrayAPI(os.environ['BINTRAY_TOKEN'], bintray_user)
if not bintray_api.repository_exists(bintray_org, release_branch.name):
print('Creating data repository {} on bintray'.format(release_branch.name))
bintray_api.create_repository(bintray_org, release_branch.name, 'generic')
else:
print('Bintray repository {} already exists. Skipping'.format(release_branch.name))
def monitor_pr_status(pr_data):
print('Waiting for CI to complete...')
last_commit = pr_data.get_commits().reversed[0]
while True:
status = last_commit.get_combined_status()
if status.state == 'pending' or status.state == 'failure':
summary = {
'pending': 0,
'success': 0,
'failure': 0,
'error': 0,
}
for detail in status.statuses:
if detail.context == 'dco-signed':
# dco-signed check breaks on merge remote-tracking; ignore it
continue
if detail.state in summary:
summary[detail.state] += 1
print(
'{pending} pending, {success} successes, {failure} failures, '
'{error} errors'.format(**summary)
)
if summary['failure'] > 0 or summary['error'] > 0:
raise ScriptError('CI failures detected!')
elif summary['pending'] == 0 and summary['success'] > 0:
# This check assumes at least 1 non-DCO CI check to avoid race conditions.
# If testing on a repo without CI, use --skip-ci-checks to avoid looping eternally
return True
time.sleep(30)
elif status.state == 'success':
print('{} successes: all clear!'.format(status.total_count))
return True
def check_pr_mergeable(pr_data):
if pr_data.mergeable is False:
# mergeable can also be null, in which case the warning would be a false positive.
print(
'WARNING!! PR #{} can not currently be merged. You will need to '
'resolve the conflicts manually before finalizing the release.'.format(pr_data.number)
)
return pr_data.mergeable is True
def create_release_draft(repository, version, pr_data, files):
print('Creating Github release draft')
with open(os.path.join(os.path.dirname(__file__), 'release.md.tmpl'), 'r') as f:
template = Template(f.read())
print('Rendering release notes based on template')
release_notes = template.render(
version=version,
compat_matrix=compatibility_matrix(),
integrity=files,
contributors=get_contributors(pr_data),
changelog=read_release_notes_from_changelog(),
)
gh_release = repository.create_release(
version, release_notes, draft=True, prerelease='-rc' in version,
target_commitish='release'
)
print('Release draft initialized')
return gh_release
def print_final_instructions(args):
print(
"You're almost done! Please verify that everything is in order and "
"you are ready to make the release public, then run the following "
"command:\n{exe} -b {user} finalize {version}".format(
exe='./script/release/release.sh', user=args.bintray_user, version=args.release
)
)
def distclean():
print('Running distclean...')
dirs = [
os.path.join(REPO_ROOT, 'build'), os.path.join(REPO_ROOT, 'dist'),
os.path.join(REPO_ROOT, 'docker-compose.egg-info')
]
files = []
for base, dirnames, fnames in os.walk(REPO_ROOT):
for fname in fnames:
path = os.path.normpath(os.path.join(base, fname))
if fname.endswith('.pyc'):
files.append(path)
elif fname.startswith('.coverage.'):
files.append(path)
for dirname in dirnames:
path = os.path.normpath(os.path.join(base, dirname))
if dirname == '__pycache__':
dirs.append(path)
elif dirname == '.coverage-binfiles':
dirs.append(path)
for file in files:
os.unlink(file)
for folder in dirs:
shutil.rmtree(folder, ignore_errors=True)
def resume(args):
try:
distclean()
repository = Repository(REPO_ROOT, args.repo)
br_name = branch_name(args.release)
if not repository.branch_exists(br_name):
raise ScriptError('No local branch exists for this release.')
gh_release = repository.find_release(args.release)
if gh_release and not gh_release.draft:
print('WARNING!! Found non-draft (public) release for this version!')
proceed = yesno(
'Are you sure you wish to proceed? Modifying an already '
'released version is dangerous! y/N ', default=False
)
if proceed is not True:
raise ScriptError('Aborting release')
release_branch = repository.checkout_branch(br_name)
if args.cherries:
cherries = input('Indicate (space-separated) PR numbers to cherry-pick then press Enter:\n')
repository.cherry_pick_prs(release_branch, cherries.split())
create_bump_commit(repository, release_branch, args.bintray_user, args.bintray_org)
pr_data = repository.find_release_pr(args.release)
if not pr_data:
pr_data = repository.create_release_pull_request(args.release)
check_pr_mergeable(pr_data)
if not args.skip_ci:
monitor_pr_status(pr_data)
downloader = BinaryDownloader(args.destination)
files = downloader.download_all(args.release)
if not gh_release:
gh_release = create_release_draft(repository, args.release, pr_data, files)
delete_assets(gh_release)
upload_assets(gh_release, files)
tag_as_latest = is_tag_latest(args.release)
img_manager = ImageManager(args.release, tag_as_latest)
img_manager.build_images(repository)
except ScriptError as e:
print(e)
return 1
print_final_instructions(args)
return 0
def cancel(args):
try:
repository = Repository(REPO_ROOT, args.repo)
repository.close_release_pr(args.release)
repository.remove_release(args.release)
repository.remove_bump_branch(args.release)
bintray_api = BintrayAPI(os.environ['BINTRAY_TOKEN'], args.bintray_user)
print('Removing Bintray data repository for {}'.format(args.release))
bintray_api.delete_repository(args.bintray_org, branch_name(args.release))
distclean()
except ScriptError as e:
print(e)
return 1
print('Release cancellation complete.')
return 0
def start(args):
distclean()
try:
repository = Repository(REPO_ROOT, args.repo)
create_initial_branch(repository, args)
pr_data = repository.create_release_pull_request(args.release)
check_pr_mergeable(pr_data)
if not args.skip_ci:
monitor_pr_status(pr_data)
downloader = BinaryDownloader(args.destination)
files = downloader.download_all(args.release)
gh_release = create_release_draft(repository, args.release, pr_data, files)
upload_assets(gh_release, files)
tag_as_latest = is_tag_latest(args.release)
img_manager = ImageManager(args.release, tag_as_latest)
img_manager.build_images(repository)
except ScriptError as e:
print(e)
return 1
print_final_instructions(args)
return 0
def finalize(args):
distclean()
try:
check_pypirc()
repository = Repository(REPO_ROOT, args.repo)
tag_as_latest = is_tag_latest(args.release)
img_manager = ImageManager(args.release, tag_as_latest)
pr_data = repository.find_release_pr(args.release)
if not pr_data:
raise ScriptError('No PR found for {}'.format(args.release))
if not check_pr_mergeable(pr_data):
raise ScriptError('Can not finalize release with an unmergeable PR')
if not img_manager.check_images():
raise ScriptError('Missing release image')
br_name = branch_name(args.release)
if not repository.branch_exists(br_name):
raise ScriptError('No local branch exists for this release.')
gh_release = repository.find_release(args.release)
if not gh_release:
raise ScriptError('No Github release draft for this version')
repository.checkout_branch(br_name)
os.system('python {setup_script} sdist bdist_wheel'.format(
setup_script=os.path.join(REPO_ROOT, 'setup.py')))
merge_status = pr_data.merge()
if not merge_status.merged and not args.finalize_resume:
raise ScriptError(
'Unable to merge PR #{}: {}'.format(pr_data.number, merge_status.message)
)
pypi_upload(args)
img_manager.push_images()
repository.publish_release(gh_release)
except ScriptError as e:
print(e)
return 1
return 0
ACTIONS = [
'start',
'cancel',
'resume',
'finalize',
]
EPILOG = '''Example uses:
* Start a new feature release (includes all changes currently in master)
release.sh -b user start 1.23.0
* Start a new patch release
release.sh -b user --patch 1.21.0 start 1.21.1
* Cancel / rollback an existing release draft
release.sh -b user cancel 1.23.0
* Restart a previously aborted patch release
release.sh -b user -p 1.21.0 resume 1.21.1
'''
def main():
if 'GITHUB_TOKEN' not in os.environ:
print('GITHUB_TOKEN environment variable must be set')
return 1
if 'BINTRAY_TOKEN' not in os.environ:
print('BINTRAY_TOKEN environment variable must be set')
return 1
parser = argparse.ArgumentParser(
description='Orchestrate a new release of docker/compose. This tool assumes that you have '
'obtained a Github API token and Bintray API key and set the GITHUB_TOKEN and '
'BINTRAY_TOKEN environment variables accordingly.',
epilog=EPILOG, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'action', choices=ACTIONS, help='The action to be performed for this release'
)
parser.add_argument('release', help='Release number, e.g. 1.9.0-rc1, 2.1.1')
parser.add_argument(
'--patch', '-p', dest='base',
help='Which version is being patched by this release'
)
parser.add_argument(
'--repo', '-r', dest='repo', default=NAME,
help='Start a release for the given repo (default: {})'.format(NAME)
)
parser.add_argument(
'-b', dest='bintray_user', required=True, metavar='USER',
help='Username associated with the Bintray API key'
)
parser.add_argument(
'--bintray-org', dest='bintray_org', metavar='ORG', default=BINTRAY_ORG,
help='Organization name on bintray where the data repository will be created.'
)
parser.add_argument(
'--destination', '-o', metavar='DIR', default='binaries',
help='Directory where release binaries will be downloaded relative to the project root'
)
parser.add_argument(
'--no-cherries', '-C', dest='cherries', action='store_false',
help='If set, the program will not prompt the user for PR numbers to cherry-pick'
)
parser.add_argument(
'--skip-ci-checks', dest='skip_ci', action='store_true',
help='If set, the program will not wait for CI jobs to complete'
)
parser.add_argument(
'--finalize-resume', dest='finalize_resume', action='store_true',
help='If set, finalize will continue through steps that have already been completed.'
)
args = parser.parse_args()
if args.action == 'start':
return start(args)
elif args.action == 'resume':
return resume(args)
elif args.action == 'cancel':
return cancel(args)
elif args.action == 'finalize':
return finalize(args)
print('Unexpected action "{}"'.format(args.action), file=sys.stderr)
return 1
if __name__ == '__main__':
sys.exit(main())

View File

@@ -1,13 +0,0 @@
#!/bin/sh
if test -d ${VENV_DIR:-./.release-venv}; then
true
else
./script/release/setup-venv.sh
fi
if test -z "$*"; then
args="--help"
fi
${VENV_DIR:-./.release-venv}/bin/python ./script/release/release.py "$@"

View File

@@ -1,50 +0,0 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import requests
from .const import NAME
class BintrayAPI(requests.Session):
def __init__(self, api_key, user, *args, **kwargs):
super(BintrayAPI, self).__init__(*args, **kwargs)
self.auth = (user, api_key)
self.base_url = 'https://api.bintray.com/'
def create_repository(self, subject, repo_name, repo_type='generic'):
url = '{base}repos/{subject}/{repo_name}'.format(
base=self.base_url, subject=subject, repo_name=repo_name,
)
data = {
'name': repo_name,
'type': repo_type,
'private': False,
'desc': 'Automated release for {}: {}'.format(NAME, repo_name),
'labels': ['docker-compose', 'docker', 'release-bot'],
}
return self.post_json(url, data)
def repository_exists(self, subject, repo_name):
url = '{base}/repos/{subject}/{repo_name}'.format(
base=self.base_url, subject=subject, repo_name=repo_name,
)
result = self.get(url)
if result.status_code == 404:
return False
result.raise_for_status()
return True
def delete_repository(self, subject, repo_name):
url = '{base}repos/{subject}/{repo_name}'.format(
base=self.base_url, subject=subject, repo_name=repo_name,
)
return self.delete(url)
def post_json(self, url, data, **kwargs):
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Content-Type'] = 'application/json'
return self.post(url, data=json.dumps(data), **kwargs)

View File

@@ -1,10 +0,0 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import os
REPO_ROOT = os.path.join(os.path.dirname(__file__), '..', '..', '..')
NAME = 'docker/compose'
COMPOSE_TESTS_IMAGE_BASE_NAME = NAME + '-tests'
BINTRAY_ORG = 'docker-compose'

View File

@@ -1,73 +0,0 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import os
import requests
from .const import BINTRAY_ORG
from .const import NAME
from .const import REPO_ROOT
from .utils import branch_name
class BinaryDownloader(requests.Session):
base_bintray_url = 'https://dl.bintray.com/{}'.format(BINTRAY_ORG)
base_appveyor_url = 'https://ci.appveyor.com/api/projects/{}/artifacts/'.format(NAME)
def __init__(self, destination, *args, **kwargs):
super(BinaryDownloader, self).__init__(*args, **kwargs)
self.destination = destination
os.makedirs(self.destination, exist_ok=True)
def download_from_bintray(self, repo_name, filename):
print('Downloading {} from bintray'.format(filename))
url = '{base}/{repo_name}/{filename}'.format(
base=self.base_bintray_url, repo_name=repo_name, filename=filename
)
full_dest = os.path.join(REPO_ROOT, self.destination, filename)
return self._download(url, full_dest)
def download_from_appveyor(self, branch_name, filename):
print('Downloading {} from appveyor'.format(filename))
url = '{base}/dist%2F{filename}?branch={branch_name}'.format(
base=self.base_appveyor_url, filename=filename, branch_name=branch_name
)
full_dest = os.path.join(REPO_ROOT, self.destination, filename)
return self._download(url, full_dest)
def _download(self, url, full_dest):
m = hashlib.sha256()
with open(full_dest, 'wb') as f:
r = self.get(url, stream=True)
for chunk in r.iter_content(chunk_size=1024 * 600, decode_unicode=False):
print('.', end='', flush=True)
m.update(chunk)
f.write(chunk)
print(' download complete')
hex_digest = m.hexdigest()
with open(full_dest + '.sha256', 'w') as f:
f.write('{} {}\n'.format(hex_digest, os.path.basename(full_dest)))
return full_dest, hex_digest
def download_all(self, version):
files = {
'docker-compose-Darwin-x86_64.tgz': None,
'docker-compose-Darwin-x86_64': None,
'docker-compose-Linux-x86_64': None,
'docker-compose-Windows-x86_64.exe': None,
}
for filename in files.keys():
if 'Windows' in filename:
files[filename] = self.download_from_appveyor(
branch_name(version), filename
)
else:
files[filename] = self.download_from_bintray(
branch_name(version), filename
)
return files

View File

@@ -1,157 +0,0 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import base64
import json
import os
import docker
from enum import Enum
from .const import NAME
from .const import REPO_ROOT
from .utils import ScriptError
from .utils import yesno
from script.release.release.const import COMPOSE_TESTS_IMAGE_BASE_NAME
class Platform(Enum):
ALPINE = 'alpine'
DEBIAN = 'debian'
def __str__(self):
return self.value
# Checks if this version respects the GA version format ('x.y.z') and not an RC
def is_tag_latest(version):
ga_version = all(n.isdigit() for n in version.split('.')) and version.count('.') == 2
return ga_version and yesno('Should this release be tagged as \"latest\"? [Y/n]: ', default=True)
class ImageManager(object):
def __init__(self, version, latest=False):
self.docker_client = docker.APIClient(**docker.utils.kwargs_from_env())
self.version = version
self.latest = latest
if 'HUB_CREDENTIALS' in os.environ:
print('HUB_CREDENTIALS found in environment, issuing login')
credentials = json.loads(base64.urlsafe_b64decode(os.environ['HUB_CREDENTIALS']))
self.docker_client.login(
username=credentials['Username'], password=credentials['Password']
)
def _tag(self, image, existing_tag, new_tag):
existing_repo_tag = '{image}:{tag}'.format(image=image, tag=existing_tag)
new_repo_tag = '{image}:{tag}'.format(image=image, tag=new_tag)
self.docker_client.tag(existing_repo_tag, new_repo_tag)
def get_full_version(self, platform=None):
return self.version + '-' + platform.__str__() if platform else self.version
def get_runtime_image_tag(self, tag):
return '{image_base_image}:{tag}'.format(
image_base_image=NAME,
tag=self.get_full_version(tag)
)
def build_runtime_image(self, repository, platform):
git_sha = repository.write_git_sha()
compose_image_base_name = NAME
print('Building {image} image ({platform} based)'.format(
image=compose_image_base_name,
platform=platform
))
full_version = self.get_full_version(platform)
build_tag = self.get_runtime_image_tag(platform)
logstream = self.docker_client.build(
REPO_ROOT,
tag=build_tag,
buildargs={
'BUILD_PLATFORM': platform.value,
'GIT_COMMIT': git_sha,
},
decode=True
)
for chunk in logstream:
if 'error' in chunk:
raise ScriptError('Build error: {}'.format(chunk['error']))
if 'stream' in chunk:
print(chunk['stream'], end='')
if platform == Platform.ALPINE:
self._tag(compose_image_base_name, full_version, self.version)
if self.latest:
self._tag(compose_image_base_name, full_version, platform)
if platform == Platform.ALPINE:
self._tag(compose_image_base_name, full_version, 'latest')
def get_ucp_test_image_tag(self, tag=None):
return '{image}:{tag}'.format(
image=COMPOSE_TESTS_IMAGE_BASE_NAME,
tag=tag or self.version
)
# Used for producing a test image for UCP
def build_ucp_test_image(self, repository):
print('Building test image (debian based for UCP e2e)')
git_sha = repository.write_git_sha()
ucp_test_image_tag = self.get_ucp_test_image_tag()
logstream = self.docker_client.build(
REPO_ROOT,
tag=ucp_test_image_tag,
target='build',
buildargs={
'BUILD_PLATFORM': Platform.DEBIAN.value,
'GIT_COMMIT': git_sha,
},
decode=True
)
for chunk in logstream:
if 'error' in chunk:
raise ScriptError('Build error: {}'.format(chunk['error']))
if 'stream' in chunk:
print(chunk['stream'], end='')
self._tag(COMPOSE_TESTS_IMAGE_BASE_NAME, self.version, 'latest')
def build_images(self, repository):
self.build_runtime_image(repository, Platform.ALPINE)
self.build_runtime_image(repository, Platform.DEBIAN)
self.build_ucp_test_image(repository)
def check_images(self):
for name in self.get_images_to_push():
try:
self.docker_client.inspect_image(name)
except docker.errors.ImageNotFound:
print('Expected image {} was not found'.format(name))
return False
return True
def get_images_to_push(self):
tags_to_push = {
"{}:{}".format(NAME, self.version),
self.get_runtime_image_tag(Platform.ALPINE),
self.get_runtime_image_tag(Platform.DEBIAN),
self.get_ucp_test_image_tag(),
self.get_ucp_test_image_tag('latest'),
}
if is_tag_latest(self.version):
tags_to_push.add("{}:latest".format(NAME))
return tags_to_push
def push_images(self):
tags_to_push = self.get_images_to_push()
print('Build tags to push {}'.format(tags_to_push))
for name in tags_to_push:
print('Pushing {} to Docker Hub'.format(name))
logstream = self.docker_client.push(name, stream=True, decode=True)
for chunk in logstream:
if 'status' in chunk:
print(chunk['status'])
if 'error' in chunk:
raise ScriptError(
'Error pushing {name}: {err}'.format(name=name, err=chunk['error'])
)

View File

@@ -1,44 +0,0 @@
from __future__ import absolute_import
from __future__ import unicode_literals
from configparser import Error
from requests.exceptions import HTTPError
from twine.commands.upload import main as twine_upload
from twine.utils import get_config
from .utils import ScriptError
def pypi_upload(args):
print('Uploading to PyPi')
try:
rel = args.release.replace('-rc', 'rc')
twine_upload([
'dist/docker_compose-{}*.whl'.format(rel),
'dist/docker-compose-{}*.tar.gz'.format(rel)
])
except HTTPError as e:
if e.response.status_code == 400 and 'File already exists' in str(e):
if not args.finalize_resume:
raise ScriptError(
'Package already uploaded on PyPi.'
)
print('Skipping PyPi upload - package already uploaded')
else:
raise ScriptError('Unexpected HTTP error uploading package to PyPi: {}'.format(e))
def check_pypirc():
try:
config = get_config()
except Error as e:
raise ScriptError('Failed to parse .pypirc file: {}'.format(e))
if config is None:
raise ScriptError('Failed to parse .pypirc file')
if 'pypi' not in config:
raise ScriptError('Missing [pypi] section in .pypirc file')
if not (config['pypi'].get('username') and config['pypi'].get('password')):
raise ScriptError('Missing login/password pair for pypi repo')

View File

@@ -1,246 +0,0 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import tempfile
import requests
from git import GitCommandError
from git import Repo
from github import Github
from .const import NAME
from .const import REPO_ROOT
from .utils import branch_name
from .utils import read_release_notes_from_changelog
from .utils import ScriptError
class Repository(object):
def __init__(self, root=None, gh_name=None):
if root is None:
root = REPO_ROOT
if gh_name is None:
gh_name = NAME
self.git_repo = Repo(root)
self.gh_client = Github(os.environ['GITHUB_TOKEN'])
self.gh_repo = self.gh_client.get_repo(gh_name)
def create_release_branch(self, version, base=None):
print('Creating release branch {} based on {}...'.format(version, base or 'master'))
remote = self.find_remote(self.gh_repo.full_name)
br_name = branch_name(version)
remote.fetch()
if self.branch_exists(br_name):
raise ScriptError(
"Branch {} already exists locally. Please remove it before "
"running the release script, or use `resume` instead.".format(
br_name
)
)
if base is not None:
base = self.git_repo.tag('refs/tags/{}'.format(base))
else:
base = 'refs/remotes/{}/master'.format(remote.name)
release_branch = self.git_repo.create_head(br_name, commit=base)
release_branch.checkout()
self.git_repo.git.merge('--strategy=ours', '--no-edit', '{}/release'.format(remote.name))
with release_branch.config_writer() as cfg:
cfg.set_value('release', version)
return release_branch
def find_remote(self, remote_name=None):
if not remote_name:
remote_name = self.gh_repo.full_name
for remote in self.git_repo.remotes:
for url in remote.urls:
if remote_name in url:
return remote
return None
def create_bump_commit(self, bump_branch, version):
print('Creating bump commit...')
bump_branch.checkout()
self.git_repo.git.commit('-a', '-s', '-m "Bump {}"'.format(version), '--no-verify')
def diff(self):
return self.git_repo.git.diff()
def checkout_branch(self, name):
return self.git_repo.branches[name].checkout()
def push_branch_to_remote(self, branch, remote_name=None):
print('Pushing branch {} to remote...'.format(branch.name))
remote = self.find_remote(remote_name)
remote.push(refspec=branch, force=True)
def branch_exists(self, name):
return name in [h.name for h in self.git_repo.heads]
def create_release_pull_request(self, version):
return self.gh_repo.create_pull(
title='Bump {}'.format(version),
body='Automated release for docker-compose {}\n\n{}'.format(
version, read_release_notes_from_changelog()
),
base='release',
head=branch_name(version),
)
def create_release(self, version, release_notes, **kwargs):
return self.gh_repo.create_git_release(
tag=version, name=version, message=release_notes, **kwargs
)
def find_release(self, version):
print('Retrieving release draft for {}'.format(version))
releases = self.gh_repo.get_releases()
for release in releases:
if release.tag_name == version and release.title == version:
return release
return None
def publish_release(self, release):
release.update_release(
name=release.title,
message=release.body,
draft=False,
prerelease=release.prerelease
)
def remove_release(self, version):
print('Removing release draft for {}'.format(version))
releases = self.gh_repo.get_releases()
for release in releases:
if release.tag_name == version and release.title == version:
if not release.draft:
print(
'The release at {} is no longer a draft. If you TRULY intend '
'to remove it, please do so manually.'.format(release.url)
)
continue
release.delete_release()
def remove_bump_branch(self, version, remote_name=None):
name = branch_name(version)
if not self.branch_exists(name):
return False
print('Removing local branch "{}"'.format(name))
if self.git_repo.active_branch.name == name:
print('Active branch is about to be deleted. Checking out to master...')
try:
self.checkout_branch('master')
except GitCommandError:
raise ScriptError(
'Unable to checkout master. Try stashing local changes before proceeding.'
)
self.git_repo.branches[name].delete(self.git_repo, name, force=True)
print('Removing remote branch "{}"'.format(name))
remote = self.find_remote(remote_name)
try:
remote.push(name, delete=True)
except GitCommandError as e:
if 'remote ref does not exist' in str(e):
return False
raise ScriptError(
'Error trying to remove remote branch: {}'.format(e)
)
return True
def find_release_pr(self, version):
print('Retrieving release PR for {}'.format(version))
name = branch_name(version)
open_prs = self.gh_repo.get_pulls(state='open')
for pr in open_prs:
if pr.head.ref == name:
print('Found matching PR #{}'.format(pr.number))
return pr
print('No open PR for this release branch.')
return None
def close_release_pr(self, version):
print('Retrieving and closing release PR for {}'.format(version))
name = branch_name(version)
open_prs = self.gh_repo.get_pulls(state='open')
count = 0
for pr in open_prs:
if pr.head.ref == name:
print('Found matching PR #{}'.format(pr.number))
pr.edit(state='closed')
count += 1
if count == 0:
print('No open PR for this release branch.')
return count
def write_git_sha(self):
with open(os.path.join(REPO_ROOT, 'compose', 'GITSHA'), 'w') as f:
f.write(self.git_repo.head.commit.hexsha[:7])
return self.git_repo.head.commit.hexsha[:7]
def cherry_pick_prs(self, release_branch, ids):
if not ids:
return
release_branch.checkout()
for i in ids:
try:
i = int(i)
except ValueError as e:
raise ScriptError('Invalid PR id: {}'.format(e))
print('Retrieving PR#{}'.format(i))
pr = self.gh_repo.get_pull(i)
patch_data = requests.get(pr.patch_url).text
self.apply_patch(patch_data)
def apply_patch(self, patch_data):
with tempfile.NamedTemporaryFile(mode='w', prefix='_compose_cherry', encoding='utf-8') as f:
f.write(patch_data)
f.flush()
self.git_repo.git.am('--3way', f.name)
def get_prs_in_milestone(self, version):
milestones = self.gh_repo.get_milestones(state='open')
milestone = None
for ms in milestones:
if ms.title == version:
milestone = ms
break
if not milestone:
print('Didn\'t find a milestone matching "{}"'.format(version))
return None
issues = self.gh_repo.get_issues(milestone=milestone, state='all')
prs = []
for issue in issues:
if issue.pull_request is not None:
prs.append(issue.number)
return sorted(prs)
def get_contributors(pr_data):
commits = pr_data.get_commits()
authors = {}
for commit in commits:
if not commit or not commit.author or not commit.author.login:
continue
author = commit.author.login
authors[author] = authors.get(author, 0) + 1
return [x[0] for x in sorted(list(authors.items()), key=lambda x: x[1])]
def upload_assets(gh_release, files):
print('Uploading binaries and hash sums')
for filename, filedata in files.items():
print('Uploading {}...'.format(filename))
gh_release.upload_asset(filedata[0], content_type='application/octet-stream')
gh_release.upload_asset('{}.sha256'.format(filedata[0]), content_type='text/plain')
print('Uploading run.sh...')
gh_release.upload_asset(
os.path.join(REPO_ROOT, 'script', 'run', 'run.sh'), content_type='text/plain'
)
def delete_assets(gh_release):
print('Removing previously uploaded assets')
for asset in gh_release.get_assets():
print('Deleting asset {}'.format(asset.name))
asset.delete_asset()

View File

@@ -1,85 +0,0 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
from .const import REPO_ROOT
from compose import const as compose_const
section_header_re = re.compile(r'^[0-9]+\.[0-9]+\.[0-9]+ \([0-9]{4}-[01][0-9]-[0-3][0-9]\)$')
class ScriptError(Exception):
pass
def branch_name(version):
return 'bump-{}'.format(version)
def read_release_notes_from_changelog():
with open(os.path.join(REPO_ROOT, 'CHANGELOG.md'), 'r') as f:
lines = f.readlines()
i = 0
while i < len(lines):
if section_header_re.match(lines[i]):
break
i += 1
j = i + 1
while j < len(lines):
if section_header_re.match(lines[j]):
break
j += 1
return ''.join(lines[i + 2:j - 1])
def update_init_py_version(version):
path = os.path.join(REPO_ROOT, 'compose', '__init__.py')
with open(path, 'r') as f:
contents = f.read()
contents = re.sub(r"__version__ = '[0-9a-z.-]+'", "__version__ = '{}'".format(version), contents)
with open(path, 'w') as f:
f.write(contents)
def update_run_sh_version(version):
path = os.path.join(REPO_ROOT, 'script', 'run', 'run.sh')
with open(path, 'r') as f:
contents = f.read()
contents = re.sub(r'VERSION="[0-9a-z.-]+"', 'VERSION="{}"'.format(version), contents)
with open(path, 'w') as f:
f.write(contents)
def compatibility_matrix():
result = {}
for engine_version in compose_const.API_VERSION_TO_ENGINE_VERSION.values():
result[engine_version] = []
for fmt, api_version in compose_const.API_VERSIONS.items():
result[compose_const.API_VERSION_TO_ENGINE_VERSION[api_version]].append(fmt.vstring)
return result
def yesno(prompt, default=None):
"""
Prompt the user for a yes or no.
Can optionally specify a default value, which will only be
used if they enter a blank line.
Unrecognised input (anything other than "y", "n", "yes",
"no" or "") will return None.
"""
answer = input(prompt).strip().lower()
if answer == "y" or answer == "yes":
return True
elif answer == "n" or answer == "no":
return False
elif answer == "":
return default
else:
return None

View File

@@ -1,47 +0,0 @@
#!/bin/bash
debian_based() { test -f /etc/debian_version; }
if test -z $VENV_DIR; then
VENV_DIR=./.release-venv
fi
if test -z $PYTHONBIN; then
PYTHONBIN=$(which python3)
if test -z $PYTHONBIN; then
PYTHONBIN=$(which python)
fi
fi
VERSION=$($PYTHONBIN -c "import sys; print('{}.{}'.format(*sys.version_info[0:2]))")
if test $(echo $VERSION | cut -d. -f1) -lt 3; then
echo "Python 3.3 or above is required"
fi
if test $(echo $VERSION | cut -d. -f2) -lt 3; then
echo "Python 3.3 or above is required"
fi
# Debian / Ubuntu workaround:
# https://askubuntu.com/questions/879437/ensurepip-is-disabled-in-debian-ubuntu-for-the-system-python
if debian_based; then
VENV_FLAGS="$VENV_FLAGS --without-pip"
fi
$PYTHONBIN -m venv $VENV_DIR $VENV_FLAGS
VENV_PYTHONBIN=$VENV_DIR/bin/python
if debian_based; then
curl https://bootstrap.pypa.io/get-pip.py -o $VENV_DIR/get-pip.py
$VENV_PYTHONBIN $VENV_DIR/get-pip.py
fi
$VENV_PYTHONBIN -m pip install -U Jinja2==2.10 \
PyGithub==1.39 \
GitPython==2.1.9 \
requests==2.18.4 \
setuptools==40.6.2 \
twine==1.11.0
$VENV_PYTHONBIN setup.py develop

View File

@@ -15,7 +15,7 @@
set -e
VERSION="1.25.1"
VERSION="1.25.5"
IMAGE="docker/compose:$VERSION"

View File

@@ -13,13 +13,13 @@ if ! [ ${DEPLOYMENT_TARGET} == "$(macos_version)" ]; then
SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62
fi
OPENSSL_VERSION=1.1.1c
OPENSSL_VERSION=1.1.1f
OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz
OPENSSL_SHA1=71b830a077276cbeccc994369538617a21bee808
OPENSSL_SHA1=238e001ea1fbf19ede43e36209c37c1a636bb51f
PYTHON_VERSION=3.7.4
PYTHON_VERSION=3.7.5
PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz
PYTHON_SHA1=fb1d764be8a9dcd40f2f152a610a0ab04e0d0ed3
PYTHON_SHA1=8b0311d4cca19f0ea9181731189fa33c9f5aedf9
#
# Install prerequisites.

View File

@@ -32,7 +32,7 @@ def find_version(*file_paths):
install_requires = [
'cached-property >= 1.2.0, < 2',
'docopt >= 0.6.1, < 1',
'PyYAML >= 3.10, < 5',
'PyYAML >= 3.10, < 6',
'requests >= 2.20.0, < 3',
'texttable >= 0.9.0, < 2',
'websocket-client >= 0.32.0, < 1',

View File

@@ -43,6 +43,24 @@ ProcessResult = namedtuple('ProcessResult', 'stdout stderr')
BUILD_CACHE_TEXT = 'Using cache'
BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:1.27.2'
COMPOSE_COMPATIBILITY_DICT = {
'version': '2.3',
'volumes': {'foo': {'driver': 'default'}},
'networks': {'bar': {}},
'services': {
'foo': {
'command': '/bin/true',
'image': 'alpine:3.10.1',
'scale': 3,
'restart': 'always:7',
'mem_limit': '300M',
'mem_reservation': '100M',
'cpus': 0.7,
'volumes': ['foo:/bar:rw'],
'networks': {'bar': None},
}
},
}
def start_process(base_dir, options):
@@ -269,7 +287,7 @@ services:
# assert there are no python objects encoded in the output
assert '!!' not in result.stdout
output = yaml.load(result.stdout)
output = yaml.safe_load(result.stdout)
expected = {
'version': '2.0',
'volumes': {'data': {'driver': 'local'}},
@@ -294,7 +312,7 @@ services:
def test_config_restart(self):
self.base_dir = 'tests/fixtures/restart'
result = self.dispatch(['config'])
assert yaml.load(result.stdout) == {
assert yaml.safe_load(result.stdout) == {
'version': '2.0',
'services': {
'never': {
@@ -323,7 +341,7 @@ services:
def test_config_external_network(self):
self.base_dir = 'tests/fixtures/networks'
result = self.dispatch(['-f', 'external-networks.yml', 'config'])
json_result = yaml.load(result.stdout)
json_result = yaml.safe_load(result.stdout)
assert 'networks' in json_result
assert json_result['networks'] == {
'networks_foo': {
@@ -337,7 +355,7 @@ services:
def test_config_with_dot_env(self):
self.base_dir = 'tests/fixtures/default-env-file'
result = self.dispatch(['config'])
json_result = yaml.load(result.stdout)
json_result = yaml.safe_load(result.stdout)
assert json_result == {
'services': {
'web': {
@@ -352,7 +370,7 @@ services:
def test_config_with_env_file(self):
self.base_dir = 'tests/fixtures/default-env-file'
result = self.dispatch(['--env-file', '.env2', 'config'])
json_result = yaml.load(result.stdout)
json_result = yaml.safe_load(result.stdout)
assert json_result == {
'services': {
'web': {
@@ -367,7 +385,7 @@ services:
def test_config_with_dot_env_and_override_dir(self):
self.base_dir = 'tests/fixtures/default-env-file'
result = self.dispatch(['--project-directory', 'alt/', 'config'])
json_result = yaml.load(result.stdout)
json_result = yaml.safe_load(result.stdout)
assert json_result == {
'services': {
'web': {
@@ -382,7 +400,7 @@ services:
def test_config_external_volume_v2(self):
self.base_dir = 'tests/fixtures/volumes'
result = self.dispatch(['-f', 'external-volumes-v2.yml', 'config'])
json_result = yaml.load(result.stdout)
json_result = yaml.safe_load(result.stdout)
assert 'volumes' in json_result
assert json_result['volumes'] == {
'foo': {
@@ -398,7 +416,7 @@ services:
def test_config_external_volume_v2_x(self):
self.base_dir = 'tests/fixtures/volumes'
result = self.dispatch(['-f', 'external-volumes-v2-x.yml', 'config'])
json_result = yaml.load(result.stdout)
json_result = yaml.safe_load(result.stdout)
assert 'volumes' in json_result
assert json_result['volumes'] == {
'foo': {
@@ -414,7 +432,7 @@ services:
def test_config_external_volume_v3_x(self):
self.base_dir = 'tests/fixtures/volumes'
result = self.dispatch(['-f', 'external-volumes-v3-x.yml', 'config'])
json_result = yaml.load(result.stdout)
json_result = yaml.safe_load(result.stdout)
assert 'volumes' in json_result
assert json_result['volumes'] == {
'foo': {
@@ -430,7 +448,7 @@ services:
def test_config_external_volume_v3_4(self):
self.base_dir = 'tests/fixtures/volumes'
result = self.dispatch(['-f', 'external-volumes-v3-4.yml', 'config'])
json_result = yaml.load(result.stdout)
json_result = yaml.safe_load(result.stdout)
assert 'volumes' in json_result
assert json_result['volumes'] == {
'foo': {
@@ -446,7 +464,7 @@ services:
def test_config_external_network_v3_5(self):
self.base_dir = 'tests/fixtures/networks'
result = self.dispatch(['-f', 'external-networks-v3-5.yml', 'config'])
json_result = yaml.load(result.stdout)
json_result = yaml.safe_load(result.stdout)
assert 'networks' in json_result
assert json_result['networks'] == {
'foo': {
@@ -462,7 +480,7 @@ services:
def test_config_v1(self):
self.base_dir = 'tests/fixtures/v1-config'
result = self.dispatch(['config'])
assert yaml.load(result.stdout) == {
assert yaml.safe_load(result.stdout) == {
'version': '2.1',
'services': {
'net': {
@@ -487,7 +505,7 @@ services:
self.base_dir = 'tests/fixtures/v3-full'
result = self.dispatch(['config'])
assert yaml.load(result.stdout) == {
assert yaml.safe_load(result.stdout) == {
'version': '3.5',
'volumes': {
'foobar': {
@@ -564,24 +582,23 @@ services:
self.base_dir = 'tests/fixtures/compatibility-mode'
result = self.dispatch(['--compatibility', 'config'])
assert yaml.load(result.stdout) == {
'version': '2.3',
'volumes': {'foo': {'driver': 'default'}},
'networks': {'bar': {}},
'services': {
'foo': {
'command': '/bin/true',
'image': 'alpine:3.10.1',
'scale': 3,
'restart': 'always:7',
'mem_limit': '300M',
'mem_reservation': '100M',
'cpus': 0.7,
'volumes': ['foo:/bar:rw'],
'networks': {'bar': None},
}
},
}
assert yaml.load(result.stdout) == COMPOSE_COMPATIBILITY_DICT
@mock.patch.dict(os.environ)
def test_config_compatibility_mode_from_env(self):
self.base_dir = 'tests/fixtures/compatibility-mode'
os.environ['COMPOSE_COMPATIBILITY'] = 'true'
result = self.dispatch(['config'])
assert yaml.load(result.stdout) == COMPOSE_COMPATIBILITY_DICT
@mock.patch.dict(os.environ)
def test_config_compatibility_mode_from_env_and_option_precedence(self):
self.base_dir = 'tests/fixtures/compatibility-mode'
os.environ['COMPOSE_COMPATIBILITY'] = 'false'
result = self.dispatch(['--compatibility', 'config'])
assert yaml.load(result.stdout) == COMPOSE_COMPATIBILITY_DICT
def test_ps(self):
self.project.get_service('simple').create_container()
@@ -855,32 +872,6 @@ services:
)
assert 'Favorite Touhou Character: hong.meiling' in result.stdout
def test_bundle_with_digests(self):
self.base_dir = 'tests/fixtures/bundle-with-digests/'
tmpdir = pytest.ensuretemp('cli_test_bundle')
self.addCleanup(tmpdir.remove)
filename = str(tmpdir.join('example.dab'))
self.dispatch(['bundle', '--output', filename])
with open(filename, 'r') as fh:
bundle = json.load(fh)
assert bundle == {
'Version': '0.1',
'Services': {
'web': {
'Image': ('dockercloud/hello-world@sha256:fe79a2cfbd17eefc3'
'44fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d'),
'Networks': ['default'],
},
'redis': {
'Image': ('redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d'
'374b2b7392de1e7d77be26ef8f7b'),
'Networks': ['default'],
}
},
}
def test_build_override_dir(self):
self.base_dir = 'tests/fixtures/build-path-override-dir'
self.override_dir = os.path.abspath('tests/fixtures')
@@ -1580,6 +1571,26 @@ services:
assert len(db.containers()) == 0
assert len(console.containers()) == 0
def test_up_with_attach_dependencies(self):
self.base_dir = 'tests/fixtures/echo-services-dependencies'
result = self.dispatch(['up', '--attach-dependencies', '--no-color', 'simple'], None)
simple_name = self.project.get_service('simple').containers(stopped=True)[0].name_without_project
another_name = self.project.get_service('another').containers(
stopped=True
)[0].name_without_project
assert '{} | simple'.format(simple_name) in result.stdout
assert '{} | another'.format(another_name) in result.stdout
def test_up_handles_aborted_dependencies(self):
self.base_dir = 'tests/fixtures/abort-on-container-exit-dependencies'
proc = start_process(
self.base_dir,
['up', 'simple', '--attach-dependencies', '--abort-on-container-exit'])
wait_on_condition(ContainerCountCondition(self.project, 0))
proc.wait()
assert proc.returncode == 1
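The fixture behind this test (shown further below) gives the dependency the command ls /thecakeisalie, which exits non-zero; with --abort-on-container-exit the failure of an attached dependency tears the whole project down, so up itself returns 1. wait_on_condition and ContainerCountCondition are existing helpers from this suite; a plausible reconstruction of the polling helper, with names and defaults assumed rather than taken from the diff:

    import time

    def wait_on_condition(condition, delay=0.1, timeout=40):
        # hypothetical sketch: poll until the condition holds or give up
        start = time.time()
        while not condition():
            if time.time() - start > timeout:
                raise AssertionError('Timeout while waiting on: {}'.format(condition))
            time.sleep(delay)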
def test_up_with_force_recreate(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
@@ -1700,6 +1711,17 @@ services:
assert stderr == ""
assert stdout == "/\n"
@mock.patch.dict(os.environ)
def test_exec_novalue_var_dotenv_file(self):
os.environ['MYVAR'] = 'SUCCESS'
self.base_dir = 'tests/fixtures/exec-novalue-var'
self.dispatch(['up', '-d'])
assert len(self.project.containers()) == 1
stdout, stderr = self.dispatch(['exec', '-T', 'nginx', 'env'])
assert 'CHECK_VAR=SUCCESS' in stdout
assert not stderr
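This test covers ${MYVAR} references without a default value: the exec-novalue-var fixture below declares CHECK_VAR=${MYVAR}, so the value must come from the caller's environment when up runs. The substitution itself is plain shell-style templating; a standalone illustration (not Compose's actual interpolation engine):

    from string import Template

    # ${MYVAR} resolves from the invoking environment, here MYVAR=SUCCESS
    rendered = Template('CHECK_VAR=${MYVAR}').substitute({'MYVAR': 'SUCCESS'})
    assert rendered == 'CHECK_VAR=SUCCESS'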
def test_exec_detach_long_form(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '--detach', 'console'])


@@ -0,0 +1,10 @@
version: "2.0"
services:
simple:
image: busybox:1.31.0-uclibc
command: top
depends_on:
- another
another:
image: busybox:1.31.0-uclibc
command: ls /thecakeisalie


@@ -1,9 +0,0 @@
version: '2.0'
services:
web:
image: dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d
redis:
image: redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d374b2b7392de1e7d77be26ef8f7b


@@ -0,0 +1,10 @@
version: "2.0"
services:
simple:
image: busybox:1.31.0-uclibc
command: echo simple
depends_on:
- another
another:
image: busybox:1.31.0-uclibc
command: echo another


@@ -0,0 +1,6 @@
version: '3'
services:
nginx:
image: nginx
environment:
- CHECK_VAR=${MYVAR}


@@ -1,6 +1,7 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import contextlib
import os
from compose.config.config import ConfigDetails
@@ -55,3 +56,17 @@ def create_host_file(client, filename):
content = fh.read()
return create_custom_host_file(client, filename, content)
@contextlib.contextmanager
def cd(path):
"""
A context manager which changes the working directory to the given
path, and then changes it back to its previous value on exit.
"""
prev_cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev_cwd)
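A short usage sketch for the new helper; the body of the with block runs in the given directory, and the previous working directory is restored on the way out even if the body raises:

    import os

    # hypothetical usage of the cd() helper defined above
    before = os.getcwd()
    with cd('/tmp'):
        assert os.path.realpath(os.getcwd()) == os.path.realpath('/tmp')
    assert os.getcwd() == before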


@@ -8,7 +8,6 @@ import random
import shutil
import tempfile
import py
import pytest
from docker.errors import APIError
from docker.errors import NotFound
@@ -16,6 +15,7 @@ from docker.errors import NotFound
from .. import mock
from ..helpers import build_config as load_config
from ..helpers import BUSYBOX_IMAGE_WITH_TAG
from ..helpers import cd
from ..helpers import create_host_file
from .testcases import DockerClientTestCase
from .testcases import SWARM_SKIP_CONTAINERS_ALL
@@ -1329,9 +1329,9 @@ class ProjectTest(DockerClientTestCase):
})
details = config.ConfigDetails('.', [base_file, override_file])
tmpdir = py.test.ensuretemp('logging_test')
self.addCleanup(tmpdir.remove)
with tmpdir.as_cwd():
tmpdir = tempfile.mkdtemp('logging_test')
self.addCleanup(shutil.rmtree, tmpdir)
with cd(tmpdir):
config_data = config.load(details)
project = Project.from_config(
name='composetest', config_data=config_data, client=self.client
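This hunk shows the migration that repeats throughout the changeset: py.test.ensuretemp returned a py.path object carrying its own .remove() and .as_cwd(), and both are replaced by the standard library plus the new cd() helper. The pattern in isolation, as a minimal self-contained example:

    import os
    import shutil
    import tempfile
    import unittest

    class TmpDirExample(unittest.TestCase):
        def test_uses_tmpdir(self):
            # mkdtemp returns a plain str path, so cleanup is registered
            # explicitly instead of relying on a py.path object's .remove()
            tmpdir = tempfile.mkdtemp('example_test')
            self.addCleanup(shutil.rmtree, tmpdir)
            self.assertTrue(os.path.isdir(tmpdir))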


@@ -6,8 +6,10 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import os
import shutil
import tempfile
import py
from docker.errors import ImageNotFound
from ..helpers import BUSYBOX_IMAGE_WITH_TAG
@@ -426,29 +428,32 @@ class ServiceStateTest(DockerClientTestCase):
@no_cluster('Can not guarantee the build will be run on the same node the service is deployed')
def test_trigger_recreate_with_build(self):
context = py.test.ensuretemp('test_trigger_recreate_with_build')
self.addCleanup(context.remove)
context = tempfile.mkdtemp('test_trigger_recreate_with_build')
self.addCleanup(shutil.rmtree, context)
base_image = "FROM busybox\nLABEL com.docker.compose.test_image=true\n"
dockerfile = context.join('Dockerfile')
dockerfile.write(base_image)
dockerfile = os.path.join(context, 'Dockerfile')
with open(dockerfile, mode="w") as dockerfile_fh:
dockerfile_fh.write(base_image)
web = self.create_service('web', build={'context': str(context)})
container = web.create_container()
dockerfile.write(base_image + 'CMD echo hello world\n')
with open(dockerfile, mode="w") as dockerfile_fh:
dockerfile_fh.write(base_image + 'CMD echo hello world\n')
web.build()
web = self.create_service('web', build={'context': str(context)})
assert ('recreate', [container]) == web.convergence_plan()
def test_image_changed_to_build(self):
context = py.test.ensuretemp('test_image_changed_to_build')
self.addCleanup(context.remove)
context.join('Dockerfile').write("""
FROM busybox
LABEL com.docker.compose.test_image=true
""")
context = tempfile.mkdtemp('test_image_changed_to_build')
self.addCleanup(shutil.rmtree, context)
with open(os.path.join(context, 'Dockerfile'), mode="w") as dockerfile:
dockerfile.write("""
FROM busybox
LABEL com.docker.compose.test_image=true
""")
web = self.create_service('web', image='busybox')
container = web.create_container()


@@ -1,233 +0,0 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import docker
import pytest
from .. import mock
from compose import bundle
from compose import service
from compose.cli.errors import UserError
from compose.config.config import Config
from compose.const import COMPOSEFILE_V2_0 as V2_0
from compose.service import NoSuchImageError
@pytest.fixture
def mock_service():
return mock.create_autospec(
service.Service,
client=mock.create_autospec(docker.APIClient),
options={})
def test_get_image_digest_exists(mock_service):
mock_service.options['image'] = 'abcd'
mock_service.image.return_value = {'RepoDigests': ['digest1']}
digest = bundle.get_image_digest(mock_service)
assert digest == 'digest1'
def test_get_image_digest_image_uses_digest(mock_service):
mock_service.options['image'] = image_id = 'redis@sha256:digest'
digest = bundle.get_image_digest(mock_service)
assert digest == image_id
assert not mock_service.image.called
def test_get_image_digest_from_repository(mock_service):
mock_service.options['image'] = 'abcd'
mock_service.image_name = 'abcd'
mock_service.image.side_effect = NoSuchImageError(None)
mock_service.get_image_registry_data.return_value = {'Descriptor': {'digest': 'digest'}}
digest = bundle.get_image_digest(mock_service)
assert digest == 'abcd@digest'
def test_get_image_digest_no_image(mock_service):
with pytest.raises(UserError) as exc:
bundle.get_image_digest(service.Service(name='theservice'))
assert "doesn't define an image tag" in exc.exconly()
def test_push_image_with_saved_digest(mock_service):
mock_service.options['build'] = '.'
mock_service.options['image'] = image_id = 'abcd'
mock_service.push.return_value = expected = 'sha256:thedigest'
mock_service.image.return_value = {'RepoDigests': ['digest1']}
digest = bundle.push_image(mock_service)
assert digest == image_id + '@' + expected
mock_service.push.assert_called_once_with()
assert not mock_service.client.push.called
def test_push_image(mock_service):
mock_service.options['build'] = '.'
mock_service.options['image'] = image_id = 'abcd'
mock_service.push.return_value = expected = 'sha256:thedigest'
mock_service.image.return_value = {'RepoDigests': []}
digest = bundle.push_image(mock_service)
assert digest == image_id + '@' + expected
mock_service.push.assert_called_once_with()
mock_service.client.pull.assert_called_once_with(digest)
def test_to_bundle():
image_digests = {'a': 'aaaa', 'b': 'bbbb'}
services = [
{'name': 'a', 'build': '.', },
{'name': 'b', 'build': './b'},
]
config = Config(
version=V2_0,
services=services,
volumes={'special': {}},
networks={'extra': {}},
secrets={},
configs={}
)
with mock.patch('compose.bundle.log.warning', autospec=True) as mock_log:
output = bundle.to_bundle(config, image_digests)
assert mock_log.mock_calls == [
mock.call("Unsupported top level key 'networks' - ignoring"),
mock.call("Unsupported top level key 'volumes' - ignoring"),
]
assert output == {
'Version': '0.1',
'Services': {
'a': {'Image': 'aaaa', 'Networks': ['default']},
'b': {'Image': 'bbbb', 'Networks': ['default']},
}
}
def test_convert_service_to_bundle():
name = 'theservice'
image_digest = 'thedigest'
service_dict = {
'ports': ['80'],
'expose': ['1234'],
'networks': {'extra': {}},
'command': 'foo',
'entrypoint': 'entry',
'environment': {'BAZ': 'ENV'},
'build': '.',
'working_dir': '/tmp',
'user': 'root',
'labels': {'FOO': 'LABEL'},
'privileged': True,
}
with mock.patch('compose.bundle.log.warning', autospec=True) as mock_log:
config = bundle.convert_service_to_bundle(name, service_dict, image_digest)
mock_log.assert_called_once_with(
"Unsupported key 'privileged' in services.theservice - ignoring")
assert config == {
'Image': image_digest,
'Ports': [
{'Protocol': 'tcp', 'Port': 80},
{'Protocol': 'tcp', 'Port': 1234},
],
'Networks': ['extra'],
'Command': ['entry', 'foo'],
'Env': ['BAZ=ENV'],
'WorkingDir': '/tmp',
'User': 'root',
'Labels': {'FOO': 'LABEL'},
}
def test_set_command_and_args_none():
config = {}
bundle.set_command_and_args(config, [], [])
assert config == {}
def test_set_command_and_args_from_command():
config = {}
bundle.set_command_and_args(config, [], "echo ok")
assert config == {'Args': ['echo', 'ok']}
def test_set_command_and_args_from_entrypoint():
config = {}
bundle.set_command_and_args(config, "echo entry", [])
assert config == {'Command': ['echo', 'entry']}
def test_set_command_and_args_from_both():
config = {}
bundle.set_command_and_args(config, "echo entry", ["extra", "arg"])
assert config == {'Command': ['echo', 'entry', "extra", "arg"]}
def test_make_service_networks_default():
name = 'theservice'
service_dict = {}
with mock.patch('compose.bundle.log.warning', autospec=True) as mock_log:
networks = bundle.make_service_networks(name, service_dict)
assert not mock_log.called
assert networks == ['default']
def test_make_service_networks():
name = 'theservice'
service_dict = {
'networks': {
'foo': {
'aliases': ['one', 'two'],
},
'bar': {}
},
}
with mock.patch('compose.bundle.log.warning', autospec=True) as mock_log:
networks = bundle.make_service_networks(name, service_dict)
mock_log.assert_called_once_with(
"Unsupported key 'aliases' in services.theservice.networks.foo - ignoring")
assert sorted(networks) == sorted(service_dict['networks'])
def test_make_port_specs():
service_dict = {
'expose': ['80', '500/udp'],
'ports': [
'400:80',
'222',
'127.0.0.1:8001:8001',
'127.0.0.1:5000-5001:3000-3001'],
}
port_specs = bundle.make_port_specs(service_dict)
assert port_specs == [
{'Protocol': 'tcp', 'Port': 80},
{'Protocol': 'tcp', 'Port': 222},
{'Protocol': 'tcp', 'Port': 8001},
{'Protocol': 'tcp', 'Port': 3000},
{'Protocol': 'tcp', 'Port': 3001},
{'Protocol': 'udp', 'Port': 500},
]
def test_make_port_spec_with_protocol():
port_spec = bundle.make_port_spec("5000/udp")
assert port_spec == {'Protocol': 'udp', 'Port': 5000}
def test_make_port_spec_default_protocol():
port_spec = bundle.make_port_spec("50000")
assert port_spec == {'Protocol': 'tcp', 'Port': 50000}


@@ -12,7 +12,7 @@ from compose.cli.formatter import ConsoleWarningFormatter
from compose.cli.main import build_one_off_container_options
from compose.cli.main import call_docker
from compose.cli.main import convergence_strategy_from_opts
from compose.cli.main import filter_containers_to_service_names
from compose.cli.main import filter_attached_containers
from compose.cli.main import get_docker_start_call
from compose.cli.main import setup_console_handler
from compose.cli.main import warn_for_swarm_mode
@@ -37,7 +37,7 @@ def logging_handler():
class TestCLIMainTestCase(object):
def test_filter_containers_to_service_names(self):
def test_filter_attached_containers(self):
containers = [
mock_container('web', 1),
mock_container('web', 2),
@@ -46,17 +46,29 @@ class TestCLIMainTestCase(object):
mock_container('another', 1),
]
service_names = ['web', 'db']
actual = filter_containers_to_service_names(containers, service_names)
actual = filter_attached_containers(containers, service_names)
assert actual == containers[:3]
def test_filter_containers_to_service_names_all(self):
def test_filter_attached_containers_with_dependencies(self):
containers = [
mock_container('web', 1),
mock_container('web', 2),
mock_container('db', 1),
mock_container('other', 1),
mock_container('another', 1),
]
service_names = ['web', 'db']
actual = filter_attached_containers(containers, service_names, attach_dependencies=True)
assert actual == containers
def test_filter_attached_containers_all(self):
containers = [
mock_container('web', 1),
mock_container('db', 1),
mock_container('other', 1),
]
service_names = []
actual = filter_containers_to_service_names(containers, service_names)
actual = filter_attached_containers(containers, service_names)
assert actual == containers
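Taken together, the three tests pin down the renamed helper's contract: explicit service names restrict which containers get attached, unless attach_dependencies is set or no names were given at all, in which case everything is attached. A hypothetical reconstruction consistent with these assertions (the real implementation lives in compose.cli.main):

    def filter_attached_containers(containers, service_names, attach_dependencies=False):
        # assumed shape, inferred from the three tests above
        if attach_dependencies or not service_names:
            return containers
        return [c for c in containers if c.service in service_names]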
def test_warning_in_swarm_mode(self):


@@ -10,12 +10,14 @@ import tempfile
from operator import itemgetter
from random import shuffle
import py
import pytest
import yaml
from ddt import data
from ddt import ddt
from ...helpers import build_config_details
from ...helpers import BUSYBOX_IMAGE_WITH_TAG
from ...helpers import cd
from compose.config import config
from compose.config import types
from compose.config.config import ConfigFile
@@ -68,6 +70,7 @@ def secret_sort(secrets):
return sorted(secrets, key=itemgetter('source'))
@ddt
class ConfigTest(unittest.TestCase):
def test_load(self):
@@ -777,13 +780,14 @@ class ConfigTest(unittest.TestCase):
})
details = config.ConfigDetails('.', [base_file, override_file])
tmpdir = py.test.ensuretemp('config_test')
self.addCleanup(tmpdir.remove)
tmpdir.join('common.yml').write("""
base:
labels: ['label=one']
""")
with tmpdir.as_cwd():
tmpdir = tempfile.mkdtemp('config_test')
self.addCleanup(shutil.rmtree, tmpdir)
with open(os.path.join(tmpdir, 'common.yml'), mode="w") as common_fh:
common_fh.write("""
base:
labels: ['label=one']
""")
with cd(tmpdir):
service_dicts = config.load(details).services
expected = [
@@ -812,19 +816,20 @@ class ConfigTest(unittest.TestCase):
}
)
tmpdir = pytest.ensuretemp('config_test')
self.addCleanup(tmpdir.remove)
tmpdir.join('base.yml').write("""
version: '2.2'
services:
base:
image: base
web:
extends: base
""")
tmpdir = tempfile.mkdtemp('config_test')
self.addCleanup(shutil.rmtree, tmpdir)
with open(os.path.join(tmpdir, 'base.yml'), mode="w") as base_fh:
base_fh.write("""
version: '2.2'
services:
base:
image: base
web:
extends: base
""")
details = config.ConfigDetails('.', [main_file])
with tmpdir.as_cwd():
with cd(tmpdir):
service_dicts = config.load(details).services
assert service_dicts[0] == {
'name': 'prodweb',
@@ -1762,22 +1767,23 @@ class ConfigTest(unittest.TestCase):
assert services[0]['environment']['SPRING_JPA_HIBERNATE_DDL-AUTO'] == 'none'
def test_load_yaml_with_yaml_error(self):
tmpdir = py.test.ensuretemp('invalid_yaml_test')
self.addCleanup(tmpdir.remove)
invalid_yaml_file = tmpdir.join('docker-compose.yml')
invalid_yaml_file.write("""
web:
this is bogus: ok: what
""")
tmpdir = tempfile.mkdtemp('invalid_yaml_test')
self.addCleanup(shutil.rmtree, tmpdir)
invalid_yaml_file = os.path.join(tmpdir, 'docker-compose.yml')
with open(invalid_yaml_file, mode="w") as invalid_yaml_file_fh:
invalid_yaml_file_fh.write("""
web:
this is bogus: ok: what
""")
with pytest.raises(ConfigurationError) as exc:
config.load_yaml(str(invalid_yaml_file))
assert 'line 3, column 32' in exc.exconly()
assert 'line 3, column 22' in exc.exconly()
def test_load_yaml_with_bom(self):
tmpdir = py.test.ensuretemp('bom_yaml')
self.addCleanup(tmpdir.remove)
bom_yaml = tmpdir.join('docker-compose.yml')
tmpdir = tempfile.mkdtemp('bom_yaml')
self.addCleanup(shutil.rmtree, tmpdir)
bom_yaml = os.path.join(tmpdir, 'docker-compose.yml')
with codecs.open(str(bom_yaml), 'w', encoding='utf-8') as f:
f.write('''\ufeff
version: '2.3'
@@ -1885,6 +1891,26 @@ class ConfigTest(unittest.TestCase):
}
]
@data(
'2 ',
'3.',
'3.0.0',
'3.0.a',
'3.a',
'3a')
def test_invalid_version_formats(self, version):
content = {
'version': version,
'services': {
'web': {
'image': 'alpine',
}
}
}
with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details(content))
assert 'Version "{}" in "filename.yml" is invalid.'.format(version) in exc.exconly()
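Every entry in the @data list must be rejected: trailing whitespace, a dangling dot, patch-level versions, and non-numeric components are all invalid, while two-component strings like the '2.3' and '3.5' used elsewhere in these tests load fine. A hypothetical validator matching exactly that split (not the code under test):

    import re

    # assumed format: major digits, optional '.minor', and nothing else
    VERSION_RE = re.compile(r'^[1-9]\d*(\.\d+)?$')

    def is_valid_version(version):
        return bool(VERSION_RE.match(version))

    assert not any(is_valid_version(v) for v in ['2 ', '3.', '3.0.0', '3.0.a', '3.a', '3a'])
    assert all(is_valid_version(v) for v in ['2', '2.3', '3.5'])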
def test_group_add_option(self):
actual = config.load(build_config_details({
'version': '2',
@@ -3611,7 +3637,6 @@ class InterpolationTest(unittest.TestCase):
assert 'labels' in warn_message
assert 'endpoint_mode' in warn_message
assert 'update_config' in warn_message
assert 'placement' in warn_message
assert 'resources.reservations.cpus' in warn_message
assert 'restart_policy.delay' in warn_message
assert 'restart_policy.window' in warn_message
@@ -4701,43 +4726,48 @@ class ExtendsTest(unittest.TestCase):
@mock.patch.dict(os.environ)
def test_extends_with_environment_and_env_files(self):
tmpdir = py.test.ensuretemp('test_extends_with_environment')
self.addCleanup(tmpdir.remove)
commondir = tmpdir.mkdir('common')
commondir.join('base.yml').write("""
app:
image: 'example/app'
env_file:
- 'envs'
environment:
- SECRET
- TEST_ONE=common
- TEST_TWO=common
""")
tmpdir.join('docker-compose.yml').write("""
ext:
extends:
file: common/base.yml
service: app
env_file:
- 'envs'
environment:
- THING
- TEST_ONE=top
""")
commondir.join('envs').write("""
COMMON_ENV_FILE
TEST_ONE=common-env-file
TEST_TWO=common-env-file
TEST_THREE=common-env-file
TEST_FOUR=common-env-file
""")
tmpdir.join('envs').write("""
TOP_ENV_FILE
TEST_ONE=top-env-file
TEST_TWO=top-env-file
TEST_THREE=top-env-file
""")
tmpdir = tempfile.mkdtemp('test_extends_with_environment')
self.addCleanup(shutil.rmtree, tmpdir)
commondir = os.path.join(tmpdir, 'common')
os.mkdir(commondir)
with open(os.path.join(commondir, 'base.yml'), mode="w") as base_fh:
base_fh.write("""
app:
image: 'example/app'
env_file:
- 'envs'
environment:
- SECRET
- TEST_ONE=common
- TEST_TWO=common
""")
with open(os.path.join(tmpdir, 'docker-compose.yml'), mode="w") as docker_compose_fh:
docker_compose_fh.write("""
ext:
extends:
file: common/base.yml
service: app
env_file:
- 'envs'
environment:
- THING
- TEST_ONE=top
""")
with open(os.path.join(commondir, 'envs'), mode="w") as envs_fh:
envs_fh.write("""
COMMON_ENV_FILE
TEST_ONE=common-env-file
TEST_TWO=common-env-file
TEST_THREE=common-env-file
TEST_FOUR=common-env-file
""")
with open(os.path.join(tmpdir, 'envs'), mode="w") as envs_fh:
envs_fh.write("""
TOP_ENV_FILE
TEST_ONE=top-env-file
TEST_TWO=top-env-file
TEST_THREE=top-env-file
""")
expected = [
{
@@ -4760,72 +4790,77 @@ class ExtendsTest(unittest.TestCase):
os.environ['THING'] = 'thing'
os.environ['COMMON_ENV_FILE'] = 'secret'
os.environ['TOP_ENV_FILE'] = 'secret'
config = load_from_filename(str(tmpdir.join('docker-compose.yml')))
config = load_from_filename(str(os.path.join(tmpdir, 'docker-compose.yml')))
assert config == expected
def test_extends_with_mixed_versions_is_error(self):
tmpdir = py.test.ensuretemp('test_extends_with_mixed_version')
self.addCleanup(tmpdir.remove)
tmpdir.join('docker-compose.yml').write("""
version: "2"
services:
web:
extends:
file: base.yml
service: base
image: busybox
""")
tmpdir.join('base.yml').write("""
base:
volumes: ['/foo']
ports: ['3000:3000']
""")
with pytest.raises(ConfigurationError) as exc:
load_from_filename(str(tmpdir.join('docker-compose.yml')))
assert 'Version mismatch' in exc.exconly()
def test_extends_with_defined_version_passes(self):
tmpdir = py.test.ensuretemp('test_extends_with_defined_version')
self.addCleanup(tmpdir.remove)
tmpdir.join('docker-compose.yml').write("""
version: "2"
services:
web:
extends:
file: base.yml
service: base
image: busybox
""")
tmpdir.join('base.yml').write("""
version: "2"
services:
tmpdir = tempfile.mkdtemp('test_extends_with_mixed_version')
self.addCleanup(shutil.rmtree, tmpdir)
with open(os.path.join(tmpdir, 'docker-compose.yml'), mode="w") as docker_compose_fh:
docker_compose_fh.write("""
version: "2"
services:
web:
extends:
file: base.yml
service: base
image: busybox
""")
with open(os.path.join(tmpdir, 'base.yml'), mode="w") as base_fh:
base_fh.write("""
base:
volumes: ['/foo']
ports: ['3000:3000']
command: top
""")
""")
service = load_from_filename(str(tmpdir.join('docker-compose.yml')))
with pytest.raises(ConfigurationError) as exc:
load_from_filename(str(os.path.join(tmpdir, 'docker-compose.yml')))
assert 'Version mismatch' in exc.exconly()
def test_extends_with_defined_version_passes(self):
tmpdir = tempfile.mkdtemp('test_extends_with_defined_version')
self.addCleanup(shutil.rmtree, tmpdir)
with open(os.path.join(tmpdir, 'docker-compose.yml'), mode="w") as docker_compose_fh:
docker_compose_fh.write("""
version: "2"
services:
web:
extends:
file: base.yml
service: base
image: busybox
""")
with open(os.path.join(tmpdir, 'base.yml'), mode="w") as base_fh:
base_fh.write("""
version: "2"
services:
base:
volumes: ['/foo']
ports: ['3000:3000']
command: top
""")
service = load_from_filename(str(os.path.join(tmpdir, 'docker-compose.yml')))
assert service[0]['command'] == "top"
def test_extends_with_depends_on(self):
tmpdir = py.test.ensuretemp('test_extends_with_depends_on')
self.addCleanup(tmpdir.remove)
tmpdir.join('docker-compose.yml').write("""
version: "2"
services:
base:
image: example
web:
extends: base
image: busybox
depends_on: ['other']
other:
image: example
""")
services = load_from_filename(str(tmpdir.join('docker-compose.yml')))
tmpdir = tempfile.mkdtemp('test_extends_with_depends_on')
self.addCleanup(shutil.rmtree, tmpdir)
with open(os.path.join(tmpdir, 'docker-compose.yml'), mode="w") as docker_compose_fh:
docker_compose_fh.write("""
version: "2"
services:
base:
image: example
web:
extends: base
image: busybox
depends_on: ['other']
other:
image: example
""")
services = load_from_filename(str(os.path.join(tmpdir, 'docker-compose.yml')))
assert service_sort(services)[2]['depends_on'] == {
'other': {'condition': 'service_started'}
}
@@ -4844,45 +4879,47 @@ class ExtendsTest(unittest.TestCase):
}]
def test_extends_with_ports(self):
tmpdir = py.test.ensuretemp('test_extends_with_ports')
self.addCleanup(tmpdir.remove)
tmpdir.join('docker-compose.yml').write("""
version: '2'
tmpdir = tempfile.mkdtemp('test_extends_with_ports')
self.addCleanup(shutil.rmtree, tmpdir)
with open(os.path.join(tmpdir, 'docker-compose.yml'), mode="w") as docker_compose_fh:
docker_compose_fh.write("""
version: '2'
services:
a:
image: nginx
ports:
- 80
services:
a:
image: nginx
ports:
- 80
b:
extends:
service: a
""")
services = load_from_filename(str(tmpdir.join('docker-compose.yml')))
b:
extends:
service: a
""")
services = load_from_filename(str(os.path.join(tmpdir, 'docker-compose.yml')))
assert len(services) == 2
for svc in services:
assert svc['ports'] == [types.ServicePort('80', None, None, None, None)]
def test_extends_with_security_opt(self):
tmpdir = py.test.ensuretemp('test_extends_with_ports')
self.addCleanup(tmpdir.remove)
tmpdir.join('docker-compose.yml').write("""
version: '2'
tmpdir = tempfile.mkdtemp('test_extends_with_ports')
self.addCleanup(shutil.rmtree, tmpdir)
with open(os.path.join(tmpdir, 'docker-compose.yml'), mode="w") as docker_compose_fh:
docker_compose_fh.write("""
version: '2'
services:
a:
image: nginx
security_opt:
- apparmor:unconfined
- seccomp:unconfined
services:
a:
image: nginx
security_opt:
- apparmor:unconfined
- seccomp:unconfined
b:
extends:
service: a
""")
services = load_from_filename(str(tmpdir.join('docker-compose.yml')))
b:
extends:
service: a
""")
services = load_from_filename(str(os.path.join(tmpdir, 'docker-compose.yml')))
assert len(services) == 2
for svc in services:
assert types.SecurityOpt.parse('apparmor:unconfined') in svc['security_opt']
@@ -5037,7 +5074,7 @@ class HealthcheckTest(unittest.TestCase):
})
)
serialized_config = yaml.load(serialize_config(config_dict))
serialized_config = yaml.safe_load(serialize_config(config_dict))
serialized_service = serialized_config['services']['test']
assert serialized_service['healthcheck'] == {
@@ -5064,7 +5101,7 @@ class HealthcheckTest(unittest.TestCase):
})
)
serialized_config = yaml.load(serialize_config(config_dict))
serialized_config = yaml.safe_load(serialize_config(config_dict))
serialized_service = serialized_config['services']['test']
assert serialized_service['healthcheck'] == {
@@ -5271,7 +5308,7 @@ class SerializeTest(unittest.TestCase):
'secrets': secrets_dict
}))
serialized_config = yaml.load(serialize_config(config_dict))
serialized_config = yaml.safe_load(serialize_config(config_dict))
serialized_service = serialized_config['services']['web']
assert secret_sort(serialized_service['secrets']) == secret_sort(service_dict['secrets'])
assert 'secrets' in serialized_config
@@ -5286,7 +5323,7 @@ class SerializeTest(unittest.TestCase):
}
], volumes={}, networks={}, secrets={}, configs={})
serialized_config = yaml.load(serialize_config(config_dict))
serialized_config = yaml.safe_load(serialize_config(config_dict))
assert '8080:80/tcp' in serialized_config['services']['web']['ports']
def test_serialize_ports_with_ext_ip(self):
@@ -5298,7 +5335,7 @@ class SerializeTest(unittest.TestCase):
}
], volumes={}, networks={}, secrets={}, configs={})
serialized_config = yaml.load(serialize_config(config_dict))
serialized_config = yaml.safe_load(serialize_config(config_dict))
assert '127.0.0.1:8080:80/tcp' in serialized_config['services']['web']['ports']
def test_serialize_configs(self):
@@ -5326,7 +5363,7 @@ class SerializeTest(unittest.TestCase):
'configs': configs_dict
}))
serialized_config = yaml.load(serialize_config(config_dict))
serialized_config = yaml.safe_load(serialize_config(config_dict))
serialized_service = serialized_config['services']['web']
assert secret_sort(serialized_service['configs']) == secret_sort(service_dict['configs'])
assert 'configs' in serialized_config
@@ -5366,7 +5403,7 @@ class SerializeTest(unittest.TestCase):
}
config_dict = config.load(build_config_details(cfg))
serialized_config = yaml.load(serialize_config(config_dict))
serialized_config = yaml.safe_load(serialize_config(config_dict))
serialized_service = serialized_config['services']['web']
assert serialized_service['environment']['CURRENCY'] == '$$'
assert serialized_service['command'] == 'echo $$FOO'
@@ -5388,7 +5425,7 @@ class SerializeTest(unittest.TestCase):
}
config_dict = config.load(build_config_details(cfg), interpolate=False)
serialized_config = yaml.load(serialize_config(config_dict, escape_dollar=False))
serialized_config = yaml.safe_load(serialize_config(config_dict, escape_dollar=False))
serialized_service = serialized_config['services']['web']
assert serialized_service['environment']['CURRENCY'] == '$'
assert serialized_service['command'] == 'echo $FOO'
@@ -5407,7 +5444,7 @@ class SerializeTest(unittest.TestCase):
config_dict = config.load(build_config_details(cfg))
serialized_config = yaml.load(serialize_config(config_dict))
serialized_config = yaml.safe_load(serialize_config(config_dict))
serialized_service = serialized_config['services']['web']
assert serialized_service['command'] == 'echo 十六夜 咲夜'
@@ -5423,6 +5460,6 @@ class SerializeTest(unittest.TestCase):
}
config_dict = config.load(build_config_details(cfg))
serialized_config = yaml.load(serialize_config(config_dict))
serialized_config = yaml.safe_load(serialize_config(config_dict))
serialized_volume = serialized_config['volumes']['test']
assert serialized_volume['external'] is False


@@ -4,6 +4,9 @@ from __future__ import print_function
from __future__ import unicode_literals
import codecs
import os
import shutil
import tempfile
import pytest
@@ -46,19 +49,19 @@ class EnvironmentTest(unittest.TestCase):
assert env.get_boolean('UNDEFINED') is False
def test_env_vars_from_file_bom(self):
tmpdir = pytest.ensuretemp('env_file')
self.addCleanup(tmpdir.remove)
tmpdir = tempfile.mkdtemp('env_file')
self.addCleanup(shutil.rmtree, tmpdir)
with codecs.open('{}/bom.env'.format(str(tmpdir)), 'w', encoding='utf-8') as f:
f.write('\ufeffPARK_BOM=박봄\n')
assert env_vars_from_file(str(tmpdir.join('bom.env'))) == {
assert env_vars_from_file(str(os.path.join(tmpdir, 'bom.env'))) == {
'PARK_BOM': '박봄'
}
def test_env_vars_from_file_whitespace(self):
tmpdir = pytest.ensuretemp('env_file')
self.addCleanup(tmpdir.remove)
tmpdir = tempfile.mkdtemp('env_file')
self.addCleanup(shutil.rmtree, tmpdir)
with codecs.open('{}/whitespace.env'.format(str(tmpdir)), 'w', encoding='utf-8') as f:
f.write('WHITESPACE =yes\n')
with pytest.raises(ConfigurationError) as exc:
env_vars_from_file(str(tmpdir.join('whitespace.env')))
env_vars_from_file(str(os.path.join(tmpdir, 'whitespace.env')))
assert 'environment variable' in exc.exconly()
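The whitespace test documents that a variable name with trailing whitespace before '=' is a hard ConfigurationError rather than being silently trimmed, while the BOM test shows a leading byte-order mark is tolerated. A hypothetical line parser consistent with both behaviours (not the real env_vars_from_file):

    def parse_env_line(line):
        # hedged sketch: strip a UTF-8 BOM, but refuse whitespace in the key
        line = line.lstrip('\ufeff').rstrip('\n')
        key, sep, value = line.partition('=')
        if sep and key != key.strip():
            raise ValueError(
                'environment variable name {!r} may not contain whitespace'.format(key))
        return (key, value) if sep else None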