Mirror of https://github.com/docker/compose.git, synced 2026-02-10 10:39:23 +08:00

Compare commits (358 commits)
.circleci/config.yml
@@ -10,10 +10,10 @@ jobs:
           command: ./script/setup/osx
       - run:
           name: install tox
-          command: sudo pip install --upgrade tox==2.1.1
+          command: sudo pip install --upgrade tox==2.1.1 virtualenv==16.2.0
       - run:
           name: unit tests
-          command: tox -e py27,py36,py37 -- tests/unit
+          command: tox -e py27,py37 -- tests/unit

   build-osx-binary:
     macos:
@@ -22,7 +22,7 @@ jobs:
       - checkout
       - run:
           name: upgrade python tools
-          command: sudo pip install --upgrade pip virtualenv
+          command: sudo pip install --upgrade pip virtualenv==16.2.0
       - run:
           name: setup script
           command: DEPLOYMENT_TARGET=10.11 ./script/setup/osx
.fossa.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
+# Generated by FOSSA CLI (https://github.com/fossas/fossa-cli)
+# Visit https://fossa.io to learn more
+
+version: 2
+cli:
+  server: https://app.fossa.io
+  fetcher: custom
+  project: git@github.com:docker/compose
+analyze:
+  modules:
+  - name: .
+    type: pip
+    target: .
+    path: .
.github/ISSUE_TEMPLATE/bug_report.md (new file, 63 lines)
@@ -0,0 +1,63 @@
+---
+name: Bug report
+about: Report a bug encountered while using docker-compose
+title: ''
+labels: kind/bug
+assignees: ''
+
+---
+
+<!--
+Welcome to the docker-compose issue tracker! Before creating an issue, please heed the following:
+
+1. This tracker should only be used to report bugs and request features / enhancements to docker-compose
+    - For questions and general support, use https://forums.docker.com
+    - For documentation issues, use https://github.com/docker/docker.github.io
+    - For issues with the `docker stack` commands and the version 3 of the Compose file, use
+      https://github.com/docker/cli
+2. Use the search function before creating a new issue. Duplicates will be closed and directed to
+   the original discussion.
+3. When making a bug report, make sure you provide all required information. The easier it is for
+   maintainers to reproduce, the faster it'll be fixed.
+-->
+
+## Description of the issue
+
+## Context information (for bug reports)
+
+**Output of `docker-compose version`**
+```
+(paste here)
+```
+
+**Output of `docker version`**
+```
+(paste here)
+```
+
+**Output of `docker-compose config`**
+(Make sure to add the relevant `-f` and other flags)
+```
+(paste here)
+```
+
+
+## Steps to reproduce the issue
+
+1.
+2.
+3.
+
+### Observed result
+
+### Expected result
+
+### Stacktrace / full error message
+
+```
+(paste here)
+```
+
+## Additional information
+
+OS version / distribution, `docker-compose` install method, etc.
.github/ISSUE_TEMPLATE/feature_request.md (new file, 32 lines)
@@ -0,0 +1,32 @@
+---
+name: Feature request
+about: Suggest an idea to improve Compose
+title: ''
+labels: kind/feature
+assignees: ''
+
+---
+
+<!--
+Welcome to the docker-compose issue tracker! Before creating an issue, please heed the following:
+
+1. This tracker should only be used to report bugs and request features / enhancements to docker-compose
+    - For questions and general support, use https://forums.docker.com
+    - For documentation issues, use https://github.com/docker/docker.github.io
+    - For issues with the `docker stack` commands and the version 3 of the Compose file, use
+      https://github.com/docker/cli
+2. Use the search function before creating a new issue. Duplicates will be closed and directed to
+   the original discussion.
+-->
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
.github/ISSUE_TEMPLATE/question-about-using-compose.md (new file, 12 lines)
@@ -0,0 +1,12 @@
+---
+name: Question about using Compose
+about: This is not the appropriate channel
+title: ''
+labels: kind/question
+assignees: ''
+
+---
+
+Please post on our forums: https://forums.docker.com for questions about using `docker-compose`.
+
+Posts that are not a bug report or a feature/enhancement request will not be addressed on this issue tracker.
.github/stale.yml (new file, 59 lines)
@@ -0,0 +1,59 @@
+# Configuration for probot-stale - https://github.com/probot/stale
+
+# Number of days of inactivity before an Issue or Pull Request becomes stale
+daysUntilStale: 180
+
+# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
+# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
+daysUntilClose: 7
+
+# Only issues or pull requests with all of these labels are checked if stale. Defaults to `[]` (disabled)
+onlyLabels: []
+
+# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
+exemptLabels:
+  - kind/feature
+
+# Set to true to ignore issues in a project (defaults to false)
+exemptProjects: false
+
+# Set to true to ignore issues in a milestone (defaults to false)
+exemptMilestones: false
+
+# Set to true to ignore issues with an assignee (defaults to false)
+exemptAssignees: true
+
+# Label to use when marking as stale
+staleLabel: stale
+
+# Comment to post when marking as stale. Set to `false` to disable
+markComment: >
+  This issue has been automatically marked as stale because it has not had
+  recent activity. It will be closed if no further activity occurs. Thank you
+  for your contributions.
+
+# Comment to post when removing the stale label.
+unmarkComment: >
+  This issue has been automatically marked as not stale anymore due to the recent activity.
+
+# Comment to post when closing a stale Issue or Pull Request.
+closeComment: >
+  This issue has been automatically closed because it had no recent activity during the stale period.
+
+# Limit the number of actions per hour, from 1-30. Default is 30
+limitPerRun: 30
+
+# Limit to only `issues` or `pulls`
+only: issues
+
+# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
+# pulls:
+#   daysUntilStale: 30
+#   markComment: >
+#     This pull request has been automatically marked as stale because it has not had
+#     recent activity. It will be closed if no further activity occurs. Thank you
+#     for your contributions.
+
+# issues:
+#   exemptLabels:
+#     - confirmed
.pre-commit-config.yaml
@@ -14,7 +14,7 @@
   - id: requirements-txt-fixer
   - id: trailing-whitespace
 - repo: git://github.com/asottile/reorder_python_imports
-  sha: v0.3.5
+  sha: v1.3.4
   hooks:
   - id: reorder-python-imports
     language_version: 'python2.7'
CHANGELOG.md (76 changed lines)
@@ -1,6 +1,82 @@
 Change log
 ==========

+1.24.0 (2019-03-28)
+-------------------
+
+### Features
+
+- Added support for connecting to the Docker Engine using the `ssh` protocol.
+
+- Added a `--all` flag to `docker-compose ps` to include stopped one-off containers
+  in the command's output.
+
+- Add bash completion for `ps --all|-a`
+
+- Support for credential_spec
+
+- Add `--parallel` to `docker build`'s options in `bash` and `zsh` completion
+
+### Bugfixes
+
+- Fixed a bug where some valid credential helpers weren't properly handled by Compose
+  when attempting to pull images from private registries.
+
+- Fixed an issue where the output of `docker-compose start` before containers were created
+  was misleading
+
+- To match the Docker CLI behavior and to avoid confusing issues, Compose will no longer
+  accept whitespace in variable names sourced from environment files.
+
+- Compose will now report a configuration error if a service attempts to declare
+  duplicate mount points in the volumes section.
+
+- Fixed an issue with the containerized version of Compose that prevented users from
+  writing to stdin during interactive sessions started by `run` or `exec`.
+
+- One-off containers started by `run` no longer adopt the restart policy of the service,
+  and are instead set to never restart.
+
+- Fixed an issue that caused some container events to not appear in the output of
+  the `docker-compose events` command.
+
+- Missing images will no longer stop the execution of `docker-compose down` commands
+  (a warning will be displayed instead).
+
+- Force `virtualenv` version for macOS CI
+
+- Fix merging of compose files when network has `None` config
+
+- Fix `CTRL+C` issues by enabling `bootloader_ignore_signals` in `pyinstaller`
+
+- Bump `docker-py` version to `3.7.2` to fix SSH and proxy config issues
+
+- Fix release script and some typos on release documentation
+
+1.23.2 (2018-11-28)
+-------------------
+
+### Bugfixes
+
+- Reverted a 1.23.0 change that appended random strings to container names
+  created by `docker-compose up`, causing addressability issues.
+  Note: Containers created by `docker-compose run` will continue to use
+  randomly generated names to avoid collisions during parallel runs.
+
+- Fixed an issue where some `dockerfile` paths would fail unexpectedly when
+  attempting to build on Windows.
+
+- Fixed a bug where build context URLs would fail to build on Windows.
+
+- Fixed a bug that caused `run` and `exec` commands to fail for some otherwise
+  accepted values of the `--host` parameter.
+
+- Fixed an issue where overrides for the `storage_opt` and `isolation` keys in
+  service definitions weren't properly applied.
+
+- Fixed a bug where some invalid Compose files would raise an uncaught
+  exception during validation.
+
 1.23.1 (2018-11-01)
 -------------------
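The headline 1.24.0 feature, `ssh` protocol support, rides on the `docker-py` bump noted in the bugfix list: once the client is constructed with an `ssh://` host URL, the rest of Compose is unchanged. A minimal sketch against the docker SDK directly (the host is a placeholder, and `paramiko` must be installed for SSH support):

```python
# Sketch: talking to a remote Docker Engine over SSH with docker-py >= 3.7,
# the same mechanism Compose 1.24 uses when DOCKER_HOST=ssh://... is set.
# "user@example.com" is a placeholder; key-based SSH auth is assumed.
import docker

client = docker.DockerClient(base_url="ssh://user@example.com")
print(client.version()["Version"])  # round-trips over the SSH tunnel
```

The same applies to the CLI: exporting `DOCKER_HOST=ssh://user@example.com` (or passing `-H`) makes `docker-compose up` deploy against the remote engine.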
Dockerfile (91 changed lines)
@@ -1,33 +1,74 @@
-FROM docker:18.06.1 as docker
-FROM python:3.6
+ARG DOCKER_VERSION=18.09.7
+ARG PYTHON_VERSION=3.7.4
+ARG BUILD_ALPINE_VERSION=3.10
+ARG BUILD_DEBIAN_VERSION=slim-stretch
+ARG RUNTIME_ALPINE_VERSION=3.10.1
+ARG RUNTIME_DEBIAN_VERSION=stretch-20190812-slim
+
+ARG BUILD_PLATFORM=alpine

-RUN set -ex; \
-    apt-get update -qq; \
-    apt-get install -y \
-        locales \
-        python-dev \
-        git
+FROM docker:${DOCKER_VERSION} AS docker-cli

-COPY --from=docker /usr/local/bin/docker /usr/local/bin/docker
+FROM python:${PYTHON_VERSION}-alpine${BUILD_ALPINE_VERSION} AS build-alpine
+RUN apk add --no-cache \
+    bash \
+    build-base \
+    ca-certificates \
+    curl \
+    gcc \
+    git \
+    libc-dev \
+    libffi-dev \
+    libgcc \
+    make \
+    musl-dev \
+    openssl \
+    openssl-dev \
+    python2 \
+    python2-dev \
+    zlib-dev
+ENV BUILD_BOOTLOADER=1

-# Python3 requires a valid locale
-RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
-ENV LANG en_US.UTF-8
+FROM python:${PYTHON_VERSION}-${BUILD_DEBIAN_VERSION} AS build-debian
+RUN apt-get update && apt-get install --no-install-recommends -y \
+    curl \
+    gcc \
+    git \
+    libc-dev \
+    libffi-dev \
+    libgcc-6-dev \
+    libssl-dev \
+    make \
+    openssl \
+    python2.7-dev \
+    zlib1g-dev

-RUN useradd -d /home/user -m -s /bin/bash user
+FROM build-${BUILD_PLATFORM} AS build
+COPY docker-compose-entrypoint.sh /usr/local/bin/
+ENTRYPOINT ["sh", "/usr/local/bin/docker-compose-entrypoint.sh"]
+COPY --from=docker-cli /usr/local/bin/docker /usr/local/bin/docker
 WORKDIR /code/
+# FIXME(chris-crone): virtualenv 16.3.0 breaks build, force 16.2.0 until fixed
+RUN pip install virtualenv==16.2.0
+RUN pip install tox==2.9.1

-RUN pip install tox==2.1.1
-
-ADD requirements.txt /code/
-ADD requirements-dev.txt /code/
-ADD .pre-commit-config.yaml /code/
-ADD setup.py /code/
-ADD tox.ini /code/
-ADD compose /code/compose/
+COPY requirements.txt .
+COPY requirements-dev.txt .
+COPY .pre-commit-config.yaml .
+COPY tox.ini .
+COPY setup.py .
+COPY README.md .
+COPY compose compose/
 RUN tox --notest
+COPY . .
+ARG GIT_COMMIT=unknown
+ENV DOCKER_COMPOSE_GITSHA=$GIT_COMMIT
+RUN script/build/linux-entrypoint

-ADD . /code/
-RUN chown -R user /code/
-
-ENTRYPOINT ["/code/.tox/py36/bin/docker-compose"]
+FROM alpine:${RUNTIME_ALPINE_VERSION} AS runtime-alpine
+FROM debian:${RUNTIME_DEBIAN_VERSION} AS runtime-debian
+FROM runtime-${BUILD_PLATFORM} AS runtime
+COPY docker-compose-entrypoint.sh /usr/local/bin/
+ENTRYPOINT ["sh", "/usr/local/bin/docker-compose-entrypoint.sh"]
+COPY --from=docker-cli /usr/local/bin/docker /usr/local/bin/docker
+COPY --from=build /usr/local/bin/docker-compose /usr/local/bin/docker-compose
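The rewritten Dockerfile keys everything off the `BUILD_PLATFORM` build argument (`alpine` by default, `debian` as the alternative), with separate `build` and `runtime` stages. A sketch of selecting the Debian flavor from Python via the docker SDK (the tag is illustrative):

```python
# Sketch: building the Debian flavor of the multi-stage image with docker-py,
# assuming this Dockerfile sits in the current directory. The tag is a placeholder.
import docker

client = docker.from_env()
image, logs = client.images.build(
    path=".",
    tag="compose-local:debian",
    target="runtime",                        # stop at the final runtime stage
    buildargs={"BUILD_PLATFORM": "debian"},  # pick the Debian build/runtime pair
)
for chunk in logs:
    if "stream" in chunk:
        print(chunk["stream"], end="")
```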
Dockerfile.armhf (deleted, 39 lines)
@@ -1,39 +0,0 @@
-FROM python:3.6
-
-RUN set -ex; \
-    apt-get update -qq; \
-    apt-get install -y \
-        locales \
-        curl \
-        python-dev \
-        git
-
-RUN curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/armhf/docker-17.12.0-ce.tgz" && \
-    SHA256=f8de6378dad825b9fd5c3c2f949e791d22f918623c27a72c84fd6975a0e5d0a2; \
-    echo "${SHA256} dockerbins.tgz" | sha256sum -c - && \
-    tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
-    mv docker /usr/local/bin/docker && \
-    chmod +x /usr/local/bin/docker && \
-    rm dockerbins.tgz
-
-# Python3 requires a valid locale
-RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
-ENV LANG en_US.UTF-8
-
-RUN useradd -d /home/user -m -s /bin/bash user
-WORKDIR /code/
-
-RUN pip install tox==2.1.1
-
-ADD requirements.txt /code/
-ADD requirements-dev.txt /code/
-ADD .pre-commit-config.yaml /code/
-ADD setup.py /code/
-ADD tox.ini /code/
-ADD compose /code/compose/
-RUN tox --notest
-
-ADD . /code/
-RUN chown -R user /code/
-
-ENTRYPOINT ["/code/.tox/py36/bin/docker-compose"]

Dockerfile.run (deleted, 19 lines)
@@ -1,19 +0,0 @@
-FROM docker:18.06.1 as docker
-FROM alpine:3.8
-
-ENV GLIBC 2.28-r0
-
-RUN apk update && apk add --no-cache openssl ca-certificates curl libgcc && \
-    curl -fsSL -o /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub && \
-    curl -fsSL -o glibc-$GLIBC.apk https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIBC/glibc-$GLIBC.apk && \
-    apk add --no-cache glibc-$GLIBC.apk && \
-    ln -s /lib/libz.so.1 /usr/glibc-compat/lib/ && \
-    ln -s /lib/libc.musl-x86_64.so.1 /usr/glibc-compat/lib && \
-    ln -s /usr/lib/libgcc_s.so.1 /usr/glibc-compat/lib && \
-    rm /etc/apk/keys/sgerrand.rsa.pub glibc-$GLIBC.apk && \
-    apk del curl
-
-COPY --from=docker /usr/local/bin/docker /usr/local/bin/docker
-COPY dist/docker-compose-Linux-x86_64 /usr/local/bin/docker-compose
-
-ENTRYPOINT ["docker-compose"]
Dockerfile.s390x
@@ -1,4 +1,4 @@
-FROM s390x/alpine:3.6
+FROM s390x/alpine:3.10.1

 ARG COMPOSE_VERSION=1.16.1
Jenkinsfile (vendored, 60 changed lines)
@@ -1,29 +1,38 @@
 #!groovy

-def image
-
-def buildImage = { ->
-  wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
-    stage("build image") {
+def buildImage = { String baseImage ->
+  def image
+  wrappedNode(label: "ubuntu && amd64 && !zfs", cleanWorkspace: true) {
+    stage("build image for \"${baseImage}\"") {
       checkout(scm)
-      def imageName = "dockerbuildbot/compose:${gitCommit()}"
+      def imageName = "dockerbuildbot/compose:${baseImage}-${gitCommit()}"
       image = docker.image(imageName)
       try {
         image.pull()
       } catch (Exception exc) {
-        image = docker.build(imageName, ".")
-        image.push()
+        sh """GIT_COMMIT=\$(script/build/write-git-sha) && \\
+          docker build -t ${imageName} \\
+            --target build \\
+            --build-arg BUILD_PLATFORM="${baseImage}" \\
+            --build-arg GIT_COMMIT="${GIT_COMMIT}" \\
+            .\\
+        """
+        sh "docker push ${imageName}"
+        echo "${imageName}"
+        return imageName
       }
     }
   }
-  echo "image.id: ${image.id}"
-  return image.id
 }

-def get_versions = { int number ->
+def get_versions = { String imageId, int number ->
   def docker_versions
-  wrappedNode(label: "ubuntu && !zfs") {
+  wrappedNode(label: "ubuntu && amd64 && !zfs") {
     def result = sh(script: """docker run --rm \\
-        --entrypoint=/code/.tox/py27/bin/python \\
-        ${image.id} \\
+        --entrypoint=/code/.tox/py37/bin/python \\
+        ${imageId} \\
         /code/script/test/versions.py -n ${number} docker/docker-ce recent
       """, returnStdout: true
     )
@@ -35,17 +44,19 @@ def get_versions = { int number ->
 def runTests = { Map settings ->
   def dockerVersions = settings.get("dockerVersions", null)
   def pythonVersions = settings.get("pythonVersions", null)
+  def baseImage = settings.get("baseImage", null)
+  def imageName = settings.get("image", null)

   if (!pythonVersions) {
-    throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py27,py36')`")
+    throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py37')`")
   }
   if (!dockerVersions) {
     throw new Exception("Need Docker versions to test. e.g.: `runTests(dockerVersions: 'all')`")
   }

   { ->
-    wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
-      stage("test python=${pythonVersions} / docker=${dockerVersions}") {
+    wrappedNode(label: "ubuntu && amd64 && !zfs", cleanWorkspace: true) {
+      stage("test python=${pythonVersions} / docker=${dockerVersions} / baseImage=${baseImage}") {
         checkout(scm)
         def storageDriver = sh(script: 'docker info | awk -F \': \' \'$1 == "Storage Driver" { print $2; exit }\'', returnStdout: true).trim()
         echo "Using local system's storage driver: ${storageDriver}"
@@ -55,13 +66,13 @@ def runTests = { Map settings ->
           --privileged \\
           --volume="\$(pwd)/.git:/code/.git" \\
           --volume="/var/run/docker.sock:/var/run/docker.sock" \\
-          -e "TAG=${image.id}" \\
+          -e "TAG=${imageName}" \\
           -e "STORAGE_DRIVER=${storageDriver}" \\
           -e "DOCKER_VERSIONS=${dockerVersions}" \\
           -e "BUILD_NUMBER=\$BUILD_TAG" \\
           -e "PY_TEST_VERSIONS=${pythonVersions}" \\
           --entrypoint="script/test/ci" \\
-          ${image.id} \\
+          ${imageName} \\
           --verbose
         """
       }
@@ -69,16 +80,13 @@ def runTests = { Map settings ->
   }
 }

-buildImage()
-
 def testMatrix = [failFast: true]
-def docker_versions = get_versions(2)
-
-for (int i = 0; i < docker_versions.length; i++) {
-  def dockerVersion = docker_versions[i]
-  testMatrix["${dockerVersion}_py27"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py27"])
-  testMatrix["${dockerVersion}_py36"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py36"])
-  testMatrix["${dockerVersion}_py37"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py37"])
+def baseImages = ['alpine', 'debian']
+baseImages.each { baseImage ->
+  def imageName = buildImage(baseImage)
+  get_versions(imageName, 2).each { dockerVersion ->
+    testMatrix["${baseImage}_${dockerVersion}"] = runTests([baseImage: baseImage, image: imageName, dockerVersions: dockerVersion, pythonVersions: 'py37'])
+  }
 }

 parallel(testMatrix)
MAINTAINERS (23 changed lines)
@@ -11,9 +11,8 @@
 [Org]
   [Org."Core maintainers"]
   people = [
-    "mefyl",
-    "mnottale",
-    "shin-",
+    "rumpl",
+    "ulyssessouza",
   ]
   [Org.Alumni]
   people = [
@@ -34,6 +33,10 @@
   # including multi-file support, variable interpolation, secrets
   # emulation and many more
   "dnephin",
+
+  "shin-",
+  "mefyl",
+  "mnottale",
   ]

 [people]
@@ -74,7 +77,17 @@
   Email = "mazz@houseofmnowster.com"
   GitHub = "mnowster"

-[People.shin-]
+[people.rumpl]
+  Name = "Djordje Lukic"
+  Email = "djordje.lukic@docker.com"
+  GitHub = "rumpl"
+
+[people.shin-]
   Name = "Joffrey F"
-  Email = "joffrey@docker.com"
+  Email = "f.joffrey@gmail.com"
   GitHub = "shin-"
+
+[people.ulyssessouza]
+  Name = "Ulysses Domiciano Souza"
+  Email = "ulysses.souza@docker.com"
+  GitHub = "ulyssessouza"
MANIFEST.in
@@ -4,8 +4,7 @@ include requirements.txt
 include requirements-dev.txt
 include tox.ini
 include *.md
-exclude README.md
-include README.rst
+include README.md
 include compose/config/*.json
 include compose/GITSHA
 recursive-include contrib/completion *
README.md (11 changed lines)
@@ -6,11 +6,11 @@ Compose is a tool for defining and running multi-container Docker applications.
 With Compose, you use a Compose file to configure your application's services.
 Then, using a single command, you create and start all the services
 from your configuration. To learn more about all the features of Compose
-see [the list of features](https://github.com/docker/docker.github.io/blob/master/compose/overview.md#features).
+see [the list of features](https://github.com/docker/docker.github.io/blob/master/compose/index.md#features).

 Compose is great for development, testing, and staging environments, as well as
 CI workflows. You can learn more about each case in
-[Common Use Cases](https://github.com/docker/docker.github.io/blob/master/compose/overview.md#common-use-cases).
+[Common Use Cases](https://github.com/docker/docker.github.io/blob/master/compose/index.md#common-use-cases).

 Using Compose is basically a three-step process.
@@ -35,7 +35,7 @@ A `docker-compose.yml` looks like this:
     image: redis

 For more information about the Compose file, see the
-[Compose file reference](https://github.com/docker/docker.github.io/blob/master/compose/compose-file/compose-versioning.md)
+[Compose file reference](https://github.com/docker/docker.github.io/blob/master/compose/compose-file/compose-versioning.md).

 Compose has commands for managing the whole lifecycle of your application:
@@ -48,9 +48,8 @@ Installation and documentation
 ------------------------------

 - Full documentation is available on [Docker's website](https://docs.docker.com/compose/).
 - If you have any questions, you can talk in real-time with other developers in the #docker-compose IRC channel on Freenode. [Click here to join using IRCCloud.](https://www.irccloud.com/invite?hostname=irc.freenode.net&channel=%23docker-compose)
-- Code repository for Compose is on [GitHub](https://github.com/docker/compose)
-- If you find any problems please fill out an [issue](https://github.com/docker/compose/issues/new)
+- Code repository for Compose is on [GitHub](https://github.com/docker/compose).
+- If you find any problems please fill out an [issue](https://github.com/docker/compose/issues/new/choose). Thank you!

 Contributing
 ------------
appveyor.yml
@@ -2,15 +2,15 @@
 version: '{branch}-{build}'

 install:
-  - "SET PATH=C:\\Python36-x64;C:\\Python36-x64\\Scripts;%PATH%"
+  - "SET PATH=C:\\Python37-x64;C:\\Python37-x64\\Scripts;%PATH%"
   - "python --version"
-  - "pip install tox==2.9.1 virtualenv==15.1.0"
+  - "pip install tox==2.9.1 virtualenv==16.2.0"

 # Build the binary after tests
 build: false

 test_script:
-  - "tox -e py27,py36,py37 -- tests/unit"
+  - "tox -e py27,py37 -- tests/unit"
   - ps: ".\\script\\build\\windows.ps1"

 artifacts:
compose/__init__.py
@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals

-__version__ = '1.23.1'
+__version__ = '1.25.0dev'
compose/bundle.py
@@ -95,19 +95,10 @@ def get_image_digest(service, allow_push=False):
     if separator == '@':
         return service.options['image']

-    try:
-        image = service.image()
-    except NoSuchImageError:
-        action = 'build' if 'build' in service.options else 'pull'
-        raise UserError(
-            "Image not found for service '{service}'. "
-            "You might need to run `docker-compose {action} {service}`."
-            .format(service=service.name, action=action))
+    digest = get_digest(service)

-    if image['RepoDigests']:
-        # TODO: pick a digest based on the image tag if there are multiple
-        # digests
-        return image['RepoDigests'][0]
+    if digest:
+        return digest

     if 'build' not in service.options:
         raise NeedsPull(service.image_name, service.name)
@@ -118,6 +109,32 @@ def get_image_digest(service, allow_push=False):
         return push_image(service)


+def get_digest(service):
+    digest = None
+    try:
+        image = service.image()
+        # TODO: pick a digest based on the image tag if there are multiple
+        # digests
+        if image['RepoDigests']:
+            digest = image['RepoDigests'][0]
+    except NoSuchImageError:
+        try:
+            # Fetch the image digest from the registry
+            distribution = service.get_image_registry_data()
+
+            if distribution['Descriptor']['digest']:
+                digest = '{image_name}@{digest}'.format(
+                    image_name=service.image_name,
+                    digest=distribution['Descriptor']['digest']
+                )
+        except NoSuchImageError:
+            raise UserError(
+                "Digest not found for service '{service}'. "
+                "Repository does not exist or may require 'docker login'"
+                .format(service=service.name))
+    return digest
+
+
 def push_image(service):
     try:
         digest = service.push()
@@ -147,10 +164,10 @@ def push_image(service):

 def to_bundle(config, image_digests):
     if config.networks:
-        log.warn("Unsupported top level key 'networks' - ignoring")
+        log.warning("Unsupported top level key 'networks' - ignoring")

     if config.volumes:
-        log.warn("Unsupported top level key 'volumes' - ignoring")
+        log.warning("Unsupported top level key 'volumes' - ignoring")

     config = denormalize_config(config)
@@ -175,7 +192,7 @@ def convert_service_to_bundle(name, service_dict, image_digest):
             continue

         if key not in SUPPORTED_KEYS:
-            log.warn("Unsupported key '{}' in services.{} - ignoring".format(key, name))
+            log.warning("Unsupported key '{}' in services.{} - ignoring".format(key, name))
             continue

         if key == 'environment':
@@ -222,7 +239,7 @@ def make_service_networks(name, service_dict):

     for network_name, network_def in get_network_defs_for_service(service_dict).items():
         for key in network_def.keys():
-            log.warn(
+            log.warning(
                 "Unsupported key '{}' in services.{}.networks.{} - ignoring"
                 .format(key, name, network_name))
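The new `get_digest` helper tries the locally tagged image's `RepoDigests` first and only then falls back to registry metadata. The local half of that lookup maps onto the plain docker SDK roughly as follows (the image name is a placeholder and must exist locally):

```python
# Sketch: the local-image half of get_digest, expressed with docker-py directly.
# "redis:latest" is a placeholder; an image without RepoDigests yields None.
import docker

client = docker.from_env()
image = client.images.get("redis:latest")
repo_digests = image.attrs.get("RepoDigests", [])
digest = repo_digests[0] if repo_digests else None  # source TODO: pick by tag
print(digest)  # e.g. "redis@sha256:..."
```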
compose/cli/colors.py
@@ -41,9 +41,9 @@ for (name, code) in get_pairs():


 def rainbow():
-    cs = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue',
+    cs = ['cyan', 'yellow', 'green', 'magenta', 'blue',
           'intense_cyan', 'intense_yellow', 'intense_green',
-          'intense_magenta', 'intense_red', 'intense_blue']
+          'intense_magenta', 'intense_blue']

     for c in cs:
         yield globals()[c]
compose/cli/command.py
@@ -13,6 +13,9 @@
 from .. import config
 from .. import parallel
 from ..config.environment import Environment
 from ..const import API_VERSIONS
+from ..const import LABEL_CONFIG_FILES
+from ..const import LABEL_ENVIRONMENT_FILE
+from ..const import LABEL_WORKING_DIR
 from ..project import Project
 from .docker_client import docker_client
 from .docker_client import get_tls_version
@@ -21,10 +24,27 @@ from .utils import get_version_info

 log = logging.getLogger(__name__)

+SILENT_COMMANDS = {
+    'events',
+    'exec',
+    'kill',
+    'logs',
+    'pause',
+    'ps',
+    'restart',
+    'rm',
+    'start',
+    'stop',
+    'top',
+    'unpause',
+}

-def project_from_options(project_dir, options):
+
+def project_from_options(project_dir, options, additional_options={}):
     override_dir = options.get('--project-directory')
-    environment = Environment.from_env_file(override_dir or project_dir)
+    environment_file = options.get('--env-file')
+    environment = Environment.from_env_file(override_dir or project_dir, environment_file)
+    environment.silent = options.get('COMMAND', None) in SILENT_COMMANDS
     set_parallel_limit(environment)

     host = options.get('--host')
@@ -40,6 +60,8 @@ def project_from_options(project_dir, options):
         environment=environment,
         override_dir=override_dir,
         compatibility=options.get('--compatibility'),
+        interpolate=(not additional_options.get('--no-interpolate')),
+        environment_file=environment_file
     )
@@ -59,15 +81,17 @@ def set_parallel_limit(environment):
     parallel.GlobalLimit.set_global_limit(parallel_limit)


-def get_config_from_options(base_dir, options):
+def get_config_from_options(base_dir, options, additional_options={}):
     override_dir = options.get('--project-directory')
-    environment = Environment.from_env_file(override_dir or base_dir)
+    environment_file = options.get('--env-file')
+    environment = Environment.from_env_file(override_dir or base_dir, environment_file)
     config_path = get_config_path_from_options(
         base_dir, options, environment
     )
     return config.load(
         config.find(base_dir, config_path, environment, override_dir),
-        options.get('--compatibility')
+        options.get('--compatibility'),
+        not additional_options.get('--no-interpolate')
     )
@@ -105,14 +129,14 @@ def get_client(environment, verbose=False, version=None, tls_config=None, host=None, ...):

 def get_project(project_dir, config_path=None, project_name=None, verbose=False,
                 host=None, tls_config=None, environment=None, override_dir=None,
-                compatibility=False):
+                compatibility=False, interpolate=True, environment_file=None):
     if not environment:
         environment = Environment.from_env_file(project_dir)
     config_details = config.find(project_dir, config_path, environment, override_dir)
     project_name = get_project_name(
         config_details.working_dir, project_name, environment
     )
-    config_data = config.load(config_details, compatibility)
+    config_data = config.load(config_details, compatibility, interpolate)

     api_version = environment.get(
         'COMPOSE_API_VERSION',
@@ -125,10 +149,30 @@ def get_project(project_dir, config_path=None, project_name=None, verbose=False,

     with errors.handle_connection_errors(client):
         return Project.from_config(
-            project_name, config_data, client, environment.get('DOCKER_DEFAULT_PLATFORM')
+            project_name,
+            config_data,
+            client,
+            environment.get('DOCKER_DEFAULT_PLATFORM'),
+            execution_context_labels(config_details, environment_file),
         )


+def execution_context_labels(config_details, environment_file):
+    extra_labels = [
+        '{0}={1}'.format(LABEL_WORKING_DIR, os.path.abspath(config_details.working_dir)),
+        '{0}={1}'.format(LABEL_CONFIG_FILES, config_files_label(config_details)),
+    ]
+    if environment_file is not None:
+        extra_labels.append('{0}={1}'.format(LABEL_ENVIRONMENT_FILE,
+                                             os.path.normpath(environment_file)))
+    return extra_labels
+
+
+def config_files_label(config_details):
+    return ",".join(
+        map(str, (os.path.normpath(c.filename) for c in config_details.config_files)))
+
+
 def get_project_name(working_dir, project_name=None, environment=None):
     def normalize_name(name):
         return re.sub(r'[^-_a-z0-9]', '', name.lower())
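`project_from_options` and `get_config_from_options` now read the `--env-file` value and hand it to `Environment.from_env_file`, and the 1.24.0 changelog adds that variable names containing whitespace are rejected. A hypothetical parser showing the shape of that rule (this is not Compose's actual implementation, which lives under `compose/config`):

```python
# Hypothetical .env-line parser illustrating the "no whitespace in variable
# names" rule from the changelog; Compose's real parser differs in detail.
import re

_VALID_NAME = re.compile(r"^[^\s=]+$")

def parse_env_line(line):
    line = line.strip()
    if not line or line.startswith("#"):
        return None  # skip blanks and comments
    name, _, value = line.partition("=")
    if not _VALID_NAME.match(name):
        raise ValueError("invalid variable name (whitespace?): {!r}".format(name))
    return name, value

print(parse_env_line("COMPOSE_PROJECT_NAME=myapp"))  # ('COMPOSE_PROJECT_NAME', 'myapp')
```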
compose/cli/docker_client.py
@@ -31,7 +31,7 @@ def get_tls_version(environment):

     tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
     if not hasattr(ssl, tls_attr_name):
-        log.warn(
+        log.warning(
             'The "{}" protocol is unavailable. You may need to update your '
             'version of Python or OpenSSL. Falling back to TLSv1 (default).'
             .format(compose_tls_version)
compose/cli/errors.py
@@ -67,7 +67,9 @@ def handle_connection_errors(client):


 def log_windows_pipe_error(exc):
-    if exc.winerror == 232:  # https://github.com/docker/compose/issues/5005
+    if exc.winerror == 2:
+        log.error("Couldn't connect to Docker daemon. You might need to start Docker for Windows.")
+    elif exc.winerror == 232:  # https://github.com/docker/compose/issues/5005
         log.error(
             "The current Compose file version is not compatible with your engine version. "
             "Please upgrade your Compose file to a more recent version, or set "
compose/cli/formatter.py
@@ -2,25 +2,32 @@ from __future__ import absolute_import
 from __future__ import unicode_literals

 import logging
 import os
+import shutil

 import six
 import texttable

 from compose.cli import colors

+if hasattr(shutil, "get_terminal_size"):
+    from shutil import get_terminal_size
+else:
+    from backports.shutil_get_terminal_size import get_terminal_size
+

 def get_tty_width():
-    tty_size = os.popen('stty size 2> /dev/null', 'r').read().split()
-    if len(tty_size) != 2:
+    try:
+        width, _ = get_terminal_size()
+        return int(width)
+    except OSError:
         return 0
-    _, width = tty_size
-    return int(width)


-class Formatter(object):
+class Formatter:
     """Format tabular data for printing."""
-    def table(self, headers, rows):
+
+    @staticmethod
+    def table(headers, rows):
         table = texttable.Texttable(max_width=get_tty_width())
         table.set_cols_dtype(['t' for h in headers])
         table.add_rows([headers] + rows)
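With `table` now a static method, call sites drop the throwaway instance (`Formatter().table(...)` becomes `Formatter.table(...)`). A standalone sketch of the texttable rendering it wraps, with made-up sample data:

```python
# Sketch of the texttable-based rendering behind Formatter.table;
# the headers and rows here are sample data.
import texttable

def render_table(headers, rows, width=80):
    table = texttable.Texttable(max_width=width)
    table.set_cols_dtype(['t' for _ in headers])  # treat every column as text
    table.add_rows([headers] + rows)
    return table.draw()

print(render_table(["Name", "State"], [["web_1", "Up"], ["db_1", "Exited"]]))
```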
compose/cli/log_printer.py
@@ -134,7 +134,10 @@ def build_thread(container, presenter, queue, log_args):
 def build_thread_map(initial_containers, presenters, thread_args):
     return {
         container.id: build_thread(container, next(presenters), *thread_args)
-        for container in initial_containers
+        # Container order is unspecified, so they are sorted by name in order to make
+        # container:presenter (log color) assignment deterministic when given a list of containers
+        # with the same names.
+        for container in sorted(initial_containers, key=lambda c: c.name)
     }
@@ -230,13 +233,20 @@ def watch_events(thread_map, event_stream, presenters, thread_args):

             # Container crashed so we should reattach to it
             if event['id'] in crashed_containers:
-                event['container'].attach_log_stream()
+                container = event['container']
+                if not container.is_restarting:
+                    try:
+                        container.attach_log_stream()
+                    except APIError:
+                        # Just ignore errors when reattaching to already crashed containers
+                        pass
                 crashed_containers.remove(event['id'])

             thread_map[event['id']] = build_thread(
                 event['container'],
                 next(presenters),
-                *thread_args)
+                *thread_args
+            )


 def consume_queue(queue, cascade_stop):
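The `build_thread_map` change exists because presenters (log colors) come from a cycling generator, so the container-to-color pairing used to depend on arrival order. A toy illustration of why sorting first makes it deterministic (names and colors are made up):

```python
# Toy sketch: presenters are handed out by a cycling generator, so pairing
# depends entirely on iteration order; sorting makes it stable across runs.
from itertools import cycle

containers = ["worker_1", "web_1", "db_1"]   # arrival order is unspecified
presenters = cycle(["cyan", "yellow", "green"])

thread_map = {name: next(presenters) for name in sorted(containers)}
print(thread_map)  # {'db_1': 'cyan', 'web_1': 'yellow', 'worker_1': 'green'}
```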
@@ -6,6 +6,7 @@ import contextlib
|
||||
import functools
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import pipes
|
||||
import re
|
||||
import subprocess
|
||||
@@ -102,9 +103,9 @@ def dispatch():
|
||||
options, handler, command_options = dispatcher.parse(sys.argv[1:])
|
||||
setup_console_handler(console_handler,
|
||||
options.get('--verbose'),
|
||||
options.get('--no-ansi'),
|
||||
set_no_color_if_clicolor(options.get('--no-ansi')),
|
||||
options.get("--log-level"))
|
||||
setup_parallel_logger(options.get('--no-ansi'))
|
||||
setup_parallel_logger(set_no_color_if_clicolor(options.get('--no-ansi')))
|
||||
if options.get('--no-ansi'):
|
||||
command_options['--no-color'] = True
|
||||
return functools.partial(perform_command, options, handler, command_options)
|
||||
@@ -206,8 +207,9 @@ class TopLevelCommand(object):
|
||||
name specified in the client certificate
|
||||
--project-directory PATH Specify an alternate working directory
|
||||
(default: the path of the Compose file)
|
||||
--compatibility If set, Compose will attempt to convert deploy
|
||||
keys in v3 files to their non-Swarm equivalent
|
||||
--compatibility If set, Compose will attempt to convert keys
|
||||
in v3 files to their non-Swarm equivalent
|
||||
--env-file PATH Specify an alternate environment file
|
||||
|
||||
Commands:
|
||||
build Build or rebuild services
|
||||
@@ -246,6 +248,11 @@ class TopLevelCommand(object):
|
||||
def project_dir(self):
|
||||
return self.toplevel_options.get('--project-directory') or '.'
|
||||
|
||||
@property
|
||||
def toplevel_environment(self):
|
||||
environment_file = self.toplevel_options.get('--env-file')
|
||||
return Environment.from_env_file(self.project_dir, environment_file)
|
||||
|
||||
def build(self, options):
|
||||
"""
|
||||
Build or rebuild services.
|
||||
@@ -257,13 +264,18 @@ class TopLevelCommand(object):
|
||||
Usage: build [options] [--build-arg key=val...] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--build-arg key=val Set build-time variables for services.
|
||||
--compress Compress the build context using gzip.
|
||||
--force-rm Always remove intermediate containers.
|
||||
-m, --memory MEM Set memory limit for the build container.
|
||||
--no-cache Do not use cache when building the image.
|
||||
--pull Always attempt to pull a newer version of the image.
|
||||
-m, --memory MEM Sets memory limit for the build container.
|
||||
--build-arg key=val Set build-time variables for services.
|
||||
--no-rm Do not remove intermediate containers after a successful build.
|
||||
--parallel Build images in parallel.
|
||||
--progress string Set type of progress output (auto, plain, tty).
|
||||
EXPERIMENTAL flag for native builder.
|
||||
To enable, run with COMPOSE_DOCKER_CLI_BUILD=1)
|
||||
--pull Always attempt to pull a newer version of the image.
|
||||
-q, --quiet Don't print anything to STDOUT
|
||||
"""
|
||||
service_names = options['SERVICE']
|
||||
build_args = options.get('--build-arg', None)
|
||||
@@ -273,8 +285,9 @@ class TopLevelCommand(object):
|
||||
'--build-arg is only supported when services are specified for API version < 1.25.'
|
||||
' Please use a Compose file version > 2.2 or specify which services to build.'
|
||||
)
|
||||
environment = Environment.from_env_file(self.project_dir)
|
||||
build_args = resolve_build_args(build_args, environment)
|
||||
build_args = resolve_build_args(build_args, self.toplevel_environment)
|
||||
|
||||
native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD')
|
||||
|
||||
self.project.build(
|
||||
service_names=options['SERVICE'],
|
||||
@@ -282,9 +295,13 @@ class TopLevelCommand(object):
|
||||
pull=bool(options.get('--pull', False)),
|
||||
force_rm=bool(options.get('--force-rm', False)),
|
||||
memory=options.get('--memory'),
|
||||
rm=not bool(options.get('--no-rm', False)),
|
||||
build_args=build_args,
|
||||
gzip=options.get('--compress', False),
|
||||
parallel_build=options.get('--parallel', False),
|
||||
silent=options.get('--quiet', False),
|
||||
cli=native_builder,
|
||||
progress=options.get('--progress'),
|
||||
)
|
||||
|
||||
def bundle(self, options):
|
||||
@@ -327,6 +344,7 @@ class TopLevelCommand(object):
|
||||
|
||||
Options:
|
||||
--resolve-image-digests Pin image tags to digests.
|
||||
--no-interpolate Don't interpolate environment variables
|
||||
-q, --quiet Only validate the configuration, don't print
|
||||
anything.
|
||||
--services Print the service names, one per line.
|
||||
@@ -336,11 +354,12 @@ class TopLevelCommand(object):
|
||||
or use the wildcard symbol to display all services
|
||||
"""
|
||||
|
||||
compose_config = get_config_from_options('.', self.toplevel_options)
|
||||
additional_options = {'--no-interpolate': options.get('--no-interpolate')}
|
||||
compose_config = get_config_from_options('.', self.toplevel_options, additional_options)
|
||||
image_digests = None
|
||||
|
||||
if options['--resolve-image-digests']:
|
||||
self.project = project_from_options('.', self.toplevel_options)
|
||||
self.project = project_from_options('.', self.toplevel_options, additional_options)
|
||||
with errors.handle_connection_errors(self.project.client):
|
||||
image_digests = image_digests_for_project(self.project)
|
||||
|
||||
@@ -357,14 +376,14 @@ class TopLevelCommand(object):
|
||||
|
||||
if options['--hash'] is not None:
|
||||
h = options['--hash']
|
||||
self.project = project_from_options('.', self.toplevel_options)
|
||||
self.project = project_from_options('.', self.toplevel_options, additional_options)
|
||||
services = [svc for svc in options['--hash'].split(',')] if h != '*' else None
|
||||
with errors.handle_connection_errors(self.project.client):
|
||||
for service in self.project.get_services(services):
|
||||
print('{} {}'.format(service.name, service.config_hash))
|
||||
return
|
||||
|
||||
print(serialize_config(compose_config, image_digests))
|
||||
print(serialize_config(compose_config, image_digests, not options['--no-interpolate']))
|
||||
|
||||
def create(self, options):
|
||||
"""
|
||||
@@ -383,7 +402,7 @@ class TopLevelCommand(object):
|
||||
"""
|
||||
service_names = options['SERVICE']
|
||||
|
||||
log.warn(
|
||||
log.warning(
|
||||
'The create command is deprecated. '
|
||||
'Use the up command with the --no-start flag instead.'
|
||||
)
|
||||
@@ -422,8 +441,7 @@ class TopLevelCommand(object):
|
||||
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
|
||||
(default: 10)
|
||||
"""
|
||||
environment = Environment.from_env_file(self.project_dir)
|
||||
ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
|
||||
ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
|
||||
|
||||
if ignore_orphans and options['--remove-orphans']:
|
||||
raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
|
||||
@@ -480,8 +498,7 @@ class TopLevelCommand(object):
|
||||
not supported in API < 1.25)
|
||||
-w, --workdir DIR Path to workdir directory for this command.
|
||||
"""
|
||||
environment = Environment.from_env_file(self.project_dir)
|
||||
use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
|
||||
use_cli = not self.toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
|
||||
index = int(options.get('--index'))
|
||||
service = self.project.get_service(options['SERVICE'])
|
||||
detach = options.get('--detach')
|
||||
@@ -504,7 +521,7 @@ class TopLevelCommand(object):
|
||||
if IS_WINDOWS_PLATFORM or use_cli and not detach:
|
||||
sys.exit(call_docker(
|
||||
build_exec_command(options, container.id, command),
|
||||
self.toplevel_options)
|
||||
self.toplevel_options, self.toplevel_environment)
|
||||
)
|
||||
|
||||
create_exec_options = {
|
||||
@@ -604,7 +621,7 @@ class TopLevelCommand(object):
|
||||
image_id,
|
||||
size
|
||||
])
|
||||
print(Formatter().table(headers, rows))
|
||||
print(Formatter.table(headers, rows))
|
||||
|
||||
def kill(self, options):
|
||||
"""
|
||||
@@ -650,7 +667,7 @@ class TopLevelCommand(object):
|
||||
log_printer_from_project(
|
||||
self.project,
|
||||
containers,
|
||||
options['--no-color'],
|
||||
set_no_color_if_clicolor(options['--no-color']),
|
||||
log_args,
|
||||
event_stream=self.project.events(service_names=options['SERVICE'])).run()
|
||||
|
||||
@@ -694,6 +711,7 @@ class TopLevelCommand(object):
|
||||
-q, --quiet Only display IDs
|
||||
--services Display services
|
||||
--filter KEY=VAL Filter services by a property
|
||||
-a, --all Show all stopped containers (including those created by the run command)
|
||||
"""
|
||||
if options['--quiet'] and options['--services']:
|
||||
raise UserError('--quiet and --services cannot be combined')
|
||||
@@ -706,10 +724,15 @@ class TopLevelCommand(object):
|
||||
print('\n'.join(service.name for service in services))
|
||||
return
|
||||
|
||||
containers = sorted(
|
||||
self.project.containers(service_names=options['SERVICE'], stopped=True) +
|
||||
self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
|
||||
key=attrgetter('name'))
|
||||
if options['--all']:
|
||||
containers = sorted(self.project.containers(service_names=options['SERVICE'],
|
||||
one_off=OneOffFilter.include, stopped=True),
|
||||
key=attrgetter('name'))
|
||||
else:
|
||||
containers = sorted(
|
||||
self.project.containers(service_names=options['SERVICE'], stopped=True) +
|
||||
self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
|
||||
key=attrgetter('name'))
|
||||
|
||||
if options['--quiet']:
|
||||
for container in containers:
|
||||
@@ -732,7 +755,7 @@ class TopLevelCommand(object):
|
||||
container.human_readable_state,
|
||||
container.human_readable_ports,
|
||||
])
|
||||
print(Formatter().table(headers, rows))
|
||||
print(Formatter.table(headers, rows))
|
||||
|
||||
def pull(self, options):
|
||||
"""
|
||||
@@ -748,7 +771,7 @@ class TopLevelCommand(object):
|
||||
--include-deps Also pull services declared as dependencies
|
||||
"""
|
||||
if options.get('--parallel'):
|
||||
log.warn('--parallel option is deprecated and will be removed in future versions.')
|
||||
log.warning('--parallel option is deprecated and will be removed in future versions.')
|
||||
self.project.pull(
|
||||
service_names=options['SERVICE'],
|
||||
ignore_pull_failures=options.get('--ignore-pull-failures'),
|
||||
@@ -789,7 +812,7 @@ class TopLevelCommand(object):
|
||||
-a, --all Deprecated - no effect.
|
||||
"""
|
||||
if options.get('--all'):
|
||||
log.warn(
|
||||
log.warning(
|
||||
'--all flag is obsolete. This is now the default behavior '
|
||||
'of `docker-compose rm`'
|
||||
)
|
||||
@@ -867,10 +890,12 @@ class TopLevelCommand(object):
         else:
             command = service.options.get('command')

-        container_options = build_container_options(options, detach, command)
+        options['stdin_open'] = service.options.get('stdin_open', True)
+
+        container_options = build_one_off_container_options(options, detach, command)
         run_one_off_container(
             container_options, self.project, service, options,
-            self.toplevel_options, self.project_dir
+            self.toplevel_options, self.toplevel_environment
         )

     def scale(self, options):
@@ -899,7 +924,7 @@ class TopLevelCommand(object):
                 'Use the up command with the --scale flag instead.'
             )
         else:
-            log.warn(
+            log.warning(
                 'The scale command is deprecated. '
                 'Use the up command with the --scale flag instead.'
             )
@@ -970,7 +995,7 @@ class TopLevelCommand(object):
                     rows.append(process)

             print(container.name)
-            print(Formatter().table(headers, rows))
+            print(Formatter.table(headers, rows))

     def unpause(self, options):
         """
@@ -1045,8 +1070,7 @@ class TopLevelCommand(object):
         if detached and (cascade_stop or exit_value_from):
             raise UserError("--abort-on-container-exit and -d cannot be combined.")

-        environment = Environment.from_env_file(self.project_dir)
-        ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
+        ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')

         if ignore_orphans and remove_orphans:
             raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
@@ -1055,6 +1079,8 @@ class TopLevelCommand(object):
         for excluded in [x for x in opts if options.get(x) and no_start]:
             raise UserError('--no-start and {} cannot be combined.'.format(excluded))

+        native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD')
+
         with up_shutdown_context(self.project, service_names, timeout, detached):
             warn_for_swarm_mode(self.project.client)
@@ -1074,6 +1100,7 @@ class TopLevelCommand(object):
                 reset_container_image=rebuild,
                 renew_anonymous_volumes=options.get('--renew-anon-volumes'),
                 silent=options.get('--quiet-pull'),
+                cli=native_builder,
             )

             try:
@@ -1098,7 +1125,7 @@ class TopLevelCommand(object):
                 log_printer = log_printer_from_project(
                     self.project,
                     attached_containers,
-                    options['--no-color'],
+                    set_no_color_if_clicolor(options['--no-color']),
                     {'follow': True},
                     cascade_stop,
                     event_stream=self.project.events(service_names=service_names))
@@ -1231,7 +1258,7 @@ def exitval_from_opts(options, project):
    exit_value_from = options.get('--exit-code-from')
    if exit_value_from:
        if not options.get('--abort-on-container-exit'):
-            log.warn('using --exit-code-from implies --abort-on-container-exit')
+            log.warning('using --exit-code-from implies --abort-on-container-exit')
            options['--abort-on-container-exit'] = True
        if exit_value_from not in [s.name for s in project.get_services()]:
            log.error('No service named "%s" was found in your compose file.',
@@ -1262,11 +1289,11 @@ def build_action_from_opts(options):
     return BuildAction.none


-def build_container_options(options, detach, command):
+def build_one_off_container_options(options, detach, command):
     container_options = {
         'command': command,
         'tty': not (detach or options['-T'] or not sys.stdin.isatty()),
-        'stdin_open': not detach,
+        'stdin_open': options.get('stdin_open'),
         'detach': detach,
     }
@@ -1283,8 +1310,8 @@ def build_one_off_container_options(options, detach, command):
            [""] if options['--entrypoint'] == '' else options['--entrypoint']
        )

-    if options['--rm']:
-        container_options['restart'] = None
+    # Ensure that run command remains one-off (issue #6302)
+    container_options['restart'] = None

    if options['--user']:
        container_options['user'] = options.get('--user')
@@ -1309,7 +1336,7 @@ def build_one_off_container_options(options, detach, command):


 def run_one_off_container(container_options, project, service, options, toplevel_options,
-                          project_dir='.'):
+                          toplevel_environment):
    if not options['--no-deps']:
        deps = service.get_dependency_names()
        if deps:
@@ -1338,8 +1365,7 @@ def run_one_off_container(container_options, project, service, options, toplevel
    if options['--rm']:
        project.client.remove_container(container.id, force=True, v=True)

-    environment = Environment.from_env_file(project_dir)
-    use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
+    use_cli = not toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')

    signals.set_signal_handler_to_shutdown()
    signals.set_signal_handler_to_hang_up()
@@ -1348,8 +1374,8 @@ def run_one_off_container(container_options, project, service, options, toplevel
    if IS_WINDOWS_PLATFORM or use_cli:
        service.connect_container_to_networks(container, use_network_aliases)
        exit_code = call_docker(
-            ["start", "--attach", "--interactive", container.id],
-            toplevel_options
+            get_docker_start_call(container_options, container.id),
+            toplevel_options, toplevel_environment
        )
    else:
        operation = RunOperation(
@@ -1375,6 +1401,16 @@ def run_one_off_container(container_options, project, service, options, toplevel
    sys.exit(exit_code)


+def get_docker_start_call(container_options, container_id):
+    docker_call = ["start"]
+    if not container_options.get('detach'):
+        docker_call.append("--attach")
+    if container_options.get('stdin_open'):
+        docker_call.append("--interactive")
+    docker_call.append(container_id)
+    return docker_call
+
+
 def log_printer_from_project(
        project,
        containers,
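A quick standalone check (not part of the diff) of the argv the new helper produces for the two common `docker-compose run` shapes; `container_options` here is a plain dict standing in for the one Compose builds, and `"abc123"` is a made-up container ID:

```python
def get_docker_start_call(container_options, container_id):
    docker_call = ["start"]
    if not container_options.get('detach'):
        docker_call.append("--attach")
    if container_options.get('stdin_open'):
        docker_call.append("--interactive")
    docker_call.append(container_id)
    return docker_call

# Interactive foreground run: attach and keep stdin open.
assert get_docker_start_call({'detach': False, 'stdin_open': True}, "abc123") == \
    ["start", "--attach", "--interactive", "abc123"]
# Detached run (docker-compose run -d): neither --attach nor --interactive.
assert get_docker_start_call({'detach': True, 'stdin_open': None}, "abc123") == \
    ["start", "abc123"]
```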
@@ -1429,7 +1465,7 @@ def exit_if(condition, message, exit_code):
        raise SystemExit(exit_code)


-def call_docker(args, dockeropts):
+def call_docker(args, dockeropts, environment):
    executable_path = find_executable('docker')
    if not executable_path:
        raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))
@@ -1452,12 +1488,14 @@ def call_docker(args, dockeropts, environment):
    if verify:
        tls_options.append('--tlsverify')
    if host:
-        tls_options.extend(['--host', host.lstrip('=')])
+        tls_options.extend(
+            ['--host', re.sub(r'^https?://', 'tcp://', host.lstrip('='))]
+        )

    args = [executable_path] + tls_options + args
    log.debug(" ".join(map(pipes.quote, args)))

-    return subprocess.call(args)
+    return subprocess.call(args, env=environment)
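The `re.sub` above rewrites an http(s) scheme in `DOCKER_HOST`-style values to the `tcp://` scheme the docker CLI expects. A minimal illustration (`normalize_host` is a name invented for this sketch):

```python
import re

def normalize_host(host):
    # Mirrors the substitution in call_docker above.
    return re.sub(r'^https?://', 'tcp://', host.lstrip('='))

assert normalize_host('=https://myhost:2376') == 'tcp://myhost:2376'
assert normalize_host('tcp://myhost:2375') == 'tcp://myhost:2375'  # untouched
```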
 def parse_scale_args(options):

@@ -1558,10 +1596,14 @@ def warn_for_swarm_mode(client):
            # UCP does multi-node scheduling with traditional Compose files.
            return

-        log.warn(
+        log.warning(
            "The Docker Engine you're using is running in swarm mode.\n\n"
            "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
            "All containers will be scheduled on the current node.\n\n"
            "To deploy your application across the swarm, "
            "use `docker stack deploy`.\n"
        )


+def set_no_color_if_clicolor(no_color_flag):
+    return no_color_flag or os.environ.get('CLICOLOR') == "0"
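The new helper honors the `CLICOLOR` convention: setting `CLICOLOR=0` forces monochrome output even when `--no-color` was not passed. A runnable sketch of the same logic:

```python
import os

def set_no_color_if_clicolor(no_color_flag):
    # CLICOLOR=0 in the environment wins over an unset flag.
    return no_color_flag or os.environ.get('CLICOLOR') == "0"

os.environ['CLICOLOR'] = "0"
assert set_no_color_if_clicolor(False) is True   # env variable disables color
del os.environ['CLICOLOR']
assert set_no_color_if_clicolor(False) is False  # flag alone decides
```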
@@ -133,12 +133,12 @@ def generate_user_agent():

 def human_readable_file_size(size):
     suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', ]
-    order = int(math.log(size, 2) / 10) if size else 0
+    order = int(math.log(size, 1000)) if size else 0
     if order >= len(suffixes):
         order = len(suffixes) - 1

-    return '{0:.3g} {1}'.format(
-        size / float(1 << (order * 10)),
+    return '{0:.4g} {1}'.format(
+        size / pow(10, order * 3),
         suffixes[order]
     )
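The change switches from binary (1024-based) to decimal (SI, 1000-based) units, matching the `kB`/`MB` suffixes the function already advertised. A runnable copy of the new version:

```python
import math

def human_readable_file_size(size):
    # Decimal (SI) units, as in the new implementation above.
    suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', ]
    order = int(math.log(size, 1000)) if size else 0
    if order >= len(suffixes):
        order = len(suffixes) - 1
    return '{0:.4g} {1}'.format(size / pow(10, order * 3), suffixes[order])

print(human_readable_file_size(1100))     # 1.1 kB (the old binary version gave ~1.074)
print(human_readable_file_size(1234000))  # 1.234 MB
```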
@@ -6,6 +6,7 @@ from . import environment
 from .config import ConfigurationError
 from .config import DOCKER_CONFIG_KEYS
 from .config import find
+from .config import is_url
 from .config import load
 from .config import merge_environment
 from .config import merge_labels
@@ -8,6 +8,7 @@ import os
 import string
 import sys
 from collections import namedtuple
+from operator import attrgetter

 import six
 import yaml
@@ -50,6 +51,7 @@ from .validation import match_named_volumes
 from .validation import validate_against_config_schema
 from .validation import validate_config_section
 from .validation import validate_cpu
+from .validation import validate_credential_spec
 from .validation import validate_depends_on
 from .validation import validate_extends_file_path
 from .validation import validate_healthcheck
@@ -91,6 +93,7 @@ DOCKER_CONFIG_KEYS = [
     'healthcheck',
     'image',
     'ipc',
+    'isolation',
     'labels',
     'links',
     'mac_address',
@@ -195,9 +198,9 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
         version = self.config['version']

         if isinstance(version, dict):
-            log.warn('Unexpected type for "version" key in "{}". Assuming '
-                     '"version" is the name of a service, and defaulting to '
-                     'Compose file version 1.'.format(self.filename))
+            log.warning('Unexpected type for "version" key in "{}". Assuming '
+                        '"version" is the name of a service, and defaulting to '
+                        'Compose file version 1.'.format(self.filename))
             return V1

         if not isinstance(version, six.string_types):
@@ -315,8 +318,8 @@ def get_default_config_files(base_dir):
     winner = candidates[0]

     if len(candidates) > 1:
-        log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
-        log.warn("Using %s\n", winner)
+        log.warning("Found multiple config files with supported names: %s", ", ".join(candidates))
+        log.warning("Using %s\n", winner)

     return [os.path.join(path, winner)] + get_default_override_file(path)
@@ -359,7 +362,7 @@ def check_swarm_only_config(service_dicts, compatibility=False):
     def check_swarm_only_key(service_dicts, key):
         services = [s for s in service_dicts if s.get(key)]
         if services:
-            log.warn(
+            log.warning(
                 warning_template.format(
                     services=", ".join(sorted(s['name'] for s in services)),
                     key=key
@@ -367,11 +370,10 @@ def check_swarm_only_config(service_dicts, compatibility=False):
             )
     if not compatibility:
         check_swarm_only_key(service_dicts, 'deploy')
-        check_swarm_only_key(service_dicts, 'credential_spec')
-        check_swarm_only_key(service_dicts, 'configs')


-def load(config_details, compatibility=False):
+def load(config_details, compatibility=False, interpolate=True):
     """Load the configuration from a working directory and a list of
     configuration files.  Files are loaded in order, and merged on top
     of each other to create the final configuration.
@@ -381,7 +383,7 @@ def load(config_details, compatibility=False, interpolate=True):
     validate_config_version(config_details.config_files)

     processed_files = [
-        process_config_file(config_file, config_details.environment)
+        process_config_file(config_file, config_details.environment, interpolate=interpolate)
         for config_file in config_details.config_files
     ]
     config_details = config_details._replace(config_files=processed_files)
@@ -503,7 +505,6 @@ def load_services(config_details, config_file, compatibility=False):


 def interpolate_config_section(config_file, config, section, environment):
-    validate_config_section(config_file.filename, config, section)
     return interpolate_environment_variables(
         config_file.version,
         config,
@@ -512,38 +513,60 @@ def interpolate_config_section(config_file, config, section, environment):
     )


-def process_config_file(config_file, environment, service_name=None):
-    services = interpolate_config_section(
+def process_config_section(config_file, config, section, environment, interpolate):
+    validate_config_section(config_file.filename, config, section)
+    if interpolate:
+        return interpolate_environment_variables(
+            config_file.version,
+            config,
+            section,
+            environment
+        )
+    else:
+        return config
+
+
+def process_config_file(config_file, environment, service_name=None, interpolate=True):
+    services = process_config_section(
         config_file,
         config_file.get_service_dicts(),
         'service',
-        environment)
+        environment,
+        interpolate,
+    )

     if config_file.version > V1:
         processed_config = dict(config_file.config)
         processed_config['services'] = services
-        processed_config['volumes'] = interpolate_config_section(
+        processed_config['volumes'] = process_config_section(
             config_file,
             config_file.get_volumes(),
             'volume',
-            environment)
-        processed_config['networks'] = interpolate_config_section(
+            environment,
+            interpolate,
+        )
+        processed_config['networks'] = process_config_section(
             config_file,
             config_file.get_networks(),
             'network',
-            environment)
+            environment,
+            interpolate,
+        )
         if config_file.version >= const.COMPOSEFILE_V3_1:
-            processed_config['secrets'] = interpolate_config_section(
+            processed_config['secrets'] = process_config_section(
                 config_file,
                 config_file.get_secrets(),
                 'secret',
-                environment)
+                environment,
+                interpolate,
+            )
         if config_file.version >= const.COMPOSEFILE_V3_3:
-            processed_config['configs'] = interpolate_config_section(
+            processed_config['configs'] = process_config_section(
                 config_file,
                 config_file.get_configs(),
                 'config',
-                environment
+                environment,
+                interpolate,
             )
     else:
         processed_config = services
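The effect of the new `interpolate` switch: validation always runs, but variable substitution can be skipped so the raw template survives (this is the hook a `config --no-interpolate` style flag builds on). A toy sketch, with a deliberately simplified stand-in for `interpolate_environment_variables`:

```python
def process_config_section_sketch(config, environment, interpolate):
    # Toy substitution only -- the real code delegates to
    # interpolate_environment_variables with full ${VAR:-default} syntax.
    if interpolate:
        return {k: v.replace('$HOST', environment.get('HOST', ''))
                for k, v in config.items()}
    return config

env = {'HOST': 'db.example.com'}
raw = {'url': 'tcp://$HOST:5432'}
print(process_config_section_sketch(raw, env, True))   # {'url': 'tcp://db.example.com:5432'}
print(process_config_section_sketch(raw, env, False))  # {'url': 'tcp://$HOST:5432'} -- left as-is
```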
@@ -592,7 +615,7 @@ class ServiceExtendsResolver(object):
         config_path = self.get_extended_config_path(extends)
         service_name = extends['service']

-        if config_path == self.config_file.filename:
+        if config_path == os.path.abspath(self.config_file.filename):
             try:
                 service_config = self.config_file.get_service(service_name)
             except KeyError:
@@ -704,6 +727,7 @@ def validate_service(service_config, service_names, config_file):
     validate_depends_on(service_config, service_names)
     validate_links(service_config, service_names)
     validate_healthcheck(service_config)
+    validate_credential_spec(service_config)

     if not service_dict.get('image') and has_uppercase(service_name):
         raise ConfigurationError(
@@ -834,6 +858,17 @@ def finalize_service_volumes(service_dict, environment):
                 finalized_volumes.append(MountSpec.parse(v, normalize, win_host))
             else:
                 finalized_volumes.append(VolumeSpec.parse(v, normalize, win_host))

+        duplicate_mounts = []
+        mounts = [v.as_volume_spec() if isinstance(v, MountSpec) else v for v in finalized_volumes]
+        for mount in mounts:
+            if list(map(attrgetter('internal'), mounts)).count(mount.internal) > 1:
+                duplicate_mounts.append(mount.repr())
+
+        if duplicate_mounts:
+            raise ConfigurationError("Duplicate mount points: [%s]" % (
+                ', '.join(duplicate_mounts)))
+
         service_dict['volumes'] = finalized_volumes

     return service_dict
@@ -881,11 +916,12 @@ def finalize_service(service_config, service_names, version, environment, compat
         normalize_build(service_dict, service_config.working_dir, environment)

     if compatibility:
+        service_dict = translate_credential_spec_to_security_opt(service_dict)
         service_dict, ignored_keys = translate_deploy_keys_to_container_config(
             service_dict
         )
         if ignored_keys:
-            log.warn(
+            log.warning(
                 'The following deploy sub-keys are not supported in compatibility mode and have'
                 ' been ignored: {}'.format(', '.join(ignored_keys))
             )
@@ -917,6 +953,25 @@ def convert_restart_policy(name):
         raise ConfigurationError('Invalid restart policy "{}"'.format(name))


+def convert_credential_spec_to_security_opt(credential_spec):
+    if 'file' in credential_spec:
+        return 'file://{file}'.format(file=credential_spec['file'])
+    return 'registry://{registry}'.format(registry=credential_spec['registry'])
+
+
+def translate_credential_spec_to_security_opt(service_dict):
+    result = []
+
+    if 'credential_spec' in service_dict:
+        spec = convert_credential_spec_to_security_opt(service_dict['credential_spec'])
+        result.append('credentialspec={spec}'.format(spec=spec))
+
+    if result:
+        service_dict['security_opt'] = result
+
+    return service_dict
+
+
 def translate_deploy_keys_to_container_config(service_dict):
     if 'credential_spec' in service_dict:
         del service_dict['credential_spec']
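The two new functions above are self-contained, so they can be exercised directly; the service dict below is made up for the illustration:

```python
def convert_credential_spec_to_security_opt(credential_spec):
    if 'file' in credential_spec:
        return 'file://{file}'.format(file=credential_spec['file'])
    return 'registry://{registry}'.format(registry=credential_spec['registry'])

def translate_credential_spec_to_security_opt(service_dict):
    result = []
    if 'credential_spec' in service_dict:
        spec = convert_credential_spec_to_security_opt(service_dict['credential_spec'])
        result.append('credentialspec={spec}'.format(spec=spec))
    if result:
        service_dict['security_opt'] = result
    return service_dict

service = {'name': 'web', 'credential_spec': {'file': 'my-cred-spec.json'}}
print(translate_credential_spec_to_security_opt(service)['security_opt'])
# ['credentialspec=file://my-cred-spec.json']
```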
@@ -1039,15 +1094,16 @@ def merge_service_dicts(base, override, version):
     md.merge_mapping('environment', parse_environment)
     md.merge_mapping('labels', parse_labels)
     md.merge_mapping('ulimits', parse_flat_dict)
-    md.merge_mapping('networks', parse_networks)
     md.merge_mapping('sysctls', parse_sysctls)
     md.merge_mapping('depends_on', parse_depends_on)
     md.merge_mapping('storage_opt', parse_flat_dict)
     md.merge_sequence('links', ServiceLink.parse)
     md.merge_sequence('secrets', types.ServiceSecret.parse)
     md.merge_sequence('configs', types.ServiceConfig.parse)
     md.merge_sequence('security_opt', types.SecurityOpt.parse)
     md.merge_mapping('extra_hosts', parse_extra_hosts)

+    md.merge_field('networks', merge_networks, default={})
     for field in ['volumes', 'devices']:
         md.merge_field(field, merge_path_mappings)
@@ -1152,6 +1208,22 @@ def merge_deploy(base, override):
     return dict(md)


+def merge_networks(base, override):
+    merged_networks = {}
+    all_network_names = set(base) | set(override)
+    base = {k: {} for k in base} if isinstance(base, list) else base
+    override = {k: {} for k in override} if isinstance(override, list) else override
+    for network_name in all_network_names:
+        md = MergeDict(base.get(network_name) or {}, override.get(network_name) or {})
+        md.merge_field('aliases', merge_unique_items_lists, [])
+        md.merge_field('link_local_ips', merge_unique_items_lists, [])
+        md.merge_scalar('priority')
+        md.merge_scalar('ipv4_address')
+        md.merge_scalar('ipv6_address')
+        merged_networks[network_name] = dict(md)
+    return merged_networks
+
+
 def merge_reservations(base, override):
     md = MergeDict(base, override)
     md.merge_scalar('cpus')
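The point of `merge_networks` is that a service's `networks` key can be either a list (`networks: [default]`) or a mapping with per-network options, and an override file may use the other form. A simplified, self-contained sketch (the real code uses `MergeDict` from `compose.config.config`; plain dict merging stands in for it here):

```python
def merge_networks_sketch(base, override):
    # List form is normalized to a mapping with empty option dicts first,
    # then per-network options are merged with the override winning.
    base = {k: {} for k in base} if isinstance(base, list) else base
    override = {k: {} for k in override} if isinstance(override, list) else override
    merged = {}
    for name in set(base) | set(override):
        opts = dict(base.get(name) or {})
        opts.update(override.get(name) or {})
        merged[name] = opts
    return merged

# A list-form definition in the base file merges with a dict-form override:
print(merge_networks_sketch(['default'], {'default': {'ipv4_address': '172.16.0.10'}}))
# {'default': {'ipv4_address': '172.16.0.10'}}
```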
@@ -1281,7 +1353,7 @@ def resolve_volume_paths(working_dir, service_dict):

 def resolve_volume_path(working_dir, volume):
     if isinstance(volume, dict):
-        if volume.get('source', '').startswith('.') and volume['type'] == 'bind':
+        if volume.get('source', '').startswith(('.', '~')) and volume['type'] == 'bind':
             volume['source'] = expand_path(working_dir, volume['source'])
         return volume
@@ -5,11 +5,13 @@ import codecs
 import contextlib
 import logging
 import os
+import re

 import six

 from ..const import IS_WINDOWS_PLATFORM
 from .errors import ConfigurationError
+from .errors import EnvFileNotFound

 log = logging.getLogger(__name__)
@@ -17,10 +19,16 @@ log = logging.getLogger(__name__)
 def split_env(env):
     if isinstance(env, six.binary_type):
         env = env.decode('utf-8', 'replace')
+    key = value = None
     if '=' in env:
-        return env.split('=', 1)
+        key, value = env.split('=', 1)
     else:
-        return env, None
+        key = env
+    if re.search(r'\s', key):
+        raise ConfigurationError(
+            "environment variable name '{}' may not contain whitespace.".format(key)
+        )
+    return key, value
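Behavior of the new `split_env`: only the first `=` splits, a bare name yields `None`, and whitespace in the name is now rejected instead of silently producing an unusable variable. A self-contained check:

```python
import re

class ConfigurationError(Exception):
    pass

def split_env(env):
    key = value = None
    if '=' in env:
        key, value = env.split('=', 1)
    else:
        key = env
    if re.search(r'\s', key):
        raise ConfigurationError(
            "environment variable name '{}' may not contain whitespace.".format(key))
    return key, value

assert split_env('FOO=bar=baz') == ('FOO', 'bar=baz')  # only the first '=' splits
assert split_env('FOO') == ('FOO', None)
try:
    split_env('FO O=bar')
except ConfigurationError as e:
    print(e)  # rejected instead of accepted silently
```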
@@ -28,16 +36,19 @@ def env_vars_from_file(filename):
     Read in a line delimited file of environment variables.
     """
     if not os.path.exists(filename):
-        raise ConfigurationError("Couldn't find env file: %s" % filename)
+        raise EnvFileNotFound("Couldn't find env file: {}".format(filename))
     elif not os.path.isfile(filename):
-        raise ConfigurationError("%s is not a file." % (filename))
+        raise EnvFileNotFound("{} is not a file.".format(filename))
     env = {}
     with contextlib.closing(codecs.open(filename, 'r', 'utf-8-sig')) as fileobj:
         for line in fileobj:
             line = line.strip()
             if line and not line.startswith('#'):
-                k, v = split_env(line)
-                env[k] = v
+                try:
+                    k, v = split_env(line)
+                    env[k] = v
+                except ConfigurationError as e:
+                    raise ConfigurationError('In file {}: {}'.format(filename, e.msg))
     return env
@@ -45,19 +56,24 @@ class Environment(dict):
     def __init__(self, *args, **kwargs):
         super(Environment, self).__init__(*args, **kwargs)
         self.missing_keys = []
+        self.silent = False

     @classmethod
-    def from_env_file(cls, base_dir):
+    def from_env_file(cls, base_dir, env_file=None):
         def _initialize():
             result = cls()
             if base_dir is None:
                 return result
-            env_file_path = os.path.join(base_dir, '.env')
+            if env_file:
+                env_file_path = os.path.join(base_dir, env_file)
+            else:
+                env_file_path = os.path.join(base_dir, '.env')
             try:
                 return cls(env_vars_from_file(env_file_path))
-            except ConfigurationError:
+            except EnvFileNotFound:
                 pass
             return result

         instance = _initialize()
         instance.update(os.environ)
         return instance
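A hedged usage sketch of the new `env_file` parameter; the project path and `deploy.env` file name are invented. Passing a file name selects an alternative to the default `.env`, which is the hook a CLI-level env-file option builds on. Note also that only `EnvFileNotFound` is swallowed now, so a malformed env file still raises:

```python
from compose.config.environment import Environment

default_env = Environment.from_env_file('/my/project')                # reads /my/project/.env
custom_env = Environment.from_env_file('/my/project', 'deploy.env')   # reads /my/project/deploy.env
```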
@@ -83,8 +99,8 @@ class Environment(dict):
             return super(Environment, self).__getitem__(key.upper())
         except KeyError:
             pass
-        if key not in self.missing_keys:
-            log.warn(
+        if not self.silent and key not in self.missing_keys:
+            log.warning(
                 "The {} variable is not set. Defaulting to a blank string."
                 .format(key)
             )
@@ -19,6 +19,10 @@ class ConfigurationError(Exception):
         return self.msg


+class EnvFileNotFound(ConfigurationError):
+    pass
+
+
 class DependencyError(ConfigurationError):
     pass
@@ -64,12 +64,12 @@ def interpolate_value(name, config_key, value, section, interpolator):
                 string=e.string))
     except UnsetRequiredSubstitution as e:
         raise ConfigurationError(
-            'Missing mandatory value for "{config_key}" option in {section} "{name}": {err}'.format(
-                config_key=config_key,
-                name=name,
-                section=section,
-                err=e.err
-            )
+            'Missing mandatory value for "{config_key}" option interpolating {value} '
+            'in {section} "{name}": {err}'.format(config_key=config_key,
+                                                  value=value,
+                                                  name=name,
+                                                  section=section,
+                                                  err=e.err)
         )
@@ -24,14 +24,12 @@ def serialize_dict_type(dumper, data):


 def serialize_string(dumper, data):
-    """ Ensure boolean-like strings are quoted in the output and escape $ characters """
+    """ Ensure boolean-like strings are quoted in the output """
     representer = dumper.represent_str if six.PY3 else dumper.represent_unicode

     if isinstance(data, six.binary_type):
         data = data.decode('utf-8')

-    data = data.replace('$', '$$')
-
     if data.lower() in ('y', 'n', 'yes', 'no', 'on', 'off', 'true', 'false'):
         # Empirically only y/n appears to be an issue, but this might change
         # depending on which PyYaml version is being used. Err on safe side.
@@ -39,6 +37,12 @@ def serialize_string(dumper, data):
     return representer(data)


+def serialize_string_escape_dollar(dumper, data):
+    """ Ensure boolean-like strings are quoted in the output and escape $ characters """
+    data = data.replace('$', '$$')
+    return serialize_string(dumper, data)
+
+
 yaml.SafeDumper.add_representer(types.MountSpec, serialize_dict_type)
 yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
 yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
@@ -46,8 +50,6 @@ yaml.SafeDumper.add_representer(types.SecurityOpt, serialize_config_type)
 yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
 yaml.SafeDumper.add_representer(types.ServiceConfig, serialize_dict_type)
 yaml.SafeDumper.add_representer(types.ServicePort, serialize_dict_type)
-yaml.SafeDumper.add_representer(str, serialize_string)
-yaml.SafeDumper.add_representer(six.text_type, serialize_string)


 def denormalize_config(config, image_digests=None):
@@ -93,7 +95,13 @@ def v3_introduced_name_key(key):
     return V3_5


-def serialize_config(config, image_digests=None):
+def serialize_config(config, image_digests=None, escape_dollar=True):
+    if escape_dollar:
+        yaml.SafeDumper.add_representer(str, serialize_string_escape_dollar)
+        yaml.SafeDumper.add_representer(six.text_type, serialize_string_escape_dollar)
+    else:
+        yaml.SafeDumper.add_representer(str, serialize_string)
+        yaml.SafeDumper.add_representer(six.text_type, serialize_string)
     return yaml.safe_dump(
         denormalize_config(config, image_digests),
         default_flow_style=False,
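What the `escape_dollar` switch changes in the output: with escaping on, `$` doubles to `$$` so the emitted YAML survives another round of variable interpolation; with it off, values are emitted verbatim. A standalone rendition of just the escaping rule (the real representers also take the PyYAML dumper and handle boolean-like quoting):

```python
import yaml

def dump_escaping_dollar(data):
    # Standalone sketch: '$' -> '$$' before dumping.
    return yaml.safe_dump(data.replace('$', '$$'))

print(dump_escaping_dollar('pa$$word'))  # pa$$$$word -- safe to re-interpolate
print(yaml.safe_dump('pa$$word'))        # pa$$word   -- raw, for escape_dollar=False
```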
@@ -240,6 +240,18 @@ def validate_depends_on(service_config, service_names):
             )


+def validate_credential_spec(service_config):
+    credential_spec = service_config.config.get('credential_spec')
+    if not credential_spec:
+        return
+
+    if 'registry' not in credential_spec and 'file' not in credential_spec:
+        raise ConfigurationError(
+            "Service '{s.name}' is missing 'credential_spec.file' or "
+            "credential_spec.registry'".format(s=service_config)
+        )
+
+
 def get_unsupported_config_msg(path, error_key):
     msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
     if error_key in DOCKER_CONFIG_HINTS:
@@ -330,7 +342,10 @@ def handle_generic_error(error, path):


 def parse_key_from_error_msg(error):
-    return error.message.split("'")[1]
+    try:
+        return error.message.split("'")[1]
+    except IndexError:
+        return error.message.split('(')[1].split(' ')[0].strip("'")


 def path_string(path):
@@ -7,11 +7,13 @@ from .version import ComposeVersion

 DEFAULT_TIMEOUT = 10
 HTTP_TIMEOUT = 60
 IMAGE_EVENTS = ['delete', 'import', 'load', 'pull', 'push', 'save', 'tag', 'untag']
 IS_WINDOWS_PLATFORM = (sys.platform == "win32")
 LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
 LABEL_ONE_OFF = 'com.docker.compose.oneoff'
 LABEL_PROJECT = 'com.docker.compose.project'
+LABEL_WORKING_DIR = 'com.docker.compose.project.working_dir'
+LABEL_CONFIG_FILES = 'com.docker.compose.project.config_files'
+LABEL_ENVIRONMENT_FILE = 'com.docker.compose.project.environment_file'
 LABEL_SERVICE = 'com.docker.compose.service'
 LABEL_NETWORK = 'com.docker.compose.network'
 LABEL_VERSION = 'com.docker.compose.version'
@@ -7,6 +7,7 @@ import six
 from docker.errors import ImageNotFound

 from .const import LABEL_CONTAINER_NUMBER
+from .const import LABEL_ONE_OFF
 from .const import LABEL_PROJECT
 from .const import LABEL_SERVICE
 from .const import LABEL_SLUG
@@ -82,12 +83,16 @@ class Container(object):
     @property
     def name_without_project(self):
         if self.name.startswith('{0}_{1}'.format(self.project, self.service)):
-            return '{0}_{1}{2}'.format(self.service, self.number, '_' + self.slug if self.slug else '')
+            return '{0}_{1}'.format(self.service, self.number if self.number is not None else self.slug)
         else:
             return self.name

     @property
     def number(self):
+        if self.one_off:
+            # One-off containers are no longer assigned numbers and use slugs instead.
+            return None
+
         number = self.labels.get(LABEL_CONTAINER_NUMBER)
         if not number:
             raise ValueError("Container {0} does not have a {1} label".format(
@@ -104,6 +109,10 @@ class Container(object):
     def full_slug(self):
         return self.labels.get(LABEL_SLUG)

+    @property
+    def one_off(self):
+        return self.labels.get(LABEL_ONE_OFF) == 'True'
+
     @property
     def ports(self):
         self.inspect_if_not_inspected()
@@ -226,12 +226,12 @@ def check_remote_network_config(remote, local):
         raise NetworkConfigChangedError(local.true_name, 'enable_ipv6')

     local_labels = local.labels or {}
-    remote_labels = remote.get('Labels', {})
+    remote_labels = remote.get('Labels') or {}
     for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
         if k.startswith('com.docker.'):  # We are only interested in user-specified labels
             continue
         if remote_labels.get(k) != local_labels.get(k):
-            log.warn(
+            log.warning(
                 'Network {}: label "{}" has changed. It may need to be'
                 ' recreated.'.format(local.true_name, k)
             )
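Why `remote.get('Labels') or {}` rather than `remote.get('Labels', {})`: `dict.get` only falls back to the default when the key is *absent*, not when the stored value is `None`, which the Engine API can return for an unlabeled network. A two-line demonstration:

```python
remote = {'Labels': None}
print(remote.get('Labels', {}))    # None -> .keys() would raise AttributeError
print(remote.get('Labels') or {})  # {}   -> safe to iterate
```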
@@ -276,7 +276,7 @@ class ProjectNetworks(object):
         }
         unused = set(networks) - set(service_networks) - {'default'}
         if unused:
-            log.warn(
+            log.warning(
                 "Some networks were defined but are not used by any service: "
                 "{}".format(", ".join(unused)))
         return cls(service_networks, use_networking)
@@ -288,7 +288,7 @@ class ProjectNetworks(object):
         try:
             network.remove()
         except NotFound:
-            log.warn("Network %s not found.", network.true_name)
+            log.warning("Network %s not found.", network.true_name)

     def initialize(self):
         if not self.use_networking:
@@ -43,14 +43,17 @@ class GlobalLimit(object):
         cls.global_limiter = Semaphore(value)


-def parallel_execute_watch(events, writer, errors, results, msg, get_name):
+def parallel_execute_watch(events, writer, errors, results, msg, get_name, fail_check):
     """ Watch events from a parallel execution, update status and fill errors and results.
         Returns exception to re-raise.
     """
     error_to_reraise = None
     for obj, result, exception in events:
         if exception is None:
-            writer.write(msg, get_name(obj), 'done', green)
+            if fail_check is not None and fail_check(obj):
+                writer.write(msg, get_name(obj), 'failed', red)
+            else:
+                writer.write(msg, get_name(obj), 'done', green)
             results.append(result)
         elif isinstance(exception, ImageNotFound):
             # This is to bubble up ImageNotFound exceptions to the client so we
@@ -72,12 +75,14 @@ def parallel_execute_watch(events, writer, errors, results, msg, get_name, fail_check):
     return error_to_reraise


-def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
+def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, fail_check=None):
     """Runs func on objects in parallel while ensuring that func is
     ran on object only after it is ran on all its dependencies.

     get_deps called on object must return a collection with its dependencies.
     get_name called on object must return its name.
+    fail_check is an additional failure check for cases that should display as a failure
+    in the CLI logs, but don't raise an exception (such as attempting to start 0 containers)
     """
     objects = list(objects)
     stream = get_output_stream(sys.stderr)
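A sketch of the `fail_check` hook with invented minimal objects; in Compose itself the callable is e.g. `lambda obj: not obj.containers()` when starting services, so a service that ended up with zero containers is shown as `failed` even though no exception was raised:

```python
class FakeService(object):
    def __init__(self, name, containers):
        self.name = name
        self._containers = containers

    def containers(self):
        return self._containers

fail_check = lambda obj: not obj.containers()
for svc in [FakeService('web', ['c1']), FakeService('worker', [])]:
    status = 'failed' if fail_check(svc) else 'done'
    print(svc.name, status)  # web done / worker failed
```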
@@ -96,7 +101,9 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, fail_check=None):

     errors = {}
     results = []
-    error_to_reraise = parallel_execute_watch(events, writer, errors, results, msg, get_name)
+    error_to_reraise = parallel_execute_watch(
+        events, writer, errors, results, msg, get_name, fail_check
+    )

     for obj_name, error in errors.items():
         stream.write("\nERROR: for {} {}\n".format(obj_name, error))
@@ -98,14 +98,14 @@ def print_output_event(event, stream, is_terminal):


 def get_digest_from_pull(events):
+    digest = None
     for event in events:
         status = event.get('status')
         if not status or 'Digest' not in status:
             continue
-
-        _, digest = status.split(':', 1)
-        return digest.strip()
-    return None
+        else:
+            digest = status.split(':', 1)[1].strip()
+    return digest


 def get_digest_from_push(events):
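The old code returned on the first digest-bearing event; the new version keeps scanning so a later digest event supersedes an earlier one. A runnable check with invented events in the docker pull progress format:

```python
def get_digest_from_pull(events):
    digest = None
    for event in events:
        status = event.get('status')
        if not status or 'Digest' not in status:
            continue
        else:
            digest = status.split(':', 1)[1].strip()
    return digest

events = [
    {'status': 'Pulling from library/redis'},
    {'status': 'Digest: sha256:aaaa'},
    {'status': 'Digest: sha256:bbbb'},
]
print(get_digest_from_pull(events))  # sha256:bbbb -- the last one wins
```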
@@ -6,17 +6,18 @@ import logging
 import operator
 import re
 from functools import reduce
+from os import path

 import enum
 import six
 from docker.errors import APIError
+from docker.utils import version_lt

 from . import parallel
 from .config import ConfigurationError
 from .config.config import V1
 from .config.sort_services import get_container_name_from_network_mode
 from .config.sort_services import get_service_name_from_network_mode
 from .const import IMAGE_EVENTS
+from .const import LABEL_ONE_OFF
 from .const import LABEL_PROJECT
 from .const import LABEL_SERVICE
@@ -29,6 +30,7 @@ from .service import ContainerNetworkMode
 from .service import ContainerPidMode
 from .service import ConvergenceStrategy
 from .service import NetworkMode
+from .service import parse_repository_tag
 from .service import PidMode
 from .service import Service
 from .service import ServiceNetworkMode
@@ -81,7 +83,7 @@ class Project(object):
         return labels

     @classmethod
-    def from_config(cls, name, config_data, client, default_platform=None):
+    def from_config(cls, name, config_data, client, default_platform=None, extra_labels=[]):
         """
         Construct a Project from a config.Config object.
         """
@@ -134,6 +136,7 @@ class Project(object):
                     pid_mode=pid_mode,
                     platform=service_dict.pop('platform', None),
                     default_platform=default_platform,
+                    extra_labels=extra_labels,
                     **service_dict)
             )
@@ -279,6 +282,7 @@ class Project(object):
             operator.attrgetter('name'),
             'Starting',
             get_deps,
+            fail_check=lambda obj: not obj.containers(),
         )

         return containers
@@ -353,18 +357,27 @@ class Project(object):
         return containers

     def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, memory=None,
-              build_args=None, gzip=False, parallel_build=False):
+              build_args=None, gzip=False, parallel_build=False, rm=True, silent=False, cli=False,
+              progress=None):

         services = []
         for service in self.get_services(service_names):
             if service.can_be_built():
                 services.append(service)
-            else:
+            elif not silent:
                 log.info('%s uses an image, skipping' % service.name)

-        def build_service(service):
-            service.build(no_cache, pull, force_rm, memory, build_args, gzip)
+        if cli:
+            log.warning("Native build is an experimental feature and could change at any time")
+            if parallel_build:
+                log.warning("Flag '--parallel' is ignored when building with "
+                            "COMPOSE_DOCKER_CLI_BUILD=1")
+            if gzip:
+                log.warning("Flag '--compress' is ignored when building with "
+                            "COMPOSE_DOCKER_CLI_BUILD=1")
+
+        def build_service(service):
+            service.build(no_cache, pull, force_rm, memory, build_args, gzip, rm, silent, cli, progress)
+
         if parallel_build:
             _, errors = parallel.parallel_execute(
                 services,
@@ -401,11 +414,13 @@ class Project(object):
             detached=True,
             start=False)

-    def events(self, service_names=None):
+    def _legacy_event_processor(self, service_names):
+        # Only for v1 files or when Compose is forced to use an older API version
         def build_container_event(event, container):
             time = datetime.datetime.fromtimestamp(event['time'])
             time = time.replace(
-                microsecond=microseconds_from_time_nano(event['timeNano']))
+                microsecond=microseconds_from_time_nano(event['timeNano'])
+            )
             return {
                 'time': time,
                 'type': 'container',
@@ -424,17 +439,15 @@ class Project(object):
             filters={'label': self.labels()},
             decode=True
         ):
-            # The first part of this condition is a guard against some events
-            # broadcasted by swarm that don't have a status field.
+            # This is a guard against some events broadcasted by swarm that
+            # don't have a status field.
             # See https://github.com/docker/compose/issues/3316
-            if 'status' not in event or event['status'] in IMAGE_EVENTS:
-                # We don't receive any image events because labels aren't applied
-                # to images
+            if 'status' not in event:
                 continue

             # TODO: get labels from the API v1.22 , see github issue 2618
             try:
-                # this can fail if the container has been removed
+                # this can fail if the container has been removed or if the event
+                # refers to an image
                 container = Container.from_id(self.client, event['id'])
             except APIError:
                 continue
@@ -442,6 +455,56 @@ class Project(object):
                 continue
             yield build_container_event(event, container)

+    def events(self, service_names=None):
+        if version_lt(self.client.api_version, '1.22'):
+            # New, better event API was introduced in 1.22.
+            return self._legacy_event_processor(service_names)
+
+        def build_container_event(event):
+            container_attrs = event['Actor']['Attributes']
+            time = datetime.datetime.fromtimestamp(event['time'])
+            time = time.replace(
+                microsecond=microseconds_from_time_nano(event['timeNano'])
+            )
+
+            container = None
+            try:
+                container = Container.from_id(self.client, event['id'])
+            except APIError:
+                # Container may have been removed (e.g. if this is a destroy event)
+                pass
+
+            return {
+                'time': time,
+                'type': 'container',
+                'action': event['status'],
+                'id': event['Actor']['ID'],
+                'service': container_attrs.get(LABEL_SERVICE),
+                'attributes': dict([
+                    (k, v) for k, v in container_attrs.items()
+                    if not k.startswith('com.docker.compose.')
+                ]),
+                'container': container,
+            }
+
+        def yield_loop(service_names):
+            for event in self.client.events(
+                filters={'label': self.labels()},
+                decode=True
+            ):
+                # TODO: support other event types
+                if event.get('Type') != 'container':
+                    continue
+
+                try:
+                    if event['Actor']['Attributes'][LABEL_SERVICE] not in service_names:
+                        continue
+                except KeyError:
+                    continue
+                yield build_container_event(event)
+
+        return yield_loop(set(service_names) if service_names else self.service_names)
+
     def up(self,
            service_names=None,
            start_deps=True,
@@ -458,8 +521,12 @@ class Project(object):
            reset_container_image=False,
            renew_anonymous_volumes=False,
            silent=False,
+           cli=False,
            ):

+        if cli:
+            log.warning("Native build is an experimental feature and could change at any time")
+
         self.initialize()
         if not ignore_orphans:
             self.find_orphan_containers(remove_orphans)
@@ -472,7 +539,7 @@ class Project(object):
             include_deps=start_deps)

         for svc in services:
-            svc.ensure_image_exists(do_build=do_build, silent=silent)
+            svc.ensure_image_exists(do_build=do_build, silent=silent, cli=cli)
         plans = self._get_convergence_plans(
             services, strategy, always_recreate_deps=always_recreate_deps)
@@ -535,8 +602,10 @@ class Project(object):
                          ", ".join(updated_dependencies))
                 containers_stopped = any(
                     service.containers(stopped=True, filters={'status': ['created', 'exited']}))
-                has_links = any(c.get('HostConfig.Links') for c in service.containers())
-                if always_recreate_deps or containers_stopped or not has_links:
+                service_has_links = any(service.get_link_names())
+                container_has_links = any(c.get('HostConfig.Links') for c in service.containers())
+                should_recreate_for_links = service_has_links ^ container_has_links
+                if always_recreate_deps or containers_stopped or should_recreate_for_links:
                     plan = service.convergence_plan(ConvergenceStrategy.always)
                 else:
                     plan = service.convergence_plan(strategy)
@@ -550,6 +619,9 @@ class Project(object):
     def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False,
              include_deps=False):
         services = self.get_services(service_names, include_deps)
+        images_to_build = {service.image_name for service in services if service.can_be_built()}
+        services_to_pull = [service for service in services if service.image_name not in images_to_build]

         msg = not silent and 'Pulling' or None

         if parallel_pull:
@@ -575,7 +647,7 @@ class Project(object):
             )

             _, errors = parallel.parallel_execute(
-                services,
+                services_to_pull,
                 pull_service,
                 operator.attrgetter('name'),
                 msg,
@@ -588,12 +660,19 @@ class Project(object):
             raise ProjectError(combined_errors)

         else:
-            for service in services:
+            for service in services_to_pull:
                 service.pull(ignore_pull_failures, silent=silent)

     def push(self, service_names=None, ignore_push_failures=False):
+        unique_images = set()
         for service in self.get_services(service_names, include_deps=False):
-            service.push(ignore_push_failures)
+            # Considering <image> and <image:latest> as the same
+            repo, tag, sep = parse_repository_tag(service.image_name)
+            service_image_name = sep.join((repo, tag)) if tag else sep.join((repo, 'latest'))
+
+            if service_image_name not in unique_images:
+                service.push(ignore_push_failures)
+                unique_images.add(service_image_name)

     def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
         ctnrs = list(filter(None, [
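The dedup rule above treats `repo` and `repo:latest` as the same image, so an image shared by several services is pushed once. A self-contained sketch; `parse_repository_tag` below is deliberately simplified (the real helper also handles digests and registry ports):

```python
def parse_repository_tag(repo_path):
    # Simplified: handles name[:tag] only.
    if ':' in repo_path.rsplit('/', 1)[-1]:
        repo, tag = repo_path.rsplit(':', 1)
        return repo, tag, ':'
    return repo_path, '', ':'

unique_images = set()
for image_name in ['myapp', 'myapp:latest', 'myapp:1.0']:
    repo, tag, sep = parse_repository_tag(image_name)
    normalized = sep.join((repo, tag)) if tag else sep.join((repo, 'latest'))
    if normalized not in unique_images:
        print('pushing', normalized)
        unique_images.add(normalized)
# pushing myapp:latest
# pushing myapp:1.0     ('myapp' and 'myapp:latest' collapsed into one push)
```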
@@ -627,7 +706,7 @@ class Project(object):

     def find_orphan_containers(self, remove_orphans):
         def _find():
-            containers = self._labeled_containers()
+            containers = set(self._labeled_containers() + self._labeled_containers(stopped=True))
             for ctnr in containers:
                 service_name = ctnr.labels.get(LABEL_SERVICE)
                 if service_name not in self.service_names:
@@ -638,7 +717,10 @@ class Project(object):
         if remove_orphans:
             for ctnr in orphans:
                 log.info('Removing orphan container "{0}"'.format(ctnr.name))
-                ctnr.kill()
+                try:
+                    ctnr.kill()
+                except APIError:
+                    pass
                 ctnr.remove(force=True)
         else:
             log.warning(
@@ -666,10 +748,11 @@ class Project(object):

     def build_container_operation_with_timeout_func(self, operation, options):
         def container_operation_with_timeout(container):
-            if options.get('timeout') is None:
+            _options = options.copy()
+            if _options.get('timeout') is None:
                 service = self.get_service(container.service)
-                options['timeout'] = service.stop_timeout(None)
-            return getattr(container, operation)(**options)
+                _options['timeout'] = service.stop_timeout(None)
+            return getattr(container, operation)(**_options)
         return container_operation_with_timeout
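The bug the `options.copy()` fixes: the closure previously mutated the shared `options` dict, so the first container's computed stop timeout leaked into every later call. A minimal reproduction of the fixed pattern with plain dicts:

```python
def make_operation(options):
    def operation(container_timeout):
        _options = options.copy()              # the fix: work on a copy
        if _options.get('timeout') is None:
            _options['timeout'] = container_timeout
        return _options['timeout']
    return operation

shared = {'timeout': None}
op = make_operation(shared)
print(op(10), op(30))  # 10 30 -- each container gets its own computed timeout
print(shared)          # {'timeout': None} -- the caller's dict is untouched
```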
@@ -712,13 +795,13 @@ def get_secrets(service, service_secrets, secret_defs):
                              .format(service=service, secret=secret.source))

         if secret_def.get('external'):
-            log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
-                     "External secrets are not available to containers created by "
-                     "docker-compose.".format(service=service, secret=secret.source))
+            log.warning("Service \"{service}\" uses secret \"{secret}\" which is external. "
+                        "External secrets are not available to containers created by "
+                        "docker-compose.".format(service=service, secret=secret.source))
             continue

         if secret.uid or secret.gid or secret.mode:
-            log.warn(
+            log.warning(
                 "Service \"{service}\" uses secret \"{secret}\" with uid, "
                 "gid, or mode. These fields are not supported by this "
                 "implementation of the Compose file".format(
@@ -726,7 +809,15 @@ def get_secrets(service, service_secrets, secret_defs):
                 )
             )

-        secrets.append({'secret': secret, 'file': secret_def.get('file')})
+        secret_file = secret_def.get('file')
+        if not path.isfile(str(secret_file)):
+            log.warning(
+                "Service \"{service}\" uses an undefined secret file \"{secret_file}\", "
+                "the following file should be created \"{secret_file}\"".format(
+                    service=service, secret_file=secret_file
+                )
+            )
+        secrets.append({'secret': secret, 'file': secret_file})

     return secrets
@@ -2,10 +2,12 @@ from __future__ import absolute_import
 from __future__ import unicode_literals

 import itertools
+import json
 import logging
 import os
 import re
 import sys
+import tempfile
 from collections import namedtuple
 from collections import OrderedDict
 from operator import attrgetter
@@ -27,6 +29,7 @@ from . import __version__
 from . import const
 from . import progress_stream
 from .config import DOCKER_CONFIG_KEYS
+from .config import is_url
 from .config import merge_environment
 from .config import merge_labels
 from .config.errors import DependencyError
@@ -58,10 +61,13 @@ from .utils import parse_seconds_float
 from .utils import truncate_id
 from .utils import unique_everseen

+if six.PY2:
+    import subprocess32 as subprocess
+else:
+    import subprocess
+
 log = logging.getLogger(__name__)


 HOST_CONFIG_KEYS = [
     'cap_add',
     'cap_drop',
@@ -85,6 +91,7 @@ HOST_CONFIG_KEYS = [
     'group_add',
     'init',
     'ipc',
+    'isolation',
     'read_only',
     'log_driver',
     'log_opt',
@@ -127,8 +134,7 @@ class NoSuchImageError(Exception):
     pass


-ServiceName = namedtuple('ServiceName', 'project service number slug')
-
+ServiceName = namedtuple('ServiceName', 'project service number')

 ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
@@ -165,20 +171,21 @@ class BuildAction(enum.Enum):

 class Service(object):
     def __init__(
-        self,
-        name,
-        client=None,
-        project='default',
-        use_networking=False,
-        links=None,
-        volumes_from=None,
-        network_mode=None,
-        networks=None,
-        secrets=None,
-        scale=None,
-        pid_mode=None,
-        default_platform=None,
-        **options
+            self,
+            name,
+            client=None,
+            project='default',
+            use_networking=False,
+            links=None,
+            volumes_from=None,
+            network_mode=None,
+            networks=None,
+            secrets=None,
+            scale=1,
+            pid_mode=None,
+            default_platform=None,
+            extra_labels=[],
+            **options
     ):
         self.name = name
         self.client = client
@@ -190,14 +197,17 @@ class Service(object):
         self.pid_mode = pid_mode or PidMode(None)
         self.networks = networks or {}
         self.secrets = secrets or []
-        self.scale_num = scale or 1
+        self.scale_num = scale
         self.default_platform = default_platform
         self.options = options
+        self.extra_labels = extra_labels

     def __repr__(self):
         return '<Service: {}>'.format(self.name)

-    def containers(self, stopped=False, one_off=False, filters={}, labels=None):
+    def containers(self, stopped=False, one_off=False, filters=None, labels=None):
+        if filters is None:
+            filters = {}
         filters.update({'label': self.labels(one_off=one_off) + (labels or [])})

         result = list(filter(None, [
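Why `filters={}` as a default was dangerous: a mutable default is created once at function definition time and shared across calls, so the `filters.update(...)` on the next line leaked state between invocations. A minimal demonstration of the pitfall and the `None`-sentinel fix:

```python
def containers_buggy(filters={}):
    filters.update({'label': ['svc=web']})
    return filters

a = containers_buggy()
b = containers_buggy()
print(a is b)  # True -- the same dict object is reused across calls

def containers_fixed(filters=None):
    if filters is None:
        filters = {}
    filters.update({'label': ['svc=web']})
    return filters

print(containers_fixed() is containers_fixed())  # False -- fresh dict each call
```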
@@ -205,7 +215,7 @@ class Service(object):
             for container in self.client.containers(
                 all=stopped,
                 filters=filters)])
             )
         )
         if result:
             return result
@@ -237,15 +247,15 @@ class Service(object):

     def show_scale_warnings(self, desired_num):
         if self.custom_container_name and desired_num > 1:
-            log.warn('The "%s" service is using the custom container name "%s". '
-                     'Docker requires each container to have a unique name. '
-                     'Remove the custom name to scale the service.'
-                     % (self.name, self.custom_container_name))
+            log.warning('The "%s" service is using the custom container name "%s". '
+                        'Docker requires each container to have a unique name. '
+                        'Remove the custom name to scale the service.'
+                        % (self.name, self.custom_container_name))

         if self.specifies_host_port() and desired_num > 1:
-            log.warn('The "%s" service specifies a port on the host. If multiple containers '
-                     'for this service are created on a single host, the port will clash.'
-                     % self.name)
+            log.warning('The "%s" service specifies a port on the host. If multiple containers '
+                        'for this service are created on a single host, the port will clash.'
+                        % self.name)

     def scale(self, desired_num, timeout=None):
         """
@@ -287,7 +297,7 @@ class Service(object):
                 c for c in stopped_containers if self._containers_have_diverged([c])
             ]
             for c in divergent_containers:
-                    c.remove()
+                c.remove()

             all_containers = list(set(all_containers) - set(divergent_containers))
@@ -335,9 +345,9 @@ class Service(object):
             raise OperationFailedError("Cannot create container for service %s: %s" %
                                        (self.name, ex.explanation))

-    def ensure_image_exists(self, do_build=BuildAction.none, silent=False):
+    def ensure_image_exists(self, do_build=BuildAction.none, silent=False, cli=False):
         if self.can_be_built() and do_build == BuildAction.force:
-            self.build()
+            self.build(cli=cli)
             return

         try:
@@ -353,12 +363,18 @@ class Service(object):
         if do_build == BuildAction.skip:
             raise NeedsBuildError(self)

-        self.build()
-        log.warn(
+        self.build(cli=cli)
+        log.warning(
             "Image for service {} was built because it did not already exist. To "
             "rebuild this image you must use `docker-compose build` or "
             "`docker-compose up --build`.".format(self.name))

+    def get_image_registry_data(self):
+        try:
+            return self.client.inspect_distribution(self.image_name)
+        except APIError:
+            raise NoSuchImageError("Image '{}' not found".format(self.image_name))
+
     def image(self):
         try:
             return self.client.inspect_image(self.image_name)
@@ -388,8 +404,8 @@ class Service(object):
             return ConvergencePlan('start', containers)

         if (
-            strategy is ConvergenceStrategy.always or
-            self._containers_have_diverged(containers)
+                strategy is ConvergenceStrategy.always or
+                self._containers_have_diverged(containers)
         ):
             return ConvergencePlan('recreate', containers)
@@ -443,13 +459,11 @@ class Service(object):

         containers, errors = parallel_execute(
             [
-                ServiceName(self.project, self.name, index, generate_random_id())
+                ServiceName(self.project, self.name, index)
                 for index in range(i, i + scale)
             ],
             lambda service_name: create_and_start(self, service_name.number),
-            lambda service_name: self.get_container_name(
-                service_name.service, service_name.number, service_name.slug
-            ),
+            lambda service_name: self.get_container_name(service_name.service, service_name.number),
             "Creating"
         )
         for error in errors.values():
@@ -459,50 +473,51 @@ class Service(object):

    def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
                                      renew_anonymous_volumes):
        if scale is not None and len(containers) > scale:
            self._downscale(containers[scale:], timeout)
            containers = containers[:scale]

        def recreate(container):
            return self.recreate_container(
                container, timeout=timeout, attach_logs=not detached,
                start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
            )

        containers, errors = parallel_execute(
            containers,
            recreate,
            lambda c: c.name,
            "Recreating",
        )
        for error in errors.values():
            raise OperationFailedError(error)

        if scale is not None and len(containers) < scale:
            containers.extend(self._execute_convergence_create(
                scale - len(containers), detached, start
            ))
        return containers

    def _execute_convergence_start(self, containers, scale, timeout, detached, start):
        if scale is not None and len(containers) > scale:
            self._downscale(containers[scale:], timeout)
            containers = containers[:scale]
        if start:
            _, errors = parallel_execute(
                containers,
                lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
                lambda c: c.name,
                "Starting",
            )

            for error in errors.values():
                raise OperationFailedError(error)

        if scale is not None and len(containers) < scale:
            containers.extend(self._execute_convergence_create(
                scale - len(containers), detached, start
            ))
        return containers
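The downscale branch above is plain list slicing; a toy illustration (not Compose code):

```python
containers = ['c1', 'c2', 'c3', 'c4']
scale = 2
to_stop = containers[scale:]     # ['c3', 'c4'] are handed to _downscale()
containers = containers[:scale]  # ['c1', 'c2'] are kept and converged
assert (to_stop, containers) == (['c3', 'c4'], ['c1', 'c2'])
```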
    def _downscale(self, containers, timeout=None):
        def stop_and_remove(container):

@@ -609,6 +624,8 @@ class Service(object):
        try:
            container.start()
        except APIError as ex:
+            if "driver failed programming external connectivity" in ex.explanation:
+                log.warn("Host is already in use by another container")
            raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
        return container
@@ -678,6 +695,7 @@ class Service(object):
            'links': self.get_link_names(),
            'net': self.network_mode.id,
            'networks': self.networks,
+            'secrets': self.secrets,
            'volumes_from': [
                (v.source.name, v.mode)
                for v in self.volumes_from if isinstance(v.source, Service)
@@ -688,11 +706,11 @@ class Service(object):
        net_name = self.network_mode.service_name
        pid_namespace = self.pid_mode.service_name
        return (
            self.get_linked_service_names() +
            self.get_volumes_from_names() +
            ([net_name] if net_name else []) +
            ([pid_namespace] if pid_namespace else []) +
            list(self.options.get('depends_on', {}).keys())
        )

    def get_dependency_configs(self):
@@ -734,16 +752,18 @@ class Service(object):
        return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)]

    def _next_container_number(self, one_off=False):
+        if one_off:
+            return None
        containers = itertools.chain(
            self._fetch_containers(
                all=True,
-                filters={'label': self.labels(one_off=one_off)}
+                filters={'label': self.labels(one_off=False)}
            ), self._fetch_containers(
                all=True,
-                filters={'label': self.labels(one_off=one_off, legacy=True)}
+                filters={'label': self.labels(one_off=False, legacy=True)}
            )
        )
-        numbers = [c.number for c in containers]
+        numbers = [c.number for c in containers if c.number is not None]
        return 1 if not numbers else max(numbers) + 1

    def _fetch_containers(self, **fetch_options):
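To make the numbering rule above concrete, a minimal standalone sketch (illustrative names, not Compose's real objects):

```python
def next_container_number(existing_numbers):
    # One-off containers no longer carry a number, so they show up as None
    # and must be skipped before taking the maximum.
    numbers = [n for n in existing_numbers if n is not None]
    return 1 if not numbers else max(numbers) + 1

assert next_container_number([]) == 1
assert next_container_number([1, 2, None]) == 3
```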
@@ -821,7 +841,7 @@ class Service(object):
                          one_off=False,
                          previous_container=None):
        add_config_hash = (not one_off and not override_options)
-        slug = generate_random_id() if previous_container is None else previous_container.full_slug
+        slug = generate_random_id() if one_off else None

        container_options = dict(
            (k, self.options[k])
@@ -830,7 +850,7 @@ class Service(object):
        container_options.update(override_options)

        if not container_options.get('name'):
-            container_options['name'] = self.get_container_name(self.name, number, slug, one_off)
+            container_options['name'] = self.get_container_name(self.name, number, slug)

        container_options.setdefault('detach', True)
@@ -880,7 +900,7 @@ class Service(object):

        container_options['labels'] = build_container_labels(
            container_options.get('labels', {}),
-            self.labels(one_off=one_off),
+            self.labels(one_off=one_off) + self.extra_labels,
            number,
            self.config_hash if add_config_hash else None,
            slug
@@ -1039,8 +1059,11 @@ class Service(object):
        return [build_spec(secret) for secret in self.secrets]

    def build(self, no_cache=False, pull=False, force_rm=False, memory=None, build_args_override=None,
-              gzip=False):
-        log.info('Building %s' % self.name)
+              gzip=False, rm=True, silent=False, cli=False, progress=None):
+        output_stream = open(os.devnull, 'w')
+        if not silent:
+            output_stream = sys.stdout
+            log.info('Building %s' % self.name)

        build_opts = self.options.get('build', {})
@@ -1057,15 +1080,16 @@ class Service(object):
                'Impossible to perform platform-targeted builds for API version < 1.35'
            )

-        build_output = self.client.build(
+        builder = self.client if not cli else _CLIBuilder(progress)
+        build_output = builder.build(
            path=path,
            tag=self.image_name,
-            rm=True,
+            rm=rm,
            forcerm=force_rm,
            pull=pull,
            nocache=no_cache,
            dockerfile=build_opts.get('dockerfile', None),
-            cache_from=build_opts.get('cache_from', None),
+            cache_from=self.get_cache_from(build_opts),
            labels=build_opts.get('labels', None),
            buildargs=build_args,
            network_mode=build_opts.get('network', None),
@@ -1081,7 +1105,7 @@ class Service(object):
        )

        try:
-            all_events = list(stream_output(build_output, sys.stdout))
+            all_events = list(stream_output(build_output, output_stream))
        except StreamOutputError as e:
            raise BuildError(self, six.text_type(e))
@@ -1103,6 +1127,12 @@ class Service(object):

        return image_id

+    def get_cache_from(self, build_opts):
+        cache_from = build_opts.get('cache_from', None)
+        if cache_from is not None:
+            cache_from = [tag for tag in cache_from if tag]
+        return cache_from
+
    def can_be_built(self):
        return 'build' in self.options
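A standalone restatement of the filtering in `get_cache_from` (illustrative, with made-up tags):

```python
def get_cache_from(build_opts):
    # Drop empty or None entries so the build never receives a blank --cache-from.
    cache_from = build_opts.get('cache_from', None)
    if cache_from is not None:
        cache_from = [tag for tag in cache_from if tag]
    return cache_from

assert get_cache_from({'cache_from': ['myapp:latest', '', None]}) == ['myapp:latest']
assert get_cache_from({}) is None
```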
@@ -1118,12 +1148,12 @@ class Service(object):
    def custom_container_name(self):
        return self.options.get('container_name')

-    def get_container_name(self, service_name, number, slug, one_off=False):
-        if self.custom_container_name and not one_off:
+    def get_container_name(self, service_name, number, slug=None):
+        if self.custom_container_name and slug is None:
            return self.custom_container_name

        container_name = build_container_name(
-            self.project, service_name, number, slug, one_off,
+            self.project, service_name, number, slug,
        )
        ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
        if container_name in ext_links_origins:
@@ -1144,6 +1174,9 @@ class Service(object):
        try:
            self.client.remove_image(self.image_name)
            return True
+        except ImageNotFound:
+            log.warning("Image %s not found.", self.image_name)
+            return False
        except APIError as e:
            log.error("Failed to remove image for service %s: %s", self.name, e)
            return False
@@ -1309,7 +1342,7 @@ class ServicePidMode(PidMode):
        if containers:
            return 'container:' + containers[0].id

-        log.warn(
+        log.warning(
            "Service %s is trying to use reuse the PID namespace "
            "of another service that is not running." % (self.service_name)
        )
@@ -1372,21 +1405,21 @@ class ServiceNetworkMode(object):
        if containers:
            return 'container:' + containers[0].id

-        log.warn("Service %s is trying to use reuse the network stack "
-                 "of another service that is not running." % (self.id))
+        log.warning("Service %s is trying to use reuse the network stack "
+                    "of another service that is not running." % (self.id))
        return None


# Names


-def build_container_name(project, service, number, slug, one_off=False):
+def build_container_name(project, service, number, slug=None):
    bits = [project.lstrip('-_'), service]
-    if one_off:
-        bits.append('run')
-    return '_'.join(
-        bits + ([str(number), truncate_id(slug)] if slug else [str(number)])
-    )
+    if slug:
+        bits.extend(['run', truncate_id(slug)])
+    else:
+        bits.append(str(number))
+    return '_'.join(bits)
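To see what the new signature produces, a self-contained check (`truncate_id` stubbed to a 12-character prefix, which is an assumption made for illustration):

```python
def truncate_id(value):
    return value[:12]

def build_container_name(project, service, number, slug=None):
    bits = [project.lstrip('-_'), service]
    if slug:
        bits.extend(['run', truncate_id(slug)])
    else:
        bits.append(str(number))
    return '_'.join(bits)

# Numbered service container vs. slugged one-off container:
assert build_container_name('myapp', 'web', 1) == 'myapp_web_1'
assert build_container_name('myapp', 'web', None, '0123456789abcdef') == 'myapp_web_run_0123456789ab'
```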
# Images

@@ -1520,11 +1553,11 @@ def warn_on_masked_volume(volumes_option, container_volumes, service):

    for volume in volumes_option:
        if (
            volume.external and
            volume.internal in container_volumes and
            container_volumes.get(volume.internal) != volume.external
        ):
-            log.warn((
+            log.warning((
                "Service \"{service}\" is using volume \"{volume}\" from the "
                "previous container. Host mapping \"{host_path}\" has no effect. "
                "Remove the existing containers (with `docker-compose rm {service}`) "
@@ -1569,14 +1602,17 @@ def build_mount(mount_spec):
        read_only=mount_spec.read_only, consistency=mount_spec.consistency, **kwargs
    )


# Labels


def build_container_labels(label_options, service_labels, number, config_hash, slug):
    labels = dict(label_options or {})
    labels.update(label.split('=', 1) for label in service_labels)
-    labels[LABEL_CONTAINER_NUMBER] = str(number)
-    labels[LABEL_SLUG] = slug
+    if number is not None:
+        labels[LABEL_CONTAINER_NUMBER] = str(number)
+    if slug is not None:
+        labels[LABEL_SLUG] = slug
    labels[LABEL_VERSION] = __version__

    if config_hash:
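A toy illustration of the guarded assignments above (the label key strings are spelled out literally here and should be treated as assumptions):

```python
LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
LABEL_SLUG = 'com.docker.compose.slug'

def number_and_slug_labels(number, slug):
    labels = {}
    if number is not None:
        labels[LABEL_CONTAINER_NUMBER] = str(number)
    if slug is not None:
        labels[LABEL_SLUG] = slug
    return labels

# A numbered service container gets a number label but no slug;
# a one-off gets a slug label but no number.
assert number_and_slug_labels(2, None) == {LABEL_CONTAINER_NUMBER: '2'}
assert number_and_slug_labels(None, 'abc123') == {LABEL_SLUG: 'abc123'}
```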
@@ -1621,6 +1657,7 @@ def format_environment(environment):
        if isinstance(value, six.binary_type):
            value = value.decode('utf-8')
        return '{key}={value}'.format(key=key, value=value)
+
    return [format_env(*item) for item in environment.items()]
@@ -1673,7 +1710,143 @@ def rewrite_build_path(path):
    if not six.PY3 and not IS_WINDOWS_PLATFORM:
        path = path.encode('utf8')

-    if IS_WINDOWS_PLATFORM and not path.startswith(WINDOWS_LONGPATH_PREFIX):
+    if IS_WINDOWS_PLATFORM and not is_url(path) and not path.startswith(WINDOWS_LONGPATH_PREFIX):
        path = WINDOWS_LONGPATH_PREFIX + os.path.normpath(path)

    return path
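A hedged sketch of why the new `is_url` guard matters: remote build contexts must not receive the Windows long-path prefix. `is_url` is stubbed out below; Compose's real helper lives elsewhere, so treat it as an assumption.

```python
import ntpath  # Windows path semantics, importable on any platform

WINDOWS_LONGPATH_PREFIX = '\\\\?\\'

def is_url(path):
    # Simplified stand-in for Compose's helper (an assumption).
    return path.startswith(('http://', 'https://', 'git://', 'github.com/'))

def rewrite_build_path(path):
    if not is_url(path) and not path.startswith(WINDOWS_LONGPATH_PREFIX):
        path = WINDOWS_LONGPATH_PREFIX + ntpath.normpath(path)
    return path

assert rewrite_build_path('https://github.com/docker/compose.git') == 'https://github.com/docker/compose.git'
assert rewrite_build_path('C:/code/app') == '\\\\?\\C:\\code\\app'
```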
class _CLIBuilder(object):
    def __init__(self, progress):
        self._progress = progress

    def build(self, path, tag=None, quiet=False, fileobj=None,
              nocache=False, rm=False, timeout=None,
              custom_context=False, encoding=None, pull=False,
              forcerm=False, dockerfile=None, container_limits=None,
              decode=False, buildargs=None, gzip=False, shmsize=None,
              labels=None, cache_from=None, target=None, network_mode=None,
              squash=None, extra_hosts=None, platform=None, isolation=None,
              use_config_proxy=True):
        """
        Args:
            path (str): Path to the directory containing the Dockerfile
            buildargs (dict): A dictionary of build arguments
            cache_from (:py:class:`list`): A list of images used for build
                cache resolution
            container_limits (dict): A dictionary of limits applied to each
                container created by the build process. Valid keys:
                - memory (int): set memory limit for build
                - memswap (int): Total memory (memory + swap), -1 to disable
                    swap
                - cpushares (int): CPU shares (relative weight)
                - cpusetcpus (str): CPUs in which to allow execution, e.g.,
                    ``"0-3"``, ``"0,1"``
            custom_context (bool): Optional if using ``fileobj``
            decode (bool): If set to ``True``, the returned stream will be
                decoded into dicts on the fly. Default ``False``
            dockerfile (str): path within the build context to the Dockerfile
            encoding (str): The encoding for a stream. Set to ``gzip`` for
                compressing
            extra_hosts (dict): Extra hosts to add to /etc/hosts in building
                containers, as a mapping of hostname to IP address.
            fileobj: A file object to use as the Dockerfile. (Or a file-like
                object)
            forcerm (bool): Always remove intermediate containers, even after
                unsuccessful builds
            isolation (str): Isolation technology used during build.
                Default: `None`.
            labels (dict): A dictionary of labels to set on the image
            network_mode (str): networking mode for the run commands during
                build
            nocache (bool): Don't use the cache when set to ``True``
            platform (str): Platform in the format ``os[/arch[/variant]]``
            pull (bool): Downloads any updates to the FROM image in Dockerfiles
            quiet (bool): Whether to return the status
            rm (bool): Remove intermediate containers. The ``docker build``
                command now defaults to ``--rm=true``, but we have kept the old
                default of `False` to preserve backward compatibility
            shmsize (int): Size of `/dev/shm` in bytes. The size must be
                greater than 0. If omitted the system uses 64MB
            squash (bool): Squash the resulting images layers into a
                single layer.
            tag (str): A tag to add to the final image
            target (str): Name of the build-stage to build in a multi-stage
                Dockerfile
            timeout (int): HTTP timeout
            use_config_proxy (bool): If ``True``, and if the docker client
                configuration file (``~/.docker/config.json`` by default)
                contains a proxy configuration, the corresponding environment
                variables will be set in the container being built.
        Returns:
            A generator for the build output.
        """
        if dockerfile:
            dockerfile = os.path.join(path, dockerfile)
        iidfile = tempfile.mktemp()

        command_builder = _CommandBuilder()
        command_builder.add_params("--build-arg", buildargs)
        command_builder.add_list("--cache-from", cache_from)
        command_builder.add_arg("--file", dockerfile)
        command_builder.add_flag("--force-rm", forcerm)
        command_builder.add_arg("--memory", container_limits.get("memory"))
        command_builder.add_flag("--no-cache", nocache)
        command_builder.add_arg("--progress", self._progress)
        command_builder.add_flag("--pull", pull)
        command_builder.add_arg("--tag", tag)
        command_builder.add_arg("--target", target)
        command_builder.add_arg("--iidfile", iidfile)
        args = command_builder.build([path])

        magic_word = "Successfully built "
        appear = False
        with subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True) as p:
            while True:
                line = p.stdout.readline()
                if not line:
                    break
                # Fix non ascii chars on Python2. To remove when #6890 is complete.
                if six.PY2:
                    magic_word = str(magic_word)
                if line.startswith(magic_word):
                    appear = True
                yield json.dumps({"stream": line})

        with open(iidfile) as f:
            line = f.readline()
            image_id = line.split(":")[1].strip()
        os.remove(iidfile)

        # In case of `DOCKER_BUILDKIT=1`
        # there is no success message already present in the output.
        # Since that's the way `Service::build` gets the `image_id`
        # it has to be added `manually`
        if not appear:
            yield json.dumps({"stream": "{}{}\n".format(magic_word, image_id)})
class _CommandBuilder(object):
    def __init__(self):
        self._args = ["docker", "build"]

    def add_arg(self, name, value):
        if value:
            self._args.extend([name, str(value)])

    def add_flag(self, name, flag):
        if flag:
            self._args.extend([name])

    def add_params(self, name, params):
        if params:
            for key, val in params.items():
                self._args.extend([name, "{}={}".format(key, val)])

    def add_list(self, name, values):
        if values:
            for val in values:
                self._args.extend([name, val])

    def build(self, args):
        return self._args + args
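For reference, this is roughly how `_CLIBuilder.build` assembles its command line through `_CommandBuilder` (values invented for illustration):

```python
builder = _CommandBuilder()
builder.add_params("--build-arg", {"HTTP_PROXY": "http://proxy:3128"})
builder.add_list("--cache-from", ["myapp/web:latest"])
builder.add_flag("--pull", True)
builder.add_flag("--no-cache", False)  # omitted entirely when the flag is falsy
builder.add_arg("--tag", "myapp/web:dev")

assert builder.build(["."]) == [
    "docker", "build",
    "--build-arg", "HTTP_PROXY=http://proxy:3128",
    "--cache-from", "myapp/web:latest",
    "--pull",
    "--tag", "myapp/web:dev",
    ".",
]
```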
@@ -3,7 +3,6 @@ from __future__ import unicode_literals

import codecs
import hashlib
import json
import json.decoder
import logging
import ntpath
@@ -127,7 +127,7 @@ class ProjectVolumes(object):
        try:
            volume.remove()
        except NotFound:
-            log.warn("Volume %s not found.", volume.true_name)
+            log.warning("Volume %s not found.", volume.true_name)

    def initialize(self):
        try:
@@ -209,7 +209,7 @@ def check_remote_volume_config(remote, local):
        if k.startswith('com.docker.'):  # We are only interested in user-specified labels
            continue
        if remote_labels.get(k) != local_labels.get(k):
-            log.warn(
+            log.warning(
                'Volume {}: label "{}" has changed. It may need to be'
                ' recreated.'.format(local.name, k)
            )
@@ -110,11 +110,14 @@ _docker_compose_build() {
			__docker_compose_nospace
			return
			;;
+		--memory|-m)
+			return
+			;;
	esac

	case "$cur" in
		-*)
-			COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory -m --no-cache --no-rm --pull --parallel -q --quiet" -- "$cur" ) )
			;;
		*)
			__docker_compose_complete_services --filter source=build
@@ -147,7 +150,7 @@ _docker_compose_config() {
		;;
	esac

-	COMPREPLY=( $( compgen -W "--hash --help --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) )
+	COMPREPLY=( $( compgen -W "--hash --help --no-interpolate --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) )
}
@@ -181,6 +184,10 @@ _docker_compose_docker_compose() {
			_filedir -d
			return
			;;
+		--env-file)
+			_filedir
+			return
+			;;
		$(__docker_compose_to_extglob "$daemon_options_with_args") )
			return
			;;
@@ -361,7 +368,7 @@ _docker_compose_ps() {

	case "$cur" in
		-*)
-			COMPREPLY=( $( compgen -W "--help --quiet -q --services --filter" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--all -a --filter --help --quiet -q --services" -- "$cur" ) )
			;;
		*)
			__docker_compose_complete_services
@@ -609,6 +616,7 @@ _docker_compose() {
		--tlsverify
	"
	local daemon_options_with_args="
+		--env-file
		--file -f
		--host -H
		--project-directory
@@ -12,6 +12,7 @@ end

complete -c docker-compose -s f -l file -r -d 'Specify an alternate compose file'
complete -c docker-compose -s p -l project-name -x -d 'Specify an alternate project name'
+complete -c docker-compose -l env-file -r -d 'Specify an alternate environment file (default: .env)'
complete -c docker-compose -l verbose -d 'Show more output'
complete -c docker-compose -s H -l host -x -d 'Daemon socket to connect to'
complete -c docker-compose -l tls -d 'Use TLS; implied by --tlsverify'
@@ -113,10 +113,12 @@ __docker-compose_subcommand() {
                $opts_help \
                "*--build-arg=[Set build-time variables for one service.]:<varname>=<value>: " \
                '--force-rm[Always remove intermediate containers.]' \
                '(--quiet -q)'{--quiet,-q}'[Curb build output]' \
                '(--memory -m)'{--memory,-m}'[Memory limit for the build container.]' \
                '--no-cache[Do not use cache when building the image.]' \
                '--pull[Always attempt to pull a newer version of the image.]' \
                '--compress[Compress the build context using gzip.]' \
                '--parallel[Build images in parallel.]' \
                '*:services:__docker-compose_services_from_build' && ret=0
            ;;
        (bundle)
@@ -339,7 +341,8 @@ _docker-compose() {
        '(- :)'{-h,--help}'[Get help]' \
        '*'{-f,--file}"[${file_description}]:file:_files -g '*.yml'" \
        '(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
-        "--compatibility[If set, Compose will attempt to convert deploy keys in v3 files to their non-Swarm equivalent]" \
+        '--env-file[Specify an alternate environment file (default: .env)]:env-file:_files' \
+        "--compatibility[If set, Compose will attempt to convert keys in v3 files to their non-Swarm equivalent]" \
        '(- :)'{-v,--version}'[Print version and exit]' \
        '--verbose[Show more output]' \
        '--log-level=[Set log level]:level:(DEBUG INFO WARNING ERROR CRITICAL)' \
@@ -354,9 +357,10 @@ _docker-compose() {
        '(-): :->command' \
        '(-)*:: :->option-or-argument' && ret=0

-    local -a relevant_compose_flags relevant_docker_flags compose_options docker_options
+    local -a relevant_compose_flags relevant_compose_repeatable_flags relevant_docker_flags compose_options docker_options

    relevant_compose_flags=(
        "--env-file"
        "--file" "-f"
        "--host" "-H"
        "--project-name" "-p"
@@ -368,6 +372,10 @@ _docker-compose() {
        "--skip-hostname-check"
    )

+    relevant_compose_repeatable_flags=(
+        "--file" "-f"
+    )
+
    relevant_docker_flags=(
        "--host" "-H"
        "--tls"
@@ -385,9 +393,18 @@ _docker-compose() {
            fi
        fi
        if [[ -n "${relevant_compose_flags[(r)$k]}" ]]; then
-            compose_options+=$k
-            if [[ -n "$opt_args[$k]" ]]; then
-                compose_options+=$opt_args[$k]
+            if [[ -n "${relevant_compose_repeatable_flags[(r)$k]}" ]]; then
+                values=("${(@s/:/)opt_args[$k]}")
+                for value in $values
+                do
+                    compose_options+=$k
+                    compose_options+=$value
+                done
+            else
+                compose_options+=$k
+                if [[ -n "$opt_args[$k]" ]]; then
+                    compose_options+=$opt_args[$k]
+                fi
            fi
        fi
    done
@@ -44,7 +44,7 @@ def warn_for_links(name, service):
    links = service.get('links')
    if links:
        example_service = links[0].partition(':')[0]
-        log.warn(
+        log.warning(
            "Service {name} has links, which no longer create environment "
            "variables such as {example_service_upper}_PORT. "
            "If you are using those in your application code, you should "
@@ -57,7 +57,7 @@ def warn_for_links(name, service):
def warn_for_external_links(name, service):
    external_links = service.get('external_links')
    if external_links:
-        log.warn(
+        log.warning(
            "Service {name} has external_links: {ext}, which now work "
            "slightly differently. In particular, two containers must be "
            "connected to at least one network in common in order to "
@@ -107,7 +107,7 @@ def rewrite_volumes_from(service, service_names):
def create_volumes_section(data):
    named_volumes = get_named_volumes(data['services'])
    if named_volumes:
-        log.warn(
+        log.warning(
            "Named volumes ({names}) must be explicitly declared. Creating a "
            "'volumes' section with declarations.\n\n"
            "For backwards-compatibility, they've been declared as external. "
docker-compose-entrypoint.sh (new executable file, 20 lines)
@@ -0,0 +1,20 @@
#!/bin/sh
set -e

# first arg is `-f` or `--some-option`
if [ "${1#-}" != "$1" ]; then
	set -- docker-compose "$@"
fi

# if our command is a valid Docker subcommand, let's invoke it through Docker instead
# (this allows for "docker run docker ps", etc)
if docker-compose help "$1" > /dev/null 2>&1; then
	set -- docker-compose "$@"
fi

# if we have "--link some-docker:docker" and not DOCKER_HOST, let's set DOCKER_HOST automatically
if [ -z "$DOCKER_HOST" -a "$DOCKER_PORT_2375_TCP" ]; then
	export DOCKER_HOST='tcp://docker:2375'
fi

exec "$@"
@@ -98,4 +98,5 @@ exe = EXE(pyz,
          debug=False,
          strip=None,
          upx=True,
-          console=True)
+          console=True,
+          bootloader_ignore_signals=True)
@@ -6,11 +6,9 @@ The documentation for Compose has been merged into
The docs for Compose are now here:
https://github.com/docker/docker.github.io/tree/master/compose

-Please submit pull requests for unpublished features on the `vnext-compose` branch (https://github.com/docker/docker.github.io/tree/vnext-compose).
+Please submit pull requests for unreleased features/changes on the `master` branch (https://github.com/docker/docker.github.io/tree/master), please prefix the PR title with `[WIP]` to indicate that it relates to an unreleased change.

-If you submit a PR to this codebase that has a docs impact, create a second docs PR on `docker.github.io`. Use the docs PR template provided (coming soon - watch this space).
-
-PRs for typos, additional information, etc. for already-published features should be labeled as `okay-to-publish` (we are still settling on a naming convention, will provide a label soon). You can submit these PRs either to `vnext-compose` or directly to `master` on `docker.github.io`
+If you submit a PR to this codebase that has a docs impact, create a second docs PR on `docker.github.io`. Use the docs PR template provided.

As always, the docs remain open-source and we appreciate your feedback and
pull requests!
pyinstaller/ldd (new executable file, 13 lines)
@@ -0,0 +1,13 @@
#!/bin/sh

# From http://wiki.musl-libc.org/wiki/FAQ#Q:_where_is_ldd_.3F
#
# Musl's dynlinker comes with ldd functionality built in. just create a
# symlink from ld-musl-$ARCH.so to /bin/ldd. If the dynlinker was started
# as "ldd", it will detect that and print the appropriate DSO information.
#
# Instead, this string replaced "ldd" with the package so that pyinstaller
# can find the actual lib.
exec /usr/bin/ldd "$@" | \
    sed -r 's/([^[:space:]]+) => ldd/\1 => \/lib\/\1/g' | \
    sed -r 's/ldd \(.*\)//g'
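A hedged illustration of what the two sed expressions above do to one line of musl ldd output (the regexes are transcribed from the script; the example line is invented):

```python
import re

line = "libc.musl-x86_64.so.1 => ldd (0x7f...)"
# First sed: rewrite "<name> => ldd" so the name resolves to /lib/<name>.
line = re.sub(r'([^\s]+) => ldd', r'\1 => /lib/\1', line)
# Second sed: strip bare "ldd (0x...)" dynlinker lines (a no-op here).
line = re.sub(r'ldd \(.*\)', '', line)
assert line == "libc.musl-x86_64.so.1 => /lib/libc.musl-x86_64.so.1 (0x7f...)"
```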
@@ -1 +1 @@
-pyinstaller==3.3.1
+pyinstaller==3.5
@@ -1,5 +1,6 @@
coverage==4.4.2
ddt==1.2.0
flake8==3.5.0
-mock>=1.0.1
+mock==3.0.5
pytest==3.6.3
pytest-cov==2.5.1
@@ -1,23 +1,25 @@
backports.shutil_get_terminal_size==1.0.0
backports.ssl-match-hostname==3.5.0.1; python_version < '3'
cached-property==1.3.0
certifi==2017.4.17
chardet==3.0.4
colorama==0.4.0; sys_platform == 'win32'
-docker==3.5.0
-docker-pycreds==0.3.0
+docker==4.1.0
+docker-pycreds==0.4.0
dockerpty==0.4.1
docopt==0.6.2
enum34==1.1.6; python_version < '3.4'
functools32==3.2.3.post2; python_version < '3.2'
idna==2.5
ipaddress==1.0.18
-jsonschema==2.6.0
+jsonschema==3.0.1
paramiko==2.6.0
pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
PySocks==1.6.7
-PyYAML==3.12
-requests==2.20.0
-six==1.10.0
-texttable==0.9.1
-urllib3==1.21.1; python_version == '3.3'
+PyYAML==4.2b1
+requests==2.22.0
+six==1.12.0
+texttable==1.6.2
+urllib3==1.24.2; python_version == '3.3'
websocket-client==0.32.0
script/Jenkinsfile.fossa (new file, 20 lines)
@@ -0,0 +1,20 @@
pipeline {
    agent any
    stages {
        stage("License Scan") {
            agent {
                label 'ubuntu-1604-aufs-edge'
            }

            steps {
                withCredentials([
                    string(credentialsId: 'fossa-api-key', variable: 'FOSSA_API_KEY')
                ]) {
                    checkout scm
                    sh "FOSSA_API_KEY='${FOSSA_API_KEY}' BRANCH_NAME='${env.BRANCH_NAME}' make -f script/fossa.mk fossa-analyze"
                    sh "FOSSA_API_KEY='${FOSSA_API_KEY}' make -f script/fossa.mk fossa-test"
                }
            }
        }
    }
}
@@ -7,11 +7,14 @@ if [ -z "$1" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TAG=$1
|
||||
TAG="$1"
|
||||
|
||||
VERSION="$(python setup.py --version)"
|
||||
|
||||
./script/build/write-git-sha
|
||||
DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
|
||||
echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA
|
||||
python setup.py sdist bdist_wheel
|
||||
./script/build/linux
|
||||
docker build -t docker/compose:$TAG -f Dockerfile.run .
|
||||
|
||||
docker build \
|
||||
--build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}" \
|
||||
-t "${TAG}" .
|
||||
|
||||
@@ -4,10 +4,15 @@ set -ex

./script/clean

-TAG="docker-compose"
-docker build -t "$TAG" . | tail -n 200
-docker run \
-    --rm --entrypoint="script/build/linux-entrypoint" \
-    -v $(pwd)/dist:/code/dist \
-    -v $(pwd)/.git:/code/.git \
-    "$TAG"
+DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
+TAG="docker/compose:tmp-glibc-linux-binary-${DOCKER_COMPOSE_GITSHA}"
+
+docker build -t "${TAG}" . \
+    --build-arg BUILD_PLATFORM=debian \
+    --build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}"
+TMP_CONTAINER=$(docker create "${TAG}")
+mkdir -p dist
+ARCH=$(uname -m)
+docker cp "${TMP_CONTAINER}":/usr/local/bin/docker-compose "dist/docker-compose-Linux-${ARCH}"
+docker container rm -f "${TMP_CONTAINER}"
+docker image rm -f "${TAG}"
@@ -2,14 +2,39 @@

set -ex

-TARGET=dist/docker-compose-$(uname -s)-$(uname -m)
-VENV=/code/.tox/py36
+CODE_PATH=/code
+VENV="${CODE_PATH}"/.tox/py37

-mkdir -p `pwd`/dist
-chmod 777 `pwd`/dist
+cd "${CODE_PATH}"
+mkdir -p dist
+chmod 777 dist

-$VENV/bin/pip install -q -r requirements-build.txt
-./script/build/write-git-sha
-su -c "$VENV/bin/pyinstaller docker-compose.spec" user
-mv dist/docker-compose $TARGET
-$TARGET version
+"${VENV}"/bin/pip3 install -q -r requirements-build.txt
+
+# TODO(ulyssessouza) To check if really needed
+if [ -z "${DOCKER_COMPOSE_GITSHA}" ]; then
+    DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
+fi
+echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA
+
+export PATH="${CODE_PATH}/pyinstaller:${PATH}"
+
+if [ ! -z "${BUILD_BOOTLOADER}" ]; then
+    # Build bootloader for alpine; develop is the main branch
+    git clone --single-branch --branch develop https://github.com/pyinstaller/pyinstaller.git /tmp/pyinstaller
+    cd /tmp/pyinstaller/bootloader
+    # Checkout commit corresponding to version in requirements-build
+    git checkout v3.5
+    "${VENV}"/bin/python3 ./waf configure --no-lsb all
+    "${VENV}"/bin/pip3 install ..
+    cd "${CODE_PATH}"
+    rm -Rf /tmp/pyinstaller
+else
+    echo "NOT compiling bootloader!!!"
+fi
+
+"${VENV}"/bin/pyinstaller --exclude-module pycrypto --exclude-module PyInstaller docker-compose.spec
+ls -la dist/
+ldd dist/docker-compose
+mv dist/docker-compose /usr/local/bin
+docker-compose version
@@ -5,11 +5,12 @@ TOOLCHAIN_PATH="$(realpath $(dirname $0)/../../build/toolchain)"

rm -rf venv

-virtualenv -p ${TOOLCHAIN_PATH}/bin/python3 venv
+virtualenv -p "${TOOLCHAIN_PATH}"/bin/python3 venv
venv/bin/pip install -r requirements.txt
venv/bin/pip install -r requirements-build.txt
venv/bin/pip install --no-deps .
-./script/build/write-git-sha
+DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
+echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA
venv/bin/pyinstaller docker-compose.spec
mv dist/docker-compose dist/docker-compose-Darwin-x86_64
dist/docker-compose-Darwin-x86_64 version
@@ -7,11 +7,12 @@ if [ -z "$1" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TAG=$1
|
||||
TAG="$1"
|
||||
IMAGE="docker/compose-tests"
|
||||
|
||||
docker build -t docker-compose-tests:tmp .
|
||||
ctnr_id=$(docker create --entrypoint=tox docker-compose-tests:tmp)
|
||||
docker commit $ctnr_id docker/compose-tests:latest
|
||||
docker tag docker/compose-tests:latest docker/compose-tests:$TAG
|
||||
docker rm -f $ctnr_id
|
||||
docker rmi -f docker-compose-tests:tmp
|
||||
DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
|
||||
docker build -t "${IMAGE}:${TAG}" . \
|
||||
--target build \
|
||||
--build-arg BUILD_PLATFORM="debian" \
|
||||
--build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}"
|
||||
docker tag "${IMAGE}":"${TAG}" "${IMAGE}":latest
|
||||
|
||||
@@ -6,17 +6,17 @@
#
# http://git-scm.com/download/win
#
-# 2. Install Python 3.6.4:
+# 2. Install Python 3.7.2:
#
# https://www.python.org/downloads/
#
-# 3. Append ";C:\Python36;C:\Python36\Scripts" to the "Path" environment variable:
+# 3. Append ";C:\Python37;C:\Python37\Scripts" to the "Path" environment variable:
#
# https://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/sysdm_advancd_environmnt_addchange_variable.mspx?mfr=true
#
# 4. In Powershell, run the following commands:
#
-# $ pip install 'virtualenv>=15.1.0'
+# $ pip install 'virtualenv==16.2.0'
# $ Set-ExecutionPolicy -Scope CurrentUser RemoteSigned
#
# 5. Clone the repository:
@@ -2,6 +2,11 @@
#
# Write the current commit sha to the file GITSHA. This file is included in
# packaging so that `docker-compose version` can include the git sha.
#
set -e
-git rev-parse --short HEAD > compose/GITSHA
+# sets to 'unknown' and echoes a message if the command is not successful
+
+DOCKER_COMPOSE_GITSHA="$(git rev-parse --short HEAD)"
+if [[ "${?}" != "0" ]]; then
+    echo "Couldn't get revision of the git repository. Setting to 'unknown' instead"
+    DOCKER_COMPOSE_GITSHA="unknown"
+fi
+echo "${DOCKER_COMPOSE_GITSHA}"
@@ -1,7 +1,5 @@
#!/bin/bash

-set -x
-
curl -f -u$BINTRAY_USERNAME:$BINTRAY_API_KEY -X GET \
    https://api.bintray.com/repos/docker-compose/${CIRCLE_BRANCH}
script/fossa.mk (new file, 16 lines)
@@ -0,0 +1,16 @@
# Variables for Fossa
BUILD_ANALYZER?=docker/fossa-analyzer
FOSSA_OPTS?=--option all-tags:true --option allow-unresolved:true

fossa-analyze:
	docker run --rm -e FOSSA_API_KEY=$(FOSSA_API_KEY) \
		-v $(CURDIR)/$*:/go/src/github.com/docker/compose \
		-w /go/src/github.com/docker/compose \
		$(BUILD_ANALYZER) analyze ${FOSSA_OPTS} --branch ${BRANCH_NAME}

# This command is used to run the fossa test command
fossa-test:
	docker run -i -e FOSSA_API_KEY=$(FOSSA_API_KEY) \
		-v $(CURDIR)/$*:/go/src/github.com/docker/compose \
		-w /go/src/github.com/docker/compose \
		$(BUILD_ANALYZER) test
@@ -40,7 +40,7 @@ This API token should be exposed to the release script through the
### A Bintray account and Bintray API key

Your Bintray account will need to be an admin member of the
-[docker-compose organization](https://github.com/settings/tokens).
+[docker-compose organization](https://bintray.com/docker-compose).
Additionally, you should generate a personal API key. To do so, click your
username in the top-right hand corner and select "Edit profile" ; on the new
page, select "API key" in the left-side menu.
@@ -129,7 +129,7 @@ assets public), proceed to the "Finalize a release" section of this guide.
Once you're ready to make your release public, you may execute the following
command from the root of the Compose repository:
```
-./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEAE_VERSION
+./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEASE_VERSION
```

Note that this command will create and publish versioned assets to the public.
@@ -192,6 +192,8 @@ be handled manually by the operator:
- Bump the version in `compose/__init__.py` to the *next* minor version
  number with `dev` appended. For example, if you just released `1.4.0`,
  update it to `1.5.0dev`
+- Update compose_version in [github.com/docker/docker.github.io/blob/master/_config.yml](https://github.com/docker/docker.github.io/blob/master/_config.yml) and [github.com/docker/docker.github.io/blob/master/_config_authoring.yml](https://github.com/docker/docker.github.io/blob/master/_config_authoring.yml)
+- Update the release note in [github.com/docker/docker.github.io](https://github.com/docker/docker.github.io/blob/master/release-notes/docker-compose.md)

## Advanced options
@@ -26,12 +26,6 @@ if [ -z "$(command -v jq 2> /dev/null)" ]; then
|
||||
fi
|
||||
|
||||
|
||||
if [ -z "$(command -v pandoc 2> /dev/null)" ]; then
|
||||
>&2 echo "$0 requires http://pandoc.org/"
|
||||
>&2 echo "Please install it and make sure it is available on your \$PATH."
|
||||
exit 2
|
||||
fi
|
||||
|
||||
API=https://api.github.com/repos
|
||||
REPO=docker/compose
|
||||
GITHUB_REPO=git@github.com:$REPO
|
||||
@@ -59,8 +53,6 @@ docker push docker/compose-tests:latest
|
||||
docker push docker/compose-tests:$VERSION
|
||||
|
||||
echo "Uploading package to PyPI"
|
||||
pandoc -f markdown -t rst README.md -o README.rst
|
||||
sed -i -e 's/logo.png?raw=true/https:\/\/github.com\/docker\/compose\/raw\/master\/logo.png?raw=true/' README.rst
|
||||
./script/build/write-git-sha
|
||||
python setup.py sdist bdist_wheel
|
||||
if [ "$(command -v twine 2> /dev/null)" ]; then
|
||||
|
||||
@@ -1,6 +1,6 @@
-If you're a Mac or Windows user, the best way to install Compose and keep it up-to-date is **[Docker for Mac and Windows](https://www.docker.com/products/docker)**.
+If you're a Mac or Windows user, the best way to install Compose and keep it up-to-date is **[Docker Desktop for Mac and Windows](https://www.docker.com/products/docker-desktop)**.

-Docker for Mac and Windows will automatically install the latest version of Docker Engine for you.
+Docker Desktop will automatically install the latest version of Docker Engine for you.

Alternatively, you can use the usual commands to install or upgrade Compose:
@@ -7,9 +7,7 @@ import os
import shutil
import sys
import time
-from distutils.core import run_setup

-import pypandoc
from jinja2 import Template
from release.bintray import BintrayAPI
from release.const import BINTRAY_ORG
@@ -17,6 +15,7 @@ from release.const import NAME
from release.const import REPO_ROOT
from release.downloader import BinaryDownloader
from release.images import ImageManager
+from release.images import is_tag_latest
from release.pypi import check_pypirc
from release.pypi import pypi_upload
from release.repository import delete_assets
@@ -206,7 +205,7 @@ def resume(args):
        delete_assets(gh_release)
        upload_assets(gh_release, files)
        img_manager = ImageManager(args.release)
-        img_manager.build_images(repository, files)
+        img_manager.build_images(repository)
    except ScriptError as e:
        print(e)
        return 1
@@ -246,7 +245,7 @@ def start(args):
        gh_release = create_release_draft(repository, args.release, pr_data, files)
        upload_assets(gh_release, files)
        img_manager = ImageManager(args.release)
-        img_manager.build_images(repository, files)
+        img_manager.build_images(repository)
    except ScriptError as e:
        print(e)
        return 1
@@ -260,7 +259,8 @@ def finalize(args):
    try:
        check_pypirc()
        repository = Repository(REPO_ROOT, args.repo)
-        img_manager = ImageManager(args.release)
+        tag_as_latest = is_tag_latest(args.release)
+        img_manager = ImageManager(args.release, tag_as_latest)
        pr_data = repository.find_release_pr(args.release)
        if not pr_data:
            raise ScriptError('No PR found for {}'.format(args.release))
@@ -277,10 +277,8 @@ def finalize(args):

        repository.checkout_branch(br_name)

-        pypandoc.convert_file(
-            os.path.join(REPO_ROOT, 'README.md'), 'rst', outputfile=os.path.join(REPO_ROOT, 'README.rst')
-        )
-        run_setup(os.path.join(REPO_ROOT, 'setup.py'), script_args=['sdist', 'bdist_wheel'])
+        os.system('python {setup_script} sdist bdist_wheel'.format(
+            setup_script=os.path.join(REPO_ROOT, 'setup.py')))

        merge_status = pr_data.merge()
        if not merge_status.merged and not args.finalize_resume:
@@ -6,4 +6,5 @@ import os

REPO_ROOT = os.path.join(os.path.dirname(__file__), '..', '..', '..')
NAME = 'docker/compose'
+COMPOSE_TESTS_IMAGE_BASE_NAME = NAME + '-tests'
BINTRAY_ORG = 'docker-compose'
@@ -5,18 +5,36 @@ from __future__ import unicode_literals
import base64
import json
import os
import shutil

import docker
from enum import Enum

from .const import NAME
from .const import REPO_ROOT
from .utils import ScriptError
from .utils import yesno
from script.release.release.const import COMPOSE_TESTS_IMAGE_BASE_NAME


class Platform(Enum):
    ALPINE = 'alpine'
    DEBIAN = 'debian'

    def __str__(self):
        return self.value


# Checks if this version respects the GA version format ('x.y.z') and not an RC
def is_tag_latest(version):
    ga_version = all(n.isdigit() for n in version.split('.')) and version.count('.') == 2
    return ga_version and yesno('Should this release be tagged as \"latest\"? [Y/n]: ', default=True)


class ImageManager(object):
-    def __init__(self, version):
+    def __init__(self, version, latest=False):
        self.docker_client = docker.APIClient(**docker.utils.kwargs_from_env())
        self.version = version
+        self.latest = latest
        if 'HUB_CREDENTIALS' in os.environ:
            print('HUB_CREDENTIALS found in environment, issuing login')
            credentials = json.loads(base64.urlsafe_b64decode(os.environ['HUB_CREDENTIALS']))
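The GA check above is easy to sanity-test in isolation (the interactive `yesno` prompt is omitted here, so this sketch covers only the version-format half):

```python
def is_ga_version(version):
    # 'x.y.z' with all-numeric components; anything else (release
    # candidates, short versions) is never offered the "latest" tag.
    return all(n.isdigit() for n in version.split('.')) and version.count('.') == 2

assert is_ga_version('1.25.0')
assert not is_ga_version('1.25.0-rc1')
assert not is_ga_version('1.25')
```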
@@ -24,16 +42,36 @@ class ImageManager(object):
            username=credentials['Username'], password=credentials['Password']
        )

-    def build_images(self, repository, files):
-        print("Building release images...")
-        repository.write_git_sha()
-        distdir = os.path.join(REPO_ROOT, 'dist')
-        os.makedirs(distdir, exist_ok=True)
-        shutil.copy(files['docker-compose-Linux-x86_64'][0], distdir)
-        os.chmod(os.path.join(distdir, 'docker-compose-Linux-x86_64'), 0o755)
-        print('Building docker/compose image')
+    def _tag(self, image, existing_tag, new_tag):
+        existing_repo_tag = '{image}:{tag}'.format(image=image, tag=existing_tag)
+        new_repo_tag = '{image}:{tag}'.format(image=image, tag=new_tag)
+        self.docker_client.tag(existing_repo_tag, new_repo_tag)
+
+    def get_full_version(self, platform=None):
+        return self.version + '-' + platform.__str__() if platform else self.version
+
+    def get_runtime_image_tag(self, tag):
+        return '{image_base_image}:{tag}'.format(
+            image_base_image=NAME,
+            tag=self.get_full_version(tag)
+        )
+
+    def build_runtime_image(self, repository, platform):
+        git_sha = repository.write_git_sha()
+        compose_image_base_name = NAME
+        print('Building {image} image ({platform} based)'.format(
+            image=compose_image_base_name,
+            platform=platform
+        ))
+        full_version = self.get_full_version(platform)
+        build_tag = self.get_runtime_image_tag(platform)
        logstream = self.docker_client.build(
-            REPO_ROOT, tag='docker/compose:{}'.format(self.version), dockerfile='Dockerfile.run',
+            REPO_ROOT,
+            tag=build_tag,
+            buildargs={
+                'BUILD_PLATFORM': platform.value,
+                'GIT_COMMIT': git_sha,
+            },
            decode=True
        )
        for chunk in logstream:
@@ -42,9 +80,33 @@ class ImageManager(object):
            if 'stream' in chunk:
                print(chunk['stream'], end='')

-        print('Building test image (for UCP e2e)')
+        if platform == Platform.ALPINE:
+            self._tag(compose_image_base_name, full_version, self.version)
+        if self.latest:
+            self._tag(compose_image_base_name, full_version, platform)
+            if platform == Platform.ALPINE:
+                self._tag(compose_image_base_name, full_version, 'latest')
+
+    def get_ucp_test_image_tag(self, tag=None):
+        return '{image}:{tag}'.format(
+            image=COMPOSE_TESTS_IMAGE_BASE_NAME,
+            tag=tag or self.version
+        )
+
+    # Used for producing a test image for UCP
+    def build_ucp_test_image(self, repository):
+        print('Building test image (debian based for UCP e2e)')
+        git_sha = repository.write_git_sha()
+        ucp_test_image_tag = self.get_ucp_test_image_tag()
        logstream = self.docker_client.build(
-            REPO_ROOT, tag='docker-compose-tests:tmp', decode=True
+            REPO_ROOT,
+            tag=ucp_test_image_tag,
+            target='build',
+            buildargs={
+                'BUILD_PLATFORM': Platform.DEBIAN.value,
+                'GIT_COMMIT': git_sha,
+            },
+            decode=True
        )
        for chunk in logstream:
            if 'error' in chunk:
@@ -52,26 +114,15 @@ class ImageManager(object):
            if 'stream' in chunk:
                print(chunk['stream'], end='')

-        container = self.docker_client.create_container(
-            'docker-compose-tests:tmp', entrypoint='tox'
-        )
-        self.docker_client.commit(container, 'docker/compose-tests', 'latest')
-        self.docker_client.tag(
-            'docker/compose-tests:latest', 'docker/compose-tests:{}'.format(self.version)
-        )
-        self.docker_client.remove_container(container, force=True)
-        self.docker_client.remove_image('docker-compose-tests:tmp', force=True)
+        self._tag(COMPOSE_TESTS_IMAGE_BASE_NAME, self.version, 'latest')

-    @property
-    def image_names(self):
-        return [
-            'docker/compose-tests:latest',
-            'docker/compose-tests:{}'.format(self.version),
-            'docker/compose:{}'.format(self.version)
-        ]
+    def build_images(self, repository):
+        self.build_runtime_image(repository, Platform.ALPINE)
+        self.build_runtime_image(repository, Platform.DEBIAN)
+        self.build_ucp_test_image(repository)

    def check_images(self):
-        for name in self.image_names:
+        for name in self.get_images_to_push():
            try:
                self.docker_client.inspect_image(name)
            except docker.errors.ImageNotFound:
@@ -79,8 +130,22 @@ class ImageManager(object):
                return False
        return True

+    def get_images_to_push(self):
+        tags_to_push = {
+            "{}:{}".format(NAME, self.version),
+            self.get_runtime_image_tag(Platform.ALPINE),
+            self.get_runtime_image_tag(Platform.DEBIAN),
+            self.get_ucp_test_image_tag(),
+            self.get_ucp_test_image_tag('latest'),
+        }
+        if is_tag_latest(self.version):
+            tags_to_push.add("{}:latest".format(NAME))
+        return tags_to_push
+
    def push_images(self):
-        for name in self.image_names:
+        tags_to_push = self.get_images_to_push()
+        print('Build tags to push {}'.format(tags_to_push))
+        for name in tags_to_push:
            print('Pushing {} to Docker Hub'.format(name))
            logstream = self.docker_client.push(name, stream=True, decode=True)
            for chunk in logstream:
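Worked example: for a hypothetical GA release `1.25.0`, with the operator answering yes to the "latest" prompt, tracing the methods above gives this tag set (derived by hand, not captured output):

```python
{
    'docker/compose:1.25.0',
    'docker/compose:1.25.0-alpine',
    'docker/compose:1.25.0-debian',
    'docker/compose-tests:1.25.0',
    'docker/compose-tests:latest',
    'docker/compose:latest',
}
```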
@@ -18,7 +18,7 @@ def pypi_upload(args):
            'dist/docker-compose-{}*.tar.gz'.format(rel)
        ])
    except HTTPError as e:
-        if e.response.status_code == 400 and 'File already exists' in e.message:
+        if e.response.status_code == 400 and 'File already exists' in str(e):
            if not args.finalize_resume:
                raise ScriptError(
                    'Package already uploaded on PyPi.'
@@ -175,6 +175,7 @@ class Repository(object):
    def write_git_sha(self):
        with open(os.path.join(REPO_ROOT, 'compose', 'GITSHA'), 'w') as f:
            f.write(self.git_repo.head.commit.hexsha[:7])
+        return self.git_repo.head.commit.hexsha[:7]

    def cherry_pick_prs(self, release_branch, ids):
        if not ids:
@@ -219,6 +220,8 @@ def get_contributors(pr_data):
    commits = pr_data.get_commits()
    authors = {}
    for commit in commits:
+        if not commit or not commit.author or not commit.author.login:
+            continue
        author = commit.author.login
        authors[author] = authors.get(author, 0) + 1
    return [x[0] for x in sorted(list(authors.items()), key=lambda x: x[1])]
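The contributor ordering above sorts by commit count; a minimal sketch:

```python
authors = {'alice': 3, 'bob': 1}
# Ascending by count, mirroring the key=lambda x: x[1] above.
assert [x[0] for x in sorted(list(authors.items()), key=lambda x: x[1])] == ['bob', 'alice']
```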
@@ -39,9 +39,9 @@ fi

$VENV_PYTHONBIN -m pip install -U Jinja2==2.10 \
    PyGithub==1.39 \
-    pypandoc==1.4 \
    GitPython==2.1.9 \
    requests==2.18.4 \
+    setuptools==40.6.2 \
    twine==1.11.0

$VENV_PYTHONBIN setup.py develop
@@ -15,7 +15,7 @@

set -e

-VERSION="1.23.1"
+VERSION="1.24.0"
IMAGE="docker/compose:$VERSION"
@@ -47,14 +47,14 @@ if [ -n "$HOME" ]; then
|
||||
fi
|
||||
|
||||
# Only allocate tty if we detect one
|
||||
if [ -t 0 ]; then
|
||||
if [ -t 1 ]; then
|
||||
DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -t"
|
||||
fi
|
||||
else
|
||||
DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -i"
|
||||
if [ -t 0 -a -t 1 ]; then
|
||||
DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -t"
|
||||
fi
|
||||
|
||||
# Always set -i to support piped and terminal input in run/exec
|
||||
DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -i"
|
||||
|
||||
|
||||
# Handle userns security
|
||||
if [ ! -z "$(docker info 2>/dev/null | grep userns)" ]; then
|
||||
DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS --userns=host"
|
||||
|
||||
@@ -13,13 +13,13 @@ if ! [ ${DEPLOYMENT_TARGET} == "$(macos_version)" ]; then
    SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62
fi

-OPENSSL_VERSION=1.1.0h
+OPENSSL_VERSION=1.1.1c
OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz
-OPENSSL_SHA1=0fc39f6aa91b6e7f4d05018f7c5e991e1d2491fd
+OPENSSL_SHA1=71b830a077276cbeccc994369538617a21bee808

-PYTHON_VERSION=3.6.6
+PYTHON_VERSION=3.7.4
PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz
-PYTHON_SHA1=ae1fc9ddd29ad8c1d5f7b0d799ff0787efeb9652
+PYTHON_SHA1=fb1d764be8a9dcd40f2f152a610a0ab04e0d0ed3

#
# Install prerequisites.
@@ -36,7 +36,7 @@ if ! [ -x "$(command -v python3)" ]; then
    brew install python3
fi
if ! [ -x "$(command -v virtualenv)" ]; then
-    pip install virtualenv
+    pip install virtualenv==16.2.0
fi

#
@@ -50,7 +50,7 @@ mkdir -p ${TOOLCHAIN_PATH}
#
# Set macOS SDK.
#
-if [ ${SDK_FETCH} ]; then
+if [[ ${SDK_FETCH} && ! -f ${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk/SDKSettings.plist ]]; then
    SDK_PATH=${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk
    fetch_tarball ${SDK_URL} ${SDK_PATH} ${SDK_SHA1}
else
@@ -61,7 +61,7 @@ fi
# Build OpenSSL.
#
OPENSSL_SRC_PATH=${TOOLCHAIN_PATH}/openssl-${OPENSSL_VERSION}
-if ! [ -f ${TOOLCHAIN_PATH}/bin/openssl ]; then
+if ! [[ $(${TOOLCHAIN_PATH}/bin/openssl version) == *"${OPENSSL_VERSION}"* ]]; then
    rm -rf ${OPENSSL_SRC_PATH}
    fetch_tarball ${OPENSSL_URL} ${OPENSSL_SRC_PATH} ${OPENSSL_SHA1}
    (
@@ -77,7 +77,7 @@ fi
# Build Python.
#
PYTHON_SRC_PATH=${TOOLCHAIN_PATH}/Python-${PYTHON_VERSION}
-if ! [ -f ${TOOLCHAIN_PATH}/bin/python3 ]; then
+if ! [[ $(${TOOLCHAIN_PATH}/bin/python3 --version) == *"${PYTHON_VERSION}"* ]]; then
    rm -rf ${PYTHON_SRC_PATH}
    fetch_tarball ${PYTHON_URL} ${PYTHON_SRC_PATH} ${PYTHON_SHA1}
    (
@@ -87,9 +87,10 @@ if ! [ -f ${TOOLCHAIN_PATH}/bin/python3 ]; then
            --datarootdir=${TOOLCHAIN_PATH}/share \
            --datadir=${TOOLCHAIN_PATH}/share \
            --enable-framework=${TOOLCHAIN_PATH}/Frameworks \
+            --with-openssl=${TOOLCHAIN_PATH} \
            MACOSX_DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET} \
            CFLAGS="-isysroot ${SDK_PATH} -I${TOOLCHAIN_PATH}/include" \
-            CPPFLAGS="-I${SDK_PATH}/usr/include -I${TOOLCHAIN_PATH}include" \
+            CPPFLAGS="-I${SDK_PATH}/usr/include -I${TOOLCHAIN_PATH}/include" \
            LDFLAGS="-isysroot ${SDK_PATH} -L ${TOOLCHAIN_PATH}/lib"
        make -j 4
        make install PYTHONAPPSDIR=${TOOLCHAIN_PATH}
@@ -97,6 +98,11 @@ if ! [ -f ${TOOLCHAIN_PATH}/bin/python3 ]; then
    )
fi

+#
+# Smoke test built Python.
+#
+openssl_version ${TOOLCHAIN_PATH}
+
echo ""
echo "*** Targeting macOS: ${DEPLOYMENT_TARGET}"
echo "*** Using SDK ${SDK_PATH}"
@@ -8,8 +8,7 @@ set -e
docker run --rm \
    --tty \
    ${GIT_VOLUME} \
-    --entrypoint="tox" \
-    "$TAG" -e pre-commit
+    "$TAG" tox -e pre-commit

get_versions="docker run --rm
    --entrypoint=/code/.tox/py27/bin/python
@@ -24,7 +23,7 @@ fi


BUILD_NUMBER=${BUILD_NUMBER-$USER}
-PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py36}
+PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py37}

for version in $DOCKER_VERSIONS; do
    >&2 echo "Running tests against Docker $version"
@@ -20,6 +20,3 @@ export DOCKER_DAEMON_ARGS="--storage-driver=$STORAGE_DRIVER"

GIT_VOLUME="--volumes-from=$(hostname)"
. script/test/all
-
->&2 echo "Building Linux binary"
-. script/build/linux-entrypoint
@@ -3,17 +3,18 @@

set -ex

-TAG="docker-compose:$(git rev-parse --short HEAD)"
+TAG="docker-compose:alpine-$(git rev-parse --short HEAD)"

# By default use the Dockerfile, but can be overridden to use an alternative file
-# e.g DOCKERFILE=Dockerfile.armhf script/test/default
+# e.g DOCKERFILE=Dockerfile.s390x script/test/default
DOCKERFILE="${DOCKERFILE:-Dockerfile}"
+DOCKER_BUILD_TARGET="${DOCKER_BUILD_TARGET:-build}"

rm -rf coverage-html
# Create the host directory so it's owned by $USER
mkdir -p coverage-html

-docker build -f ${DOCKERFILE} -t "$TAG" .
+docker build -f "${DOCKERFILE}" -t "${TAG}" --target "${DOCKER_BUILD_TARGET}" .

GIT_VOLUME="--volume=$(pwd)/.git:/code/.git"
. script/test/all
setup.py (45 lines changed)
@@ -31,31 +31,33 @@ def find_version(*file_paths):

install_requires = [
    'cached-property >= 1.2.0, < 2',
-    'docopt >= 0.6.1, < 0.7',
-    'PyYAML >= 3.10, < 4',
-    'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.21',
-    'texttable >= 0.9.0, < 0.10',
-    'websocket-client >= 0.32.0, < 1.0',
-    'docker >= 3.5.0, < 4.0',
-    'dockerpty >= 0.4.1, < 0.5',
+    'docopt >= 0.6.1, < 1',
+    'PyYAML >= 3.10, < 5',
+    'requests >= 2.20.0, < 3',
+    'texttable >= 0.9.0, < 2',
+    'websocket-client >= 0.32.0, < 1',
+    'docker[ssh] >= 3.7.0, < 5',
+    'dockerpty >= 0.4.1, < 1',
    'six >= 1.3.0, < 2',
-    'jsonschema >= 2.5.1, < 3',
+    'jsonschema >= 2.5.1, < 4',
]


tests_require = [
-    'pytest',
+    'pytest < 6',
]


if sys.version_info[:2] < (3, 4):
-    tests_require.append('mock >= 1.0.1')
+    tests_require.append('mock >= 1.0.1, < 4')

extras_require = {
+    ':python_version < "3.2"': ['subprocess32 >= 3.5.4, < 4'],
    ':python_version < "3.4"': ['enum34 >= 1.0.4, < 2'],
-    ':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5'],
-    ':python_version < "3.3"': ['ipaddress >= 1.0.16'],
-    ':sys_platform == "win32"': ['colorama >= 0.4, < 0.5'],
+    ':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5, < 4'],
+    ':python_version < "3.3"': ['backports.shutil_get_terminal_size == 1.0.0',
+                                'ipaddress >= 1.0.16, < 2'],
+    ':sys_platform == "win32"': ['colorama >= 0.4, < 1'],
    'socks': ['PySocks >= 1.5.6, != 1.5.7, < 2'],
}
@@ -77,19 +79,26 @@ setup(
    name='docker-compose',
    version=find_version("compose", "__init__.py"),
    description='Multi-container orchestration for Docker',
    long_description=read('README.md'),
+   long_description_content_type='text/markdown',
    url='https://www.docker.com/',
+   project_urls={
+       'Documentation': 'https://docs.docker.com/compose/overview',
+       'Changelog': 'https://github.com/docker/compose/blob/release/CHANGELOG.md',
+       'Source': 'https://github.com/docker/compose',
+       'Tracker': 'https://github.com/docker/compose/issues',
+   },
    author='Docker, Inc.',
    license='Apache License 2.0',
    packages=find_packages(exclude=['tests.*', 'tests']),
    include_package_data=True,
-   test_suite='nose.collector',
    install_requires=install_requires,
    extras_require=extras_require,
    tests_require=tests_require,
-   entry_points="""
-   [console_scripts]
-   docker-compose=compose.cli.main:main
-   """,
+   python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
+   entry_points={
+       'console_scripts': ['docker-compose=compose.cli.main:main'],
+   },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',

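The loosened pins and PEP 508 environment markers above are resolved by pip at install time: markers such as ':python_version < "3.5"' pull in a backport only on matching interpreters, while named extras stay opt-in. An illustrative install (the [ssh] extra on the docker SDK is already implied by install_requires; SOCKS proxy support remains an explicit extra):

    pip install "docker-compose[socks]"
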
tests/acceptance/cli_test.py

@@ -4,7 +4,6 @@ from __future__ import unicode_literals

import datetime
import json
import os
import os.path
import re
import signal
@@ -12,6 +11,7 @@ import subprocess
import time
from collections import Counter
from collections import namedtuple
+from functools import reduce
from operator import attrgetter

import pytest
@@ -20,6 +20,7 @@ import yaml
from docker import errors

from .. import mock
+from ..helpers import BUSYBOX_IMAGE_WITH_TAG
from ..helpers import create_host_file
from compose.cli.command import get_project
from compose.config.errors import DuplicateOverrideFileFound
@@ -41,7 +42,7 @@ ProcessResult = namedtuple('ProcessResult', 'stdout stderr')


BUILD_CACHE_TEXT = 'Using cache'
-BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:latest'
+BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:1.27.2'


def start_process(base_dir, options):
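Pinning the busybox tag makes the daemon's pull status line deterministic, where busybox:latest could change underneath the tests; BUSYBOX_IMAGE_WITH_TAG, imported from the test helpers above, evidently resolves to busybox:1.27.2. The expected text is what a repeated pull prints:

    docker pull busybox:1.27.2   # first pull downloads the image
    docker pull busybox:1.27.2   # prints "Status: Image is up to date for busybox:1.27.2"
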
@@ -63,6 +64,12 @@ def wait_on_process(proc, returncode=0):
    return ProcessResult(stdout.decode('utf-8'), stderr.decode('utf-8'))


+def dispatch(base_dir, options, project_options=None, returncode=0):
+    project_options = project_options or []
+    proc = start_process(base_dir, project_options + options)
+    return wait_on_process(proc, returncode=returncode)
+
+
def wait_on_condition(condition, delay=0.1, timeout=40):
    start_time = time.time()
    while not condition():
@@ -150,9 +157,7 @@ class CLITestCase(DockerClientTestCase):
        return self._project

    def dispatch(self, options, project_options=None, returncode=0):
-        project_options = project_options or []
-        proc = start_process(self.base_dir, project_options + options)
-        return wait_on_process(proc, returncode=returncode)
+        return dispatch(self.base_dir, options, project_options, returncode)

    def execute(self, container, cmd):
        # Remove once Hijack and CloseNotifier sign a peace treaty
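Hoisting dispatch to module level lets helpers outside CLITestCase drive the binary the same way; the method becomes a thin wrapper. A single acceptance test can then be exercised through tox (a sketch, assuming the tox envs named earlier forward positional args to pytest):

    tox -e py37 -- tests/acceptance/cli_test.py::CLITestCase::test_quiet_build
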
@@ -171,6 +176,13 @@ class CLITestCase(DockerClientTestCase):
        # Prevent tearDown from trying to create a project
        self.base_dir = None

+    def test_quiet_build(self):
+        self.base_dir = 'tests/fixtures/build-args'
+        result = self.dispatch(['build'], None)
+        quietResult = self.dispatch(['build', '-q'], None)
+        assert result.stdout != ""
+        assert quietResult.stdout == ""
+
    def test_help_nonexistent(self):
        self.base_dir = 'tests/fixtures/no-composefile'
        result = self.dispatch(['help', 'foobar'], returncode=1)
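The new test pins down the contract of the quiet flag: a normal build streams output while -q keeps stdout empty, e.g.:

    docker-compose build        # streams build output
    docker-compose build -q     # prints nothing on stdout
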
@@ -259,7 +271,7 @@ class CLITestCase(DockerClientTestCase):
            'volumes_from': ['service:other:rw'],
        },
        'other': {
-           'image': 'busybox:latest',
+           'image': BUSYBOX_IMAGE_WITH_TAG,
            'command': 'top',
            'volumes': ['/data'],
        },
@@ -325,6 +337,21 @@ class CLITestCase(DockerClientTestCase):
            'version': '2.4'
        }

+    def test_config_with_env_file(self):
+        self.base_dir = 'tests/fixtures/default-env-file'
+        result = self.dispatch(['--env-file', '.env2', 'config'])
+        json_result = yaml.load(result.stdout)
+        assert json_result == {
+            'services': {
+                'web': {
+                    'command': 'false',
+                    'image': 'alpine:latest',
+                    'ports': ['5644/tcp', '9998/tcp']
+                }
+            },
+            'version': '2.4'
+        }
+
    def test_config_with_dot_env_and_override_dir(self):
        self.base_dir = 'tests/fixtures/default-env-file'
        result = self.dispatch(['--project-directory', 'alt/', 'config'])
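--env-file points variable substitution at an alternative env file in place of the default .env, so one compose file can be rendered against several environments (the .env2 fixture is added further down in this diff):

    cd tests/fixtures/default-env-file
    docker-compose --env-file .env2 config
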
@@ -333,7 +360,7 @@ class CLITestCase(DockerClientTestCase):
        'services': {
            'web': {
                'command': 'echo uwu',
-               'image': 'alpine:3.4',
+               'image': 'alpine:3.10.1',
                'ports': ['3341/tcp', '4449/tcp']
            }
        },
@@ -532,7 +559,7 @@ class CLITestCase(DockerClientTestCase):
        'services': {
            'foo': {
                'command': '/bin/true',
-               'image': 'alpine:3.7',
+               'image': 'alpine:3.10.1',
                'scale': 3,
                'restart': 'always:7',
                'mem_limit': '300M',
@@ -599,15 +626,25 @@ class CLITestCase(DockerClientTestCase):
        assert 'with_build' in running.stdout
        assert 'with_image' in running.stdout

+    def test_ps_all(self):
+        self.project.get_service('simple').create_container(one_off='blahblah')
+        result = self.dispatch(['ps'])
+        assert 'simple-composefile_simple_run_' not in result.stdout
+
+        result2 = self.dispatch(['ps', '--all'])
+        assert 'simple-composefile_simple_run_' in result2.stdout
+
    def test_pull(self):
        result = self.dispatch(['pull'])
        assert 'Pulling simple' in result.stderr
        assert 'Pulling another' in result.stderr
+        assert 'done' in result.stderr
+        assert 'failed' not in result.stderr

    def test_pull_with_digest(self):
        result = self.dispatch(['-f', 'digest.yml', 'pull', '--no-parallel'])

-        assert 'Pulling simple (busybox:latest)...' in result.stderr
+        assert 'Pulling simple ({})...'.format(BUSYBOX_IMAGE_WITH_TAG) in result.stderr
        assert ('Pulling digest (busybox@'
                'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b520'
                '04ee8502d)...') in result.stderr
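ps now hides one-off run containers unless the new --all flag is given:

    docker-compose ps         # service containers only
    docker-compose ps --all   # also one-off run containers
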
@@ -618,12 +655,19 @@ class CLITestCase(DockerClientTestCase):
            'pull', '--ignore-pull-failures', '--no-parallel']
        )

-        assert 'Pulling simple (busybox:latest)...' in result.stderr
+        assert 'Pulling simple ({})...'.format(BUSYBOX_IMAGE_WITH_TAG) in result.stderr
        assert 'Pulling another (nonexisting-image:latest)...' in result.stderr
        assert ('repository nonexisting-image not found' in result.stderr or
                'image library/nonexisting-image:latest not found' in result.stderr or
                'pull access denied for nonexisting-image' in result.stderr)

+    def test_pull_with_build(self):
+        result = self.dispatch(['-f', 'pull-with-build.yml', 'pull'])
+
+        assert 'Pulling simple' not in result.stderr
+        assert 'Pulling from_simple' not in result.stderr
+        assert 'Pulling another ...' in result.stderr
+
    def test_pull_with_quiet(self):
        assert self.dispatch(['pull', '--quiet']).stderr == ''
        assert self.dispatch(['pull', '--quiet']).stdout == ''
@@ -649,15 +693,15 @@ class CLITestCase(DockerClientTestCase):
        self.base_dir = 'tests/fixtures/links-composefile'
        result = self.dispatch(['pull', '--no-parallel', 'web'])
        assert sorted(result.stderr.split('\n'))[1:] == [
-            'Pulling web (busybox:latest)...',
+            'Pulling web (busybox:1.27.2)...',
        ]

    def test_pull_with_include_deps(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        result = self.dispatch(['pull', '--no-parallel', '--include-deps', 'web'])
        assert sorted(result.stderr.split('\n'))[1:] == [
-            'Pulling db (busybox:latest)...',
-            'Pulling web (busybox:latest)...',
+            'Pulling db (busybox:1.27.2)...',
+            'Pulling web (busybox:1.27.2)...',
        ]

    def test_build_plain(self):
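--include-deps widens a scoped pull to the target's linked dependencies, exactly as asserted above:

    docker-compose pull --no-parallel web                   # web only
    docker-compose pull --no-parallel --include-deps web    # db as well, via the link
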
@@ -738,6 +782,27 @@ class CLITestCase(DockerClientTestCase):
        ]
        assert not containers

+    @pytest.mark.xfail(True, reason='Flaky on local')
+    def test_build_rm(self):
+        containers = [
+            Container.from_ps(self.project.client, c)
+            for c in self.project.client.containers(all=True)
+        ]
+
+        assert not containers
+
+        self.base_dir = 'tests/fixtures/simple-dockerfile'
+        self.dispatch(['build', '--no-rm', 'simple'], returncode=0)
+
+        containers = [
+            Container.from_ps(self.project.client, c)
+            for c in self.project.client.containers(all=True)
+        ]
+        assert containers
+
+        for c in self.project.client.containers(all=True):
+            self.addCleanup(self.project.client.remove_container, c, force=True)
+
    def test_build_shm_size_build_option(self):
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/build-shm-size'
@@ -965,11 +1030,11 @@ class CLITestCase(DockerClientTestCase):
        result = self.dispatch(['down', '--rmi=local', '--volumes'])
        assert 'Stopping v2-full_web_1' in result.stderr
        assert 'Stopping v2-full_other_1' in result.stderr
-        assert 'Stopping v2-full_web_run_2' in result.stderr
+        assert 'Stopping v2-full_web_run_' in result.stderr
        assert 'Removing v2-full_web_1' in result.stderr
        assert 'Removing v2-full_other_1' in result.stderr
-        assert 'Removing v2-full_web_run_1' in result.stderr
-        assert 'Removing v2-full_web_run_2' in result.stderr
+        assert 'Removing v2-full_web_run_' in result.stderr
+        assert 'Removing v2-full_web_run_' in result.stderr
        assert 'Removing volume v2-full_data' in result.stderr
        assert 'Removing image v2-full_web' in result.stderr
        assert 'Removing image busybox' not in result.stderr
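The one-off container assertions were loosened to the run_ prefix, since the suffix of run containers is apparently no longer stable across runs. The command under test tears everything down in one shot:

    docker-compose down --rmi=local --volumes   # containers, locally built images, named volumes
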
@@ -1031,8 +1096,8 @@ class CLITestCase(DockerClientTestCase):
            stopped=True
        )[0].name_without_project

-        assert '{} | simple'.format(simple_name) in result.stdout
-        assert '{} | another'.format(another_name) in result.stdout
+        assert '{} | simple'.format(simple_name) in result.stdout
+        assert '{} | another'.format(another_name) in result.stdout
        assert '{} exited with code 0'.format(simple_name) in result.stdout
        assert '{} exited with code 0'.format(another_name) in result.stdout
@@ -1099,6 +1164,22 @@ class CLITestCase(DockerClientTestCase):
        ]
        assert len(remote_volumes) > 0

+    @v2_only()
+    def test_up_no_start_remove_orphans(self):
+        self.base_dir = 'tests/fixtures/v2-simple'
+        self.dispatch(['up', '--no-start'], None)
+
+        services = self.project.get_services()
+
+        stopped = reduce((lambda prev, next: prev.containers(
+            stopped=True) + next.containers(stopped=True)), services)
+        assert len(stopped) == 2
+
+        self.dispatch(['-f', 'one-container.yml', 'up', '--no-start', '--remove-orphans'], None)
+        stopped2 = reduce((lambda prev, next: prev.containers(
+            stopped=True) + next.containers(stopped=True)), services)
+        assert len(stopped2) == 1
+
    @v2_only()
    def test_up_no_ansi(self):
        self.base_dir = 'tests/fixtures/v2-simple'
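up --no-start creates containers without starting them, and --remove-orphans also applies in that mode: switching to a file that defines fewer services drops the now-orphaned container, as the counts above check. In CLI terms (a sketch against the v2-simple fixture):

    docker-compose up --no-start
    docker-compose -f one-container.yml up --no-start --remove-orphans
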
@@ -1371,7 +1452,7 @@ class CLITestCase(DockerClientTestCase):
        if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]

-        assert set([v['Name'].split('/')[-1] for v in volumes]) == set([volume_with_label])
+        assert set([v['Name'].split('/')[-1] for v in volumes]) == {volume_with_label}
        assert 'label_key' in volumes[0]['Labels']
        assert volumes[0]['Labels']['label_key'] == 'label_val'

@@ -2036,7 +2117,7 @@ class CLITestCase(DockerClientTestCase):
        for _, config in networks.items():
            # TODO: once we drop support for API <1.24, this can be changed to:
            # assert config['Aliases'] == [container.short_id]
-            aliases = set(config['Aliases'] or []) - set([container.short_id])
+            aliases = set(config['Aliases'] or []) - {container.short_id}
            assert not aliases

    @v2_only()
@@ -2056,7 +2137,7 @@ class CLITestCase(DockerClientTestCase):
        for _, config in networks.items():
            # TODO: once we drop support for API <1.24, this can be changed to:
            # assert config['Aliases'] == [container.short_id]
-            aliases = set(config['Aliases'] or []) - set([container.short_id])
+            aliases = set(config['Aliases'] or []) - {container.short_id}
            assert not aliases

        assert self.lookup(container, 'app')
@@ -2221,6 +2302,7 @@ class CLITestCase(DockerClientTestCase):

    def test_start_no_containers(self):
        result = self.dispatch(['start'], returncode=1)
        assert 'failed' in result.stderr
+        assert 'No containers to start' in result.stderr

    @v2_only()
@@ -2291,6 +2373,7 @@ class CLITestCase(DockerClientTestCase):
        assert 'another' in result.stdout
        assert 'exited with code 0' in result.stdout

+    @pytest.mark.skip(reason="race condition between up and logs")
    def test_logs_follow_logs_from_new_containers(self):
        self.base_dir = 'tests/fixtures/logs-composefile'
        self.dispatch(['up', '-d', 'simple'])
@@ -2317,6 +2400,7 @@ class CLITestCase(DockerClientTestCase):
        assert '{} exited with code 0'.format(another_name) in result.stdout
        assert '{} exited with code 137'.format(simple_name) in result.stdout

+    @pytest.mark.skip(reason="race condition between up and logs")
    def test_logs_follow_logs_from_restarted_containers(self):
        self.base_dir = 'tests/fixtures/logs-restart-composefile'
        proc = start_process(self.base_dir, ['up'])
@@ -2332,12 +2416,12 @@ class CLITestCase(DockerClientTestCase):

        result = wait_on_process(proc)

-        assert len(re.findall(
-            r'logs-restart-composefile_another_1_[a-f0-9]{12} exited with code 1',
-            result.stdout
-        )) == 3
+        assert result.stdout.count(
+            r'logs-restart-composefile_another_1 exited with code 1'
+        ) == 3
        assert result.stdout.count('world') == 3

+    @pytest.mark.skip(reason="race condition between up and logs")
    def test_logs_default(self):
        self.base_dir = 'tests/fixtures/logs-composefile'
        self.dispatch(['up', '-d'])
@@ -2464,10 +2548,12 @@ class CLITestCase(DockerClientTestCase):
        self.dispatch(['up', '-d'])
        assert len(project.get_service('web').containers()) == 2
        assert len(project.get_service('db').containers()) == 1
+        assert len(project.get_service('worker').containers()) == 0

-        self.dispatch(['up', '-d', '--scale', 'web=3'])
+        self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'worker=1'])
        assert len(project.get_service('web').containers()) == 3
        assert len(project.get_service('db').containers()) == 1
+        assert len(project.get_service('worker').containers()) == 1

    def test_up_scale_scale_down(self):
        self.base_dir = 'tests/fixtures/scale'
@@ -2476,22 +2562,26 @@ class CLITestCase(DockerClientTestCase):
        self.dispatch(['up', '-d'])
        assert len(project.get_service('web').containers()) == 2
        assert len(project.get_service('db').containers()) == 1
+        assert len(project.get_service('worker').containers()) == 0

        self.dispatch(['up', '-d', '--scale', 'web=1'])
        assert len(project.get_service('web').containers()) == 1
        assert len(project.get_service('db').containers()) == 1
+        assert len(project.get_service('worker').containers()) == 0

    def test_up_scale_reset(self):
        self.base_dir = 'tests/fixtures/scale'
        project = self.project

-        self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3'])
+        self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3', '--scale', 'worker=3'])
        assert len(project.get_service('web').containers()) == 3
        assert len(project.get_service('db').containers()) == 3
+        assert len(project.get_service('worker').containers()) == 3

        self.dispatch(['up', '-d'])
        assert len(project.get_service('web').containers()) == 2
        assert len(project.get_service('db').containers()) == 1
+        assert len(project.get_service('worker').containers()) == 0

    def test_up_scale_to_zero(self):
        self.base_dir = 'tests/fixtures/scale'
@@ -2500,10 +2590,12 @@ class CLITestCase(DockerClientTestCase):
        self.dispatch(['up', '-d'])
        assert len(project.get_service('web').containers()) == 2
        assert len(project.get_service('db').containers()) == 1
+        assert len(project.get_service('worker').containers()) == 0

-        self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0'])
+        self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0', '--scale', 'worker=0'])
        assert len(project.get_service('web').containers()) == 0
        assert len(project.get_service('db').containers()) == 0
+        assert len(project.get_service('worker').containers()) == 0

    def test_port(self):
        self.base_dir = 'tests/fixtures/ports-composefile'
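The scale fixture gains a worker service that evidently defaults to zero replicas; per-service --scale flags combine freely, and a later plain `up -d` returns every service to the scale declared in the file:

    docker-compose up -d --scale web=3 --scale worker=1
    docker-compose up -d    # back to the declared counts
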
@@ -2583,7 +2675,7 @@ class CLITestCase(DockerClientTestCase):

        container, = self.project.containers()
        expected_template = ' container {} {}'
-        expected_meta_info = ['image=busybox:latest', 'name=simple-composefile_simple_']
+        expected_meta_info = ['image=busybox:1.27.2', 'name=simple-composefile_simple_']

        assert expected_template.format('create', container.id) in lines[0]
        assert expected_template.format('start', container.id) in lines[1]
@@ -2655,7 +2747,7 @@ class CLITestCase(DockerClientTestCase):
        self.base_dir = 'tests/fixtures/extends'
        self.dispatch(['up', '-d'], None)

-        assert set([s.name for s in self.project.services]) == set(['mydb', 'myweb'])
+        assert set([s.name for s in self.project.services]) == {'mydb', 'myweb'}

        # Sort by name so we get [db, web]
        containers = sorted(
@@ -2667,15 +2759,9 @@ class CLITestCase(DockerClientTestCase):
        web = containers[1]
        db_name = containers[0].name_without_project

-        assert set(get_links(web)) == set(
-            ['db', db_name, 'extends_{}'.format(db_name)]
-        )
+        assert set(get_links(web)) == {'db', db_name, 'extends_{}'.format(db_name)}

-        expected_env = set([
-            "FOO=1",
-            "BAR=2",
-            "BAZ=2",
-        ])
+        expected_env = {"FOO=1", "BAR=2", "BAZ=2"}
        assert expected_env <= set(web.get('Config.Env'))

    def test_top_services_not_running(self):
@@ -2706,7 +2792,7 @@ class CLITestCase(DockerClientTestCase):
        )

        result = wait_on_process(proc, returncode=1)
-        assert re.findall(r'exit-code-from_another_1_[a-f0-9]{12} exited with code 1', result.stdout)
+        assert 'exit-code-from_another_1 exited with code 1' in result.stdout

    def test_exit_code_from_signal_stop(self):
        self.base_dir = 'tests/fixtures/exit-code-from'
@@ -2730,8 +2816,8 @@ class CLITestCase(DockerClientTestCase):
        result = self.dispatch(['images'])

        assert 'busybox' in result.stdout
-        assert 'multiple-composefiles_another_1' in result.stdout
-        assert 'multiple-composefiles_simple_1' in result.stdout
+        assert '_another_1' in result.stdout
+        assert '_simple_1' in result.stdout

    @mock.patch.dict(os.environ)
    def test_images_tagless_image(self):
@@ -2779,4 +2865,4 @@ class CLITestCase(DockerClientTestCase):

        assert re.search(r'foo1.+test[ \t]+dev', result.stdout) is not None
        assert re.search(r'foo2.+test[ \t]+prod', result.stdout) is not None
-        assert re.search(r'foo3.+_foo3[ \t]+latest', result.stdout) is not None
+        assert re.search(r'foo3.+test[ \t]+latest', result.stdout) is not None

@@ -1,6 +1,6 @@
simple:
-  image: busybox:latest
+  image: busybox:1.31.0-uclibc
  command: top
another:
-  image: busybox:latest
+  image: busybox:1.31.0-uclibc
  command: top

@@ -1,6 +1,6 @@
simple:
-  image: busybox:latest
+  image: busybox:1.31.0-uclibc
  command: top
another:
-  image: busybox:latest
+  image: busybox:1.31.0-uclibc
  command: ls .

@@ -1,6 +1,6 @@
simple:
-  image: busybox:latest
+  image: busybox:1.31.0-uclibc
  command: top
another:
-  image: busybox:latest
+  image: busybox:1.31.0-uclibc
  command: ls /thecakeisalie

tests/fixtures/build-args/Dockerfile (vendored)
@@ -1,4 +1,4 @@
-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
ARG favorite_th_character
RUN echo "Favorite Touhou Character: ${favorite_th_character}"

tests/fixtures/build-ctx/Dockerfile (vendored)
@@ -1,3 +1,3 @@
-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
CMD echo "success"

tests/fixtures/build-memory/Dockerfile (vendored)
@@ -1,4 +1,4 @@
-FROM busybox
+FROM busybox:1.31.0-uclibc

# Report the memory (through the size of the group memory)
RUN echo "memory:" $(cat /sys/fs/cgroup/memory/memory.limit_in_bytes)

@@ -1,4 +1,4 @@

-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
RUN echo a
CMD top

@@ -1,4 +1,4 @@

-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
RUN echo b
CMD top

@@ -1,7 +1,7 @@
version: '3.5'
services:
  foo:
-    image: alpine:3.7
+    image: alpine:3.10.1
    command: /bin/true
    deploy:
      replicas: 3

tests/fixtures/default-env-file/.env2 (vendored, new file)
@@ -0,0 +1,4 @@
+IMAGE=alpine:latest
+COMMAND=false
+PORT1=5644
+PORT2=9998
tests/fixtures/default-env-file/alt/.env (vendored)
@@ -1,4 +1,4 @@
-IMAGE=alpine:3.4
+IMAGE=alpine:3.10.1
COMMAND=echo uwu
PORT1=3341
PORT2=4449

@@ -1,4 +1,4 @@
-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
VOLUME /data
CMD top

@@ -1,10 +1,10 @@

web:
-  image: busybox:latest
+  image: busybox:1.31.0-uclibc
  command: "sleep 100"
  links:
    - db

db:
-  image: busybox:latest
+  image: busybox:1.31.0-uclibc
  command: "sleep 200"

@@ -1,6 +1,6 @@
simple:
-  image: busybox:latest
+  image: busybox:1.31.0-uclibc
  command: echo simple
another:
-  image: busybox:latest
+  image: busybox:1.31.0-uclibc
  command: echo another

@@ -1,4 +1,4 @@
-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
ENTRYPOINT ["printf"]
CMD ["default", "args"]

tests/fixtures/env-file-override/.env.conf (vendored, new file)
@@ -0,0 +1,2 @@
+WHEREAMI
+DEFAULT_CONF_LOADED=true
tests/fixtures/env-file-override/.env.override (vendored, new file)
@@ -0,0 +1 @@
+WHEREAMI=override
tests/fixtures/env-file-override/docker-compose.yml (vendored, new file)
@@ -0,0 +1,6 @@
+version: '3.7'
+services:
+  test:
+    image: busybox
+    env_file: .env.conf
+    entrypoint: env
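In .env.conf the WHEREAMI entry has no value, so the container inherits it from the environment Compose itself runs in; .env.override presumably supplies that value through the new --env-file option. Because the service's entrypoint is env, the effect is directly visible (an illustrative run; the acceptance test for this fixture is not part of the excerpt):

    cd tests/fixtures/env-file-override
    docker-compose --env-file .env.override up
    # container output should include:
    #   WHEREAMI=override
    #   DEFAULT_CONF_LOADED=true
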
@@ -1,5 +1,5 @@
service:
-  image: busybox:latest
+  image: busybox:1.31.0-uclibc
  command: top

  environment:

@@ -2,7 +2,7 @@ version: "2.2"

services:
  service:
-    image: busybox:latest
+    image: busybox:1.27.2
    command: top

    environment:

Some files were not shown because too many files have changed in this diff.