mirror of
https://github.com/docker/compose.git
synced 2026-02-12 03:29:27 +08:00
Compare commits
489 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
25853874c4 | ||
|
|
46f034705e | ||
|
|
4d85caf143 | ||
|
|
efda1efffe | ||
|
|
7e40754ffc | ||
|
|
70e90a07d3 | ||
|
|
b426868eda | ||
|
|
181a4e990e | ||
|
|
6d02f3fb23 | ||
|
|
f4c037d223 | ||
|
|
39d91cae95 | ||
|
|
f4125b3444 | ||
|
|
fcd38d3c4b | ||
|
|
f9dfb006b5 | ||
|
|
e6985de971 | ||
|
|
e002171ab1 | ||
|
|
f89ca3e147 | ||
|
|
24e9f66d79 | ||
|
|
a2da43b997 | ||
|
|
252d15a4a9 | ||
|
|
77b5ac4e54 | ||
|
|
a406378a1f | ||
|
|
29f9594ab9 | ||
|
|
ca529d36f8 | ||
|
|
455fde15c6 | ||
|
|
7d03c1fe29 | ||
|
|
0ca4d9c274 | ||
|
|
28788bd9b5 | ||
|
|
c36bb3f8d2 | ||
|
|
3147090fb4 | ||
|
|
de15ec835a | ||
|
|
2d3c76950f | ||
|
|
932b3cc10a | ||
|
|
db5d2bcb10 | ||
|
|
cd94c37f5d | ||
|
|
efb09af271 | ||
|
|
f60c60205b | ||
|
|
14716bc2da | ||
|
|
882084932d | ||
|
|
a5c1e4b0cd | ||
|
|
95c6cc0420 | ||
|
|
50faddb683 | ||
|
|
925915eb25 | ||
|
|
8314a48a2e | ||
|
|
8b383ad795 | ||
|
|
22d91f60be | ||
|
|
bdcce13f4a | ||
|
|
e4bb9bde30 | ||
|
|
7f1606545d | ||
|
|
0603b445e2 | ||
|
|
9248298a4a | ||
|
|
bc3ed6dead | ||
|
|
e5ded6ff9b | ||
|
|
804555bc4d | ||
|
|
17c5b45641 | ||
|
|
2bce81508e | ||
|
|
8f636d8279 | ||
|
|
b50b14f937 | ||
|
|
54103dc954 | ||
|
|
28546d1f81 | ||
|
|
a8ff4285d1 | ||
|
|
eac01a7cd5 | ||
|
|
08f1ea7a93 | ||
|
|
85d6d7955f | ||
|
|
1a4e81920b | ||
|
|
fe08be698d | ||
|
|
007cf96452 | ||
|
|
dc8a39f70d | ||
|
|
b05c6c6fe9 | ||
|
|
de90765531 | ||
|
|
5667de87e8 | ||
|
|
f65f89ad8c | ||
|
|
e216a31f1e | ||
|
|
e8903da96c | ||
|
|
613e060f0d | ||
|
|
070f8b3992 | ||
|
|
016197c16e | ||
|
|
6abdd9cc32 | ||
|
|
fd7c16f1a4 | ||
|
|
20a511e961 | ||
|
|
9e18929d60 | ||
|
|
4b17aa1b9e | ||
|
|
bd7db570bd | ||
|
|
1c07d6453f | ||
|
|
093b040b8e | ||
|
|
b801f275d7 | ||
|
|
5b27389571 | ||
|
|
53fa44c01e | ||
|
|
7687412e03 | ||
|
|
33424189d4 | ||
|
|
7911659266 | ||
|
|
1648a3e257 | ||
|
|
fc6791f3f0 | ||
|
|
1d03baa9b1 | ||
|
|
64517e31fc | ||
|
|
66b395d950 | ||
|
|
fd254caa68 | ||
|
|
651283eef9 | ||
|
|
f065ce0f73 | ||
|
|
3fcd648ba2 | ||
|
|
7dd2e33057 | ||
|
|
1b2354c6f4 | ||
|
|
ac9198e638 | ||
|
|
9faa852d41 | ||
|
|
9759f27fa6 | ||
|
|
b64bd07f22 | ||
|
|
dada36f732 | ||
|
|
817c76c8e9 | ||
|
|
3dec600d82 | ||
|
|
c197e02ecc | ||
|
|
267a243f80 | ||
|
|
08179fc7e3 | ||
|
|
bf91c64983 | ||
|
|
acfe100686 | ||
|
|
d824cb9b06 | ||
|
|
13bcd85eb3 | ||
|
|
c0305024f5 | ||
|
|
17f46f8999 | ||
|
|
4cba653eeb | ||
|
|
d29f8e1022 | ||
|
|
9abbe1b7f8 | ||
|
|
b3a4d76d4f | ||
|
|
589fb4925e | ||
|
|
48258e2b46 | ||
|
|
6ce066274e | ||
|
|
6f4be1cffc | ||
|
|
5aeeecb6f2 | ||
|
|
dec2c83014 | ||
|
|
d5765b7856 | ||
|
|
6ab0607e61 | ||
|
|
1110af1bae | ||
|
|
7fafd72c1e | ||
|
|
60622026fa | ||
|
|
22c0779a49 | ||
|
|
94f7016fb7 | ||
|
|
f3628c7a5e | ||
|
|
e115eaf6fc | ||
|
|
1bf0cd07de | ||
|
|
647f260b0a | ||
|
|
ec825af3d3 | ||
|
|
cefa239c2e | ||
|
|
6f3e4bbc6c | ||
|
|
87b6b3c139 | ||
|
|
1fb5c4b15a | ||
|
|
35ed189981 | ||
|
|
c392acc56b | ||
|
|
cd267d5121 | ||
|
|
cb076a57b9 | ||
|
|
6246a2592e | ||
|
|
606358cfb7 | ||
|
|
0488dd3709 | ||
|
|
b72f911ccf | ||
|
|
f9c5816ab8 | ||
|
|
2ecbf25445 | ||
|
|
8f842d55d7 | ||
|
|
ad19ff6c67 | ||
|
|
4fb7033d9c | ||
|
|
e1b7510e4a | ||
|
|
f7853a30bd | ||
|
|
6633f1962c | ||
|
|
3b4a53c959 | ||
|
|
e9d62e8404 | ||
|
|
ef4ad93d1e | ||
|
|
69b91ef6ba | ||
|
|
1877a41b92 | ||
|
|
5f0186e008 | ||
|
|
7f3375c2ce | ||
|
|
2fec6966d4 | ||
|
|
4a8b88bc2e | ||
|
|
8ffbe8e083 | ||
|
|
28e6508f4a | ||
|
|
8924f6c05c | ||
|
|
4598dfc79f | ||
|
|
887ed8d1b6 | ||
|
|
583bbb4635 | ||
|
|
2c9e46f60f | ||
|
|
07e2426d89 | ||
|
|
619bf4c4df | ||
|
|
10749e187c | ||
|
|
7f65caa97b | ||
|
|
a9b5e5abe0 | ||
|
|
d6f70dddc7 | ||
|
|
3124fec01a | ||
|
|
5cdf30fc12 | ||
|
|
26b819ca67 | ||
|
|
0483bcb472 | ||
|
|
6649e9aba3 | ||
|
|
425303992c | ||
|
|
9ab1d55d06 | ||
|
|
907b0690e6 | ||
|
|
d7db15ce94 | ||
|
|
593d1aeb09 | ||
|
|
1e60030b94 | ||
|
|
83f35e132b | ||
|
|
fea970dff3 | ||
|
|
c3fd6a8f4d | ||
|
|
4207d43b85 | ||
|
|
576a2ee7ae | ||
|
|
49d4fd2795 | ||
|
|
79ddf06267 | ||
|
|
7f54850b4a | ||
|
|
c72c966abc | ||
|
|
801167d271 | ||
|
|
89c9aed339 | ||
|
|
08127625a0 | ||
|
|
408e4719e1 | ||
|
|
dbf40d8244 | ||
|
|
6021237a69 | ||
|
|
e4159cfd42 | ||
|
|
d8ec9c1572 | ||
|
|
244b303625 | ||
|
|
217f762a60 | ||
|
|
70da16103a | ||
|
|
6b71645ed7 | ||
|
|
05bf9a054a | ||
|
|
6e3d82eea6 | ||
|
|
21d114b879 | ||
|
|
72849d99c0 | ||
|
|
85e3ad2655 | ||
|
|
a3e30c3eed | ||
|
|
6fd77fa698 | ||
|
|
a7fc3e2220 | ||
|
|
db02c9f537 | ||
|
|
33cc601176 | ||
|
|
44e82edc5f | ||
|
|
53341b82f9 | ||
|
|
1e176b58c5 | ||
|
|
98818d4e50 | ||
|
|
554dc2496a | ||
|
|
dcc09b677b | ||
|
|
949b88fff9 | ||
|
|
e659cd139b | ||
|
|
5dabc81c16 | ||
|
|
6fe5d2b543 | ||
|
|
967dce5807 | ||
|
|
edd3637b9b | ||
|
|
f9f151a51f | ||
|
|
931b01acf9 | ||
|
|
f1974f6c5e | ||
|
|
3d0a1de023 | ||
|
|
50d5aab8ad | ||
|
|
10ae81f8cf | ||
|
|
5d244ef6d8 | ||
|
|
a62739b906 | ||
|
|
3d20e25bf8 | ||
|
|
bc0939dcf0 | ||
|
|
c0237a487b | ||
|
|
3c8fdb8752 | ||
|
|
622de27c1e | ||
|
|
2b6ea847b9 | ||
|
|
da2aae2a29 | ||
|
|
edd28f09d1 | ||
|
|
93cc2675c8 | ||
|
|
3649df83ab | ||
|
|
b89dc99e8e | ||
|
|
a822406eb0 | ||
|
|
72d3d5d84b | ||
|
|
55e20d1727 | ||
|
|
7c06f60573 | ||
|
|
00decf8677 | ||
|
|
5640bd42a8 | ||
|
|
95207561bb | ||
|
|
8e04582052 | ||
|
|
058a7659ba | ||
|
|
fe0654603c | ||
|
|
c3247e7af8 | ||
|
|
b3d4e9c9d7 | ||
|
|
0058b4ba0c | ||
|
|
612d263d74 | ||
|
|
048408af48 | ||
|
|
d990f7899c | ||
|
|
97ba14c82a | ||
|
|
1b5a94f4e4 | ||
|
|
8d2fbe3a55 | ||
|
|
eb10f41d13 | ||
|
|
73a1b60ced | ||
|
|
b3d9652cc3 | ||
|
|
cbb44b1a14 | ||
|
|
a0a90b2352 | ||
|
|
aa7f522ab0 | ||
|
|
5c1abe2e29 | ||
|
|
f77dbc06cc | ||
|
|
f49b624d95 | ||
|
|
e5c5dc09f8 | ||
|
|
3c77db709f | ||
|
|
80af26d2bb | ||
|
|
352cdf0a80 | ||
|
|
020d46ff21 | ||
|
|
0de9a1b388 | ||
|
|
9bf6bc6dbd | ||
|
|
80afbd3961 | ||
|
|
317cd98c0d | ||
|
|
f8f582f349 | ||
|
|
a56e44f96e | ||
|
|
ee68a51e28 | ||
|
|
de374d845e | ||
|
|
0fe82614a6 | ||
|
|
8f8c17bf66 | ||
|
|
9b7bd69cfc | ||
|
|
1ea9dda1d3 | ||
|
|
21f20cbc9b | ||
|
|
68d73183eb | ||
|
|
2123906586 | ||
|
|
59e96fea4f | ||
|
|
60f7e021ad | ||
|
|
61324ef308 | ||
|
|
0287486b14 | ||
|
|
9ddb7f3c90 | ||
|
|
e502417df2 | ||
|
|
6e2d1eb80e | ||
|
|
ea640f3821 | ||
|
|
427ec899df | ||
|
|
e7a8b2fed5 | ||
|
|
17b219454f | ||
|
|
dd3590180d | ||
|
|
a67ba5536d | ||
|
|
90fba58df9 | ||
|
|
c148849f0e | ||
|
|
1298b9aa5d | ||
|
|
e3e8a619cc | ||
|
|
c46737ed02 | ||
|
|
86b5ed1a84 | ||
|
|
4bb80c25d3 | ||
|
|
048360d1ed | ||
|
|
a34cd5ed54 | ||
|
|
c4229b469a | ||
|
|
842e372258 | ||
|
|
33bed5c706 | ||
|
|
7763122ecb | ||
|
|
2b5b665d3a | ||
|
|
9a39208741 | ||
|
|
ce8df9e789 | ||
|
|
0c8aeb9e05 | ||
|
|
db0a6cf2bb | ||
|
|
844b7d463f | ||
|
|
ca3aef0c84 | ||
|
|
e5645595e3 | ||
|
|
c9fe8920c9 | ||
|
|
4bf5271ae2 | ||
|
|
72f6a5c8d3 | ||
|
|
dc88e54010 | ||
|
|
4b01f6dcd6 | ||
|
|
f1603a3ee2 | ||
|
|
5fa81c4044 | ||
|
|
6c29830127 | ||
|
|
0a9ab358bf | ||
|
|
3c424b709e | ||
|
|
47a40d42c7 | ||
|
|
f316b448c2 | ||
|
|
6bfdde6855 | ||
|
|
2a08d4731e | ||
|
|
11d8093fc8 | ||
|
|
d0b46ca9b2 | ||
|
|
b7f9fc4b28 | ||
|
|
70a605acac | ||
|
|
85b85bc675 | ||
|
|
b334b6f059 | ||
|
|
0c1c338a02 | ||
|
|
f655a8af95 | ||
|
|
f7cd94d4a9 | ||
|
|
e4d2d7ed8a | ||
|
|
2a8c2c8ad6 | ||
|
|
5852db4d72 | ||
|
|
250a7a530b | ||
|
|
4e8b017283 | ||
|
|
a86a195c50 | ||
|
|
3368887a29 | ||
|
|
e5f1429ce1 | ||
|
|
65b0e5973b | ||
|
|
9cf483e224 | ||
|
|
1e164ca802 | ||
|
|
a2ded237e4 | ||
|
|
8a9ab69a1c | ||
|
|
9cfbfd55c4 | ||
|
|
d41e6e00fa | ||
|
|
3b7191f246 | ||
|
|
3bf75b7330 | ||
|
|
c1c8c70800 | ||
|
|
94bcbd1fb6 | ||
|
|
3e11a95056 | ||
|
|
310b3d9441 | ||
|
|
28fb91b344 | ||
|
|
c41f30c3ff | ||
|
|
fe17e0f948 | ||
|
|
bd7ec24e25 | ||
|
|
e4bb678875 | ||
|
|
84aa39e978 | ||
|
|
8cc7d68a00 | ||
|
|
0b24883cef | ||
|
|
2efcec776c | ||
|
|
61794ba97c | ||
|
|
87ee38ed2c | ||
|
|
7ad7eb71ca | ||
|
|
d3e645488a | ||
|
|
756ef14edc | ||
|
|
6064d200f9 | ||
|
|
84a3e2fe79 | ||
|
|
a4d3dd6197 | ||
|
|
0cd35913c3 | ||
|
|
26fe8213aa | ||
|
|
ac82597ac1 | ||
|
|
1988dfeaf0 | ||
|
|
da1d603463 | ||
|
|
75bcc382d9 | ||
|
|
b67f110620 | ||
|
|
27628f8655 | ||
|
|
55fcd1c3e3 | ||
|
|
a0aea42f75 | ||
|
|
6ff3c47630 | ||
|
|
8a34ee0eaa | ||
|
|
ba10f1cd55 | ||
|
|
cafe1315b2 | ||
|
|
984f839d33 | ||
|
|
d21e1c5a30 | ||
|
|
52fa010ac7 | ||
|
|
d4bebbb1ba | ||
|
|
6d2805917c | ||
|
|
377be5aa1f | ||
|
|
68272b0216 | ||
|
|
56c6e29819 | ||
|
|
4702703615 | ||
|
|
ad306f0479 | ||
|
|
e1356e1f6f | ||
|
|
abb5ae7fe4 | ||
|
|
e71c62b8d1 | ||
|
|
e2cb7b0237 | ||
|
|
aebb3d5d0a | ||
|
|
50287722f2 | ||
|
|
e8da6cb631 | ||
|
|
339ebc0483 | ||
|
|
ae46bf8907 | ||
|
|
276738f733 | ||
|
|
f10bc8072e | ||
|
|
7781f62ddf | ||
|
|
3d3f331404 | ||
|
|
d05feb1a4d | ||
|
|
7cfb5e7bc9 | ||
|
|
3722bb38c6 | ||
|
|
15c5bc2e6c | ||
|
|
0671b8b8c3 | ||
|
|
0e3db185cf | ||
|
|
97467c7dec | ||
|
|
4192a009da | ||
|
|
80614cff9b | ||
|
|
d4e9a3b6b1 | ||
|
|
3ca8858897 | ||
|
|
83df95d511 | ||
|
|
e5443717fb | ||
|
|
91a545813a | ||
|
|
be27e266da | ||
|
|
5450a67c2d | ||
|
|
54b6fc4219 | ||
|
|
ffab27c049 | ||
|
|
3720b50c3b | ||
|
|
af9526fb82 | ||
|
|
141b96bb31 | ||
|
|
bcdf541c8c | ||
|
|
f5b80640fe | ||
|
|
9f47e43b5c | ||
|
|
5d0aab4a8e | ||
|
|
3ef6b17bfc | ||
|
|
958f96c78a | ||
|
|
b33d7b3dd8 | ||
|
|
129fb5b356 | ||
|
|
86530287d6 | ||
|
|
c1026e815a | ||
|
|
c72d62f96d | ||
|
|
a63a05964b | ||
|
|
9509508f3e | ||
|
|
1a7a65f84d | ||
|
|
3034803258 | ||
|
|
78a8be07ad | ||
|
|
d27b82207c | ||
|
|
71c86acaa4 | ||
|
|
85e2fb63b3 | ||
|
|
024a810617 | ||
|
|
668d45c7cc | ||
|
|
09ea74245d | ||
|
|
386edd892c | ||
|
|
7116aefe43 | ||
|
|
93901ec480 | ||
|
|
8ae8f7ed4b | ||
|
|
9729c0d3c7 | ||
|
|
9d58b19ecc | ||
|
|
63b448120a | ||
|
|
0f1fb42326 | ||
|
|
a53b29467a | ||
|
|
7fc40dd7cc | ||
|
|
000eaee16a |
203
CHANGELOG.md
203
CHANGELOG.md
@@ -1,6 +1,209 @@
|
||||
Change log
|
||||
==========
|
||||
|
||||
1.9.0 (2016-11-16)
|
||||
-----------------
|
||||
|
||||
**Breaking changes**
|
||||
|
||||
- When using Compose with Docker Toolbox/Machine on Windows, volume paths are
|
||||
no longer converted from `C:\Users` to `/c/Users`-style by default. To
|
||||
re-enable this conversion so that your volumes keep working, set the
|
||||
environment variable `COMPOSE_CONVERT_WINDOWS_PATHS=1`. Users of
|
||||
Docker for Windows are not affected and do not need to set the variable.
|
||||
|
||||
New Features
|
||||
|
||||
- Interactive mode for `docker-compose run` and `docker-compose exec` is
|
||||
now supported on Windows platforms. Please note that the `docker` binary
|
||||
is required to be present on the system for this feature to work.
|
||||
|
||||
- Introduced version 2.1 of the `docker-compose.yml` specification. This
|
||||
version must be used with Docker Engine 1.12 or above.
|
||||
- Added support for setting volume labels and network labels in
|
||||
`docker-compose.yml`.
|
||||
- Added support for the `isolation` parameter in service definitions.
|
||||
- Added support for link-local IPs in the service networks definitions.
|
||||
- Added support for shell-style inline defaults in variable interpolation.
|
||||
The supported forms are `${FOO-default}` (fall back if FOO is unset) and
|
||||
`${FOO:-default}` (fall back if FOO is unset or empty).
|
||||
|
||||
- Added support for the `group_add` and `oom_score_adj` parameters in
|
||||
service definitions.
|
||||
|
||||
- Added support for the `internal` and `enable_ipv6` parameters in network
|
||||
definitions.
|
||||
|
||||
- Compose now defaults to using the `npipe` protocol on Windows.
|
||||
|
||||
- Overriding a `logging` configuration will now properly merge the `options`
|
||||
mappings if the `driver` values do not conflict.
|
||||
|
||||
Bug Fixes
|
||||
|
||||
- Fixed several bugs related to `npipe` protocol support on Windows.
|
||||
|
||||
- Fixed an issue with Windows paths being incorrectly converted when
|
||||
using Docker on Windows Server.
|
||||
|
||||
- Fixed a bug where an empty `restart` value would sometimes result in an
|
||||
exception being raised.
|
||||
|
||||
- Fixed an issue where service logs containing unicode characters would
|
||||
sometimes cause an error to occur.
|
||||
|
||||
- Fixed a bug where unicode values in environment variables would sometimes
|
||||
raise a unicode exception when retrieved.
|
||||
|
||||
- Fixed an issue where Compose would incorrectly detect a configuration
|
||||
mismatch for overlay networks.
|
||||
|
||||
|
||||
1.8.1 (2016-09-22)
|
||||
-----------------
|
||||
|
||||
Bug Fixes
|
||||
|
||||
- Fixed a bug where users using a credentials store were not able
|
||||
to access their private images.
|
||||
|
||||
- Fixed a bug where users using identity tokens to authenticate
|
||||
were not able to access their private images.
|
||||
|
||||
- Fixed a bug where an `HttpHeaders` entry in the docker configuration
|
||||
file would cause Compose to crash when trying to build an image.
|
||||
|
||||
- Fixed a few bugs related to the handling of Windows paths in volume
|
||||
binding declarations.
|
||||
|
||||
- Fixed a bug where Compose would sometimes crash while trying to
|
||||
read a streaming response from the engine.
|
||||
|
||||
- Fixed an issue where Compose would crash when encountering an API error
|
||||
while streaming container logs.
|
||||
|
||||
- Fixed an issue where Compose would erroneously try to output logs from
|
||||
drivers not handled by the Engine's API.
|
||||
|
||||
- Fixed a bug where options from the `docker-machine config` command would
|
||||
not be properly interpreted by Compose.
|
||||
|
||||
- Fixed a bug where the connection to the Docker Engine would
|
||||
sometimes fail when running a large number of services simultaneously.
|
||||
|
||||
- Fixed an issue where Compose would sometimes print a misleading
|
||||
suggestion message when running the `bundle` command.
|
||||
|
||||
- Fixed a bug where connection errors would not be handled properly by
|
||||
Compose during the project initialization phase.
|
||||
|
||||
- Fixed a bug where a misleading error would appear when encountering
|
||||
a connection timeout.
|
||||
|
||||
|
||||
1.8.0 (2016-06-14)
|
||||
-----------------
|
||||
|
||||
**Breaking Changes**
|
||||
|
||||
- As announced in 1.7.0, `docker-compose rm` now removes containers
|
||||
created by `docker-compose run` by default.
|
||||
|
||||
- Setting `entrypoint` on a service now empties out any default
|
||||
command that was set on the image (i.e. any `CMD` instruction in the
|
||||
Dockerfile used to build it). This makes it consistent with
|
||||
the `--entrypoint` flag to `docker run`.
|
||||
|
||||
New Features
|
||||
|
||||
- Added `docker-compose bundle`, a command that builds a bundle file
|
||||
to be consumed by the new *Docker Stack* commands in Docker 1.12.
|
||||
|
||||
- Added `docker-compose push`, a command that pushes service images
|
||||
to a registry.
|
||||
|
||||
- Compose now supports specifying a custom TLS version for
|
||||
interaction with the Docker Engine using the `COMPOSE_TLS_VERSION`
|
||||
environment variable.
|
||||
|
||||
Bug Fixes
|
||||
|
||||
- Fixed a bug where Compose would erroneously try to read `.env`
|
||||
at the project's root when it is a directory.
|
||||
|
||||
- `docker-compose run -e VAR` now passes `VAR` through from the shell
|
||||
to the container, as with `docker run -e VAR`.
|
||||
|
||||
- Improved config merging when multiple compose files are involved
|
||||
for several service sub-keys.
|
||||
|
||||
- Fixed a bug where volume mappings containing Windows drives would
|
||||
sometimes be parsed incorrectly.
|
||||
|
||||
- Fixed a bug in Windows environment where volume mappings of the
|
||||
host's root directory would be parsed incorrectly.
|
||||
|
||||
- Fixed a bug where `docker-compose config` would output an invalid
|
||||
Compose file if external networks were specified.
|
||||
|
||||
- Fixed an issue where unset buildargs would be assigned a string
|
||||
containing `'None'` instead of the expected empty value.
|
||||
|
||||
- Fixed a bug where yes/no prompts on Windows would not show before
|
||||
receiving input.
|
||||
|
||||
- Fixed a bug where trying to `docker-compose exec` on Windows
|
||||
without the `-d` option would exit with a stacktrace. This will
|
||||
still fail for the time being, but should do so gracefully.
|
||||
|
||||
- Fixed a bug where errors during `docker-compose up` would show
|
||||
an unrelated stacktrace at the end of the process.
|
||||
|
||||
- `docker-compose create` and `docker-compose start` show more
|
||||
descriptive error messages when something goes wrong.
|
||||
|
||||
|
||||
1.7.1 (2016-05-04)
|
||||
-----------------
|
||||
|
||||
Bug Fixes
|
||||
|
||||
- Fixed a bug where the output of `docker-compose config` for v1 files
|
||||
would be an invalid configuration file.
|
||||
|
||||
- Fixed a bug where `docker-compose config` would not check the validity
|
||||
of links.
|
||||
|
||||
- Fixed an issue where `docker-compose help` would not output a list of
|
||||
available commands and generic options as expected.
|
||||
|
||||
- Fixed an issue where filtering by service when using `docker-compose logs`
|
||||
would not apply for newly created services.
|
||||
|
||||
- Fixed a bug where unchanged services would sometimes be recreated in
|
||||
the up phase when using Compose with Python 3.
|
||||
|
||||
- Fixed an issue where API errors encountered during the up phase would
|
||||
not be recognized as a failure state by Compose.
|
||||
|
||||
- Fixed a bug where Compose would raise a NameError because of an undefined
|
||||
exception name on non-Windows platforms.
|
||||
|
||||
- Fixed a bug where the wrong version of `docker-py` would sometimes be
|
||||
installed alongside Compose.
|
||||
|
||||
- Fixed a bug where the host value output by `docker-machine config default`
|
||||
would not be recognized as valid options by the `docker-compose`
|
||||
command line.
|
||||
|
||||
- Fixed an issue where Compose would sometimes exit unexpectedly while
|
||||
reading events broadcasted by a Swarm cluster.
|
||||
|
||||
- Corrected a statement in the docs about the location of the `.env` file,
|
||||
which is indeed read from the current directory, instead of in the same
|
||||
location as the Compose file.
|
||||
|
||||
|
||||
1.7.0 (2016-04-13)
|
||||
------------------
|
||||
|
||||
|
||||
@@ -35,7 +35,7 @@ that should get you started.
|
||||
This step is optional, but recommended. Pre-commit hooks will run style checks
|
||||
and in some cases fix style issues for you, when you commit code.
|
||||
|
||||
Install the git pre-commit hooks using [tox](https://tox.readthedocs.org) by
|
||||
Install the git pre-commit hooks using [tox](https://tox.readthedocs.io) by
|
||||
running `tox -e pre-commit` or by following the
|
||||
[pre-commit install guide](http://pre-commit.com/#install).
|
||||
|
||||
|
||||
@@ -49,11 +49,11 @@ RUN set -ex; \
|
||||
|
||||
# Install pip
|
||||
RUN set -ex; \
|
||||
curl -L https://pypi.python.org/packages/source/p/pip/pip-7.0.1.tar.gz | tar -xz; \
|
||||
cd pip-7.0.1; \
|
||||
curl -L https://pypi.python.org/packages/source/p/pip/pip-8.1.1.tar.gz | tar -xz; \
|
||||
cd pip-8.1.1; \
|
||||
python setup.py install; \
|
||||
cd ..; \
|
||||
rm -rf pip-7.0.1
|
||||
rm -rf pip-8.1.1
|
||||
|
||||
# Python3 requires a valid locale
|
||||
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
|
||||
FROM alpine:edge
|
||||
FROM alpine:3.4
|
||||
RUN apk -U add \
|
||||
python \
|
||||
py-pip
|
||||
|
||||
80
Jenkinsfile
vendored
Normal file
80
Jenkinsfile
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
#!groovy
// CI pipeline: builds the per-commit test image, then runs the docs checker
// and the python/docker test matrix in parallel.

def image

// Run the documentation checker on a Linux node in a fresh workspace.
def checkDocs = { ->
    wrappedNode(label: 'linux') {
        deleteDir(); checkout(scm)
        documentationChecker("docs")
    }
}

// Pull the per-commit test image if it already exists in the registry;
// otherwise build and push it. Stores the handle in `image` for runTests.
def buildImage = { ->
    wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
        stage("build image") {
            deleteDir(); checkout(scm)
            def imageName = "dockerbuildbot/compose:${gitCommit()}"
            image = docker.image(imageName)
            try {
                image.pull()
            } catch (Exception exc) {
                // Not in the registry yet: build it here and push for reuse.
                image = docker.build(imageName, ".")
                image.push()
            }
        }
    }
}

// Returns a closure that runs the test suite for the requested python and
// docker version combinations inside the test image. Both settings are
// required; fail fast with a usage hint if either is missing.
def runTests = { Map settings ->
    def dockerVersions = settings.get("dockerVersions", null)
    def pythonVersions = settings.get("pythonVersions", null)

    if (!pythonVersions) {
        throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py27,py34')`")
    }
    if (!dockerVersions) {
        throw new Exception("Need Docker versions to test. e.g.: `runTests(dockerVersions: 'all')`")
    }

    { ->
        wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
            stage("test python=${pythonVersions} / docker=${dockerVersions}") {
                deleteDir(); checkout(scm)
                // Detect the host's storage driver so the inner Docker
                // daemon used by the tests matches it.
                def storageDriver = sh(script: 'docker info | awk -F \': \' \'$1 == "Storage Driver" { print $2; exit }\'', returnStdout: true).trim()
                echo "Using local system's storage driver: ${storageDriver}"
                sh """docker run \\
                    -t \\
                    --rm \\
                    --privileged \\
                    --volume="\$(pwd)/.git:/code/.git" \\
                    --volume="/var/run/docker.sock:/var/run/docker.sock" \\
                    -e "TAG=${image.id}" \\
                    -e "STORAGE_DRIVER=${storageDriver}" \\
                    -e "DOCKER_VERSIONS=${dockerVersions}" \\
                    -e "BUILD_NUMBER=\$BUILD_TAG" \\
                    -e "PY_TEST_VERSIONS=${pythonVersions}" \\
                    --entrypoint="script/ci" \\
                    ${image.id} \\
                    --verbose
                """
            }
        }
    }
}

// Build the image once, then fan out the test matrix in parallel.
def buildAndTest = { ->
    buildImage()
    // TODO: break this out into meaningful "DOCKER_VERSIONS" values instead of all
    parallel(
        failFast: true,
        all_py27: runTests(pythonVersions: "py27", dockerVersions: "all"),
        all_py34: runTests(pythonVersions: "py34", dockerVersions: "all"),
    )
}


// Top-level fan-out: docs check and build+test run independently.
parallel(
    failFast: false,
    docs: checkDocs,
    test: buildAndTest
)
|
||||
21
README.md
21
README.md
@@ -22,16 +22,17 @@ they can be run together in an isolated environment:
|
||||
|
||||
A `docker-compose.yml` looks like this:
|
||||
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "5000:5000"
|
||||
volumes:
|
||||
- .:/code
|
||||
links:
|
||||
- redis
|
||||
redis:
|
||||
image: redis
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "5000:5000"
|
||||
volumes:
|
||||
- .:/code
|
||||
redis:
|
||||
image: redis
|
||||
|
||||
For more information about the Compose file, see the
|
||||
[Compose file reference](https://github.com/docker/compose/blob/release/docs/compose-file.md)
|
||||
|
||||
19
ROADMAP.md
19
ROADMAP.md
@@ -1,13 +1,21 @@
|
||||
# Roadmap
|
||||
|
||||
## An even better tool for development environments
|
||||
|
||||
Compose is a great tool for development environments, but it could be even better. For example:
|
||||
|
||||
- It should be possible to define hostnames for containers which work from the host machine, e.g. “mywebcontainer.local”. This is needed by apps comprising multiple web services which generate links to one another (e.g. a frontend website and a separate admin webapp)
|
||||
|
||||
## More than just development environments
|
||||
|
||||
Over time we will extend Compose's remit to cover test, staging and production environments. This is not a simple task, and will take many incremental improvements such as:
|
||||
Compose currently works really well in development, but we want to make the Compose file format better for test, staging, and production environments. To support these use cases, there will need to be improvements to the file format, improvements to the command-line tool, integrations with other tools, and perhaps new tools altogether.
|
||||
|
||||
Some specific things we are considering:
|
||||
|
||||
- Compose currently will attempt to get your application into the correct state when running `up`, but it has a number of shortcomings:
|
||||
- It should roll back to a known good state if it fails.
|
||||
- It should allow a user to check the actions it is about to perform before running them.
|
||||
- It should be possible to partially modify the config file for different environments (dev/test/staging/prod), passing in e.g. custom ports or volume mount paths. ([#1377](https://github.com/docker/compose/issues/1377))
|
||||
- It should be possible to partially modify the config file for different environments (dev/test/staging/prod), passing in e.g. custom ports, volume mount paths, or volume drivers. ([#1377](https://github.com/docker/compose/issues/1377))
|
||||
- Compose should recommend a technique for zero-downtime deploys.
|
||||
- It should be possible to continuously attempt to keep an application in the correct state, instead of just performing `up` a single time.
|
||||
|
||||
@@ -22,10 +30,3 @@ The current state of integration is documented in [SWARM.md](SWARM.md).
|
||||
Compose works well for applications that are in a single repository and depend on services that are hosted on Docker Hub. If your application depends on another application within your organisation, Compose doesn't work as well.
|
||||
|
||||
There are several ideas about how this could work, such as [including external files](https://github.com/docker/fig/issues/318).
|
||||
|
||||
## An even better tool for development environments
|
||||
|
||||
Compose is a great tool for development environments, but it could be even better. For example:
|
||||
|
||||
- [Compose could watch your code and automatically kick off builds when something changes.](https://github.com/docker/fig/issues/184)
|
||||
- It should be possible to define hostnames for containers which work from the host machine, e.g. “mywebcontainer.local”. This is needed by apps comprising multiple web services which generate links to one another (e.g. a frontend website and a separate admin webapp)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
__version__ = '1.7.0'
|
||||
__version__ = '1.9.0'
|
||||
|
||||
258
compose/bundle.py
Normal file
258
compose/bundle.py
Normal file
@@ -0,0 +1,258 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import json
|
||||
import logging
|
||||
|
||||
import six
|
||||
from docker.utils import split_command
|
||||
from docker.utils.ports import split_port
|
||||
|
||||
from .cli.errors import UserError
|
||||
from .config.serialize import denormalize_config
|
||||
from .network import get_network_defs_for_service
|
||||
from .service import format_environment
|
||||
from .service import NoSuchImageError
|
||||
from .service import parse_repository_tag
|
||||
|
||||
|
||||
# Module-level logger used for bundle conversion warnings and errors.
log = logging.getLogger(__name__)


# Compose service keys that map directly onto bundle container-config keys
# via a simple rename.
SERVICE_KEYS = {
    'working_dir': 'WorkingDir',
    'user': 'User',
    'labels': 'Labels',
}

# Keys deliberately dropped during bundling without emitting a warning.
IGNORED_KEYS = {'build'}

# The full set of service keys the bundle format can represent; any other
# key triggers an "unsupported key" warning in convert_service_to_bundle().
SUPPORTED_KEYS = {
    'image',
    'ports',
    'expose',
    'networks',
    'command',
    'environment',
    'entrypoint',
} | set(SERVICE_KEYS)

# Version of the bundle file format written by serialize_bundle().
VERSION = '0.1'
|
||||
|
||||
|
||||
class NeedsPush(Exception):
    """Raised when a locally built image has no registry digest and must
    be pushed before a bundle can be created."""

    def __init__(self, image_name):
        # Forward to Exception.__init__ so str(e) and e.args carry the
        # image name (the original left them empty).
        super(NeedsPush, self).__init__(image_name)
        self.image_name = image_name
|
||||
|
||||
|
||||
class NeedsPull(Exception):
    """Raised when an image exists locally without a digest and must be
    re-pulled from a registry to obtain one."""

    def __init__(self, image_name, service_name):
        # Forward to Exception.__init__ so str(e) and e.args are populated
        # (the original left them empty).
        super(NeedsPull, self).__init__(image_name, service_name)
        self.image_name = image_name
        self.service_name = service_name
|
||||
|
||||
|
||||
class MissingDigests(Exception):
    """Aggregate error: some images still need a push and/or a pull before
    digests can be resolved for every service."""

    def __init__(self, needs_push, needs_pull):
        # Forward to Exception.__init__ so e.args reflects both sets
        # (the original left args empty).
        super(MissingDigests, self).__init__(needs_push, needs_pull)
        self.needs_push = needs_push
        self.needs_pull = needs_pull
|
||||
|
||||
|
||||
def serialize_bundle(config, image_digests):
    """Render the bundle for *config* as a stable, pretty-printed JSON string."""
    bundle = to_bundle(config, image_digests)
    return json.dumps(bundle, indent=2, sort_keys=True)
|
||||
|
||||
|
||||
def get_image_digests(project, allow_push=False):
    """Resolve a registry digest for every service in *project*.

    Returns a dict mapping service name -> digest identifier. If any image
    still needs a push or a pull first, raises MissingDigests carrying the
    accumulated sets of offending names.
    """
    digests = {}
    to_push = set()
    to_pull = set()

    for service in project.services:
        try:
            digests[service.name] = get_image_digest(service, allow_push=allow_push)
        except NeedsPush as exc:
            to_push.add(exc.image_name)
        except NeedsPull as exc:
            to_pull.add(exc.service_name)

    if to_push or to_pull:
        raise MissingDigests(to_push, to_pull)

    return digests
|
||||
|
||||
|
||||
def get_image_digest(service, allow_push=False):
    """Return a registry digest identifier ("repo@sha256:...") for *service*.

    Raises:
        UserError: the service defines no image name, or the image is not
            available locally.
        NeedsPull: the local image has no digest and must be re-pulled.
        NeedsPush: the image was built locally and allow_push is False.
    """
    if 'image' not in service.options:
        raise UserError(
            "Service '{s.name}' doesn't define an image tag. An image name is "
            "required to generate a proper image digest for the bundle. Specify "
            "an image repo and tag with the 'image' option.".format(s=service))

    _, _, separator = parse_repository_tag(service.options['image'])
    # Compose file already uses a digest, no lookup required
    if separator == '@':
        return service.options['image']

    try:
        image = service.image()
    except NoSuchImageError:
        # Suggest the command that would make the image available locally.
        action = 'build' if 'build' in service.options else 'pull'
        raise UserError(
            "Image not found for service '{service}'. "
            "You might need to run `docker-compose {action} {service}`."
            .format(service=service.name, action=action))

    if image['RepoDigests']:
        # TODO: pick a digest based on the image tag if there are multiple
        # digests
        return image['RepoDigests'][0]

    if 'build' not in service.options:
        # Image exists locally but carries no digest: a pull is required.
        raise NeedsPull(service.image_name, service.name)

    if not allow_push:
        # Locally built image: only a push can produce a digest.
        raise NeedsPush(service.image_name)

    return push_image(service)
|
||||
|
||||
|
||||
def push_image(service):
    """Push *service*'s image to its registry and return "repo@digest".

    Raises ValueError if the push did not yield a digest; any push failure
    is logged and then re-raised.
    """
    try:
        digest = service.push()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not intercepted here; genuine push failures are still logged
        # before being re-raised.
        log.error(
            "Failed to push image for service '{s.name}'. Please use an "
            "image tag that can be pushed to a Docker "
            "registry.".format(s=service))
        raise

    if not digest:
        raise ValueError("Failed to get digest for %s" % service.name)

    repo, _, _ = parse_repository_tag(service.options['image'])
    identifier = '{repo}@{digest}'.format(repo=repo, digest=digest)

    # only do this if RepoDigests isn't already populated
    image = service.image()
    if not image['RepoDigests']:
        # Pull by digest so that image['RepoDigests'] is populated for next time
        # and we don't have to pull/push again
        service.client.pull(identifier)
        log.info("Stored digest for {}".format(service.image_name))

    return identifier
|
||||
|
||||
|
||||
def to_bundle(config, image_digests):
    """Convert a loaded Compose *config* into a bundle dict.

    *image_digests* maps service name -> digest identifier used as each
    service's 'Image'. Top-level 'networks' and 'volumes' have no bundle
    equivalent and are dropped with a warning.
    """
    if config.networks:
        # logger.warning: .warn() is a deprecated alias
        log.warning("Unsupported top level key 'networks' - ignoring")

    if config.volumes:
        log.warning("Unsupported top level key 'volumes' - ignoring")

    config = denormalize_config(config)

    return {
        'Version': VERSION,
        'Services': {
            name: convert_service_to_bundle(
                name,
                service_dict,
                image_digests[name],
            )
            for name, service_dict in config['services'].items()
        },
    }
|
||||
|
||||
|
||||
def convert_service_to_bundle(name, service_dict, image_digest):
    """Build the bundle 'Services' entry for a single service.

    Only a subset of Compose options can be expressed in a bundle:
    ignored keys are dropped silently, unsupported ones with a warning.
    """
    container_config = {'Image': image_digest}

    for key, value in service_dict.items():
        if key in IGNORED_KEYS:
            continue

        if key not in SUPPORTED_KEYS:
            log.warn("Unsupported key '{}' in services.{} - ignoring".format(key, name))
        elif key == 'environment':
            # Entries with falsy values are dropped; a bundle cannot
            # express "inherit this variable from the environment".
            container_config['Env'] = format_environment(
                {envkey: envvalue for envkey, envvalue in value.items() if envvalue}
            )
        elif key in SERVICE_KEYS:
            container_config[SERVICE_KEYS[key]] = value

    set_command_and_args(
        container_config,
        service_dict.get('entrypoint', []),
        service_dict.get('command', []))
    container_config['Networks'] = make_service_networks(name, service_dict)

    port_specs = make_port_specs(service_dict)
    if port_specs:
        container_config['Ports'] = port_specs

    return container_config
|
||||
|
||||
|
||||
# See https://github.com/docker/swarmkit/blob//agent/exec/container/container.go#L95
def set_command_and_args(config, entrypoint, command):
    """Set ``config['Command']`` / ``config['Args']`` from the Compose
    ``entrypoint`` and ``command`` options, mirroring swarmkit semantics.

    String forms are shell-split first. With an entrypoint present, the
    command is folded into 'Command'; otherwise a non-empty command
    becomes 'Args' on its own.
    """
    entrypoint_list = (
        split_command(entrypoint)
        if isinstance(entrypoint, six.string_types)
        else entrypoint
    )
    command_list = (
        split_command(command)
        if isinstance(command, six.string_types)
        else command
    )

    if entrypoint_list:
        config['Command'] = entrypoint_list + command_list
    elif command_list:
        config['Args'] = command_list
|
||||
|
||||
|
||||
def make_service_networks(name, service_dict):
    """Return the list of network names this service attaches to.

    Per-network options (aliases, static addresses, ...) cannot be
    expressed in a bundle, so each one is warned about and dropped.
    """
    network_defs = get_network_defs_for_service(service_dict)

    for network_name, network_def in network_defs.items():
        for unsupported_key in network_def.keys():
            log.warn(
                "Unsupported key '{}' in services.{}.networks.{} - ignoring"
                .format(unsupported_key, name, network_name))

    return list(network_defs)
|
||||
|
||||
|
||||
def make_port_specs(service_dict):
    """Collect port specs from the 'ports' and 'expose' options.

    Duplicate specs are removed while preserving first-seen order.
    """
    internal_ports = []
    for port_def in service_dict.get('ports', []):
        internal_ports.extend(split_port(port_def)[0])
    internal_ports.extend(service_dict.get('expose', []))

    specs = []
    for internal_port in internal_ports:
        spec = make_port_spec(internal_port)
        if spec not in specs:
            specs.append(spec)

    return specs
|
||||
|
||||
|
||||
def make_port_spec(value):
    """Turn a port value such as ``8080`` or ``'53/udp'`` into a bundle
    port-spec dict with 'Protocol' and integer 'Port' keys (TCP default).
    """
    port, _, protocol = six.text_type(value).partition('/')
    return {
        'Protocol': protocol or 'tcp',
        'Port': int(port),
    }
|
||||
@@ -4,9 +4,11 @@ from __future__ import unicode_literals
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import ssl
|
||||
|
||||
import six
|
||||
|
||||
from . import errors
|
||||
from . import verbose_proxy
|
||||
from .. import config
|
||||
from ..config.environment import Environment
|
||||
@@ -21,17 +23,30 @@ log = logging.getLogger(__name__)
|
||||
|
||||
def project_from_options(project_dir, options):
|
||||
environment = Environment.from_env_file(project_dir)
|
||||
host = options.get('--host')
|
||||
if host is not None:
|
||||
host = host.lstrip('=')
|
||||
return get_project(
|
||||
project_dir,
|
||||
get_config_path_from_options(project_dir, options, environment),
|
||||
project_name=options.get('--project-name'),
|
||||
verbose=options.get('--verbose'),
|
||||
host=options.get('--host'),
|
||||
host=host,
|
||||
tls_config=tls_config_from_options(options),
|
||||
environment=environment
|
||||
)
|
||||
|
||||
|
||||
def get_config_from_options(base_dir, options):
|
||||
environment = Environment.from_env_file(base_dir)
|
||||
config_path = get_config_path_from_options(
|
||||
base_dir, options, environment
|
||||
)
|
||||
return config.load(
|
||||
config.find(base_dir, config_path, environment)
|
||||
)
|
||||
|
||||
|
||||
def get_config_path_from_options(base_dir, options, environment):
|
||||
file_option = options.get('--file')
|
||||
if file_option:
|
||||
@@ -43,10 +58,29 @@ def get_config_path_from_options(base_dir, options, environment):
|
||||
return None
|
||||
|
||||
|
||||
def get_client(environment, verbose=False, version=None, tls_config=None, host=None):
|
||||
def get_tls_version(environment):
|
||||
compose_tls_version = environment.get('COMPOSE_TLS_VERSION', None)
|
||||
if not compose_tls_version:
|
||||
return None
|
||||
|
||||
tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
|
||||
if not hasattr(ssl, tls_attr_name):
|
||||
log.warn(
|
||||
'The "{}" protocol is unavailable. You may need to update your '
|
||||
'version of Python or OpenSSL. Falling back to TLSv1 (default).'
|
||||
.format(compose_tls_version)
|
||||
)
|
||||
return None
|
||||
|
||||
return getattr(ssl, tls_attr_name)
|
||||
|
||||
|
||||
def get_client(environment, verbose=False, version=None, tls_config=None, host=None,
|
||||
tls_version=None):
|
||||
|
||||
client = docker_client(
|
||||
version=version, tls_config=tls_config, host=host,
|
||||
environment=environment
|
||||
environment=environment, tls_version=get_tls_version(environment)
|
||||
)
|
||||
if verbose:
|
||||
version_info = six.iteritems(client.version())
|
||||
@@ -71,12 +105,14 @@ def get_project(project_dir, config_path=None, project_name=None, verbose=False,
|
||||
api_version = environment.get(
|
||||
'COMPOSE_API_VERSION',
|
||||
API_VERSIONS[config_data.version])
|
||||
|
||||
client = get_client(
|
||||
verbose=verbose, version=api_version, tls_config=tls_config,
|
||||
host=host, environment=environment
|
||||
)
|
||||
|
||||
return Project.from_config(project_name, config_data, client)
|
||||
with errors.handle_connection_errors(client):
|
||||
return Project.from_config(project_name, config_data, client)
|
||||
|
||||
|
||||
def get_project_name(working_dir, project_name=None, environment=None):
|
||||
|
||||
@@ -10,15 +10,17 @@ from docker.utils import kwargs_from_env
|
||||
|
||||
from ..const import HTTP_TIMEOUT
|
||||
from .errors import UserError
|
||||
from .utils import generate_user_agent
|
||||
from .utils import unquote_path
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def tls_config_from_options(options):
|
||||
tls = options.get('--tls', False)
|
||||
ca_cert = options.get('--tlscacert')
|
||||
cert = options.get('--tlscert')
|
||||
key = options.get('--tlskey')
|
||||
ca_cert = unquote_path(options.get('--tlscacert'))
|
||||
cert = unquote_path(options.get('--tlscert'))
|
||||
key = unquote_path(options.get('--tlskey'))
|
||||
verify = options.get('--tlsverify')
|
||||
skip_hostname_check = options.get('--skip-hostname-check', False)
|
||||
|
||||
@@ -39,17 +41,14 @@ def tls_config_from_options(options):
|
||||
return None
|
||||
|
||||
|
||||
def docker_client(environment, version=None, tls_config=None, host=None):
|
||||
def docker_client(environment, version=None, tls_config=None, host=None,
|
||||
tls_version=None):
|
||||
"""
|
||||
Returns a docker-py client configured using environment variables
|
||||
according to the same logic as the official Docker client.
|
||||
"""
|
||||
if 'DOCKER_CLIENT_TIMEOUT' in environment:
|
||||
log.warn("The DOCKER_CLIENT_TIMEOUT environment variable is deprecated. "
|
||||
"Please use COMPOSE_HTTP_TIMEOUT instead.")
|
||||
|
||||
try:
|
||||
kwargs = kwargs_from_env(environment=environment)
|
||||
kwargs = kwargs_from_env(environment=environment, ssl_version=tls_version)
|
||||
except TLSParameterError:
|
||||
raise UserError(
|
||||
"TLS configuration is invalid - make sure your DOCKER_TLS_VERIFY "
|
||||
@@ -70,4 +69,6 @@ def docker_client(environment, version=None, tls_config=None, host=None):
|
||||
else:
|
||||
kwargs['timeout'] = HTTP_TIMEOUT
|
||||
|
||||
kwargs['user_agent'] = generate_user_agent()
|
||||
|
||||
return Client(**kwargs)
|
||||
|
||||
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
|
||||
import contextlib
|
||||
import logging
|
||||
import socket
|
||||
from distutils.spawn import find_executable
|
||||
from textwrap import dedent
|
||||
|
||||
from docker.errors import APIError
|
||||
@@ -13,10 +14,10 @@ from requests.exceptions import SSLError
|
||||
from requests.packages.urllib3.exceptions import ReadTimeoutError
|
||||
|
||||
from ..const import API_VERSION_TO_ENGINE_VERSION
|
||||
from ..const import HTTP_TIMEOUT
|
||||
from .utils import call_silently
|
||||
from .utils import is_docker_for_mac_installed
|
||||
from .utils import is_mac
|
||||
from .utils import is_ubuntu
|
||||
from .utils import is_windows
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
@@ -46,33 +47,24 @@ def handle_connection_errors(client):
|
||||
raise ConnectionError()
|
||||
except RequestsConnectionError as e:
|
||||
if e.args and isinstance(e.args[0], ReadTimeoutError):
|
||||
log_timeout_error()
|
||||
log_timeout_error(client.timeout)
|
||||
raise ConnectionError()
|
||||
|
||||
if call_silently(['which', 'docker']) != 0:
|
||||
if is_mac():
|
||||
exit_with_error(docker_not_found_mac)
|
||||
if is_ubuntu():
|
||||
exit_with_error(docker_not_found_ubuntu)
|
||||
exit_with_error(docker_not_found_generic)
|
||||
if call_silently(['which', 'docker-machine']) == 0:
|
||||
exit_with_error(conn_error_docker_machine)
|
||||
exit_with_error(conn_error_generic.format(url=client.base_url))
|
||||
exit_with_error(get_conn_error_message(client.base_url))
|
||||
except APIError as e:
|
||||
log_api_error(e, client.api_version)
|
||||
raise ConnectionError()
|
||||
except (ReadTimeout, socket.timeout) as e:
|
||||
log_timeout_error()
|
||||
log_timeout_error(client.timeout)
|
||||
raise ConnectionError()
|
||||
|
||||
|
||||
def log_timeout_error():
|
||||
def log_timeout_error(timeout):
|
||||
log.error(
|
||||
"An HTTP request took too long to complete. Retry with --verbose to "
|
||||
"obtain debug information.\n"
|
||||
"If you encounter this issue regularly because of slow network "
|
||||
"conditions, consider setting COMPOSE_HTTP_TIMEOUT to a higher "
|
||||
"value (current value: %s)." % HTTP_TIMEOUT)
|
||||
"value (current value: %s)." % timeout)
|
||||
|
||||
|
||||
def log_api_error(e, client_version):
|
||||
@@ -97,31 +89,46 @@ def exit_with_error(msg):
|
||||
raise ConnectionError()
|
||||
|
||||
|
||||
docker_not_found_mac = """
|
||||
Couldn't connect to Docker daemon. You might need to install Docker:
|
||||
|
||||
https://docs.docker.com/engine/installation/mac/
|
||||
"""
|
||||
def get_conn_error_message(url):
|
||||
if find_executable('docker') is None:
|
||||
return docker_not_found_msg("Couldn't connect to Docker daemon.")
|
||||
if is_docker_for_mac_installed():
|
||||
return conn_error_docker_for_mac
|
||||
if find_executable('docker-machine') is not None:
|
||||
return conn_error_docker_machine
|
||||
return conn_error_generic.format(url=url)
|
||||
|
||||
|
||||
docker_not_found_ubuntu = """
|
||||
Couldn't connect to Docker daemon. You might need to install Docker:
|
||||
|
||||
https://docs.docker.com/engine/installation/ubuntulinux/
|
||||
"""
|
||||
def docker_not_found_msg(problem):
|
||||
return "{} You might need to install Docker:\n\n{}".format(
|
||||
problem, docker_install_url())
|
||||
|
||||
|
||||
docker_not_found_generic = """
|
||||
Couldn't connect to Docker daemon. You might need to install Docker:
|
||||
def docker_install_url():
|
||||
if is_mac():
|
||||
return docker_install_url_mac
|
||||
elif is_ubuntu():
|
||||
return docker_install_url_ubuntu
|
||||
elif is_windows():
|
||||
return docker_install_url_windows
|
||||
else:
|
||||
return docker_install_url_generic
|
||||
|
||||
https://docs.docker.com/engine/installation/
|
||||
"""
|
||||
|
||||
docker_install_url_mac = "https://docs.docker.com/engine/installation/mac/"
|
||||
docker_install_url_ubuntu = "https://docs.docker.com/engine/installation/ubuntulinux/"
|
||||
docker_install_url_windows = "https://docs.docker.com/engine/installation/windows/"
|
||||
docker_install_url_generic = "https://docs.docker.com/engine/installation/"
|
||||
|
||||
|
||||
conn_error_docker_machine = """
|
||||
Couldn't connect to Docker daemon - you might need to run `docker-machine start default`.
|
||||
"""
|
||||
|
||||
conn_error_docker_for_mac = """
|
||||
Couldn't connect to Docker daemon. You might need to start Docker for Mac.
|
||||
"""
|
||||
|
||||
|
||||
conn_error_generic = """
|
||||
Couldn't connect to Docker daemon at {url} - is it running?
|
||||
|
||||
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
|
||||
import logging
|
||||
import os
|
||||
|
||||
import six
|
||||
import texttable
|
||||
|
||||
from compose.cli import colors
|
||||
@@ -44,5 +45,7 @@ class ConsoleWarningFormatter(logging.Formatter):
|
||||
return ''
|
||||
|
||||
def format(self, record):
|
||||
if isinstance(record.msg, six.binary_type):
|
||||
record.msg = record.msg.decode('utf-8')
|
||||
message = super(ConsoleWarningFormatter, self).format(record)
|
||||
return self.get_level_message(record) + message
|
||||
return '{0}{1}'.format(self.get_level_message(record), message)
|
||||
|
||||
@@ -6,6 +6,7 @@ from collections import namedtuple
|
||||
from itertools import cycle
|
||||
from threading import Thread
|
||||
|
||||
from docker.errors import APIError
|
||||
from six.moves import _thread as thread
|
||||
from six.moves.queue import Empty
|
||||
from six.moves.queue import Queue
|
||||
@@ -176,8 +177,14 @@ def build_log_generator(container, log_args):
|
||||
|
||||
|
||||
def wait_on_exit(container):
|
||||
exit_code = container.wait()
|
||||
return "%s exited with code %s\n" % (container.name, exit_code)
|
||||
try:
|
||||
exit_code = container.wait()
|
||||
return "%s exited with code %s\n" % (container.name, exit_code)
|
||||
except APIError as e:
|
||||
return "Unexpected API error for %s (HTTP code %s)\nResponse body:\n%s\n" % (
|
||||
container.name, e.response.status_code,
|
||||
e.response.text or '[empty]'
|
||||
)
|
||||
|
||||
|
||||
def start_producer_thread(thread_args):
|
||||
|
||||
@@ -6,30 +6,38 @@ import contextlib
|
||||
import functools
|
||||
import json
|
||||
import logging
|
||||
import pipes
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from distutils.spawn import find_executable
|
||||
from inspect import getdoc
|
||||
from operator import attrgetter
|
||||
|
||||
from . import errors
|
||||
from . import signals
|
||||
from .. import __version__
|
||||
from ..config import config
|
||||
from ..bundle import get_image_digests
|
||||
from ..bundle import MissingDigests
|
||||
from ..bundle import serialize_bundle
|
||||
from ..config import ConfigurationError
|
||||
from ..config import parse_environment
|
||||
from ..config.environment import Environment
|
||||
from ..config.serialize import serialize_config
|
||||
from ..const import DEFAULT_TIMEOUT
|
||||
from ..const import IS_WINDOWS_PLATFORM
|
||||
from ..errors import StreamParseError
|
||||
from ..progress_stream import StreamOutputError
|
||||
from ..project import NoSuchService
|
||||
from ..project import OneOffFilter
|
||||
from ..project import ProjectError
|
||||
from ..service import BuildAction
|
||||
from ..service import BuildError
|
||||
from ..service import ConvergenceStrategy
|
||||
from ..service import ImageType
|
||||
from ..service import NeedsBuildError
|
||||
from .command import get_config_path_from_options
|
||||
from ..service import OperationFailedError
|
||||
from .command import get_config_from_options
|
||||
from .command import project_from_options
|
||||
from .docopt_command import DocoptDispatcher
|
||||
from .docopt_command import get_handler
|
||||
@@ -58,7 +66,8 @@ def main():
|
||||
except (KeyboardInterrupt, signals.ShutdownException):
|
||||
log.error("Aborting.")
|
||||
sys.exit(1)
|
||||
except (UserError, NoSuchService, ConfigurationError) as e:
|
||||
except (UserError, NoSuchService, ConfigurationError,
|
||||
ProjectError, OperationFailedError) as e:
|
||||
log.error(e.msg)
|
||||
sys.exit(1)
|
||||
except BuildError as e:
|
||||
@@ -70,7 +79,7 @@ def main():
|
||||
except NeedsBuildError as e:
|
||||
log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
|
||||
sys.exit(1)
|
||||
except errors.ConnectionError:
|
||||
except (errors.ConnectionError, StreamParseError):
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@@ -97,7 +106,7 @@ def perform_command(options, handler, command_options):
|
||||
handler(command_options)
|
||||
return
|
||||
|
||||
if options['COMMAND'] == 'config':
|
||||
if options['COMMAND'] in ('config', 'bundle'):
|
||||
command = TopLevelCommand(None)
|
||||
handler(command, options, command_options)
|
||||
return
|
||||
@@ -142,7 +151,7 @@ class TopLevelCommand(object):
|
||||
"""Define and run multi-container applications with Docker.
|
||||
|
||||
Usage:
|
||||
docker-compose [-f=<arg>...] [options] [COMMAND] [ARGS...]
|
||||
docker-compose [-f <arg>...] [options] [COMMAND] [ARGS...]
|
||||
docker-compose -h|--help
|
||||
|
||||
Options:
|
||||
@@ -163,6 +172,7 @@ class TopLevelCommand(object):
|
||||
|
||||
Commands:
|
||||
build Build or rebuild services
|
||||
bundle Generate a Docker bundle from the Compose file
|
||||
config Validate and view the compose file
|
||||
create Create services
|
||||
down Stop and remove containers, networks, images, and volumes
|
||||
@@ -174,7 +184,8 @@ class TopLevelCommand(object):
|
||||
pause Pause services
|
||||
port Print the public port for a port binding
|
||||
ps List containers
|
||||
pull Pulls service images
|
||||
pull Pull service images
|
||||
push Push service images
|
||||
restart Restart services
|
||||
rm Remove stopped containers
|
||||
run Run a one-off command
|
||||
@@ -211,6 +222,75 @@ class TopLevelCommand(object):
|
||||
pull=bool(options.get('--pull', False)),
|
||||
force_rm=bool(options.get('--force-rm', False)))
|
||||
|
||||
def bundle(self, config_options, options):
|
||||
"""
|
||||
Generate a Distributed Application Bundle (DAB) from the Compose file.
|
||||
|
||||
Images must have digests stored, which requires interaction with a
|
||||
Docker registry. If digests aren't stored for all images, you can fetch
|
||||
them with `docker-compose pull` or `docker-compose push`. To push images
|
||||
automatically when bundling, pass `--push-images`. Only services with
|
||||
a `build` option specified will have their images pushed.
|
||||
|
||||
Usage: bundle [options]
|
||||
|
||||
Options:
|
||||
--push-images Automatically push images for any services
|
||||
which have a `build` option specified.
|
||||
|
||||
-o, --output PATH Path to write the bundle file to.
|
||||
Defaults to "<project name>.dab".
|
||||
"""
|
||||
self.project = project_from_options('.', config_options)
|
||||
compose_config = get_config_from_options(self.project_dir, config_options)
|
||||
|
||||
output = options["--output"]
|
||||
if not output:
|
||||
output = "{}.dab".format(self.project.name)
|
||||
|
||||
with errors.handle_connection_errors(self.project.client):
|
||||
try:
|
||||
image_digests = get_image_digests(
|
||||
self.project,
|
||||
allow_push=options['--push-images'],
|
||||
)
|
||||
except MissingDigests as e:
|
||||
def list_images(images):
|
||||
return "\n".join(" {}".format(name) for name in sorted(images))
|
||||
|
||||
paras = ["Some images are missing digests."]
|
||||
|
||||
if e.needs_push:
|
||||
command_hint = (
|
||||
"Use `docker-compose push {}` to push them. "
|
||||
"You can do this automatically with `docker-compose bundle --push-images`."
|
||||
.format(" ".join(sorted(e.needs_push)))
|
||||
)
|
||||
paras += [
|
||||
"The following images can be pushed:",
|
||||
list_images(e.needs_push),
|
||||
command_hint,
|
||||
]
|
||||
|
||||
if e.needs_pull:
|
||||
command_hint = (
|
||||
"Use `docker-compose pull {}` to pull them. "
|
||||
.format(" ".join(sorted(e.needs_pull)))
|
||||
)
|
||||
|
||||
paras += [
|
||||
"The following images need to be pulled:",
|
||||
list_images(e.needs_pull),
|
||||
command_hint,
|
||||
]
|
||||
|
||||
raise UserError("\n\n".join(paras))
|
||||
|
||||
with open(output, 'w') as f:
|
||||
f.write(serialize_bundle(compose_config, image_digests))
|
||||
|
||||
log.info("Wrote bundle to {}".format(output))
|
||||
|
||||
def config(self, config_options, options):
|
||||
"""
|
||||
Validate and view the compose file.
|
||||
@@ -223,13 +303,7 @@ class TopLevelCommand(object):
|
||||
--services Print the service names, one per line.
|
||||
|
||||
"""
|
||||
environment = Environment.from_env_file(self.project_dir)
|
||||
config_path = get_config_path_from_options(
|
||||
self.project_dir, config_options, environment
|
||||
)
|
||||
compose_config = config.load(
|
||||
config.find(self.project_dir, config_path, environment)
|
||||
)
|
||||
compose_config = get_config_from_options(self.project_dir, config_options)
|
||||
|
||||
if options['--quiet']:
|
||||
return
|
||||
@@ -264,18 +338,29 @@ class TopLevelCommand(object):
|
||||
|
||||
def down(self, options):
|
||||
"""
|
||||
Stop containers and remove containers, networks, volumes, and images
|
||||
created by `up`. Only containers and networks are removed by default.
|
||||
Stops containers and removes containers, networks, volumes, and images
|
||||
created by `up`.
|
||||
|
||||
By default, the only things removed are:
|
||||
|
||||
- Containers for services defined in the Compose file
|
||||
- Networks defined in the `networks` section of the Compose file
|
||||
- The default network, if one is used
|
||||
|
||||
Networks and volumes defined as `external` are never removed.
|
||||
|
||||
Usage: down [options]
|
||||
|
||||
Options:
|
||||
--rmi type Remove images, type may be one of: 'all' to remove
|
||||
all images, or 'local' to remove only images that
|
||||
don't have an custom name set by the `image` field
|
||||
-v, --volumes Remove data volumes
|
||||
--remove-orphans Remove containers for services not defined in
|
||||
the Compose file
|
||||
--rmi type Remove images. Type must be one of:
|
||||
'all': Remove all images used by any service.
|
||||
'local': Remove only images that don't have a custom tag
|
||||
set by the `image` field.
|
||||
-v, --volumes Remove named volumes declared in the `volumes` section
|
||||
of the Compose file and anonymous volumes
|
||||
attached to containers.
|
||||
--remove-orphans Remove containers for services not defined in the
|
||||
Compose file
|
||||
"""
|
||||
image_type = image_type_from_opt('--rmi', options['--rmi'])
|
||||
self.project.down(image_type, options['--volumes'], options['--remove-orphans'])
|
||||
@@ -322,6 +407,8 @@ class TopLevelCommand(object):
|
||||
"""
|
||||
index = int(options.get('--index'))
|
||||
service = self.project.get_service(options['SERVICE'])
|
||||
detach = options['-d']
|
||||
|
||||
try:
|
||||
container = service.get_container(number=index)
|
||||
except ValueError as e:
|
||||
@@ -329,6 +416,28 @@ class TopLevelCommand(object):
|
||||
command = [options['COMMAND']] + options['ARGS']
|
||||
tty = not options["-T"]
|
||||
|
||||
if IS_WINDOWS_PLATFORM and not detach:
|
||||
args = ["exec"]
|
||||
|
||||
if options["-d"]:
|
||||
args += ["--detach"]
|
||||
else:
|
||||
args += ["--interactive"]
|
||||
|
||||
if not options["-T"]:
|
||||
args += ["--tty"]
|
||||
|
||||
if options["--privileged"]:
|
||||
args += ["--privileged"]
|
||||
|
||||
if options["--user"]:
|
||||
args += ["--user", options["--user"]]
|
||||
|
||||
args += [container.id]
|
||||
args += command
|
||||
|
||||
sys.exit(call_docker(args))
|
||||
|
||||
create_exec_options = {
|
||||
"privileged": options["--privileged"],
|
||||
"user": options["--user"],
|
||||
@@ -338,7 +447,7 @@ class TopLevelCommand(object):
|
||||
|
||||
exec_id = container.create_exec(command, **create_exec_options)
|
||||
|
||||
if options['-d']:
|
||||
if detach:
|
||||
container.start_exec(exec_id, tty=tty)
|
||||
return
|
||||
|
||||
@@ -361,10 +470,14 @@ class TopLevelCommand(object):
|
||||
"""
|
||||
Get help on a command.
|
||||
|
||||
Usage: help COMMAND
|
||||
Usage: help [COMMAND]
|
||||
"""
|
||||
handler = get_handler(cls, options['COMMAND'])
|
||||
raise SystemExit(getdoc(handler))
|
||||
if options['COMMAND']:
|
||||
subject = get_handler(cls, options['COMMAND'])
|
||||
else:
|
||||
subject = cls
|
||||
|
||||
print(getdoc(subject))
|
||||
|
||||
def kill(self, options):
|
||||
"""
|
||||
@@ -411,7 +524,8 @@ class TopLevelCommand(object):
|
||||
self.project,
|
||||
containers,
|
||||
options['--no-color'],
|
||||
log_args).run()
|
||||
log_args,
|
||||
event_stream=self.project.events(service_names=options['SERVICE'])).run()
|
||||
|
||||
def pause(self, options):
|
||||
"""
|
||||
@@ -494,12 +608,26 @@ class TopLevelCommand(object):
|
||||
ignore_pull_failures=options.get('--ignore-pull-failures')
|
||||
)
|
||||
|
||||
def push(self, options):
|
||||
"""
|
||||
Pushes images for services.
|
||||
|
||||
Usage: push [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--ignore-push-failures Push what it can and ignores images with push failures.
|
||||
"""
|
||||
self.project.push(
|
||||
service_names=options['SERVICE'],
|
||||
ignore_push_failures=options.get('--ignore-push-failures')
|
||||
)
|
||||
|
||||
def rm(self, options):
|
||||
"""
|
||||
Remove stopped service containers.
|
||||
Removes stopped service containers.
|
||||
|
||||
By default, volumes attached to containers will not be removed. You can see all
|
||||
volumes with `docker volume ls`.
|
||||
By default, anonymous volumes attached to containers will not be removed. You
|
||||
can override this with `-v`. To list all volumes, use `docker volume ls`.
|
||||
|
||||
Any data which is not in a volume will be lost.
|
||||
|
||||
@@ -507,18 +635,15 @@ class TopLevelCommand(object):
|
||||
|
||||
Options:
|
||||
-f, --force Don't ask to confirm removal
|
||||
-v Remove volumes associated with containers
|
||||
-a, --all Also remove one-off containers created by
|
||||
docker-compose run
|
||||
-v Remove any anonymous volumes attached to containers
|
||||
-a, --all Deprecated - no effect.
|
||||
"""
|
||||
if options.get('--all'):
|
||||
one_off = OneOffFilter.include
|
||||
else:
|
||||
log.warn(
|
||||
'Not including one-off containers created by `docker-compose run`.\n'
|
||||
'To include them, use `docker-compose rm --all`.\n'
|
||||
'This will be the default behavior in the next version of Compose.\n')
|
||||
one_off = OneOffFilter.exclude
|
||||
'--all flag is obsolete. This is now the default behavior '
|
||||
'of `docker-compose rm`'
|
||||
)
|
||||
one_off = OneOffFilter.include
|
||||
|
||||
all_containers = self.project.containers(
|
||||
service_names=options['SERVICE'], stopped=True, one_off=one_off
|
||||
@@ -570,20 +695,16 @@ class TopLevelCommand(object):
|
||||
service = self.project.get_service(options['SERVICE'])
|
||||
detach = options['-d']
|
||||
|
||||
if IS_WINDOWS_PLATFORM and not detach:
|
||||
raise UserError(
|
||||
"Interactive mode is not yet supported on Windows.\n"
|
||||
"Please pass the -d flag when using `docker-compose run`."
|
||||
)
|
||||
|
||||
if options['--publish'] and options['--service-ports']:
|
||||
raise UserError(
|
||||
'Service port mapping and manual port mapping '
|
||||
'can not be used togather'
|
||||
'can not be used together'
|
||||
)
|
||||
|
||||
if options['COMMAND']:
|
||||
if options['COMMAND'] is not None:
|
||||
command = [options['COMMAND']] + options['ARGS']
|
||||
elif options['--entrypoint'] is not None:
|
||||
command = []
|
||||
else:
|
||||
command = service.options.get('command')
|
||||
|
||||
@@ -806,7 +927,9 @@ def build_container_options(options, detach, command):
|
||||
}
|
||||
|
||||
if options['-e']:
|
||||
container_options['environment'] = parse_environment(options['-e'])
|
||||
container_options['environment'] = Environment.from_command_line(
|
||||
parse_environment(options['-e'])
|
||||
)
|
||||
|
||||
if options['--entrypoint']:
|
||||
container_options['entrypoint'] = options.get('--entrypoint')
|
||||
@@ -860,17 +983,20 @@ def run_one_off_container(container_options, project, service, options):
|
||||
signals.set_signal_handler_to_shutdown()
|
||||
try:
|
||||
try:
|
||||
operation = RunOperation(
|
||||
project.client,
|
||||
container.id,
|
||||
interactive=not options['-T'],
|
||||
logs=False,
|
||||
)
|
||||
pty = PseudoTerminal(project.client, operation)
|
||||
sockets = pty.sockets()
|
||||
service.start_container(container)
|
||||
pty.start(sockets)
|
||||
exit_code = container.wait()
|
||||
if IS_WINDOWS_PLATFORM:
|
||||
exit_code = call_docker(["start", "--attach", "--interactive", container.id])
|
||||
else:
|
||||
operation = RunOperation(
|
||||
project.client,
|
||||
container.id,
|
||||
interactive=not options['-T'],
|
||||
logs=False,
|
||||
)
|
||||
pty = PseudoTerminal(project.client, operation)
|
||||
sockets = pty.sockets()
|
||||
service.start_container(container)
|
||||
pty.start(sockets)
|
||||
exit_code = container.wait()
|
||||
except signals.ShutdownException:
|
||||
project.client.stop(container.id)
|
||||
exit_code = 1
|
||||
@@ -935,3 +1061,14 @@ def exit_if(condition, message, exit_code):
|
||||
if condition:
|
||||
log.error(message)
|
||||
raise SystemExit(exit_code)
|
||||
|
||||
|
||||
def call_docker(args):
|
||||
executable_path = find_executable('docker')
|
||||
if not executable_path:
|
||||
raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))
|
||||
|
||||
args = [executable_path] + args
|
||||
log.debug(" ".join(map(pipes.quote, args)))
|
||||
|
||||
return subprocess.call(args)
|
||||
|
||||
@@ -6,11 +6,19 @@ import os
|
||||
import platform
|
||||
import ssl
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
import docker
|
||||
from six.moves import input
|
||||
|
||||
import compose
|
||||
from ..const import IS_WINDOWS_PLATFORM
|
||||
|
||||
# WindowsError is not defined on non-win32 platforms. Avoid runtime errors by
|
||||
# defining it as OSError (its parent class) if missing.
|
||||
try:
|
||||
WindowsError
|
||||
except NameError:
|
||||
WindowsError = OSError
|
||||
|
||||
|
||||
def yesno(prompt, default=None):
|
||||
@@ -35,6 +43,16 @@ def yesno(prompt, default=None):
|
||||
return None
|
||||
|
||||
|
||||
def input(prompt):
|
||||
"""
|
||||
Version of input (raw_input in Python 2) which forces a flush of sys.stdout
|
||||
to avoid problems where the prompt fails to appear due to line buffering
|
||||
"""
|
||||
sys.stdout.write(prompt)
|
||||
sys.stdout.flush()
|
||||
return sys.stdin.readline().rstrip('\n')
|
||||
|
||||
|
||||
def call_silently(*args, **kwargs):
|
||||
"""
|
||||
Like subprocess.call(), but redirects stdout and stderr to /dev/null.
|
||||
@@ -56,6 +74,10 @@ def is_ubuntu():
|
||||
return platform.system() == 'Linux' and platform.linux_distribution()[0] == 'Ubuntu'
|
||||
|
||||
|
||||
def is_windows():
|
||||
return IS_WINDOWS_PLATFORM
|
||||
|
||||
|
||||
def get_version_info(scope):
|
||||
versioninfo = 'docker-compose version {}, build {}'.format(
|
||||
compose.__version__,
|
||||
@@ -86,3 +108,30 @@ def get_build_version():
|
||||
|
||||
with open(filename) as fh:
|
||||
return fh.read().strip()
|
||||
|
||||
|
||||
def is_docker_for_mac_installed():
|
||||
return is_mac() and os.path.isdir('/Applications/Docker.app')
|
||||
|
||||
|
||||
def generate_user_agent():
|
||||
parts = [
|
||||
"docker-compose/{}".format(compose.__version__),
|
||||
"docker-py/{}".format(docker.__version__),
|
||||
]
|
||||
try:
|
||||
p_system = platform.system()
|
||||
p_release = platform.release()
|
||||
except IOError:
|
||||
pass
|
||||
else:
|
||||
parts.append("{}/{}".format(p_system, p_release))
|
||||
return " ".join(parts)
|
||||
|
||||
|
||||
def unquote_path(s):
|
||||
if not s:
|
||||
return s
|
||||
if s[0] == '"' and s[-1] == '"':
|
||||
return s[1:-1]
|
||||
return s
|
||||
|
||||
@@ -3,7 +3,6 @@ from __future__ import unicode_literals
|
||||
|
||||
import functools
|
||||
import logging
|
||||
import operator
|
||||
import os
|
||||
import string
|
||||
import sys
|
||||
@@ -15,7 +14,9 @@ from cached_property import cached_property
|
||||
|
||||
from ..const import COMPOSEFILE_V1 as V1
|
||||
from ..const import COMPOSEFILE_V2_0 as V2_0
|
||||
from ..const import COMPOSEFILE_V2_1 as V2_1
|
||||
from ..utils import build_string_dict
|
||||
from ..utils import splitdrive
|
||||
from .environment import env_vars_from_file
|
||||
from .environment import Environment
|
||||
from .environment import split_env
|
||||
@@ -37,6 +38,7 @@ from .validation import validate_against_config_schema
|
||||
from .validation import validate_config_section
|
||||
from .validation import validate_depends_on
|
||||
from .validation import validate_extends_file_path
|
||||
from .validation import validate_links
|
||||
from .validation import validate_network_mode
|
||||
from .validation import validate_service_constraints
|
||||
from .validation import validate_top_level_object
|
||||
@@ -60,6 +62,7 @@ DOCKER_CONFIG_KEYS = [
|
||||
'env_file',
|
||||
'environment',
|
||||
'extra_hosts',
|
||||
'group_add',
|
||||
'hostname',
|
||||
'image',
|
||||
'ipc',
|
||||
@@ -68,7 +71,9 @@ DOCKER_CONFIG_KEYS = [
|
||||
'mac_address',
|
||||
'mem_limit',
|
||||
'memswap_limit',
|
||||
'mem_swappiness',
|
||||
'net',
|
||||
'oom_score_adj'
|
||||
'pid',
|
||||
'ports',
|
||||
'privileged',
|
||||
@@ -170,7 +175,7 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
|
||||
if version == '2':
|
||||
version = V2_0
|
||||
|
||||
if version != V2_0:
|
||||
if version not in (V2_0, V2_1):
|
||||
raise ConfigurationError(
|
||||
'Version in "{}" is unsupported. {}'
|
||||
.format(self.filename, VERSION_EXPLANATION))
|
||||
@@ -356,6 +361,9 @@ def load_mapping(config_files, get_func, entity_type):
|
||||
config['driver_opts']
|
||||
)
|
||||
|
||||
if 'labels' in config:
|
||||
config['labels'] = parse_labels(config['labels'])
|
||||
|
||||
return mapping
|
||||
|
||||
|
||||
@@ -408,31 +416,36 @@ def load_services(config_details, config_file):
|
||||
return build_services(service_config)
|
||||
|
||||
|
||||
def interpolate_config_section(filename, config, section, environment):
|
||||
validate_config_section(filename, config, section)
|
||||
return interpolate_environment_variables(config, section, environment)
|
||||
def interpolate_config_section(config_file, config, section, environment):
|
||||
validate_config_section(config_file.filename, config, section)
|
||||
return interpolate_environment_variables(
|
||||
config_file.version,
|
||||
config,
|
||||
section,
|
||||
environment
|
||||
)
|
||||
|
||||
|
||||
def process_config_file(config_file, environment, service_name=None):
|
||||
services = interpolate_config_section(
|
||||
config_file.filename,
|
||||
config_file,
|
||||
config_file.get_service_dicts(),
|
||||
'service',
|
||||
environment,)
|
||||
environment)
|
||||
|
||||
if config_file.version == V2_0:
|
||||
if config_file.version in (V2_0, V2_1):
|
||||
processed_config = dict(config_file.config)
|
||||
processed_config['services'] = services
|
||||
processed_config['volumes'] = interpolate_config_section(
|
||||
config_file.filename,
|
||||
config_file,
|
||||
config_file.get_volumes(),
|
||||
'volume',
|
||||
environment,)
|
||||
environment)
|
||||
processed_config['networks'] = interpolate_config_section(
|
||||
config_file.filename,
|
||||
config_file,
|
||||
config_file.get_networks(),
|
||||
'network',
|
||||
environment,)
|
||||
environment)
|
||||
|
||||
if config_file.version == V1:
|
||||
processed_config = services
|
||||
@@ -580,6 +593,7 @@ def validate_service(service_config, service_names, version):
|
||||
validate_ulimits(service_config)
|
||||
validate_network_mode(service_config, service_names)
|
||||
validate_depends_on(service_config, service_names)
|
||||
validate_links(service_config, service_names)
|
||||
|
||||
if not service_dict.get('image') and has_uppercase(service_name):
|
||||
raise ConfigurationError(
|
||||
@@ -637,7 +651,10 @@ def finalize_service(service_config, service_names, version, environment):
|
||||
|
||||
if 'volumes' in service_dict:
|
||||
service_dict['volumes'] = [
|
||||
VolumeSpec.parse(v) for v in service_dict['volumes']]
|
||||
VolumeSpec.parse(
|
||||
v, environment.get('COMPOSE_CONVERT_WINDOWS_PATHS')
|
||||
) for v in service_dict['volumes']
|
||||
]
|
||||
|
||||
if 'net' in service_dict:
|
||||
network_mode = service_dict.pop('net')
|
||||
@@ -726,7 +743,7 @@ class MergeDict(dict):
|
||||
|
||||
merged = parse_sequence_func(self.base.get(field, []))
|
||||
merged.update(parse_sequence_func(self.override.get(field, [])))
|
||||
self[field] = [item.repr() for item in merged.values()]
|
||||
self[field] = [item.repr() for item in sorted(merged.values())]
|
||||
|
||||
def merge_scalar(self, field):
|
||||
if self.needs_merge(field):
|
||||
@@ -746,17 +763,16 @@ def merge_service_dicts(base, override, version):
|
||||
md.merge_field(field, merge_path_mappings)
|
||||
|
||||
for field in [
|
||||
'depends_on',
|
||||
'expose',
|
||||
'external_links',
|
||||
'ports',
|
||||
'volumes_from',
|
||||
'ports', 'cap_add', 'cap_drop', 'expose', 'external_links',
|
||||
'security_opt', 'volumes_from', 'depends_on',
|
||||
]:
|
||||
md.merge_field(field, operator.add, default=[])
|
||||
md.merge_field(field, merge_unique_items_lists, default=[])
|
||||
|
||||
for field in ['dns', 'dns_search', 'env_file', 'tmpfs']:
|
||||
md.merge_field(field, merge_list_or_string)
|
||||
|
||||
md.merge_field('logging', merge_logging, default={})
|
||||
|
||||
for field in set(ALLOWED_KEYS) - set(md):
|
||||
md.merge_scalar(field)
|
||||
|
||||
@@ -768,6 +784,10 @@ def merge_service_dicts(base, override, version):
|
||||
return dict(md)
|
||||
|
||||
|
||||
def merge_unique_items_lists(base, override):
|
||||
return sorted(set().union(base, override))
|
||||
|
||||
|
||||
def merge_build(output, base, override):
|
||||
def to_dict(service):
|
||||
build_config = service.get('build', {})
|
||||
@@ -782,6 +802,16 @@ def merge_build(output, base, override):
|
||||
return dict(md)
|
||||
|
||||
|
||||
def merge_logging(base, override):
|
||||
md = MergeDict(base, override)
|
||||
md.merge_scalar('driver')
|
||||
if md.get('driver') == base.get('driver') or base.get('driver') is None:
|
||||
md.merge_mapping('options', lambda m: m or {})
|
||||
else:
|
||||
md['options'] = override.get('options')
|
||||
return dict(md)
|
||||
|
||||
|
||||
def legacy_v1_merge_image_or_build(output, base, override):
|
||||
output.pop('image', None)
|
||||
output.pop('build', None)
|
||||
@@ -928,7 +958,7 @@ def dict_from_path_mappings(path_mappings):
|
||||
|
||||
|
||||
def path_mappings_from_dict(d):
|
||||
return [join_path_mapping(v) for v in d.items()]
|
||||
return [join_path_mapping(v) for v in sorted(d.items())]
|
||||
|
||||
|
||||
def split_path_mapping(volume_path):
|
||||
@@ -937,12 +967,7 @@ def split_path_mapping(volume_path):
|
||||
path. Using splitdrive so windows absolute paths won't cause issues with
|
||||
splitting on ':'.
|
||||
"""
|
||||
# splitdrive has limitations when it comes to relative paths, so when it's
|
||||
# relative, handle special case to set the drive to ''
|
||||
if volume_path.startswith('.') or volume_path.startswith('~'):
|
||||
drive, volume_config = '', volume_path
|
||||
else:
|
||||
drive, volume_config = os.path.splitdrive(volume_path)
|
||||
drive, volume_config = splitdrive(volume_path)
|
||||
|
||||
if ':' in volume_config:
|
||||
(host, container) = volume_config.split(':', 1)
|
||||
|
||||
@@ -85,6 +85,7 @@
|
||||
"mac_address": {"type": "string"},
|
||||
"mem_limit": {"type": ["number", "string"]},
|
||||
"memswap_limit": {"type": ["number", "string"]},
|
||||
"mem_swappiness": {"type": "integer"},
|
||||
"net": {"type": "string"},
|
||||
"pid": {"type": ["string", "null"]},
|
||||
|
||||
|
||||
@@ -139,6 +139,7 @@
|
||||
"mac_address": {"type": "string"},
|
||||
"mem_limit": {"type": ["number", "string"]},
|
||||
"memswap_limit": {"type": ["number", "string"]},
|
||||
"mem_swappiness": {"type": "integer"},
|
||||
"network_mode": {"type": "string"},
|
||||
|
||||
"networks": {
|
||||
@@ -166,6 +167,14 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
|
||||
"group_add": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": ["string", "number"]
|
||||
},
|
||||
"uniqueItems": true
|
||||
},
|
||||
"pid": {"type": ["string", "null"]},
|
||||
|
||||
"ports": {
|
||||
@@ -245,7 +254,8 @@
|
||||
"name": {"type": "string"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
},
|
||||
"internal": {"type": "boolean"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
333
compose/config/config_schema_v2.1.json
Normal file
333
compose/config/config_schema_v2.1.json
Normal file
@@ -0,0 +1,333 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"id": "config_schema_v2.1.json",
|
||||
"type": "object",
|
||||
|
||||
"properties": {
|
||||
"version": {
|
||||
"type": "string"
|
||||
},
|
||||
|
||||
"services": {
|
||||
"id": "#/properties/services",
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^[a-zA-Z0-9._-]+$": {
|
||||
"$ref": "#/definitions/service"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
"networks": {
|
||||
"id": "#/properties/networks",
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^[a-zA-Z0-9._-]+$": {
|
||||
"$ref": "#/definitions/network"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"volumes": {
|
||||
"id": "#/properties/volumes",
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^[a-zA-Z0-9._-]+$": {
|
||||
"$ref": "#/definitions/volume"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
},
|
||||
|
||||
"additionalProperties": false,
|
||||
|
||||
"definitions": {
|
||||
|
||||
"service": {
|
||||
"id": "#/definitions/service",
|
||||
"type": "object",
|
||||
|
||||
"properties": {
|
||||
"build": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {"type": "string"},
|
||||
"dockerfile": {"type": "string"},
|
||||
"args": {"$ref": "#/definitions/list_or_dict"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"cgroup_parent": {"type": "string"},
|
||||
"command": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "array", "items": {"type": "string"}}
|
||||
]
|
||||
},
|
||||
"container_name": {"type": "string"},
|
||||
"cpu_shares": {"type": ["number", "string"]},
|
||||
"cpu_quota": {"type": ["number", "string"]},
|
||||
"cpuset": {"type": "string"},
|
||||
"depends_on": {"$ref": "#/definitions/list_of_strings"},
|
||||
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"dns": {"$ref": "#/definitions/string_or_list"},
|
||||
"dns_search": {"$ref": "#/definitions/string_or_list"},
|
||||
"domainname": {"type": "string"},
|
||||
"entrypoint": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "array", "items": {"type": "string"}}
|
||||
]
|
||||
},
|
||||
"env_file": {"$ref": "#/definitions/string_or_list"},
|
||||
"environment": {"$ref": "#/definitions/list_or_dict"},
|
||||
|
||||
"expose": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": ["string", "number"],
|
||||
"format": "expose"
|
||||
},
|
||||
"uniqueItems": true
|
||||
},
|
||||
|
||||
"extends": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
|
||||
"properties": {
|
||||
"service": {"type": "string"},
|
||||
"file": {"type": "string"}
|
||||
},
|
||||
"required": ["service"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
|
||||
"hostname": {"type": "string"},
|
||||
"image": {"type": "string"},
|
||||
"ipc": {"type": "string"},
|
||||
"isolation": {"type": "string"},
|
||||
"labels": {"$ref": "#/definitions/list_or_dict"},
|
||||
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
|
||||
"logging": {
|
||||
"type": "object",
|
||||
|
||||
"properties": {
|
||||
"driver": {"type": "string"},
|
||||
"options": {"type": "object"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
"mac_address": {"type": "string"},
|
||||
"mem_limit": {"type": ["number", "string"]},
|
||||
"memswap_limit": {"type": ["number", "string"]},
|
||||
"mem_swappiness": {"type": "integer"},
|
||||
"network_mode": {"type": "string"},
|
||||
|
||||
"networks": {
|
||||
"oneOf": [
|
||||
{"$ref": "#/definitions/list_of_strings"},
|
||||
{
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^[a-zA-Z0-9._-]+$": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"aliases": {"$ref": "#/definitions/list_of_strings"},
|
||||
"ipv4_address": {"type": "string"},
|
||||
"ipv6_address": {"type": "string"},
|
||||
"link_local_ips": {"$ref": "#/definitions/list_of_strings"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
{"type": "null"}
|
||||
]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
|
||||
"group_add": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": ["string", "number"]
|
||||
},
|
||||
"uniqueItems": true
|
||||
},
|
||||
"pid": {"type": ["string", "null"]},
|
||||
|
||||
"ports": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": ["string", "number"],
|
||||
"format": "ports"
|
||||
},
|
||||
"uniqueItems": true
|
||||
},
|
||||
|
||||
"privileged": {"type": "boolean"},
|
||||
"read_only": {"type": "boolean"},
|
||||
"restart": {"type": "string"},
|
||||
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"shm_size": {"type": ["number", "string"]},
|
||||
"stdin_open": {"type": "boolean"},
|
||||
"stop_signal": {"type": "string"},
|
||||
"tmpfs": {"$ref": "#/definitions/string_or_list"},
|
||||
"tty": {"type": "boolean"},
|
||||
"ulimits": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^[a-z]+$": {
|
||||
"oneOf": [
|
||||
{"type": "integer"},
|
||||
{
|
||||
"type":"object",
|
||||
"properties": {
|
||||
"hard": {"type": "integer"},
|
||||
"soft": {"type": "integer"}
|
||||
},
|
||||
"required": ["soft", "hard"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"user": {"type": "string"},
|
||||
"volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"volume_driver": {"type": "string"},
|
||||
"volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
|
||||
"working_dir": {"type": "string"}
|
||||
},
|
||||
|
||||
"dependencies": {
|
||||
"memswap_limit": ["mem_limit"]
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
"network": {
|
||||
"id": "#/definitions/network",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"driver": {"type": "string"},
|
||||
"driver_opts": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^.+$": {"type": ["string", "number"]}
|
||||
}
|
||||
},
|
||||
"ipam": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"driver": {"type": "string"},
|
||||
"config": {
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"external": {
|
||||
"type": ["boolean", "object"],
|
||||
"properties": {
|
||||
"name": {"type": "string"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"internal": {"type": "boolean"},
|
||||
"enable_ipv6": {"type": "boolean"},
|
||||
"labels": {"$ref": "#/definitions/list_or_dict"}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
"volume": {
|
||||
"id": "#/definitions/volume",
|
||||
"type": ["object", "null"],
|
||||
"properties": {
|
||||
"driver": {"type": "string"},
|
||||
"driver_opts": {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
"^.+$": {"type": ["string", "number"]}
|
||||
}
|
||||
},
|
||||
"external": {
|
||||
"type": ["boolean", "object"],
|
||||
"properties": {
|
||||
"name": {"type": "string"}
|
||||
}
|
||||
},
|
||||
"labels": {"$ref": "#/definitions/list_or_dict"},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
|
||||
"string_or_list": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"$ref": "#/definitions/list_of_strings"}
|
||||
]
|
||||
},
|
||||
|
||||
"list_of_strings": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"uniqueItems": true
|
||||
},
|
||||
|
||||
"list_or_dict": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
".+": {
|
||||
"type": ["string", "number", "null"]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
|
||||
]
|
||||
},
|
||||
|
||||
"constraints": {
|
||||
"service": {
|
||||
"id": "#/definitions/constraints/service",
|
||||
"anyOf": [
|
||||
{"required": ["build"]},
|
||||
{"required": ["image"]}
|
||||
],
|
||||
"properties": {
|
||||
"build": {
|
||||
"required": ["context"]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -28,6 +28,8 @@ def env_vars_from_file(filename):
|
||||
"""
|
||||
if not os.path.exists(filename):
|
||||
raise ConfigurationError("Couldn't find env file: %s" % filename)
|
||||
elif not os.path.isfile(filename):
|
||||
raise ConfigurationError("%s is not a file." % (filename))
|
||||
env = {}
|
||||
for line in codecs.open(filename, 'r', 'utf-8'):
|
||||
line = line.strip()
|
||||
@@ -58,6 +60,18 @@ class Environment(dict):
|
||||
instance.update(os.environ)
|
||||
return instance
|
||||
|
||||
@classmethod
|
||||
def from_command_line(cls, parsed_env_opts):
|
||||
result = cls()
|
||||
for k, v in parsed_env_opts.items():
|
||||
# Values from the command line take priority, unless they're unset
|
||||
# in which case they take the value from the system's environment
|
||||
if v is None and k in os.environ:
|
||||
result[k] = os.environ[k]
|
||||
else:
|
||||
result[k] = v
|
||||
return result
|
||||
|
||||
def __getitem__(self, key):
|
||||
try:
|
||||
return super(Environment, self).__getitem__(key)
|
||||
|
||||
@@ -3,10 +3,11 @@ from __future__ import unicode_literals
|
||||
|
||||
|
||||
VERSION_EXPLANATION = (
|
||||
'Either specify a version of "2" (or "2.0") and place your service '
|
||||
'definitions under the `services` key, or omit the `version` key and place '
|
||||
'your service definitions at the root of the file to use version 1.\n'
|
||||
'For more on the Compose file format versions, see '
|
||||
'You might be seeing this error because you\'re using the wrong Compose '
|
||||
'file version. Either specify a version of "2" (or "2.0") and place your '
|
||||
'service definitions under the `services` key, or omit the `version` key '
|
||||
'and place your service definitions at the root of the file to use '
|
||||
'version 1.\nFor more on the Compose file format versions, see '
|
||||
'https://docs.docker.com/compose/compose-file/')
|
||||
|
||||
|
||||
|
||||
@@ -7,14 +7,35 @@ from string import Template
|
||||
import six
|
||||
|
||||
from .errors import ConfigurationError
|
||||
from compose.const import COMPOSEFILE_V1 as V1
|
||||
from compose.const import COMPOSEFILE_V2_0 as V2_0
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def interpolate_environment_variables(config, section, environment):
|
||||
class Interpolator(object):
|
||||
|
||||
def __init__(self, templater, mapping):
|
||||
self.templater = templater
|
||||
self.mapping = mapping
|
||||
|
||||
def interpolate(self, string):
|
||||
try:
|
||||
return self.templater(string).substitute(self.mapping)
|
||||
except ValueError:
|
||||
raise InvalidInterpolation(string)
|
||||
|
||||
|
||||
def interpolate_environment_variables(version, config, section, environment):
|
||||
if version in (V2_0, V1):
|
||||
interpolator = Interpolator(Template, environment)
|
||||
else:
|
||||
interpolator = Interpolator(TemplateWithDefaults, environment)
|
||||
|
||||
def process_item(name, config_dict):
|
||||
return dict(
|
||||
(key, interpolate_value(name, key, val, section, environment))
|
||||
(key, interpolate_value(name, key, val, section, interpolator))
|
||||
for key, val in (config_dict or {}).items()
|
||||
)
|
||||
|
||||
@@ -24,9 +45,9 @@ def interpolate_environment_variables(config, section, environment):
|
||||
)
|
||||
|
||||
|
||||
def interpolate_value(name, config_key, value, section, mapping):
|
||||
def interpolate_value(name, config_key, value, section, interpolator):
|
||||
try:
|
||||
return recursive_interpolate(value, mapping)
|
||||
return recursive_interpolate(value, interpolator)
|
||||
except InvalidInterpolation as e:
|
||||
raise ConfigurationError(
|
||||
'Invalid interpolation format for "{config_key}" option '
|
||||
@@ -37,25 +58,44 @@ def interpolate_value(name, config_key, value, section, mapping):
|
||||
string=e.string))
|
||||
|
||||
|
||||
def recursive_interpolate(obj, mapping):
|
||||
def recursive_interpolate(obj, interpolator):
|
||||
if isinstance(obj, six.string_types):
|
||||
return interpolate(obj, mapping)
|
||||
elif isinstance(obj, dict):
|
||||
return interpolator.interpolate(obj)
|
||||
if isinstance(obj, dict):
|
||||
return dict(
|
||||
(key, recursive_interpolate(val, mapping))
|
||||
(key, recursive_interpolate(val, interpolator))
|
||||
for (key, val) in obj.items()
|
||||
)
|
||||
elif isinstance(obj, list):
|
||||
return [recursive_interpolate(val, mapping) for val in obj]
|
||||
else:
|
||||
return obj
|
||||
if isinstance(obj, list):
|
||||
return [recursive_interpolate(val, interpolator) for val in obj]
|
||||
return obj
|
||||
|
||||
|
||||
def interpolate(string, mapping):
|
||||
try:
|
||||
return Template(string).substitute(mapping)
|
||||
except ValueError:
|
||||
raise InvalidInterpolation(string)
|
||||
class TemplateWithDefaults(Template):
|
||||
idpattern = r'[_a-z][_a-z0-9]*(?::?-[^}]+)?'
|
||||
|
||||
# Modified from python2.7/string.py
|
||||
def substitute(self, mapping):
|
||||
# Helper function for .sub()
|
||||
def convert(mo):
|
||||
# Check the most common path first.
|
||||
named = mo.group('named') or mo.group('braced')
|
||||
if named is not None:
|
||||
if ':-' in named:
|
||||
var, _, default = named.partition(':-')
|
||||
return mapping.get(var) or default
|
||||
if '-' in named:
|
||||
var, _, default = named.partition('-')
|
||||
return mapping.get(var, default)
|
||||
val = mapping[named]
|
||||
return '%s' % (val,)
|
||||
if mo.group('escaped') is not None:
|
||||
return self.delimiter
|
||||
if mo.group('invalid') is not None:
|
||||
self._invalid(mo)
|
||||
raise ValueError('Unrecognized named group in pattern',
|
||||
self.pattern)
|
||||
return self.pattern.sub(convert, self.template)
|
||||
|
||||
|
||||
class InvalidInterpolation(Exception):
|
||||
|
||||
@@ -5,6 +5,9 @@ import six
|
||||
import yaml
|
||||
|
||||
from compose.config import types
|
||||
from compose.config.config import V1
|
||||
from compose.config.config import V2_0
|
||||
from compose.config.config import V2_1
|
||||
|
||||
|
||||
def serialize_config_type(dumper, data):
|
||||
@@ -16,15 +19,47 @@ yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
|
||||
yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
|
||||
|
||||
|
||||
def serialize_config(config):
|
||||
output = {
|
||||
'version': config.version,
|
||||
'services': {service.pop('name'): service for service in config.services},
|
||||
'networks': config.networks,
|
||||
def denormalize_config(config):
|
||||
denormalized_services = [
|
||||
denormalize_service_dict(service_dict, config.version)
|
||||
for service_dict in config.services
|
||||
]
|
||||
services = {
|
||||
service_dict.pop('name'): service_dict
|
||||
for service_dict in denormalized_services
|
||||
}
|
||||
networks = config.networks.copy()
|
||||
for net_name, net_conf in networks.items():
|
||||
if 'external_name' in net_conf:
|
||||
del net_conf['external_name']
|
||||
|
||||
version = config.version
|
||||
if version not in (V2_0, V2_1):
|
||||
version = V2_1
|
||||
|
||||
return {
|
||||
'version': version,
|
||||
'services': services,
|
||||
'networks': networks,
|
||||
'volumes': config.volumes,
|
||||
}
|
||||
|
||||
|
||||
def serialize_config(config):
|
||||
return yaml.safe_dump(
|
||||
output,
|
||||
denormalize_config(config),
|
||||
default_flow_style=False,
|
||||
indent=2,
|
||||
width=80)
|
||||
|
||||
|
||||
def denormalize_service_dict(service_dict, version):
|
||||
service_dict = service_dict.copy()
|
||||
|
||||
if 'restart' in service_dict:
|
||||
service_dict['restart'] = types.serialize_restart_spec(service_dict['restart'])
|
||||
|
||||
if version == V1 and 'network_mode' not in service_dict:
|
||||
service_dict['network_mode'] = 'bridge'
|
||||
|
||||
return service_dict
|
||||
|
||||
@@ -5,11 +5,17 @@ from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import os
|
||||
import re
|
||||
from collections import namedtuple
|
||||
|
||||
import six
|
||||
|
||||
from compose.config.config import V1
|
||||
from compose.config.errors import ConfigurationError
|
||||
from compose.const import IS_WINDOWS_PLATFORM
|
||||
from compose.utils import splitdrive
|
||||
|
||||
win32_root_path_pattern = re.compile(r'^[A-Za-z]\:\\.*')
|
||||
|
||||
|
||||
class VolumeFromSpec(namedtuple('_VolumeFromSpec', 'source mode type')):
|
||||
@@ -89,6 +95,15 @@ def parse_restart_spec(restart_config):
|
||||
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
|
||||
|
||||
|
||||
def serialize_restart_spec(restart_spec):
|
||||
if not restart_spec:
|
||||
return ''
|
||||
parts = [restart_spec['Name']]
|
||||
if restart_spec['MaximumRetryCount']:
|
||||
parts.append(six.text_type(restart_spec['MaximumRetryCount']))
|
||||
return ':'.join(parts)
|
||||
|
||||
|
||||
def parse_extra_hosts(extra_hosts_config):
|
||||
if not extra_hosts_config:
|
||||
return {}
|
||||
@@ -105,41 +120,23 @@ def parse_extra_hosts(extra_hosts_config):
|
||||
return extra_hosts_dict
|
||||
|
||||
|
||||
def normalize_paths_for_engine(external_path, internal_path):
|
||||
def normalize_path_for_engine(path):
|
||||
"""Windows paths, c:\my\path\shiny, need to be changed to be compatible with
|
||||
the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
|
||||
"""
|
||||
if not IS_WINDOWS_PLATFORM:
|
||||
return external_path, internal_path
|
||||
drive, tail = splitdrive(path)
|
||||
|
||||
if external_path:
|
||||
drive, tail = os.path.splitdrive(external_path)
|
||||
if drive:
|
||||
path = '/' + drive.lower().rstrip(':') + tail
|
||||
|
||||
if drive:
|
||||
external_path = '/' + drive.lower().rstrip(':') + tail
|
||||
|
||||
external_path = external_path.replace('\\', '/')
|
||||
|
||||
return external_path, internal_path.replace('\\', '/')
|
||||
return path.replace('\\', '/')
|
||||
|
||||
|
||||
class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
|
||||
|
||||
@classmethod
|
||||
def parse(cls, volume_config):
|
||||
"""Parse a volume_config path and split it into external:internal[:mode]
|
||||
parts to be returned as a valid VolumeSpec.
|
||||
"""
|
||||
if IS_WINDOWS_PLATFORM:
|
||||
# relative paths in windows expand to include the drive, eg C:\
|
||||
# so we join the first 2 parts back together to count as one
|
||||
drive, tail = os.path.splitdrive(volume_config)
|
||||
parts = tail.split(":")
|
||||
|
||||
if drive:
|
||||
parts[0] = drive + parts[0]
|
||||
else:
|
||||
parts = volume_config.split(':')
|
||||
def _parse_unix(cls, volume_config):
|
||||
parts = volume_config.split(':')
|
||||
|
||||
if len(parts) > 3:
|
||||
raise ConfigurationError(
|
||||
@@ -147,13 +144,11 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
|
||||
"external:internal[:mode]" % volume_config)
|
||||
|
||||
if len(parts) == 1:
|
||||
external, internal = normalize_paths_for_engine(
|
||||
None,
|
||||
os.path.normpath(parts[0]))
|
||||
external = None
|
||||
internal = os.path.normpath(parts[0])
|
||||
else:
|
||||
external, internal = normalize_paths_for_engine(
|
||||
os.path.normpath(parts[0]),
|
||||
os.path.normpath(parts[1]))
|
||||
external = os.path.normpath(parts[0])
|
||||
internal = os.path.normpath(parts[1])
|
||||
|
||||
mode = 'rw'
|
||||
if len(parts) == 3:
|
||||
@@ -161,13 +156,65 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
|
||||
|
||||
return cls(external, internal, mode)
|
||||
|
||||
@classmethod
|
||||
def _parse_win32(cls, volume_config, normalize):
|
||||
# relative paths in windows expand to include the drive, eg C:\
|
||||
# so we join the first 2 parts back together to count as one
|
||||
mode = 'rw'
|
||||
|
||||
def separate_next_section(volume_config):
|
||||
drive, tail = splitdrive(volume_config)
|
||||
parts = tail.split(':', 1)
|
||||
if drive:
|
||||
parts[0] = drive + parts[0]
|
||||
return parts
|
||||
|
||||
parts = separate_next_section(volume_config)
|
||||
if len(parts) == 1:
|
||||
internal = parts[0]
|
||||
external = None
|
||||
else:
|
||||
external = parts[0]
|
||||
parts = separate_next_section(parts[1])
|
||||
external = os.path.normpath(external)
|
||||
internal = parts[0]
|
||||
if len(parts) > 1:
|
||||
if ':' in parts[1]:
|
||||
raise ConfigurationError(
|
||||
"Volume %s has incorrect format, should be "
|
||||
"external:internal[:mode]" % volume_config
|
||||
)
|
||||
mode = parts[1]
|
||||
|
||||
if normalize:
|
||||
external = normalize_path_for_engine(external) if external else None
|
||||
|
||||
return cls(external, internal, mode)
|
||||
|
||||
@classmethod
|
||||
def parse(cls, volume_config, normalize=False):
|
||||
"""Parse a volume_config path and split it into external:internal[:mode]
|
||||
parts to be returned as a valid VolumeSpec.
|
||||
"""
|
||||
if IS_WINDOWS_PLATFORM:
|
||||
return cls._parse_win32(volume_config, normalize)
|
||||
else:
|
||||
return cls._parse_unix(volume_config)
|
||||
|
||||
def repr(self):
|
||||
external = self.external + ':' if self.external else ''
|
||||
return '{ext}{v.internal}:{v.mode}'.format(ext=external, v=self)
|
||||
|
||||
@property
|
||||
def is_named_volume(self):
|
||||
return self.external and not self.external.startswith(('.', '/', '~'))
|
||||
res = self.external and not self.external.startswith(('.', '/', '~'))
|
||||
if not IS_WINDOWS_PLATFORM:
|
||||
return res
|
||||
|
||||
return (
|
||||
res and not self.external.startswith('\\') and
|
||||
not win32_root_path_pattern.match(self.external)
|
||||
)
|
||||
|
||||
|
||||
class ServiceLink(namedtuple('_ServiceLink', 'target alias')):
|
||||
|
||||
@@ -171,6 +171,14 @@ def validate_network_mode(service_config, service_names):
|
||||
"is undefined.".format(s=service_config, dep=dependency))
|
||||
|
||||
|
||||
def validate_links(service_config, service_names):
|
||||
for link in service_config.config.get('links', []):
|
||||
if link.split(':')[0] not in service_names:
|
||||
raise ConfigurationError(
|
||||
"Service '{s.name}' has a link to service '{link}' which is "
|
||||
"undefined.".format(s=service_config, link=link))
|
||||
|
||||
|
||||
def validate_depends_on(service_config, service_names):
|
||||
for dependency in service_config.config.get('depends_on', []):
|
||||
if dependency not in service_names:
|
||||
@@ -211,7 +219,7 @@ def handle_error_for_schema_with_id(error, path):
|
||||
return get_unsupported_config_msg(path, invalid_config_key)
|
||||
|
||||
if not error.path:
|
||||
return '{}\n{}'.format(error.message, VERSION_EXPLANATION)
|
||||
return '{}\n\n{}'.format(error.message, VERSION_EXPLANATION)
|
||||
|
||||
|
||||
def handle_generic_error(error, path):
|
||||
@@ -408,6 +416,6 @@ def handle_errors(errors, format_error_func, filename):
|
||||
|
||||
error_msg = '\n'.join(format_error_func(error) for error in errors)
|
||||
raise ConfigurationError(
|
||||
"Validation failed{file_msg}, reason(s):\n{error_msg}".format(
|
||||
file_msg=" in file '{}'".format(filename) if filename else "",
|
||||
"The Compose file{file_msg} is invalid because:\n{error_msg}".format(
|
||||
file_msg=" '{}'".format(filename) if filename else "",
|
||||
error_msg=error_msg))
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
DEFAULT_TIMEOUT = 10
|
||||
HTTP_TIMEOUT = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
|
||||
HTTP_TIMEOUT = 60
|
||||
IMAGE_EVENTS = ['delete', 'import', 'pull', 'push', 'tag', 'untag']
|
||||
IS_WINDOWS_PLATFORM = (sys.platform == "win32")
|
||||
LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
|
||||
@@ -17,13 +16,16 @@ LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
|
||||
|
||||
COMPOSEFILE_V1 = '1'
|
||||
COMPOSEFILE_V2_0 = '2.0'
|
||||
COMPOSEFILE_V2_1 = '2.1'
|
||||
|
||||
API_VERSIONS = {
|
||||
COMPOSEFILE_V1: '1.21',
|
||||
COMPOSEFILE_V2_0: '1.22',
|
||||
COMPOSEFILE_V2_1: '1.24',
|
||||
}
|
||||
|
||||
API_VERSION_TO_ENGINE_VERSION = {
|
||||
API_VERSIONS[COMPOSEFILE_V1]: '1.9.0',
|
||||
API_VERSIONS[COMPOSEFILE_V2_0]: '1.10.0'
|
||||
API_VERSIONS[COMPOSEFILE_V2_0]: '1.10.0',
|
||||
API_VERSIONS[COMPOSEFILE_V2_1]: '1.12.0',
|
||||
}
|
||||
|
||||
@@ -163,7 +163,7 @@ class Container(object):
|
||||
@property
|
||||
def has_api_logs(self):
|
||||
log_type = self.log_driver
|
||||
return not log_type or log_type != 'none'
|
||||
return not log_type or log_type in ('json-file', 'journald')
|
||||
|
||||
def attach_log_stream(self):
|
||||
"""A log stream can only be attached if the container uses a json-file
|
||||
|
||||
12
compose/errors.py
Normal file
12
compose/errors.py
Normal file
@@ -0,0 +1,12 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
|
||||
class OperationFailedError(Exception):
|
||||
def __init__(self, reason):
|
||||
self.msg = reason
|
||||
|
||||
|
||||
class StreamParseError(RuntimeError):
|
||||
def __init__(self, reason):
|
||||
self.msg = reason
|
||||
@@ -12,10 +12,15 @@ from .config import ConfigurationError
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
OPTS_EXCEPTIONS = [
|
||||
'com.docker.network.driver.overlay.vxlanid_list',
|
||||
]
|
||||
|
||||
|
||||
class Network(object):
|
||||
def __init__(self, client, project, name, driver=None, driver_opts=None,
|
||||
ipam=None, external_name=None):
|
||||
ipam=None, external_name=None, internal=False, enable_ipv6=False,
|
||||
labels=None):
|
||||
self.client = client
|
||||
self.project = project
|
||||
self.name = name
|
||||
@@ -23,6 +28,9 @@ class Network(object):
|
||||
self.driver_opts = driver_opts
|
||||
self.ipam = create_ipam_config_from_dict(ipam)
|
||||
self.external_name = external_name
|
||||
self.internal = internal
|
||||
self.enable_ipv6 = enable_ipv6
|
||||
self.labels = labels
|
||||
|
||||
def ensure(self):
|
||||
if self.external_name:
|
||||
@@ -45,14 +53,7 @@ class Network(object):
|
||||
|
||||
try:
|
||||
data = self.inspect()
|
||||
if self.driver and data['Driver'] != self.driver:
|
||||
raise ConfigurationError(
|
||||
'Network "{}" needs to be recreated - driver has changed'
|
||||
.format(self.full_name))
|
||||
if data['Options'] != (self.driver_opts or {}):
|
||||
raise ConfigurationError(
|
||||
'Network "{}" needs to be recreated - options have changed'
|
||||
.format(self.full_name))
|
||||
check_remote_network_config(data, self)
|
||||
except NotFound:
|
||||
driver_name = 'the default driver'
|
||||
if self.driver:
|
||||
@@ -68,6 +69,9 @@ class Network(object):
|
||||
driver=self.driver,
|
||||
options=self.driver_opts,
|
||||
ipam=self.ipam,
|
||||
internal=self.internal,
|
||||
enable_ipv6=self.enable_ipv6,
|
||||
labels=self.labels,
|
||||
)
|
||||
|
||||
def remove(self):
|
||||
@@ -106,6 +110,24 @@ def create_ipam_config_from_dict(ipam_dict):
|
||||
)
|
||||
|
||||
|
||||
def check_remote_network_config(remote, local):
|
||||
if local.driver and remote.get('Driver') != local.driver:
|
||||
raise ConfigurationError(
|
||||
'Network "{}" needs to be recreated - driver has changed'
|
||||
.format(local.full_name)
|
||||
)
|
||||
local_opts = local.driver_opts or {}
|
||||
remote_opts = remote.get('Options') or {}
|
||||
for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
|
||||
if k in OPTS_EXCEPTIONS:
|
||||
continue
|
||||
if remote_opts.get(k) != local_opts.get(k):
|
||||
raise ConfigurationError(
|
||||
'Network "{}" needs to be recreated - options have changed'
|
||||
.format(local.full_name)
|
||||
)
|
||||
|
||||
|
||||
def build_networks(name, config_data, client):
|
||||
network_config = config_data.networks or {}
|
||||
networks = {
|
||||
@@ -115,6 +137,9 @@ def build_networks(name, config_data, client):
|
||||
driver_opts=data.get('driver_opts'),
|
||||
ipam=data.get('ipam'),
|
||||
external_name=data.get('external_name'),
|
||||
internal=data.get('internal'),
|
||||
enable_ipv6=data.get('enable_ipv6'),
|
||||
labels=data.get('labels'),
|
||||
)
|
||||
for network_name, data in network_config.items()
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ from six.moves.queue import Empty
|
||||
from six.moves.queue import Queue
|
||||
|
||||
from compose.cli.signals import ShutdownException
|
||||
from compose.errors import OperationFailedError
|
||||
from compose.utils import get_output_stream
|
||||
|
||||
|
||||
@@ -47,6 +48,9 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None):
|
||||
elif isinstance(exception, APIError):
|
||||
errors[get_name(obj)] = exception.explanation
|
||||
writer.write(get_name(obj), 'error')
|
||||
elif isinstance(exception, OperationFailedError):
|
||||
errors[get_name(obj)] = exception.msg
|
||||
writer.write(get_name(obj), 'error')
|
||||
elif isinstance(exception, UpstreamError):
|
||||
writer.write(get_name(obj), 'error')
|
||||
else:
|
||||
@@ -59,7 +63,7 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None):
|
||||
if error_to_reraise:
|
||||
raise error_to_reraise
|
||||
|
||||
return results
|
||||
return results, errors
|
||||
|
||||
|
||||
def _no_deps(x):
|
||||
|
||||
@@ -91,3 +91,22 @@ def print_output_event(event, stream, is_terminal):
|
||||
stream.write("%s%s" % (event['stream'], terminator))
|
||||
else:
|
||||
stream.write("%s%s\n" % (status, terminator))
|
||||
|
||||
|
||||
def get_digest_from_pull(events):
|
||||
for event in events:
|
||||
status = event.get('status')
|
||||
if not status or 'Digest' not in status:
|
||||
continue
|
||||
|
||||
_, digest = status.split(':', 1)
|
||||
return digest.strip()
|
||||
return None
|
||||
|
||||
|
||||
def get_digest_from_push(events):
|
||||
for event in events:
|
||||
digest = event.get('aux', {}).get('Digest')
|
||||
if digest:
|
||||
return digest
|
||||
return None
|
||||
|
||||
@@ -342,7 +342,10 @@ class Project(object):
|
||||
filters={'label': self.labels()},
|
||||
decode=True
|
||||
):
|
||||
if event['status'] in IMAGE_EVENTS:
|
||||
# The first part of this condition is a guard against some events
|
||||
# broadcasted by swarm that don't have a status field.
|
||||
# See https://github.com/docker/compose/issues/3316
|
||||
if 'status' not in event or event['status'] in IMAGE_EVENTS:
|
||||
# We don't receive any image events because labels aren't applied
|
||||
# to images
|
||||
continue
|
||||
@@ -366,6 +369,8 @@ class Project(object):
|
||||
detached=False,
|
||||
remove_orphans=False):
|
||||
|
||||
warn_for_swarm_mode(self.client)
|
||||
|
||||
self.initialize()
|
||||
self.find_orphan_containers(remove_orphans)
|
||||
|
||||
@@ -387,13 +392,18 @@ class Project(object):
|
||||
def get_deps(service):
|
||||
return {self.get_service(dep) for dep in service.get_dependency_names()}
|
||||
|
||||
results = parallel.parallel_execute(
|
||||
results, errors = parallel.parallel_execute(
|
||||
services,
|
||||
do,
|
||||
operator.attrgetter('name'),
|
||||
None,
|
||||
get_deps
|
||||
)
|
||||
if errors:
|
||||
raise ProjectError(
|
||||
'Encountered errors while bringing up the project.'
|
||||
)
|
||||
|
||||
return [
|
||||
container
|
||||
for svc_containers in results
|
||||
@@ -432,6 +442,10 @@ class Project(object):
|
||||
for service in self.get_services(service_names, include_deps=False):
|
||||
service.pull(ignore_pull_failures)
|
||||
|
||||
def push(self, service_names=None, ignore_push_failures=False):
|
||||
for service in self.get_services(service_names, include_deps=False):
|
||||
service.push(ignore_push_failures)
|
||||
|
||||
def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
|
||||
return list(filter(None, [
|
||||
Container.from_ps(self.client, container)
|
||||
@@ -521,6 +535,24 @@ def get_volumes_from(project, service_dict):
|
||||
return [build_volume_from(vf) for vf in volumes_from]
|
||||
|
||||
|
||||
def warn_for_swarm_mode(client):
|
||||
info = client.info()
|
||||
if info.get('Swarm', {}).get('LocalNodeState') == 'active':
|
||||
if info.get('ServerVersion', '').startswith('ucp'):
|
||||
# UCP does multi-node scheduling with traditional Compose files.
|
||||
return
|
||||
|
||||
log.warn(
|
||||
"The Docker Engine you're using is running in swarm mode.\n\n"
|
||||
"Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
|
||||
"All containers will be scheduled on the current node.\n\n"
|
||||
"To deploy your application across the swarm, "
|
||||
"use the bundle feature of the Docker experimental build.\n\n"
|
||||
"More info:\n"
|
||||
"https://docs.docker.com/compose/bundles\n"
|
||||
)
|
||||
|
||||
|
||||
class NoSuchService(Exception):
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
@@ -528,3 +560,8 @@ class NoSuchService(Exception):
|
||||
|
||||
def __str__(self):
|
||||
return self.msg
|
||||
|
||||
|
||||
class ProjectError(Exception):
|
||||
def __init__(self, msg):
|
||||
self.msg = msg
|
||||
|
||||
@@ -10,11 +10,13 @@ from operator import attrgetter
|
||||
import enum
|
||||
import six
|
||||
from docker.errors import APIError
|
||||
from docker.errors import NotFound
|
||||
from docker.utils import LogConfig
|
||||
from docker.utils.ports import build_port_bindings
|
||||
from docker.utils.ports import split_port
|
||||
|
||||
from . import __version__
|
||||
from . import progress_stream
|
||||
from .config import DOCKER_CONFIG_KEYS
|
||||
from .config import merge_environment
|
||||
from .config.types import VolumeSpec
|
||||
@@ -26,6 +28,7 @@ from .const import LABEL_PROJECT
|
||||
from .const import LABEL_SERVICE
|
||||
from .const import LABEL_VERSION
|
||||
from .container import Container
|
||||
from .errors import OperationFailedError
|
||||
from .parallel import parallel_execute
|
||||
from .parallel import parallel_start
|
||||
from .progress_stream import stream_output
|
||||
@@ -46,12 +49,15 @@ DOCKER_START_KEYS = [
|
||||
'dns_search',
|
||||
'env_file',
|
||||
'extra_hosts',
|
||||
'group_add',
|
||||
'ipc',
|
||||
'read_only',
|
||||
'log_driver',
|
||||
'log_opt',
|
||||
'mem_limit',
|
||||
'memswap_limit',
|
||||
'oom_score_adj',
|
||||
'mem_swappiness',
|
||||
'pid',
|
||||
'privileged',
|
||||
'restart',
|
||||
@@ -179,7 +185,7 @@ class Service(object):
|
||||
'Remove the custom name to scale the service.'
|
||||
% (self.name, self.custom_container_name))
|
||||
|
||||
if self.specifies_host_port():
|
||||
if self.specifies_host_port() and desired_num > 1:
|
||||
log.warn('The "%s" service specifies a port on the host. If multiple containers '
|
||||
'for this service are created on a single host, the port will clash.'
|
||||
% self.name)
|
||||
@@ -276,7 +282,11 @@ class Service(object):
|
||||
if 'name' in container_options and not quiet:
|
||||
log.info("Creating %s" % container_options['name'])
|
||||
|
||||
return Container.create(self.client, **container_options)
|
||||
try:
|
||||
return Container.create(self.client, **container_options)
|
||||
except APIError as ex:
|
||||
raise OperationFailedError("Cannot create container for service %s: %s" %
|
||||
(self.name, ex.explanation))
|
||||
|
||||
def ensure_image_exists(self, do_build=BuildAction.none):
|
||||
if self.can_be_built() and do_build == BuildAction.force:
|
||||
@@ -446,26 +456,31 @@ class Service(object):
|
||||
|
||||
def start_container(self, container):
|
||||
self.connect_container_to_networks(container)
|
||||
container.start()
|
||||
try:
|
||||
container.start()
|
||||
except APIError as ex:
|
||||
raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
|
||||
return container
|
||||
|
||||
def connect_container_to_networks(self, container):
|
||||
connected_networks = container.get('NetworkSettings.Networks')
|
||||
|
||||
for network, netdefs in self.networks.items():
|
||||
aliases = netdefs.get('aliases', [])
|
||||
ipv4_address = netdefs.get('ipv4_address', None)
|
||||
ipv6_address = netdefs.get('ipv6_address', None)
|
||||
if network in connected_networks:
|
||||
if short_id_alias_exists(container, network):
|
||||
continue
|
||||
|
||||
self.client.disconnect_container_from_network(
|
||||
container.id, network)
|
||||
container.id,
|
||||
network)
|
||||
|
||||
self.client.connect_container_to_network(
|
||||
container.id, network,
|
||||
aliases=list(self._get_aliases(container).union(aliases)),
|
||||
ipv4_address=ipv4_address,
|
||||
ipv6_address=ipv6_address,
|
||||
links=self._get_links(False)
|
||||
aliases=self._get_aliases(netdefs, container),
|
||||
ipv4_address=netdefs.get('ipv4_address', None),
|
||||
ipv6_address=netdefs.get('ipv6_address', None),
|
||||
links=self._get_links(False),
|
||||
link_local_ips=netdefs.get('link_local_ips', None),
|
||||
)
|
||||
|
||||
def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT):
|
||||
@@ -533,11 +548,32 @@ class Service(object):
|
||||
numbers = [c.number for c in containers]
|
||||
return 1 if not numbers else max(numbers) + 1
|
||||
|
||||
def _get_aliases(self, container):
|
||||
if container.labels.get(LABEL_ONE_OFF) == "True":
|
||||
return set()
|
||||
def _get_aliases(self, network, container=None):
|
||||
if container and container.labels.get(LABEL_ONE_OFF) == "True":
|
||||
return []
|
||||
|
||||
return {self.name, container.short_id}
|
||||
return list(
|
||||
{self.name} |
|
||||
({container.short_id} if container else set()) |
|
||||
set(network.get('aliases', ()))
|
||||
)
|
||||
|
||||
def build_default_networking_config(self):
|
||||
if not self.networks:
|
||||
return {}
|
||||
|
||||
network = self.networks[self.network_mode.id]
|
||||
endpoint = {
|
||||
'Aliases': self._get_aliases(network),
|
||||
'IPAMConfig': {},
|
||||
}
|
||||
|
||||
if network.get('ipv4_address'):
|
||||
endpoint['IPAMConfig']['IPv4Address'] = network.get('ipv4_address')
|
||||
if network.get('ipv6_address'):
|
||||
endpoint['IPAMConfig']['IPv6Address'] = network.get('ipv6_address')
|
||||
|
||||
return {"EndpointsConfig": {self.network_mode.id: endpoint}}
|
||||
|
||||
def _get_links(self, link_to_self):
|
||||
links = {}
|
||||
@@ -633,6 +669,10 @@ class Service(object):
|
||||
override_options,
|
||||
one_off=one_off)
|
||||
|
||||
networking_config = self.build_default_networking_config()
|
||||
if networking_config:
|
||||
container_options['networking_config'] = networking_config
|
||||
|
||||
container_options['environment'] = format_environment(
|
||||
container_options['environment'])
|
||||
return container_options
|
||||
@@ -643,7 +683,7 @@ class Service(object):
|
||||
logging_dict = options.get('logging', None)
|
||||
log_config = get_log_config(logging_dict)
|
||||
|
||||
return self.client.create_host_config(
|
||||
host_config = self.client.create_host_config(
|
||||
links=self._get_links(link_to_self=one_off),
|
||||
port_bindings=build_port_bindings(options.get('ports') or []),
|
||||
binds=options.get('binds'),
|
||||
@@ -669,8 +709,17 @@ class Service(object):
|
||||
cpu_quota=options.get('cpu_quota'),
|
||||
shm_size=options.get('shm_size'),
|
||||
tmpfs=options.get('tmpfs'),
|
||||
oom_score_adj=options.get('oom_score_adj'),
|
||||
mem_swappiness=options.get('mem_swappiness'),
|
||||
group_add=options.get('group_add')
|
||||
)
|
||||
|
||||
# TODO: Add as an argument to create_host_config once it's supported
|
||||
# in docker-py
|
||||
host_config['Isolation'] = options.get('isolation')
|
||||
|
||||
return host_config
|
||||
|
||||
def build(self, no_cache=False, pull=False, force_rm=False):
|
||||
log.info('Building %s' % self.name)
|
||||
|
||||
@@ -781,20 +830,40 @@ class Service(object):
|
||||
repo, tag, separator = parse_repository_tag(self.options['image'])
|
||||
tag = tag or 'latest'
|
||||
log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
|
||||
output = self.client.pull(
|
||||
repo,
|
||||
tag=tag,
|
||||
stream=True,
|
||||
)
|
||||
|
||||
try:
|
||||
stream_output(output, sys.stdout)
|
||||
except StreamOutputError as e:
|
||||
output = self.client.pull(repo, tag=tag, stream=True)
|
||||
return progress_stream.get_digest_from_pull(
|
||||
stream_output(output, sys.stdout))
|
||||
except (StreamOutputError, NotFound) as e:
|
||||
if not ignore_pull_failures:
|
||||
raise
|
||||
else:
|
||||
log.error(six.text_type(e))
|
||||
|
||||
def push(self, ignore_push_failures=False):
|
||||
if 'image' not in self.options or 'build' not in self.options:
|
||||
return
|
||||
|
||||
repo, tag, separator = parse_repository_tag(self.options['image'])
|
||||
tag = tag or 'latest'
|
||||
log.info('Pushing %s (%s%s%s)...' % (self.name, repo, separator, tag))
|
||||
output = self.client.push(repo, tag=tag, stream=True)
|
||||
|
||||
try:
|
||||
return progress_stream.get_digest_from_push(
|
||||
stream_output(output, sys.stdout))
|
||||
except StreamOutputError as e:
|
||||
if not ignore_push_failures:
|
||||
raise
|
||||
else:
|
||||
log.error(six.text_type(e))
|
||||
|
||||
|
||||
def short_id_alias_exists(container, network):
|
||||
aliases = container.get(
|
||||
'NetworkSettings.Networks.{net}.Aliases'.format(net=network)) or ()
|
||||
return container.short_id in aliases
|
||||
|
||||
|
||||
class NetworkMode(object):
|
||||
"""A `standard` network mode (ex: host, bridge)"""
|
||||
@@ -1048,6 +1117,8 @@ def format_environment(environment):
|
||||
def format_env(key, value):
|
||||
if value is None:
|
||||
return key
|
||||
if isinstance(value, six.binary_type):
|
||||
value = value.decode('utf-8')
|
||||
return '{key}={value}'.format(key=key, value=value)
|
||||
return [format_env(*item) for item in environment.items()]
|
||||
|
||||
|
||||
@@ -5,11 +5,16 @@ import codecs
|
||||
import hashlib
|
||||
import json
|
||||
import json.decoder
|
||||
import logging
|
||||
import ntpath
|
||||
|
||||
import six
|
||||
|
||||
from .errors import StreamParseError
|
||||
|
||||
|
||||
json_decoder = json.JSONDecoder()
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_output_stream(stream):
|
||||
@@ -60,13 +65,21 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
|
||||
yield item
|
||||
|
||||
if buffered:
|
||||
yield decoder(buffered)
|
||||
try:
|
||||
yield decoder(buffered)
|
||||
except Exception as e:
|
||||
log.error(
|
||||
'Compose tried decoding the following data chunk, but failed:'
|
||||
'\n%s' % repr(buffered)
|
||||
)
|
||||
raise StreamParseError(e)
|
||||
|
||||
|
||||
def json_splitter(buffer):
|
||||
"""Attempt to parse a json object from a buffer. If there is at least one
|
||||
object, return it and the rest of the buffer, otherwise return None.
|
||||
"""
|
||||
buffer = buffer.strip()
|
||||
try:
|
||||
obj, index = json_decoder.raw_decode(buffer)
|
||||
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
|
||||
@@ -95,4 +108,12 @@ def microseconds_from_time_nano(time_nano):
|
||||
|
||||
|
||||
def build_string_dict(source_dict):
|
||||
return dict((k, str(v)) for k, v in source_dict.items())
|
||||
return dict((k, str(v if v is not None else '')) for k, v in source_dict.items())
|
||||
|
||||
|
||||
def splitdrive(path):
|
||||
if len(path) == 0:
|
||||
return ('', '')
|
||||
if path[0] in ['.', '\\', '/', '~']:
|
||||
return ('', path)
|
||||
return ntpath.splitdrive(path)
|
||||
|
||||
@@ -12,17 +12,18 @@ log = logging.getLogger(__name__)
|
||||
|
||||
class Volume(object):
|
||||
def __init__(self, client, project, name, driver=None, driver_opts=None,
|
||||
external_name=None):
|
||||
external_name=None, labels=None):
|
||||
self.client = client
|
||||
self.project = project
|
||||
self.name = name
|
||||
self.driver = driver
|
||||
self.driver_opts = driver_opts
|
||||
self.external_name = external_name
|
||||
self.labels = labels
|
||||
|
||||
def create(self):
|
||||
return self.client.create_volume(
|
||||
self.full_name, self.driver, self.driver_opts
|
||||
self.full_name, self.driver, self.driver_opts, labels=self.labels
|
||||
)
|
||||
|
||||
def remove(self):
|
||||
@@ -68,7 +69,8 @@ class ProjectVolumes(object):
|
||||
name=vol_name,
|
||||
driver=data.get('driver'),
|
||||
driver_opts=data.get('driver_opts'),
|
||||
external_name=data.get('external_name')
|
||||
external_name=data.get('external_name'),
|
||||
labels=data.get('labels')
|
||||
)
|
||||
for vol_name, data in config_volumes.items()
|
||||
}
|
||||
|
||||
@@ -109,6 +109,18 @@ _docker_compose_build() {
|
||||
}
|
||||
|
||||
|
||||
_docker_compose_bundle() {
|
||||
case "$prev" in
|
||||
--output|-o)
|
||||
_filedir
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
COMPREPLY=( $( compgen -W "--push-images --help --output -o" -- "$cur" ) )
|
||||
}
|
||||
|
||||
|
||||
_docker_compose_config() {
|
||||
COMPREPLY=( $( compgen -W "--help --quiet -q --services" -- "$cur" ) )
|
||||
}
|
||||
@@ -304,6 +316,18 @@ _docker_compose_pull() {
|
||||
}
|
||||
|
||||
|
||||
_docker_compose_push() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--help --ignore-push-failures" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker_compose_services_all
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
|
||||
_docker_compose_restart() {
|
||||
case "$prev" in
|
||||
--timeout|-t)
|
||||
@@ -325,7 +349,7 @@ _docker_compose_restart() {
|
||||
_docker_compose_rm() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--all -a --force -f --help -v" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--force -f --help -v" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker_compose_services_stopped
|
||||
@@ -455,6 +479,7 @@ _docker_compose() {
|
||||
|
||||
local commands=(
|
||||
build
|
||||
bundle
|
||||
config
|
||||
create
|
||||
down
|
||||
@@ -467,6 +492,7 @@ _docker_compose() {
|
||||
port
|
||||
ps
|
||||
pull
|
||||
push
|
||||
restart
|
||||
rm
|
||||
run
|
||||
|
||||
@@ -19,52 +19,49 @@
|
||||
# * @felixr docker zsh completion script : https://github.com/felixr/docker-zsh-completion
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
# For compatibility reasons, Compose and therefore its completion supports several
|
||||
# stack compositon files as listed here, in descending priority.
|
||||
# Support for these filenames might be dropped in some future version.
|
||||
__docker-compose_compose_file() {
|
||||
local file
|
||||
for file in docker-compose.y{,a}ml ; do
|
||||
[ -e $file ] && {
|
||||
echo $file
|
||||
return
|
||||
}
|
||||
done
|
||||
echo docker-compose.yml
|
||||
__docker-compose_q() {
|
||||
docker-compose 2>/dev/null $compose_options "$@"
|
||||
}
|
||||
|
||||
# Extracts all service names from docker-compose.yml.
|
||||
___docker-compose_all_services_in_compose_file() {
|
||||
# All services defined in docker-compose.yml
|
||||
__docker-compose_all_services_in_compose_file() {
|
||||
local already_selected
|
||||
local -a services
|
||||
already_selected=$(echo $words | tr " " "|")
|
||||
awk -F: '/^[a-zA-Z0-9]/{print $1}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null | grep -Ev "$already_selected"
|
||||
__docker-compose_q config --services \
|
||||
| grep -Ev "^(${already_selected})$"
|
||||
}
|
||||
|
||||
# All services, even those without an existing container
|
||||
__docker-compose_services_all() {
|
||||
[[ $PREFIX = -* ]] && return 1
|
||||
integer ret=1
|
||||
services=$(___docker-compose_all_services_in_compose_file)
|
||||
services=$(__docker-compose_all_services_in_compose_file)
|
||||
_alternative "args:services:($services)" && ret=0
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
# All services that have an entry with the given key in their docker-compose.yml section
|
||||
___docker-compose_services_with_key() {
|
||||
__docker-compose_services_with_key() {
|
||||
local already_selected
|
||||
local -a buildable
|
||||
already_selected=$(echo $words | tr " " "|")
|
||||
# flatten sections to one line, then filter lines containing the key and return section name.
|
||||
awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null | awk -F: -v key=": +$1:" '$0 ~ key {print $1}' 2>/dev/null | grep -Ev "$already_selected"
|
||||
__docker-compose_q config \
|
||||
| sed -n -e '/^services:/,/^[^ ]/p' \
|
||||
| sed -n 's/^ //p' \
|
||||
| awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' \
|
||||
| grep " \+$1:" \
|
||||
| cut -d: -f1 \
|
||||
| grep -Ev "^(${already_selected})$"
|
||||
}
|
||||
|
||||
# All services that are defined by a Dockerfile reference
|
||||
__docker-compose_services_from_build() {
|
||||
[[ $PREFIX = -* ]] && return 1
|
||||
integer ret=1
|
||||
buildable=$(___docker-compose_services_with_key build)
|
||||
buildable=$(__docker-compose_services_with_key build)
|
||||
_alternative "args:buildable services:($buildable)" && ret=0
|
||||
|
||||
return ret
|
||||
@@ -74,7 +71,7 @@ __docker-compose_services_from_build() {
|
||||
__docker-compose_services_from_image() {
|
||||
[[ $PREFIX = -* ]] && return 1
|
||||
integer ret=1
|
||||
pullable=$(___docker-compose_services_with_key image)
|
||||
pullable=$(__docker-compose_services_with_key image)
|
||||
_alternative "args:pullable services:($pullable)" && ret=0
|
||||
|
||||
return ret
|
||||
@@ -96,7 +93,7 @@ __docker-compose_get_services() {
|
||||
shift
|
||||
[[ $kind =~ (stopped|all) ]] && args=($args -a)
|
||||
|
||||
lines=(${(f)"$(_call_program commands docker ps $args)"})
|
||||
lines=(${(f)"$(_call_program commands docker $docker_options ps $args)"})
|
||||
services=(${(f)"$(_call_program commands docker-compose 2>/dev/null $compose_options ps -q)"})
|
||||
|
||||
# Parse header line to find columns
|
||||
@@ -185,7 +182,17 @@ __docker-compose_commands() {
|
||||
}
|
||||
|
||||
__docker-compose_subcommand() {
|
||||
local opts_help='(: -)--help[Print usage]'
|
||||
local opts_help opts_force_recreate opts_no_recreate opts_no_build opts_remove_orphans opts_timeout opts_no_color opts_no_deps
|
||||
|
||||
opts_help='(: -)--help[Print usage]'
|
||||
opts_force_recreate="(--no-recreate)--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]"
|
||||
opts_no_recreate="(--force-recreate)--no-recreate[If containers already exist, don't recreate them. Incompatible with --force-recreate.]"
|
||||
opts_no_build="(--build)--no-build[Don't build an image, even if it's missing.]"
|
||||
opts_remove_orphans="--remove-orphans[Remove containers for services not defined in the Compose file]"
|
||||
opts_timeout=('(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: ")
|
||||
opts_no_color='--no-color[Produce monochrome output.]'
|
||||
opts_no_deps="--no-deps[Don't start linked services.]"
|
||||
|
||||
integer ret=1
|
||||
|
||||
case "$words[1]" in
|
||||
@@ -193,10 +200,16 @@ __docker-compose_subcommand() {
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'--force-rm[Always remove intermediate containers.]' \
|
||||
'--no-cache[Do not use cache when building the image]' \
|
||||
'--no-cache[Do not use cache when building the image.]' \
|
||||
'--pull[Always attempt to pull a newer version of the image.]' \
|
||||
'*:services:__docker-compose_services_from_build' && ret=0
|
||||
;;
|
||||
(bundle)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'--push-images[Automatically push images for any services which have a `build` option specified.]' \
|
||||
'(--output -o)'{--output,-o}'[Path to write the bundle file to. Defaults to "<project name>.dab".]:file:_files' && ret=0
|
||||
;;
|
||||
(config)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
@@ -206,21 +219,23 @@ __docker-compose_subcommand() {
|
||||
(create)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
"(--no-recreate --no-build)--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]" \
|
||||
"(--force-recreate)--no-build[If containers already exist, don't recreate them. Incompatible with --force-recreate.]" \
|
||||
"(--force-recreate)--no-recreate[Don't build an image, even if it's missing]" \
|
||||
$opts_force_recreate \
|
||||
$opts_no_recreate \
|
||||
$opts_no_build \
|
||||
"(--no-build)--build[Build images before creating containers.]" \
|
||||
'*:services:__docker-compose_services_all' && ret=0
|
||||
;;
|
||||
(down)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
"--rmi[Remove images, type may be one of: 'all' to remove all images, or 'local' to remove only images that don't have an custom name set by the 'image' field]:type:(all local)" \
|
||||
'(-v --volumes)'{-v,--volumes}"[Remove data volumes]" && ret=0
|
||||
"--rmi[Remove images. Type must be one of: 'all': Remove all images used by any service. 'local': Remove only images that don't have a custom tag set by the \`image\` field.]:type:(all local)" \
|
||||
'(-v --volumes)'{-v,--volumes}"[Remove named volumes declared in the \`volumes\` section of the Compose file and anonymous volumes attached to containers.]" \
|
||||
$opts_remove_orphans && ret=0
|
||||
;;
|
||||
(events)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'--json[Output events as a stream of json objects.]' \
|
||||
'--json[Output events as a stream of json objects]' \
|
||||
'*:services:__docker-compose_services_all' && ret=0
|
||||
;;
|
||||
(exec)
|
||||
@@ -230,7 +245,7 @@ __docker-compose_subcommand() {
|
||||
'--privileged[Give extended privileges to the process.]' \
|
||||
'--user=[Run the command as this user.]:username:_users' \
|
||||
'-T[Disable pseudo-tty allocation. By default `docker-compose exec` allocates a TTY.]' \
|
||||
'--index=[Index of the container if there are multiple instances of a service (default: 1)]:index: ' \
|
||||
'--index=[Index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \
|
||||
'(-):running services:__docker-compose_runningservices' \
|
||||
'(-):command: _command_names -e' \
|
||||
'*::arguments: _normal' && ret=0
|
||||
@@ -248,7 +263,7 @@ __docker-compose_subcommand() {
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'(-f --follow)'{-f,--follow}'[Follow log output]' \
|
||||
'--no-color[Produce monochrome output.]' \
|
||||
$opts_no_color \
|
||||
'--tail=[Number of lines to show from the end of the logs for each container.]:number of lines: ' \
|
||||
'(-t --timestamps)'{-t,--timestamps}'[Show timestamps]' \
|
||||
'*:services:__docker-compose_services_all' && ret=0
|
||||
@@ -261,8 +276,8 @@ __docker-compose_subcommand() {
|
||||
(port)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'--protocol=-[tcp or udap (defaults to tcp)]:protocol:(tcp udp)' \
|
||||
'--index=-[index of the container if there are mutiple instances of a service (defaults to 1)]:index: ' \
|
||||
'--protocol=[tcp or udp \[default: tcp\]]:protocol:(tcp udp)' \
|
||||
'--index=[index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \
|
||||
'1:running services:__docker-compose_runningservices' \
|
||||
'2:port:_ports' && ret=0
|
||||
;;
|
||||
@@ -278,12 +293,17 @@ __docker-compose_subcommand() {
|
||||
'--ignore-pull-failures[Pull what it can and ignores images with pull failures.]' \
|
||||
'*:services:__docker-compose_services_from_image' && ret=0
|
||||
;;
|
||||
(push)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'--ignore-push-failures[Push what it can and ignores images with push failures.]' \
|
||||
'*:services:__docker-compose_services' && ret=0
|
||||
;;
|
||||
(rm)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'(-a --all)'{-a,--all}"[Also remove one-off containers]" \
|
||||
'(-f --force)'{-f,--force}"[Don't ask to confirm removal]" \
|
||||
'-v[Remove volumes associated with containers]' \
|
||||
'-v[Remove any anonymous volumes attached to containers]' \
|
||||
'*:stopped services:__docker-compose_stoppedservices' && ret=0
|
||||
;;
|
||||
(run)
|
||||
@@ -292,14 +312,14 @@ __docker-compose_subcommand() {
|
||||
'-d[Detached mode: Run container in the background, print new container name.]' \
|
||||
'*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
|
||||
'--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \
|
||||
'--name[Assign a name to the container]:name: ' \
|
||||
"--no-deps[Don't start linked services.]" \
|
||||
'(-p --publish)'{-p,--publish=-}"[Run command with manually mapped container's port(s) to the host.]" \
|
||||
'--name=[Assign a name to the container]:name: ' \
|
||||
$opts_no_deps \
|
||||
'(-p --publish)'{-p,--publish=}"[Publish a container's port(s) to the host]" \
|
||||
'--rm[Remove container after run. Ignored in detached mode.]' \
|
||||
"--service-ports[Run command with the service's ports enabled and mapped to the host.]" \
|
||||
'-T[Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.]' \
|
||||
'(-u --user)'{-u,--user=-}'[Run as specified username or uid]:username or uid:_users' \
|
||||
'(-w --workdir)'{-w=,--workdir=}'[Working directory inside the container]:workdir: ' \
|
||||
'(-u --user)'{-u,--user=}'[Run as specified username or uid]:username or uid:_users' \
|
||||
'(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \
|
||||
'(-):services:__docker-compose_services' \
|
||||
'(-):command: _command_names -e' \
|
||||
'*::arguments: _normal' && ret=0
|
||||
@@ -307,7 +327,7 @@ __docker-compose_subcommand() {
|
||||
(scale)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \
|
||||
$opts_timeout \
|
||||
'*:running services:__docker-compose_runningservices' && ret=0
|
||||
;;
|
||||
(start)
|
||||
@@ -318,7 +338,7 @@ __docker-compose_subcommand() {
|
||||
(stop|restart)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \
|
||||
$opts_timeout \
|
||||
'*:running services:__docker-compose_runningservices' && ret=0
|
||||
;;
|
||||
(unpause)
|
||||
@@ -329,15 +349,16 @@ __docker-compose_subcommand() {
|
||||
(up)
|
||||
_arguments \
|
||||
$opts_help \
|
||||
'(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names.]' \
|
||||
'--build[Build images before starting containers.]' \
|
||||
'--no-color[Produce monochrome output.]' \
|
||||
"--no-deps[Don't start linked services.]" \
|
||||
"--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]" \
|
||||
"--no-recreate[If containers already exist, don't recreate them.]" \
|
||||
"--no-build[Don't build an image, even if it's missing]" \
|
||||
'(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names. Incompatible with --abort-on-container-exit.]' \
|
||||
$opts_no_color \
|
||||
$opts_no_deps \
|
||||
$opts_force_recreate \
|
||||
$opts_no_recreate \
|
||||
$opts_no_build \
|
||||
"(--no-build)--build[Build images before starting containers.]" \
|
||||
"(-d)--abort-on-container-exit[Stops all containers if any container was stopped. Incompatible with -d.]" \
|
||||
'(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \
|
||||
'(-t --timeout)'{-t,--timeout}"[Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)]:seconds: " \
|
||||
$opts_remove_orphans \
|
||||
'*:services:__docker-compose_services_all' && ret=0
|
||||
;;
|
||||
(version)
|
||||
@@ -367,16 +388,57 @@ _docker-compose() {
|
||||
|
||||
_arguments -C \
|
||||
'(- :)'{-h,--help}'[Get help]' \
|
||||
'--verbose[Show more output]' \
|
||||
'(- :)'{-v,--version}'[Print version and exit]' \
|
||||
'(-f --file)'{-f,--file}'[Specify an alternate docker-compose file (default: docker-compose.yml)]:file:_files -g "*.yml"' \
|
||||
'(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
|
||||
'--verbose[Show more output]' \
|
||||
'(- :)'{-v,--version}'[Print version and exit]' \
|
||||
'(-H --host)'{-H,--host}'[Daemon socket to connect to]:host:' \
|
||||
'--tls[Use TLS; implied by --tlsverify]' \
|
||||
'--tlscacert=[Trust certs signed only by this CA]:ca path:' \
|
||||
'--tlscert=[Path to TLS certificate file]:client cert path:' \
|
||||
'--tlskey=[Path to TLS key file]:tls key path:' \
|
||||
'--tlsverify[Use TLS and verify the remote]' \
|
||||
"--skip-hostname-check[Don't check the daemon's hostname against the name specified in the client certificate (for example if your docker host is an IP address)]" \
|
||||
'(-): :->command' \
|
||||
'(-)*:: :->option-or-argument' && ret=0
|
||||
|
||||
local compose_file=${opt_args[-f]}${opt_args[--file]}
|
||||
local compose_project=${opt_args[-p]}${opt_args[--project-name]}
|
||||
local compose_options="${compose_file:+--file $compose_file} ${compose_project:+--project-name $compose_project}"
|
||||
local -a relevant_compose_flags relevant_docker_flags compose_options docker_options
|
||||
|
||||
relevant_compose_flags=(
|
||||
"--file" "-f"
|
||||
"--host" "-H"
|
||||
"--project-name" "-p"
|
||||
"--tls"
|
||||
"--tlscacert"
|
||||
"--tlscert"
|
||||
"--tlskey"
|
||||
"--tlsverify"
|
||||
"--skip-hostname-check"
|
||||
)
|
||||
|
||||
relevant_docker_flags=(
|
||||
"--host" "-H"
|
||||
"--tls"
|
||||
"--tlscacert"
|
||||
"--tlscert"
|
||||
"--tlskey"
|
||||
"--tlsverify"
|
||||
)
|
||||
|
||||
for k in "${(@k)opt_args}"; do
|
||||
if [[ -n "${relevant_docker_flags[(r)$k]}" ]]; then
|
||||
docker_options+=$k
|
||||
if [[ -n "$opt_args[$k]" ]]; then
|
||||
docker_options+=$opt_args[$k]
|
||||
fi
|
||||
fi
|
||||
if [[ -n "${relevant_compose_flags[(r)$k]}" ]]; then
|
||||
compose_options+=$k
|
||||
if [[ -n "$opt_args[$k]" ]]; then
|
||||
compose_options+=$opt_args[$k]
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
case $state in
|
||||
(command)
|
||||
|
||||
@@ -27,6 +27,11 @@ exe = EXE(pyz,
|
||||
'compose/config/config_schema_v2.0.json',
|
||||
'DATA'
|
||||
),
|
||||
(
|
||||
'compose/config/config_schema_v2.1.json',
|
||||
'compose/config/config_schema_v2.1.json',
|
||||
'DATA'
|
||||
),
|
||||
(
|
||||
'compose/GITSHA',
|
||||
'compose/GITSHA',
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
FROM docs/base:latest
|
||||
MAINTAINER Mary Anthony <mary@docker.com> (@moxiegirl)
|
||||
|
||||
RUN svn checkout https://github.com/docker/docker/trunk/docs /docs/content/engine
|
||||
RUN svn checkout https://github.com/docker/swarm/trunk/docs /docs/content/swarm
|
||||
RUN svn checkout https://github.com/docker/machine/trunk/docs /docs/content/machine
|
||||
RUN svn checkout https://github.com/docker/distribution/trunk/docs /docs/content/registry
|
||||
RUN svn checkout https://github.com/docker/notary/trunk/docs /docs/content/notary
|
||||
RUN svn checkout https://github.com/docker/kitematic/trunk/docs /docs/content/kitematic
|
||||
RUN svn checkout https://github.com/docker/toolbox/trunk/docs /docs/content/toolbox
|
||||
RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content/project
|
||||
|
||||
|
||||
ENV PROJECT=compose
|
||||
# To get the git info for this repo
|
||||
COPY . /src
|
||||
|
||||
COPY . /docs/content/$PROJECT/
|
||||
@@ -1,55 +0,0 @@
|
||||
.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate
|
||||
|
||||
# env vars passed through directly to Docker's build scripts
|
||||
# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
|
||||
# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these
|
||||
DOCKER_ENVS := \
|
||||
-e BUILDFLAGS \
|
||||
-e DOCKER_CLIENTONLY \
|
||||
-e DOCKER_EXECDRIVER \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TESTDIRS \
|
||||
-e TESTFLAGS \
|
||||
-e TIMEOUT
|
||||
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
|
||||
|
||||
# to allow `make DOCSDIR=1 docs-shell` (to create a bind mount in docs)
|
||||
DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR):/docs/content/compose)
|
||||
|
||||
# to allow `make DOCSPORT=9000 docs`
|
||||
DOCSPORT := 8000
|
||||
|
||||
# Get the IP ADDRESS
|
||||
DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''")
|
||||
HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)")
|
||||
HUGO_BIND_IP=0.0.0.0
|
||||
|
||||
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
|
||||
DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
|
||||
DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH))
|
||||
|
||||
|
||||
DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE
|
||||
|
||||
# for some docs workarounds (see below in "docs-build" target)
|
||||
GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
|
||||
|
||||
default: docs
|
||||
|
||||
docs: docs-build
|
||||
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) --watch
|
||||
|
||||
docs-draft: docs-build
|
||||
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)
|
||||
|
||||
|
||||
docs-shell: docs-build
|
||||
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
|
||||
|
||||
|
||||
docs-build:
|
||||
# ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files
|
||||
# echo "$(GIT_BRANCH)" > GIT_BRANCH
|
||||
# echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET
|
||||
# echo "$(GITCOMMIT)" > GITCOMMIT
|
||||
docker build -t "$(DOCKER_DOCS_IMAGE)" .
|
||||
@@ -1,86 +1,16 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
draft = true
|
||||
title = "Compose README"
|
||||
description = "Compose README"
|
||||
keywords = ["Docker, documentation, manual, guide, reference, api"]
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
# The docs have been moved!
|
||||
|
||||
# Contributing to the Docker Compose documentation
|
||||
The documentation for Compose has been merged into
|
||||
[the general documentation repo](https://github.com/docker/docker.github.io).
|
||||
|
||||
The documentation in this directory is part of the [https://docs.docker.com](https://docs.docker.com) website. Docker uses [the Hugo static generator](http://gohugo.io/overview/introduction/) to convert project Markdown files to a static HTML site.
|
||||
The docs for Compose are now here:
|
||||
https://github.com/docker/docker.github.io/tree/master/compose
|
||||
|
||||
You don't need to be a Hugo expert to contribute to the compose documentation. If you are familiar with Markdown, you can modify the content in the `docs` files.
|
||||
Please submit pull requests for unpublished features on the `vnext-compose` branch (https://github.com/docker/docker.github.io/tree/vnext-compose).
|
||||
|
||||
If you want to add a new file or change the location of the document in the menu, you do need to know a little more.
|
||||
If you submit a PR to this codebase that has a docs impact, create a second docs PR on `docker.github.io`. Use the docs PR template provided (coming soon - watch this space).
|
||||
|
||||
## Documentation contributing workflow
|
||||
PRs for typos, additional information, etc. for already-published features should be labeled as `okay-to-publish` (we are still settling on a naming convention, will provide a label soon). You can submit these PRs either to `vnext-compose` or directly to `master` on `docker.github.io`
|
||||
|
||||
1. Edit a Markdown file in the tree.
|
||||
|
||||
2. Save your changes.
|
||||
|
||||
3. Make sure you are in the `docs` subdirectory.
|
||||
|
||||
4. Build the documentation.
|
||||
|
||||
$ make docs
|
||||
---> ffcf3f6c4e97
|
||||
Removing intermediate container a676414185e8
|
||||
Successfully built ffcf3f6c4e97
|
||||
docker run --rm -it -e AWS_S3_BUCKET -e NOCACHE -p 8000:8000 -e DOCKERHOST "docs-base:test-tooling" hugo server --port=8000 --baseUrl=192.168.59.103 --bind=0.0.0.0
|
||||
ERROR: 2015/06/13 MenuEntry's .Url is deprecated and will be removed in Hugo 0.15. Use .URL instead.
|
||||
0 of 4 drafts rendered
|
||||
0 future content
|
||||
12 pages created
|
||||
0 paginator pages created
|
||||
0 tags created
|
||||
0 categories created
|
||||
in 55 ms
|
||||
Serving pages from /docs/public
|
||||
Web Server is available at http://0.0.0.0:8000/
|
||||
Press Ctrl+C to stop
|
||||
|
||||
5. Open the available server in your browser.
|
||||
|
||||
The documentation server has the complete menu but only the Docker Compose
|
||||
documentation resolves. You can't access the other project docs from this
|
||||
localized build.
|
||||
|
||||
## Tips on Hugo metadata and menu positioning
|
||||
|
||||
The top of each Docker Compose documentation file contains TOML metadata. The metadata is commented out to prevent it from appearing in GitHub.
|
||||
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Extending services in Compose"
|
||||
description = "How to use Docker Compose's extends keyword to share configuration between files and projects"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, documentation, docs"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=2
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
The metadata alone has this structure:
|
||||
|
||||
+++
|
||||
title = "Extending services in Compose"
|
||||
description = "How to use Docker Compose's extends keyword to share configuration between files and projects"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, documentation, docs"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=2
|
||||
+++
|
||||
|
||||
The `[menu.main]` section refers to navigation defined [in the main Docker menu](https://github.com/docker/docs-base/blob/hugo/config.toml). This metadata says *add a menu item called* Extending services in Compose *to the menu with the* `smn_workdw_compose` *identifier*. If you locate the menu in the configuration, you'll find *Create multi-container applications* is the menu title.
|
||||
|
||||
You can move an article in the tree by specifying a new parent. You can shift the location of the item by changing its weight. Higher numbers are heavier and shift the item to the bottom of menu. Low or no numbers shift it up.
|
||||
|
||||
|
||||
## Other key documentation repositories
|
||||
|
||||
The `docker/docs-base` repository contains [the Hugo theme and menu configuration](https://github.com/docker/docs-base). If you open the `Dockerfile` you'll see the `make docs` relies on this as a base image for building the Compose documentation.
|
||||
|
||||
The `docker/docs.docker.com` repository contains [build system for building the Docker documentation site](https://github.com/docker/docs.docker.com). Fork this repository to build the entire documentation site.
|
||||
As always, the docs remain open-source and we appreciate your feedback and
|
||||
pull requests!
|
||||
|
||||
@@ -1,68 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Command-line Completion"
|
||||
description = "Compose CLI reference"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, reference"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=88
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Command-line Completion
|
||||
|
||||
Compose comes with [command completion](http://en.wikipedia.org/wiki/Command-line_completion)
|
||||
for the bash and zsh shell.
|
||||
|
||||
## Installing Command Completion
|
||||
|
||||
### Bash
|
||||
|
||||
Make sure bash completion is installed. If you use a current Linux in a non-minimal installation, bash completion should be available.
|
||||
On a Mac, install with `brew install bash-completion`
|
||||
|
||||
Place the completion script in `/etc/bash_completion.d/` (`/usr/local/etc/bash_completion.d/` on a Mac), using e.g.
|
||||
|
||||
curl -L https://raw.githubusercontent.com/docker/compose/$(docker-compose version --short)/contrib/completion/bash/docker-compose > /etc/bash_completion.d/docker-compose
|
||||
|
||||
Completion will be available upon next login.
|
||||
|
||||
### Zsh
|
||||
|
||||
Place the completion script in your `/path/to/zsh/completion`, using e.g. `~/.zsh/completion/`
|
||||
|
||||
mkdir -p ~/.zsh/completion
|
||||
curl -L https://raw.githubusercontent.com/docker/compose/$(docker-compose version --short)/contrib/completion/zsh/_docker-compose > ~/.zsh/completion/_docker-compose
|
||||
|
||||
Include the directory in your `$fpath`, e.g. by adding in `~/.zshrc`
|
||||
|
||||
fpath=(~/.zsh/completion $fpath)
|
||||
|
||||
Make sure `compinit` is loaded or do it by adding in `~/.zshrc`
|
||||
|
||||
autoload -Uz compinit && compinit -i
|
||||
|
||||
Then reload your shell
|
||||
|
||||
exec $SHELL -l
|
||||
|
||||
## Available completions
|
||||
|
||||
Depending on what you typed on the command line so far, it will complete
|
||||
|
||||
- available docker-compose commands
|
||||
- options that are available for a particular command
|
||||
- service names that make sense in a given context (e.g. services with running or stopped instances or services based on images vs. services based on Dockerfiles). For `docker-compose scale`, completed service names will automatically have "=" appended.
|
||||
- arguments for selected options, e.g. `docker-compose kill -s` will complete some signals like SIGHUP and SIGUSR1.
|
||||
|
||||
Enjoy working with Compose faster and with less typos!
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [User guide](index.md)
|
||||
- [Installing Compose](install.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with WordPress](wordpress.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
1132
docs/compose-file.md
1132
docs/compose-file.md
File diff suppressed because it is too large
Load Diff
194
docs/django.md
194
docs/django.md
@@ -1,194 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Quickstart: Compose and Django"
|
||||
description = "Getting started with Docker Compose and Django"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=4
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Quickstart: Docker Compose and Django
|
||||
|
||||
This quick-start guide demonstrates how to use Docker Compose to set up and run a simple Django/PostgreSQL app. Before starting, you'll need to have
|
||||
[Compose installed](install.md).
|
||||
|
||||
## Define the project components
|
||||
|
||||
For this project, you need to create a Dockerfile, a Python dependencies file,
|
||||
and a `docker-compose.yml` file.
|
||||
|
||||
1. Create an empty project directory.
|
||||
|
||||
You can name the directory something easy for you to remember. This directory is the context for your application image. The directory should only contain resources to build that image.
|
||||
|
||||
2. Create a new file called `Dockerfile` in your project directory.
|
||||
|
||||
The Dockerfile defines an application's image content via one or more build
|
||||
commands that configure that image. Once built, you can run the image in a
|
||||
container. For more information on `Dockerfiles`, see the [Docker user
|
||||
guide](https://docs.docker.com/engine/userguide/dockerimages/#building-an-image-from-a-dockerfile)
|
||||
and the [Dockerfile reference](https://docs.docker.com/engine/reference/builder/).
|
||||
|
||||
3. Add the following content to the `Dockerfile`.
|
||||
|
||||
FROM python:2.7
|
||||
ENV PYTHONUNBUFFERED 1
|
||||
RUN mkdir /code
|
||||
WORKDIR /code
|
||||
ADD requirements.txt /code/
|
||||
RUN pip install -r requirements.txt
|
||||
ADD . /code/
|
||||
|
||||
This `Dockerfile` starts with a Python 2.7 base image. The base image is
|
||||
modified by adding a new `code` directory. The base image is further modified
|
||||
by installing the Python requirements defined in the `requirements.txt` file.
|
||||
|
||||
4. Save and close the `Dockerfile`.
|
||||
|
||||
5. Create a `requirements.txt` in your project directory.
|
||||
|
||||
This file is used by the `RUN pip install -r requirements.txt` command in your `Dockerfile`.
|
||||
|
||||
6. Add the required software in the file.
|
||||
|
||||
Django
|
||||
psycopg2
|
||||
|
||||
7. Save and close the `requirements.txt` file.
|
||||
|
||||
8. Create a file called `docker-compose.yml` in your project directory.
|
||||
|
||||
The `docker-compose.yml` file describes the services that make your app. In
|
||||
this example those services are a web server and database. The compose file
|
||||
also describes which Docker images these services use, how they link
|
||||
together, any volumes they might need mounted inside the containers.
|
||||
Finally, the `docker-compose.yml` file describes which ports these services
|
||||
expose. See the [`docker-compose.yml` reference](compose-file.md) for more
|
||||
information on how this file works.
|
||||
|
||||
9. Add the following configuration to the file.
|
||||
|
||||
version: '2'
|
||||
services:
|
||||
db:
|
||||
image: postgres
|
||||
web:
|
||||
build: .
|
||||
command: python manage.py runserver 0.0.0.0:8000
|
||||
volumes:
|
||||
- .:/code
|
||||
ports:
|
||||
- "8000:8000"
|
||||
depends_on:
|
||||
- db
|
||||
|
||||
This file defines two services: The `db` service and the `web` service.
|
||||
|
||||
10. Save and close the `docker-compose.yml` file.
|
||||
|
||||
## Create a Django project
|
||||
|
||||
In this step, you create a Django started project by building the image from the build context defined in the previous procedure.
|
||||
|
||||
1. Change to the root of your project directory.
|
||||
|
||||
2. Create the Django project using the `docker-compose` command.
|
||||
|
||||
$ docker-compose run web django-admin.py startproject composeexample .
|
||||
|
||||
This instructs Compose to run `django-admin.py startproject composeeexample`
|
||||
in a container, using the `web` service's image and configuration. Because
|
||||
the `web` image doesn't exist yet, Compose builds it from the current
|
||||
directory, as specified by the `build: .` line in `docker-compose.yml`.
|
||||
|
||||
Once the `web` service image is built, Compose runs it and executes the
|
||||
`django-admin.py startproject` command in the container. This command
|
||||
instructs Django to create a set of files and directories representing a
|
||||
Django project.
|
||||
|
||||
3. After the `docker-compose` command completes, list the contents of your project.
|
||||
|
||||
$ ls -l
|
||||
drwxr-xr-x 2 root root composeexample
|
||||
-rw-rw-r-- 1 user user docker-compose.yml
|
||||
-rw-rw-r-- 1 user user Dockerfile
|
||||
-rwxr-xr-x 1 root root manage.py
|
||||
-rw-rw-r-- 1 user user requirements.txt
|
||||
|
||||
If you are running Docker on Linux, the files `django-admin` created are owned
|
||||
by root. This happens because the container runs as the root user. Change the
|
||||
ownership of the the new files.
|
||||
|
||||
sudo chown -R $USER:$USER .
|
||||
|
||||
If you are running Docker on Mac or Windows, you should already have ownership
|
||||
of all files, including those generated by `django-admin`. List the files just
|
||||
verify this.
|
||||
|
||||
$ ls -l
|
||||
total 32
|
||||
-rw-r--r-- 1 user staff 145 Feb 13 23:00 Dockerfile
|
||||
drwxr-xr-x 6 user staff 204 Feb 13 23:07 composeexample
|
||||
-rw-r--r-- 1 user staff 159 Feb 13 23:02 docker-compose.yml
|
||||
-rwxr-xr-x 1 user staff 257 Feb 13 23:07 manage.py
|
||||
-rw-r--r-- 1 user staff 16 Feb 13 23:01 requirements.txt
|
||||
|
||||
|
||||
## Connect the database
|
||||
|
||||
In this section, you set up the database connection for Django.
|
||||
|
||||
1. In your project directory, edit the `composeexample/settings.py` file.
|
||||
|
||||
2. Replace the `DATABASES = ...` with the following:
|
||||
|
||||
DATABASES = {
|
||||
'default': {
|
||||
'ENGINE': 'django.db.backends.postgresql_psycopg2',
|
||||
'NAME': 'postgres',
|
||||
'USER': 'postgres',
|
||||
'HOST': 'db',
|
||||
'PORT': 5432,
|
||||
}
|
||||
}
|
||||
|
||||
These settings are determined by the
|
||||
[postgres](https://hub.docker.com/_/postgres/) Docker image
|
||||
specified in `docker-compose.yml`.
|
||||
|
||||
3. Save and close the file.
|
||||
|
||||
4. Run the `docker-compose up` command.
|
||||
|
||||
$ docker-compose up
|
||||
Starting composepractice_db_1...
|
||||
Starting composepractice_web_1...
|
||||
Attaching to composepractice_db_1, composepractice_web_1
|
||||
...
|
||||
db_1 | PostgreSQL init process complete; ready for start up.
|
||||
...
|
||||
db_1 | LOG: database system is ready to accept connections
|
||||
db_1 | LOG: autovacuum launcher started
|
||||
..
|
||||
web_1 | Django version 1.8.4, using settings 'composeexample.settings'
|
||||
web_1 | Starting development server at http://0.0.0.0:8000/
|
||||
web_1 | Quit the server with CONTROL-C.
|
||||
|
||||
At this point, your Django app should be running at port `8000` on your
|
||||
Docker host. If you are using a Docker Machine VM, you can use the
|
||||
`docker-machine ip MACHINE_NAME` to get the IP address.
|
||||
|
||||

|
||||
|
||||
## More Compose documentation
|
||||
|
||||
- [User guide](index.md)
|
||||
- [Installing Compose](install.md)
|
||||
- [Getting Started](gettingstarted.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with WordPress](wordpress.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
@@ -1,43 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Environment file"
|
||||
description = "Declaring default environment variables in file"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, environment, env file"]
|
||||
[menu.main]
|
||||
parent = "workw_compose"
|
||||
weight=10
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Environment file
|
||||
|
||||
Compose supports declaring default environment variables in an environment
|
||||
file named `.env` and placed in the same folder as your
|
||||
[compose file](compose-file.md).
|
||||
|
||||
Compose expects each line in an env file to be in `VAR=VAL` format. Lines
|
||||
beginning with `#` (i.e. comments) are ignored, as are blank lines.
|
||||
|
||||
> Note: Values present in the environment at runtime will always override
|
||||
> those defined inside the `.env` file. Similarly, values passed via
|
||||
> command-line arguments take precedence as well.
|
||||
|
||||
Those environment variables will be used for
|
||||
[variable substitution](compose-file.md#variable-substitution) in your Compose
|
||||
file, but can also be used to define the following
|
||||
[CLI variables](reference/envvars.md):
|
||||
|
||||
- `COMPOSE_API_VERSION`
|
||||
- `COMPOSE_FILE`
|
||||
- `COMPOSE_HTTP_TIMEOUT`
|
||||
- `COMPOSE_PROJECT_NAME`
|
||||
- `DOCKER_CERT_PATH`
|
||||
- `DOCKER_HOST`
|
||||
- `DOCKER_TLS_VERIFY`
|
||||
|
||||
## More Compose documentation
|
||||
|
||||
- [User guide](index.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
354
docs/extends.md
354
docs/extends.md
@@ -1,354 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Extending Services in Compose"
|
||||
description = "How to use Docker Compose's extends keyword to share configuration between files and projects"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, documentation, docs"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=20
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Extending services and Compose files
|
||||
|
||||
Compose supports two methods of sharing common configuration:
|
||||
|
||||
1. Extending an entire Compose file by
|
||||
[using multiple Compose files](#multiple-compose-files)
|
||||
2. Extending individual services with [the `extends` field](#extending-services)
|
||||
|
||||
|
||||
## Multiple Compose files
|
||||
|
||||
Using multiple Compose files enables you to customize a Compose application
|
||||
for different environments or different workflows.
|
||||
|
||||
### Understanding multiple Compose files
|
||||
|
||||
By default, Compose reads two files, a `docker-compose.yml` and an optional
|
||||
`docker-compose.override.yml` file. By convention, the `docker-compose.yml`
|
||||
contains your base configuration. The override file, as its name implies, can
|
||||
contain configuration overrides for existing services or entirely new
|
||||
services.
|
||||
|
||||
If a service is defined in both files Compose merges the configurations using
|
||||
the rules described in [Adding and overriding
|
||||
configuration](#adding-and-overriding-configuration).
|
||||
|
||||
To use multiple override files, or an override file with a different name, you
|
||||
can use the `-f` option to specify the list of files. Compose merges files in
|
||||
the order they're specified on the command line. See the [`docker-compose`
|
||||
command reference](./reference/overview.md) for more information about
|
||||
using `-f`.
|
||||
|
||||
When you use multiple configuration files, you must make sure all paths in the
|
||||
files are relative to the base Compose file (the first Compose file specified
|
||||
with `-f`). This is required because override files need not be valid
|
||||
Compose files. Override files can contain small fragments of configuration.
|
||||
Tracking which fragment of a service is relative to which path is difficult and
|
||||
confusing, so to keep paths easier to understand, all paths must be defined
|
||||
relative to the base file.
|
||||
|
||||
### Example use case
|
||||
|
||||
In this section are two common use cases for multiple compose files: changing a
|
||||
Compose app for different environments, and running administrative tasks
|
||||
against a Compose app.
|
||||
|
||||
#### Different environments
|
||||
|
||||
A common use case for multiple files is changing a development Compose app
|
||||
for a production-like environment (which may be production, staging or CI).
|
||||
To support these differences, you can split your Compose configuration into
|
||||
a few different files:
|
||||
|
||||
Start with a base file that defines the canonical configuration for the
|
||||
services.
|
||||
|
||||
**docker-compose.yml**
|
||||
|
||||
web:
|
||||
image: example/my_web_app:latest
|
||||
links:
|
||||
- db
|
||||
- cache
|
||||
|
||||
db:
|
||||
image: postgres:latest
|
||||
|
||||
cache:
|
||||
image: redis:latest
|
||||
|
||||
In this example the development configuration exposes some ports to the
|
||||
host, mounts our code as a volume, and builds the web image.
|
||||
|
||||
**docker-compose.override.yml**
|
||||
|
||||
|
||||
web:
|
||||
build: .
|
||||
volumes:
|
||||
- '.:/code'
|
||||
ports:
|
||||
- 8883:80
|
||||
environment:
|
||||
DEBUG: 'true'
|
||||
|
||||
db:
|
||||
command: '-d'
|
||||
ports:
|
||||
- 5432:5432
|
||||
|
||||
cache:
|
||||
ports:
|
||||
- 6379:6379
|
||||
|
||||
When you run `docker-compose up` it reads the overrides automatically.
|
||||
|
||||
Now, it would be nice to use this Compose app in a production environment. So,
|
||||
create another override file (which might be stored in a different git
|
||||
repo or managed by a different team).
|
||||
|
||||
**docker-compose.prod.yml**
|
||||
|
||||
web:
|
||||
ports:
|
||||
- 80:80
|
||||
environment:
|
||||
PRODUCTION: 'true'
|
||||
|
||||
cache:
|
||||
environment:
|
||||
TTL: '500'
|
||||
|
||||
To deploy with this production Compose file you can run
|
||||
|
||||
docker-compose -f docker-compose.yml -f docker-compose.prod.yml up -d
|
||||
|
||||
This deploys all three services using the configuration in
|
||||
`docker-compose.yml` and `docker-compose.prod.yml` (but not the
|
||||
dev configuration in `docker-compose.override.yml`).
|
||||
|
||||
|
||||
See [production](production.md) for more information about Compose in
|
||||
production.
|
||||
|
||||
#### Administrative tasks
|
||||
|
||||
Another common use case is running adhoc or administrative tasks against one
|
||||
or more services in a Compose app. This example demonstrates running a
|
||||
database backup.
|
||||
|
||||
Start with a **docker-compose.yml**.
|
||||
|
||||
web:
|
||||
image: example/my_web_app:latest
|
||||
links:
|
||||
- db
|
||||
|
||||
db:
|
||||
image: postgres:latest
|
||||
|
||||
In a **docker-compose.admin.yml** add a new service to run the database
|
||||
export or backup.
|
||||
|
||||
dbadmin:
|
||||
build: database_admin/
|
||||
links:
|
||||
- db
|
||||
|
||||
To start a normal environment run `docker-compose up -d`. To run a database
|
||||
backup, include the `docker-compose.admin.yml` as well.
|
||||
|
||||
docker-compose -f docker-compose.yml -f docker-compose.admin.yml \
|
||||
run dbadmin db-backup
|
||||
|
||||
|
||||
## Extending services
|
||||
|
||||
Docker Compose's `extends` keyword enables sharing of common configurations
|
||||
among different files, or even different projects entirely. Extending services
|
||||
is useful if you have several services that reuse a common set of configuration
|
||||
options. Using `extends` you can define a common set of service options in one
|
||||
place and refer to it from anywhere.
|
||||
|
||||
> **Note:** `links`, `volumes_from`, and `depends_on` are never shared between
|
||||
> services using `extends`. These exceptions exist to avoid
|
||||
> implicit dependencies—you always define `links` and `volumes_from`
|
||||
> locally. This ensures dependencies between services are clearly visible when
|
||||
> reading the current file. Defining these locally also ensures changes to the
|
||||
> referenced file don't result in breakage.
|
||||
|
||||
### Understand the extends configuration
|
||||
|
||||
When defining any service in `docker-compose.yml`, you can declare that you are
|
||||
extending another service like this:
|
||||
|
||||
web:
|
||||
extends:
|
||||
file: common-services.yml
|
||||
service: webapp
|
||||
|
||||
This instructs Compose to re-use the configuration for the `webapp` service
|
||||
defined in the `common-services.yml` file. Suppose that `common-services.yml`
|
||||
looks like this:
|
||||
|
||||
webapp:
|
||||
build: .
|
||||
ports:
|
||||
- "8000:8000"
|
||||
volumes:
|
||||
- "/data"
|
||||
|
||||
In this case, you'll get exactly the same result as if you wrote
|
||||
`docker-compose.yml` with the same `build`, `ports` and `volumes` configuration
|
||||
values defined directly under `web`.
|
||||
|
||||
You can go further and define (or re-define) configuration locally in
|
||||
`docker-compose.yml`:
|
||||
|
||||
web:
|
||||
extends:
|
||||
file: common-services.yml
|
||||
service: webapp
|
||||
environment:
|
||||
- DEBUG=1
|
||||
cpu_shares: 5
|
||||
|
||||
important_web:
|
||||
extends: web
|
||||
cpu_shares: 10
|
||||
|
||||
You can also write other services and link your `web` service to them:
|
||||
|
||||
web:
|
||||
extends:
|
||||
file: common-services.yml
|
||||
service: webapp
|
||||
environment:
|
||||
- DEBUG=1
|
||||
cpu_shares: 5
|
||||
links:
|
||||
- db
|
||||
db:
|
||||
image: postgres
|
||||
|
||||
### Example use case
|
||||
|
||||
Extending an individual service is useful when you have multiple services that
|
||||
have a common configuration. The example below is a Compose app with
|
||||
two services: a web application and a queue worker. Both services use the same
|
||||
codebase and share many configuration options.
|
||||
|
||||
In a **common.yml** we define the common configuration:
|
||||
|
||||
app:
|
||||
build: .
|
||||
environment:
|
||||
CONFIG_FILE_PATH: /code/config
|
||||
API_KEY: xxxyyy
|
||||
cpu_shares: 5
|
||||
|
||||
In a **docker-compose.yml** we define the concrete services which use the
|
||||
common configuration:
|
||||
|
||||
webapp:
|
||||
extends:
|
||||
file: common.yml
|
||||
service: app
|
||||
command: /code/run_web_app
|
||||
ports:
|
||||
- 8080:8080
|
||||
links:
|
||||
- queue
|
||||
- db
|
||||
|
||||
queue_worker:
|
||||
extends:
|
||||
file: common.yml
|
||||
service: app
|
||||
command: /code/run_worker
|
||||
links:
|
||||
- queue
|
||||
|
||||
## Adding and overriding configuration
|
||||
|
||||
Compose copies configurations from the original service over to the local one.
|
||||
If a configuration option is defined in both the original service and the local
|
||||
service, the local value *replaces* or *extends* the original value.
|
||||
|
||||
For single-value options like `image`, `command` or `mem_limit`, the new value
|
||||
replaces the old value.
|
||||
|
||||
# original service
|
||||
command: python app.py
|
||||
|
||||
# local service
|
||||
command: python otherapp.py
|
||||
|
||||
# result
|
||||
command: python otherapp.py
|
||||
|
||||
> **Note:** In the case of `build` and `image`, when using
|
||||
> [version 1 of the Compose file format](compose-file.md#version-1), using one
|
||||
> option in the local service causes Compose to discard the other option if it
|
||||
> was defined in the original service.
|
||||
>
|
||||
> For example, if the original service defines `image: webapp` and the
|
||||
> local service defines `build: .` then the resulting service will have
|
||||
> `build: .` and no `image` option.
|
||||
>
|
||||
> This is because `build` and `image` cannot be used together in a version 1
|
||||
> file.
|
||||
|
||||
For the **multi-value options** `ports`, `expose`, `external_links`, `dns`,
|
||||
`dns_search`, and `tmpfs`, Compose concatenates both sets of values:
|
||||
|
||||
# original service
|
||||
expose:
|
||||
- "3000"
|
||||
|
||||
# local service
|
||||
expose:
|
||||
- "4000"
|
||||
- "5000"
|
||||
|
||||
# result
|
||||
expose:
|
||||
- "3000"
|
||||
- "4000"
|
||||
- "5000"
|
||||
|
||||
In the case of `environment`, `labels`, `volumes` and `devices`, Compose
|
||||
"merges" entries together with locally-defined values taking precedence:
|
||||
|
||||
# original service
|
||||
environment:
|
||||
- FOO=original
|
||||
- BAR=original
|
||||
|
||||
# local service
|
||||
environment:
|
||||
- BAR=local
|
||||
- BAZ=local
|
||||
|
||||
# result
|
||||
environment:
|
||||
- FOO=original
|
||||
- BAR=local
|
||||
- BAZ=local
|
||||
|
||||
|
||||
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [User guide](index.md)
|
||||
- [Installing Compose](install.md)
|
||||
- [Getting Started](gettingstarted.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with WordPress](wordpress.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
128
docs/faq.md
128
docs/faq.md
@@ -1,128 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Frequently Asked Questions"
|
||||
description = "Docker Compose FAQ"
|
||||
keywords = "documentation, docs, docker, compose, faq"
|
||||
[menu.main]
|
||||
identifier="faq.compose"
|
||||
parent="workw_compose"
|
||||
weight=90
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Frequently asked questions
|
||||
|
||||
If you don’t see your question here, feel free to drop by `#docker-compose` on
|
||||
freenode IRC and ask the community.
|
||||
|
||||
|
||||
## Can I control service startup order?
|
||||
|
||||
Yes - see [Controlling startup order](startup-order.md).
|
||||
|
||||
|
||||
## Why do my services take 10 seconds to recreate or stop?
|
||||
|
||||
Compose stop attempts to stop a container by sending a `SIGTERM`. It then waits
|
||||
for a [default timeout of 10 seconds](./reference/stop.md). After the timeout,
|
||||
a `SIGKILL` is sent to the container to forcefully kill it. If you
|
||||
are waiting for this timeout, it means that your containers aren't shutting down
|
||||
when they receive the `SIGTERM` signal.
|
||||
|
||||
There has already been a lot written about this problem of
|
||||
[processes handling signals](https://medium.com/@gchudnov/trapping-signals-in-docker-containers-7a57fdda7d86)
|
||||
in containers.
|
||||
|
||||
To fix this problem, try the following:
|
||||
|
||||
* Make sure you're using the JSON form of `CMD` and `ENTRYPOINT`
|
||||
in your Dockerfile.
|
||||
|
||||
For example use `["program", "arg1", "arg2"]` not `"program arg1 arg2"`.
|
||||
Using the string form causes Docker to run your process using `bash` which
|
||||
doesn't handle signals properly. Compose always uses the JSON form, so don't
|
||||
worry if you override the command or entrypoint in your Compose file.
|
||||
|
||||
* If you are able, modify the application that you're running to
|
||||
add an explicit signal handler for `SIGTERM`.
|
||||
|
||||
* Set the `stop_signal` to a signal which the application knows how to handle:
|
||||
|
||||
web:
|
||||
build: .
|
||||
stop_signal: SIGINT
|
||||
|
||||
* If you can't modify the application, wrap the application in a lightweight init
|
||||
system (like [s6](http://skarnet.org/software/s6/)) or a signal proxy (like
|
||||
[dumb-init](https://github.com/Yelp/dumb-init) or
|
||||
[tini](https://github.com/krallin/tini)). Either of these wrappers take care of
|
||||
handling `SIGTERM` properly.
|
||||
|
||||
## How do I run multiple copies of a Compose file on the same host?
|
||||
|
||||
Compose uses the project name to create unique identifiers for all of a
|
||||
project's containers and other resources. To run multiple copies of a project,
|
||||
set a custom project name using the [`-p` command line
|
||||
option](./reference/overview.md) or the [`COMPOSE_PROJECT_NAME`
|
||||
environment variable](./reference/envvars.md#compose-project-name).
|
||||
|
||||
## What's the difference between `up`, `run`, and `start`?
|
||||
|
||||
Typically, you want `docker-compose up`. Use `up` to start or restart all the
|
||||
services defined in a `docker-compose.yml`. In the default "attached"
|
||||
mode, you'll see all the logs from all the containers. In "detached" mode (`-d`),
|
||||
Compose exits after starting the containers, but the containers continue to run
|
||||
in the background.
|
||||
|
||||
The `docker-compose run` command is for running "one-off" or "adhoc" tasks. It
|
||||
requires the service name you want to run and only starts containers for services
|
||||
that the running service depends on. Use `run` to run tests or perform
|
||||
an administrative task such as removing or adding data to a data volume
|
||||
container. The `run` command acts like `docker run -ti` in that it opens an
|
||||
interactive terminal to the container and returns an exit status matching the
|
||||
exit status of the process in the container.
|
||||
|
||||
The `docker-compose start` command is useful only to restart containers
|
||||
that were previously created, but were stopped. It never creates new
|
||||
containers.
|
||||
|
||||
## Can I use json instead of yaml for my Compose file?
|
||||
|
||||
Yes. [Yaml is a superset of json](http://stackoverflow.com/a/1729545/444646) so
|
||||
any JSON file should be valid Yaml. To use a JSON file with Compose,
|
||||
specify the filename to use, for example:
|
||||
|
||||
```bash
|
||||
docker-compose -f docker-compose.json up
|
||||
```
|
||||
|
||||
## Should I include my code with `COPY`/`ADD` or a volume?
|
||||
|
||||
You can add your code to the image using `COPY` or `ADD` directive in a
|
||||
`Dockerfile`. This is useful if you need to relocate your code along with the
|
||||
Docker image, for example when you're sending code to another environment
|
||||
(production, CI, etc).
|
||||
|
||||
You should use a `volume` if you want to make changes to your code and see them
|
||||
reflected immediately, for example when you're developing code and your server
|
||||
supports hot code reloading or live-reload.
|
||||
|
||||
There may be cases where you'll want to use both. You can have the image
|
||||
include the code using a `COPY`, and use a `volume` in your Compose file to
|
||||
include the code from the host during development. The volume overrides
|
||||
the directory contents of the image.
|
||||
|
||||
## Where can I find example compose files?
|
||||
|
||||
There are [many examples of Compose files on
|
||||
github](https://github.com/search?q=in%3Apath+docker-compose.yml+extension%3Ayml&type=Code).
|
||||
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [Installing Compose](install.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with WordPress](wordpress.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
@@ -1,191 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Getting Started"
|
||||
description = "Getting started with Docker Compose"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=-85
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Getting Started
|
||||
|
||||
On this page you build a simple Python web application running on Docker Compose. The
|
||||
application uses the Flask framework and increments a value in Redis. While the
|
||||
sample uses Python, the concepts demonstrated here should be understandable even
|
||||
if you're not familiar with it.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Make sure you have already
|
||||
[installed both Docker Engine and Docker Compose](install.md). You
|
||||
don't need to install Python, it is provided by a Docker image.
|
||||
|
||||
## Step 1: Setup
|
||||
|
||||
1. Create a directory for the project:
|
||||
|
||||
$ mkdir composetest
|
||||
$ cd composetest
|
||||
|
||||
2. With your favorite text editor create a file called `app.py` in your project
|
||||
directory.
|
||||
|
||||
from flask import Flask
|
||||
from redis import Redis
|
||||
|
||||
app = Flask(__name__)
|
||||
redis = Redis(host='redis', port=6379)
|
||||
|
||||
@app.route('/')
|
||||
def hello():
|
||||
redis.incr('hits')
|
||||
return 'Hello World! I have been seen %s times.' % redis.get('hits')
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(host="0.0.0.0", debug=True)
|
||||
|
||||
3. Create another file called `requirements.txt` in your project directory and
|
||||
add the following:
|
||||
|
||||
flask
|
||||
redis
|
||||
|
||||
These define the application's dependencies.
|
||||
|
||||
## Step 2: Create a Docker image
|
||||
|
||||
In this step, you build a new Docker image. The image contains all the
|
||||
dependencies the Python application requires, including Python itself.
|
||||
|
||||
1. In your project directory create a file named `Dockerfile` and add the
|
||||
following:
|
||||
|
||||
FROM python:2.7
|
||||
ADD . /code
|
||||
WORKDIR /code
|
||||
RUN pip install -r requirements.txt
|
||||
CMD python app.py
|
||||
|
||||
This tells Docker to:
|
||||
|
||||
* Build an image starting with the Python 2.7 image.
|
||||
* Add the current directory `.` into the path `/code` in the image.
|
||||
* Set the working directory to `/code`.
|
||||
* Install the Python dependencies.
|
||||
* Set the default command for the container to `python app.py`
|
||||
|
||||
For more information on how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/engine/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/).
|
||||
|
||||
2. Build the image.
|
||||
|
||||
$ docker build -t web .
|
||||
|
||||
This command builds an image named `web` from the contents of the current
|
||||
directory. The command automatically locates the `Dockerfile`, `app.py`, and
|
||||
`requirements.txt` files.
|
||||
|
||||
|
||||
## Step 3: Define services
|
||||
|
||||
Define a set of services using `docker-compose.yml`:
|
||||
|
||||
1. Create a file called docker-compose.yml in your project directory and add
|
||||
the following:
|
||||
|
||||
|
||||
version: '2'
|
||||
services:
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "5000:5000"
|
||||
volumes:
|
||||
- .:/code
|
||||
depends_on:
|
||||
- redis
|
||||
redis:
|
||||
image: redis
|
||||
|
||||
This Compose file defines two services, `web` and `redis`. The web service:
|
||||
|
||||
* Builds from the `Dockerfile` in the current directory.
|
||||
* Forwards the exposed port 5000 on the container to port 5000 on the host machine.
|
||||
* Mounts the project directory on the host to `/code` inside the container allowing you to modify the code without having to rebuild the image.
|
||||
* Links the web service to the Redis service.
|
||||
|
||||
The `redis` service uses the latest public [Redis](https://registry.hub.docker.com/_/redis/) image pulled from the Docker Hub registry.
|
||||
|
||||
## Step 4: Build and run your app with Compose
|
||||
|
||||
1. From your project directory, start up your application.
|
||||
|
||||
$ docker-compose up
|
||||
Pulling image redis...
|
||||
Building web...
|
||||
Starting composetest_redis_1...
|
||||
Starting composetest_web_1...
|
||||
redis_1 | [8] 02 Jan 18:43:35.576 # Server started, Redis version 2.8.3
|
||||
web_1 | * Running on http://0.0.0.0:5000/
|
||||
web_1 | * Restarting with stat
|
||||
|
||||
Compose pulls a Redis image, builds an image for your code, and starts the
|
||||
services you defined.
|
||||
|
||||
2. Enter `http://0.0.0.0:5000/` in a browser to see the application running.
|
||||
|
||||
If you're using Docker on Linux natively, then the web app should now be
|
||||
listening on port 5000 on your Docker daemon host. If http://0.0.0.0:5000
|
||||
doesn't resolve, you can also try http://localhost:5000.
|
||||
|
||||
If you're using Docker Machine on a Mac, use `docker-machine ip MACHINE_VM` to get
|
||||
the IP address of your Docker host. Then, `open http://MACHINE_VM_IP:5000` in a
|
||||
browser.
|
||||
|
||||
You should see a message in your browser saying:
|
||||
|
||||
`Hello World! I have been seen 1 times.`
|
||||
|
||||
3. Refresh the page.
|
||||
|
||||
The number should increment.
|
||||
|
||||
## Step 5: Experiment with some other commands
|
||||
|
||||
If you want to run your services in the background, you can pass the `-d` flag
|
||||
(for "detached" mode) to `docker-compose up` and use `docker-compose ps` to
|
||||
see what is currently running:
|
||||
|
||||
$ docker-compose up -d
|
||||
Starting composetest_redis_1...
|
||||
Starting composetest_web_1...
|
||||
$ docker-compose ps
|
||||
Name Command State Ports
|
||||
-------------------------------------------------------------------
|
||||
composetest_redis_1 /usr/local/bin/run Up
|
||||
composetest_web_1 /bin/sh -c python app.py Up 5000->5000/tcp
|
||||
|
||||
The `docker-compose run` command allows you to run one-off commands for your
|
||||
services. For example, to see what environment variables are available to the
|
||||
`web` service:
|
||||
|
||||
$ docker-compose run web env
|
||||
|
||||
See `docker-compose --help` to see other available commands. You can also install [command completion](completion.md) for the bash and zsh shell, which will also show you available commands.
|
||||
|
||||
If you started Compose with `docker-compose up -d`, you'll probably want to stop
|
||||
your services once you've finished with them:
|
||||
|
||||
$ docker-compose stop
|
||||
|
||||
At this point, you have seen the basics of how Compose works.
|
||||
|
||||
|
||||
## Where to go next
|
||||
|
||||
- Next, try the quick start guide for [Django](django.md),
|
||||
[Rails](rails.md), or [WordPress](wordpress.md).
|
||||
- [Explore the full list of Compose commands](./reference/index.md)
|
||||
- [Compose configuration file reference](compose-file.md)
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 28 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 69 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 69 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 29 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 61 KiB |
@@ -1,30 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Docker Compose"
|
||||
description = "Introduction and Overview of Compose"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers"]
|
||||
[menu.main]
|
||||
identifier="workw_compose"
|
||||
weight=-70
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Docker Compose
|
||||
|
||||
Compose is a tool for defining and running multi-container Docker applications. To learn more about Compose refer to the following documentation:
|
||||
|
||||
- [Compose Overview](overview.md)
|
||||
- [Install Compose](install.md)
|
||||
- [Getting Started](gettingstarted.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with WordPress](wordpress.md)
|
||||
- [Frequently asked questions](faq.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
- [Environment file](env-file.md)
|
||||
|
||||
To see a detailed list of changes for past and current releases of Docker
|
||||
Compose, please refer to the
|
||||
[CHANGELOG](https://github.com/docker/compose/blob/master/CHANGELOG.md).
|
||||
136
docs/install.md
136
docs/install.md
@@ -1,136 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Install Compose"
|
||||
description = "How to install Docker Compose"
|
||||
keywords = ["compose, orchestration, install, installation, docker, documentation"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=-90
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Install Docker Compose
|
||||
|
||||
You can run Compose on OS X, Windows and 64-bit Linux. To install it, you'll need to install Docker first.
|
||||
|
||||
To install Compose, do the following:
|
||||
|
||||
1. Install Docker Engine:
|
||||
|
||||
* <a href="/engine/installation/mac/" target="_blank">Mac OS X installation</a>
|
||||
|
||||
* <a href="/engine/installation/windows/" target="_blank">Windows installation</a>
|
||||
|
||||
* <a href="/engine/installation/ubuntulinux/" target="_blank">Ubuntu installation</a>
|
||||
|
||||
* <a href="/engine/installation/" target="_blank">other system installations</a>
|
||||
|
||||
2. The Docker Toolbox installation includes both Engine and Compose, so Mac and Windows users are done installing. Others should continue to the next step.
|
||||
|
||||
3. Go to the <a href="https://github.com/docker/compose/releases" target="_blank">Compose repository release page on GitHub</a>.
|
||||
|
||||
4. Follow the instructions from the release page and run the `curl` command,
|
||||
which the release page specifies, in your terminal.
|
||||
|
||||
> Note: If you get a "Permission denied" error, your `/usr/local/bin` directory
|
||||
probably isn't writable and you'll need to install Compose as the superuser. Run
|
||||
`sudo -i`, then the two commands below, then `exit`.
|
||||
|
||||
The following is an example command illustrating the format:
|
||||
|
||||
curl -L https://github.com/docker/compose/releases/download/1.7.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
|
||||
|
||||
If you have problems installing with `curl`, see
|
||||
[Alternative Install Options](#alternative-install-options).
|
||||
|
||||
5. Apply executable permissions to the binary:
|
||||
|
||||
$ chmod +x /usr/local/bin/docker-compose
|
||||
|
||||
6. Optionally, install [command completion](completion.md) for the
|
||||
`bash` and `zsh` shell.
|
||||
|
||||
7. Test the installation.
|
||||
|
||||
$ docker-compose --version
|
||||
docker-compose version: 1.7.0
|
||||
|
||||
|
||||
## Alternative install options
|
||||
|
||||
### Install using pip
|
||||
|
||||
Compose can be installed from [pypi](https://pypi.python.org/pypi/docker-compose)
|
||||
using `pip`. If you install using `pip` it is highly recommended that you use a
|
||||
[virtualenv](https://virtualenv.pypa.io/en/latest/) because many operating systems
|
||||
have python system packages that conflict with docker-compose dependencies. See
|
||||
the [virtualenv tutorial](http://docs.python-guide.org/en/latest/dev/virtualenvs/)
|
||||
to get started.
|
||||
|
||||
$ pip install docker-compose
|
||||
|
||||
> **Note:** pip version 6.0 or greater is required
|
||||
|
||||
### Install as a container
|
||||
|
||||
Compose can also be run inside a container, from a small bash script wrapper.
|
||||
To install compose as a container run:
|
||||
|
||||
$ curl -L https://github.com/docker/compose/releases/download/1.7.0/run.sh > /usr/local/bin/docker-compose
|
||||
$ chmod +x /usr/local/bin/docker-compose
|
||||
|
||||
## Master builds
|
||||
|
||||
If you're interested in trying out a pre-release build you can download a
|
||||
binary from https://dl.bintray.com/docker-compose/master/. Pre-release
|
||||
builds allow you to try out new features before they are released, but may
|
||||
be less stable.
|
||||
|
||||
|
||||
## Upgrading
|
||||
|
||||
If you're upgrading from Compose 1.2 or earlier, you'll need to remove or migrate
|
||||
your existing containers after upgrading Compose. This is because, as of version
|
||||
1.3, Compose uses Docker labels to keep track of containers, and so they need to
|
||||
be recreated with labels added.
|
||||
|
||||
If Compose detects containers that were created without labels, it will refuse
|
||||
to run so that you don't end up with two sets of them. If you want to keep using
|
||||
your existing containers (for example, because they have data volumes you want
|
||||
to preserve) you can use compose 1.5.x to migrate them with the following command:
|
||||
|
||||
$ docker-compose migrate-to-labels
|
||||
|
||||
Alternatively, if you're not worried about keeping them, you can remove them.
|
||||
Compose will just create new ones.
|
||||
|
||||
$ docker rm -f -v myapp_web_1 myapp_db_1 ...
|
||||
|
||||
|
||||
## Uninstallation
|
||||
|
||||
To uninstall Docker Compose if you installed using `curl`:
|
||||
|
||||
$ rm /usr/local/bin/docker-compose
|
||||
|
||||
|
||||
To uninstall Docker Compose if you installed using `pip`:
|
||||
|
||||
$ pip uninstall docker-compose
|
||||
|
||||
>**Note**: If you get a "Permission denied" error using either of the above
|
||||
>methods, you probably do not have the proper permissions to remove
|
||||
>`docker-compose`. To force the removal, prepend `sudo` to either of the above
|
||||
>commands and run again.
|
||||
|
||||
|
||||
## Where to go next
|
||||
|
||||
- [User guide](index.md)
|
||||
- [Getting Started](gettingstarted.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with WordPress](wordpress.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
@@ -1,48 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Link Environment Variables"
|
||||
description = "Compose CLI reference"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, reference"]
|
||||
aliases = ["/compose/env"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=89
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Link environment variables reference
|
||||
|
||||
> **Note:** Environment variables are no longer the recommended method for connecting to linked services. Instead, you should use the link name (by default, the name of the linked service) as the hostname to connect to. See the [docker-compose.yml documentation](compose-file.md#links) for details.
|
||||
>
|
||||
> Environment variables will only be populated if you're using the [legacy version 1 Compose file format](compose-file.md#versioning).
|
||||
|
||||
Compose uses [Docker links] to expose services' containers to one another. Each linked container injects a set of environment variables, each of which begins with the uppercase name of the container.
|
||||
|
||||
To see what environment variables are available to a service, run `docker-compose run SERVICE env`.
|
||||
|
||||
<b><i>name</i>\_PORT</b><br>
|
||||
Full URL, e.g. `DB_PORT=tcp://172.17.0.5:5432`
|
||||
|
||||
<b><i>name</i>\_PORT\_<i>num</i>\_<i>protocol</i></b><br>
|
||||
Full URL, e.g. `DB_PORT_5432_TCP=tcp://172.17.0.5:5432`
|
||||
|
||||
<b><i>name</i>\_PORT\_<i>num</i>\_<i>protocol</i>\_ADDR</b><br>
|
||||
Container's IP address, e.g. `DB_PORT_5432_TCP_ADDR=172.17.0.5`
|
||||
|
||||
<b><i>name</i>\_PORT\_<i>num</i>\_<i>protocol</i>\_PORT</b><br>
|
||||
Exposed port number, e.g. `DB_PORT_5432_TCP_PORT=5432`
|
||||
|
||||
<b><i>name</i>\_PORT\_<i>num</i>\_<i>protocol</i>\_PROTO</b><br>
|
||||
Protocol (tcp or udp), e.g. `DB_PORT_5432_TCP_PROTO=tcp`
|
||||
|
||||
<b><i>name</i>\_NAME</b><br>
|
||||
Fully qualified container name, e.g. `DB_1_NAME=/myapp_web_1/myapp_db_1`
|
||||
|
||||
[Docker links]: https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/
|
||||
|
||||
## Related Information
|
||||
|
||||
- [User guide](index.md)
|
||||
- [Installing Compose](install.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
@@ -1,154 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Networking in Compose"
|
||||
description = "How Compose sets up networking between containers"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers, networking"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=21
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Networking in Compose
|
||||
|
||||
> **Note:** This document only applies if you're using [version 2 of the Compose file format](compose-file.md#versioning). Networking features are not supported for version 1 (legacy) Compose files.
|
||||
|
||||
By default Compose sets up a single
|
||||
[network](https://docs.docker.com/engine/reference/commandline/network_create/) for your app. Each
|
||||
container for a service joins the default network and is both *reachable* by
|
||||
other containers on that network, and *discoverable* by them at a hostname
|
||||
identical to the container name.
|
||||
|
||||
> **Note:** Your app's network is given a name based on the "project name",
|
||||
> which is based on the name of the directory it lives in. You can override the
|
||||
> project name with either the [`--project-name`
|
||||
> flag](reference/overview.md) or the [`COMPOSE_PROJECT_NAME` environment
|
||||
> variable](reference/envvars.md#compose-project-name).
|
||||
|
||||
For example, suppose your app is in a directory called `myapp`, and your `docker-compose.yml` looks like this:
|
||||
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "8000:8000"
|
||||
db:
|
||||
image: postgres
|
||||
|
||||
When you run `docker-compose up`, the following happens:
|
||||
|
||||
1. A network called `myapp_default` is created.
|
||||
2. A container is created using `web`'s configuration. It joins the network
|
||||
`myapp_default` under the name `web`.
|
||||
3. A container is created using `db`'s configuration. It joins the network
|
||||
`myapp_default` under the name `db`.
|
||||
|
||||
Each container can now look up the hostname `web` or `db` and
|
||||
get back the appropriate container's IP address. For example, `web`'s
|
||||
application code could connect to the URL `postgres://db:5432` and start
|
||||
using the Postgres database.
|
||||
|
||||
Because `web` explicitly maps a port, it's also accessible from the outside world via port 8000 on your Docker host's network interface.
|
||||
|
||||
## Updating containers
|
||||
|
||||
If you make a configuration change to a service and run `docker-compose up` to update it, the old container will be removed and the new one will join the network under a different IP address but the same name. Running containers will be able to look up that name and connect to the new address, but the old address will stop working.
|
||||
|
||||
If any containers have connections open to the old container, they will be closed. It is a container's responsibility to detect this condition, look up the name again and reconnect.
|
||||
|
||||
## Links
|
||||
|
||||
Links allow you to define extra aliases by which a service is reachable from another service. They are not required to enable services to communicate - by default, any service can reach any other service at that service's name. In the following example, `db` is reachable from `web` at the hostnames `db` and `database`:
|
||||
|
||||
version: '2'
|
||||
services:
|
||||
web:
|
||||
build: .
|
||||
links:
|
||||
- "db:database"
|
||||
db:
|
||||
image: postgres
|
||||
|
||||
See the [links reference](compose-file.md#links) for more information.
|
||||
|
||||
## Multi-host networking
|
||||
|
||||
When [deploying a Compose application to a Swarm cluster](swarm.md), you can make use of the built-in `overlay` driver to enable multi-host communication between containers with no changes to your Compose file or application code.
|
||||
|
||||
Consult the [Getting started with multi-host networking](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) to see how to set up a Swarm cluster. The cluster will use the `overlay` driver by default, but you can specify it explicitly if you prefer - see below for how to do this.
|
||||
|
||||
## Specifying custom networks
|
||||
|
||||
Instead of just using the default app network, you can specify your own networks with the top-level `networks` key. This lets you create more complex topologies and specify [custom network drivers](https://docs.docker.com/engine/extend/plugins_network/) and options. You can also use it to connect services to externally-created networks which aren't managed by Compose.
|
||||
|
||||
Each service can specify what networks to connect to with the *service-level* `networks` key, which is a list of names referencing entries under the *top-level* `networks` key.
|
||||
|
||||
Here's an example Compose file defining two custom networks. The `proxy` service is isolated from the `db` service, because they do not share a network in common - only `app` can talk to both.
|
||||
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
proxy:
|
||||
build: ./proxy
|
||||
networks:
|
||||
- front
|
||||
app:
|
||||
build: ./app
|
||||
networks:
|
||||
- front
|
||||
- back
|
||||
db:
|
||||
image: postgres
|
||||
networks:
|
||||
- back
|
||||
|
||||
networks:
|
||||
front:
|
||||
# Use a custom driver
|
||||
driver: custom-driver-1
|
||||
back:
|
||||
# Use a custom driver which takes special options
|
||||
driver: custom-driver-2
|
||||
driver_opts:
|
||||
foo: "1"
|
||||
bar: "2"
|
||||
|
||||
Networks can be configured with static IP addresses by setting the [ipv4_address and/or ipv6_address](compose-file.md#ipv4-address-ipv6-address) for each attached network.
|
||||
|
||||
For full details of the network configuration options available, see the following references:
|
||||
|
||||
- [Top-level `networks` key](compose-file.md#network-configuration-reference)
|
||||
- [Service-level `networks` key](compose-file.md#networks)
|
||||
|
||||
## Configuring the default network
|
||||
|
||||
Instead of (or as well as) specifying your own networks, you can also change the settings of the app-wide default network by defining an entry under `networks` named `default`:
|
||||
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "8000:8000"
|
||||
db:
|
||||
image: postgres
|
||||
|
||||
networks:
|
||||
default:
|
||||
# Use a custom driver
|
||||
driver: custom-driver-1
|
||||
|
||||
## Using a pre-existing network
|
||||
|
||||
If you want your containers to join a pre-existing network, use the [`external` option](compose-file.md#network-configuration-reference):
|
||||
|
||||
networks:
|
||||
default:
|
||||
external:
|
||||
name: my-pre-existing-network
|
||||
|
||||
Instead of attempting to create a network called `[projectname]_default`, Compose will look for a network called `my-pre-existing-network` and connect your app's containers to it.
|
||||
188
docs/overview.md
188
docs/overview.md
@@ -1,188 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Overview of Docker Compose"
|
||||
description = "Introduction and Overview of Compose"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=-99
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Overview of Docker Compose
|
||||
|
||||
Compose is a tool for defining and running multi-container Docker applications.
|
||||
With Compose, you use a Compose file to configure your application's services.
|
||||
Then, using a single command, you create and start all the services
|
||||
from your configuration. To learn more about all the features of Compose
|
||||
see [the list of features](#features).
|
||||
|
||||
Compose is great for development, testing, and staging environments, as well as
|
||||
CI workflows. You can learn more about each case in
|
||||
[Common Use Cases](#common-use-cases).
|
||||
|
||||
Using Compose is basically a three-step process.
|
||||
|
||||
1. Define your app's environment with a `Dockerfile` so it can be reproduced
|
||||
anywhere.
|
||||
|
||||
2. Define the services that make up your app in `docker-compose.yml`
|
||||
so they can be run together in an isolated environment.
|
||||
|
||||
3. Lastly, run
|
||||
`docker-compose up` and Compose will start and run your entire app.
|
||||
|
||||
A `docker-compose.yml` looks like this:
|
||||
|
||||
version: '2'
|
||||
services:
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "5000:5000"
|
||||
volumes:
|
||||
- .:/code
|
||||
- logvolume01:/var/log
|
||||
links:
|
||||
- redis
|
||||
redis:
|
||||
image: redis
|
||||
volumes:
|
||||
logvolume01: {}
|
||||
|
||||
For more information about the Compose file, see the
|
||||
[Compose file reference](compose-file.md)
|
||||
|
||||
Compose has commands for managing the whole lifecycle of your application:
|
||||
|
||||
* Start, stop and rebuild services
|
||||
* View the status of running services
|
||||
* Stream the log output of running services
|
||||
* Run a one-off command on a service
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [Installing Compose](install.md)
|
||||
- [Getting Started](gettingstarted.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with WordPress](wordpress.md)
|
||||
- [Frequently asked questions](faq.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
|
||||
## Features
|
||||
|
||||
The features of Compose that make it effective are:
|
||||
|
||||
* [Multiple isolated environments on a single host](#multiple-isolated-environments-on-a-single-host)
|
||||
* [Preserve volume data when containers are created](#preserve-volume-data-when-containers-are-created)
|
||||
* [Only recreate containers that have changed](#only-recreate-containers-that-have-changed)
|
||||
* [Variables and moving a composition between environments](#variables-and-moving-a-composition-between-environments)
|
||||
|
||||
### Multiple isolated environments on a single host
|
||||
|
||||
Compose uses a project name to isolate environments from each other. You can make use of this project name in several different contexts:
|
||||
|
||||
* on a dev host, to create multiple copies of a single environment (e.g., you want to run a stable copy for each feature branch of a project)
|
||||
* on a CI server, to keep builds from interfering with each other, you can set
|
||||
the project name to a unique build number
|
||||
* on a shared host or dev host, to prevent different projects, which may use the
|
||||
same service names, from interfering with each other
|
||||
|
||||
The default project name is the basename of the project directory. You can set
|
||||
a custom project name by using the
|
||||
[`-p` command line option](./reference/overview.md) or the
|
||||
[`COMPOSE_PROJECT_NAME` environment variable](./reference/envvars.md#compose-project-name).
|
||||
|
||||
### Preserve volume data when containers are created
|
||||
|
||||
Compose preserves all volumes used by your services. When `docker-compose up`
|
||||
runs, if it finds any containers from previous runs, it copies the volumes from
|
||||
the old container to the new container. This process ensures that any data
|
||||
you've created in volumes isn't lost.
|
||||
|
||||
|
||||
### Only recreate containers that have changed
|
||||
|
||||
Compose caches the configuration used to create a container. When you
|
||||
restart a service that has not changed, Compose re-uses the existing
|
||||
containers. Re-using containers means that you can make changes to your
|
||||
environment very quickly.
|
||||
|
||||
|
||||
### Variables and moving a composition between environments
|
||||
|
||||
Compose supports variables in the Compose file. You can use these variables
|
||||
to customize your composition for different environments, or different users.
|
||||
See [Variable substitution](compose-file.md#variable-substitution) for more
|
||||
details.
|
||||
|
||||
You can extend a Compose file using the `extends` field or by creating multiple
|
||||
Compose files. See [extends](extends.md) for more details.
|
||||
|
||||
|
||||
## Common Use Cases
|
||||
|
||||
Compose can be used in many different ways. Some common use cases are outlined
|
||||
below.
|
||||
|
||||
### Development environments
|
||||
|
||||
When you're developing software, the ability to run an application in an
|
||||
isolated environment and interact with it is crucial. The Compose command
|
||||
line tool can be used to create the environment and interact with it.
|
||||
|
||||
The [Compose file](compose-file.md) provides a way to document and configure
|
||||
all of the application's service dependencies (databases, queues, caches,
|
||||
web service APIs, etc). Using the Compose command line tool you can create
|
||||
and start one or more containers for each dependency with a single command
|
||||
(`docker-compose up`).
|
||||
|
||||
Together, these features provide a convenient way for developers to get
|
||||
started on a project. Compose can reduce a multi-page "developer getting
|
||||
started guide" to a single machine readable Compose file and a few commands.
|
||||
|
||||
### Automated testing environments
|
||||
|
||||
An important part of any Continuous Deployment or Continuous Integration process
|
||||
is the automated test suite. Automated end-to-end testing requires an
|
||||
environment in which to run tests. Compose provides a convenient way to create
|
||||
and destroy isolated testing environments for your test suite. By defining the full environment in a [Compose file](compose-file.md) you can create and destroy these environments in just a few commands:
|
||||
|
||||
$ docker-compose up -d
|
||||
$ ./run_tests
|
||||
$ docker-compose down
|
||||
|
||||
### Single host deployments
|
||||
|
||||
Compose has traditionally been focused on development and testing workflows,
|
||||
but with each release we're making progress on more production-oriented features. You can use Compose to deploy to a remote Docker Engine. The Docker Engine may be a single instance provisioned with
|
||||
[Docker Machine](https://docs.docker.com/machine/) or an entire
|
||||
[Docker Swarm](https://docs.docker.com/swarm/) cluster.
|
||||
|
||||
For details on using production-oriented features, see
|
||||
[compose in production](production.md) in this documentation.
|
||||
|
||||
|
||||
## Release Notes
|
||||
|
||||
To see a detailed list of changes for past and current releases of Docker
|
||||
Compose, please refer to the
|
||||
[CHANGELOG](https://github.com/docker/compose/blob/master/CHANGELOG.md).
|
||||
|
||||
## Getting help
|
||||
|
||||
Docker Compose is under active development. If you need help, would like to
|
||||
contribute, or simply want to talk about the project with like-minded
|
||||
individuals, we have a number of open channels for communication.
|
||||
|
||||
* To report bugs or file feature requests: please use the [issue tracker on Github](https://github.com/docker/compose/issues).
|
||||
|
||||
* To talk about the project with people in real time: please join the
|
||||
`#docker-compose` channel on freenode IRC.
|
||||
|
||||
* To contribute code or documentation changes: please submit a [pull request on Github](https://github.com/docker/compose/pulls).
|
||||
|
||||
For more information and resources, please visit the [Getting Help project page](https://docs.docker.com/opensource/get-help/).
|
||||
@@ -1,88 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Using Compose in Production"
|
||||
description = "Guide to using Docker Compose in production"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers, production"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=22
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
## Using Compose in production
|
||||
|
||||
When you define your app with Compose in development, you can use this
|
||||
definition to run your application in different environments such as CI,
|
||||
staging, and production.
|
||||
|
||||
The easiest way to deploy an application is to run it on a single server,
|
||||
similar to how you would run your development environment. If you want to scale
|
||||
up your application, you can run Compose apps on a Swarm cluster.
|
||||
|
||||
### Modify your Compose file for production
|
||||
|
||||
You'll almost certainly want to make changes to your app configuration that are
|
||||
more appropriate to a live environment. These changes may include:
|
||||
|
||||
- Removing any volume bindings for application code, so that code stays inside
|
||||
the container and can't be changed from outside
|
||||
- Binding to different ports on the host
|
||||
- Setting environment variables differently (e.g., to decrease the verbosity of
|
||||
logging, or to enable email sending)
|
||||
- Specifying a restart policy (e.g., `restart: always`) to avoid downtime
|
||||
- Adding extra services (e.g., a log aggregator)
|
||||
|
||||
For this reason, you'll probably want to define an additional Compose file, say
|
||||
`production.yml`, which specifies production-appropriate
|
||||
configuration. This configuration file only needs to include the changes you'd
|
||||
like to make from the original Compose file. The additional Compose file
|
||||
can be applied over the original `docker-compose.yml` to create a new configuration.
|
||||
|
||||
Once you've got a second configuration file, tell Compose to use it with the
|
||||
`-f` option:
|
||||
|
||||
$ docker-compose -f docker-compose.yml -f production.yml up -d
|
||||
|
||||
See [Using multiple compose files](extends.md#different-environments) for a more
|
||||
complete example.
|
||||
|
||||
### Deploying changes
|
||||
|
||||
When you make changes to your app code, you'll need to rebuild your image and
|
||||
recreate your app's containers. To redeploy a service called
|
||||
`web`, you would use:
|
||||
|
||||
$ docker-compose build web
|
||||
$ docker-compose up --no-deps -d web
|
||||
|
||||
This will first rebuild the image for `web` and then stop, destroy, and recreate
|
||||
*just* the `web` service. The `--no-deps` flag prevents Compose from also
|
||||
recreating any services which `web` depends on.
|
||||
|
||||
### Running Compose on a single server
|
||||
|
||||
You can use Compose to deploy an app to a remote Docker host by setting the
|
||||
`DOCKER_HOST`, `DOCKER_TLS_VERIFY`, and `DOCKER_CERT_PATH` environment variables
|
||||
appropriately. For tasks like this,
|
||||
[Docker Machine](/machine/overview) makes managing local and
|
||||
remote Docker hosts very easy, and is recommended even if you're not deploying
|
||||
remotely.
|
||||
|
||||
Once you've set up your environment variables, all the normal `docker-compose`
|
||||
commands will work with no further configuration.
|
||||
|
||||
### Running Compose on a Swarm cluster
|
||||
|
||||
[Docker Swarm](/swarm/overview), a Docker-native clustering
|
||||
system, exposes the same API as a single Docker host, which means you can use
|
||||
Compose against a Swarm instance and run your apps across multiple hosts.
|
||||
|
||||
Read more about the Compose/Swarm integration in the
|
||||
[integration guide](swarm.md).
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [Installing Compose](install.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
174
docs/rails.md
174
docs/rails.md
@@ -1,174 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Quickstart: Compose and Rails"
|
||||
description = "Getting started with Docker Compose and Rails"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=5
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
## Quickstart: Docker Compose and Rails
|
||||
|
||||
This Quickstart guide will show you how to use Docker Compose to set up and run a Rails/PostgreSQL app. Before starting, you'll need to have [Compose installed](install.md).
|
||||
|
||||
### Define the project
|
||||
|
||||
Start by setting up the three files you'll need to build the app. First, since
|
||||
your app is going to run inside a Docker container containing all of its
|
||||
dependencies, you'll need to define exactly what needs to be included in the
|
||||
container. This is done using a file called `Dockerfile`. To begin with, the
|
||||
Dockerfile consists of:
|
||||
|
||||
FROM ruby:2.2.0
|
||||
RUN apt-get update -qq && apt-get install -y build-essential libpq-dev
|
||||
RUN mkdir /myapp
|
||||
WORKDIR /myapp
|
||||
ADD Gemfile /myapp/Gemfile
|
||||
ADD Gemfile.lock /myapp/Gemfile.lock
|
||||
RUN bundle install
|
||||
ADD . /myapp
|
||||
|
||||
That'll put your application code inside an image that will build a container
|
||||
with Ruby, Bundler and all your dependencies inside it. For more information on
|
||||
how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/engine/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](https://docs.docker.com/engine/reference/builder/).
|
||||
|
||||
Next, create a bootstrap `Gemfile` which just loads Rails. It'll be overwritten in a moment by `rails new`.
|
||||
|
||||
source 'https://rubygems.org'
|
||||
gem 'rails', '4.2.0'
|
||||
|
||||
You'll need an empty `Gemfile.lock` in order to build our `Dockerfile`.
|
||||
|
||||
$ touch Gemfile.lock
|
||||
|
||||
Finally, `docker-compose.yml` is where the magic happens. This file describes
|
||||
the services that comprise your app (a database and a web app), how to get each
|
||||
one's Docker image (the database just runs on a pre-made PostgreSQL image, and
|
||||
the web app is built from the current directory), and the configuration needed
|
||||
to link them together and expose the web app's port.
|
||||
|
||||
version: '2'
|
||||
services:
|
||||
db:
|
||||
image: postgres
|
||||
web:
|
||||
build: .
|
||||
command: bundle exec rails s -p 3000 -b '0.0.0.0'
|
||||
volumes:
|
||||
- .:/myapp
|
||||
ports:
|
||||
- "3000:3000"
|
||||
depends_on:
|
||||
- db
|
||||
|
||||
### Build the project
|
||||
|
||||
With those three files in place, you can now generate the Rails skeleton app
|
||||
using `docker-compose run`:
|
||||
|
||||
$ docker-compose run web rails new . --force --database=postgresql --skip-bundle
|
||||
|
||||
First, Compose will build the image for the `web` service using the `Dockerfile`. Then it'll run `rails new` inside a new container, using that image. Once it's done, you should have generated a fresh app:
|
||||
|
||||
$ ls -l
|
||||
total 56
|
||||
-rw-r--r-- 1 user staff 215 Feb 13 23:33 Dockerfile
|
||||
-rw-r--r-- 1 user staff 1480 Feb 13 23:43 Gemfile
|
||||
-rw-r--r-- 1 user staff 2535 Feb 13 23:43 Gemfile.lock
|
||||
-rw-r--r-- 1 root root 478 Feb 13 23:43 README.rdoc
|
||||
-rw-r--r-- 1 root root 249 Feb 13 23:43 Rakefile
|
||||
drwxr-xr-x 8 root root 272 Feb 13 23:43 app
|
||||
drwxr-xr-x 6 root root 204 Feb 13 23:43 bin
|
||||
drwxr-xr-x 11 root root 374 Feb 13 23:43 config
|
||||
-rw-r--r-- 1 root root 153 Feb 13 23:43 config.ru
|
||||
drwxr-xr-x 3 root root 102 Feb 13 23:43 db
|
||||
-rw-r--r-- 1 user staff 161 Feb 13 23:35 docker-compose.yml
|
||||
drwxr-xr-x 4 root root 136 Feb 13 23:43 lib
|
||||
drwxr-xr-x 3 root root 102 Feb 13 23:43 log
|
||||
drwxr-xr-x 7 root root 238 Feb 13 23:43 public
|
||||
drwxr-xr-x 9 root root 306 Feb 13 23:43 test
|
||||
drwxr-xr-x 3 root root 102 Feb 13 23:43 tmp
|
||||
drwxr-xr-x 3 root root 102 Feb 13 23:43 vendor
|
||||
|
||||
|
||||
If you are running Docker on Linux, the files `rails new` created are owned by
|
||||
root. This happens because the container runs as the root user. Change the
|
||||
ownership of the new files.
|
||||
|
||||
sudo chown -R $USER:$USER .
|
||||
|
||||
If you are running Docker on Mac or Windows, you should already have ownership
|
||||
of all files, including those generated by `rails new`. List the files just to
|
||||
verify this.
|
||||
|
||||
Uncomment the line in your new `Gemfile` which loads `therubyracer`, so you've
|
||||
got a Javascript runtime:
|
||||
|
||||
gem 'therubyracer', platforms: :ruby
|
||||
|
||||
Now that you've got a new `Gemfile`, you need to build the image again. (This,
|
||||
and changes to the Dockerfile itself, should be the only times you'll need to
|
||||
rebuild.)
|
||||
|
||||
$ docker-compose build
|
||||
|
||||
|
||||
### Connect the database
|
||||
|
||||
The app is now bootable, but you're not quite there yet. By default, Rails
|
||||
expects a database to be running on `localhost` - so you need to point it at the
|
||||
`db` container instead. You also need to change the database and username to
|
||||
align with the defaults set by the `postgres` image.
|
||||
|
||||
Replace the contents of `config/database.yml` with the following:
|
||||
|
||||
development: &default
|
||||
adapter: postgresql
|
||||
encoding: unicode
|
||||
database: postgres
|
||||
pool: 5
|
||||
username: postgres
|
||||
password:
|
||||
host: db
|
||||
|
||||
test:
|
||||
<<: *default
|
||||
database: myapp_test
|
||||
|
||||
You can now boot the app with:
|
||||
|
||||
$ docker-compose up
|
||||
|
||||
If all's well, you should see some PostgreSQL output, and then—after a few
|
||||
seconds—the familiar refrain:
|
||||
|
||||
myapp_web_1 | [2014-01-17 17:16:29] INFO WEBrick 1.3.1
|
||||
myapp_web_1 | [2014-01-17 17:16:29] INFO ruby 2.2.0 (2014-12-25) [x86_64-linux-gnu]
|
||||
myapp_web_1 | [2014-01-17 17:16:29] INFO WEBrick::HTTPServer#start: pid=1 port=3000
|
||||
|
||||
Finally, you need to create the database. In another terminal, run:
|
||||
|
||||
$ docker-compose run web rake db:create
|
||||
|
||||
That's it. Your app should now be running on port 3000 on your Docker daemon. If you're using [Docker Machine](https://docs.docker.com/machine/), then `docker-machine ip MACHINE_VM` returns the Docker host IP address.
|
||||
|
||||

|
||||
|
||||
>**Note**: If you stop the example application and attempt to restart it, you might get the
|
||||
following error: `web_1 | A server is already running. Check
|
||||
/myapp/tmp/pids/server.pid.` One way to resolve this is to delete the file
|
||||
`tmp/pids/server.pid`, and then re-start the application with `docker-compose
|
||||
up`.
|
||||
|
||||
|
||||
## More Compose documentation
|
||||
|
||||
- [User guide](index.md)
|
||||
- [Installing Compose](install.md)
|
||||
- [Getting Started](gettingstarted.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with WordPress](wordpress.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
@@ -1,25 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "build"
|
||||
description = "build"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, build"]
|
||||
[menu.main]
|
||||
identifier="build.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# build
|
||||
|
||||
```
|
||||
Usage: build [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--force-rm Always remove intermediate containers.
|
||||
--no-cache Do not use cache when building the image.
|
||||
--pull Always attempt to pull a newer version of the image.
|
||||
```
|
||||
|
||||
Services are built once and then tagged as `project_service`, e.g.,
|
||||
`composetest_db`. If you change a service's Dockerfile or the contents of its
|
||||
build directory, run `docker-compose build` to rebuild it.
|
||||
@@ -1,23 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "config"
|
||||
description = "Validates and views the Compose file."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, config"]
|
||||
[menu.main]
|
||||
identifier="config.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# config
|
||||
|
||||
```:
|
||||
Usage: config [options]
|
||||
|
||||
Options:
|
||||
-q, --quiet Only validate the configuration, don't print
|
||||
anything.
|
||||
--services Print the service names, one per line.
|
||||
```
|
||||
|
||||
Validate and view the compose file.
|
||||
@@ -1,26 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "create"
|
||||
description = "Create creates containers for a service."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, create"]
|
||||
[menu.main]
|
||||
identifier="create.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# create
|
||||
|
||||
```
|
||||
Creates containers for a service.
|
||||
|
||||
Usage: create [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--force-recreate Recreate containers even if their configuration and
|
||||
image haven't changed. Incompatible with --no-recreate.
|
||||
--no-recreate If containers already exist, don't recreate them.
|
||||
Incompatible with --force-recreate.
|
||||
--no-build Don't build an image, even if it's missing.
|
||||
--build Build images before creating containers.
|
||||
```
|
||||
@@ -1,28 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "down"
|
||||
description = "down"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, down"]
|
||||
[menu.main]
|
||||
identifier="down.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# down
|
||||
|
||||
```
|
||||
Stop containers and remove containers, networks, volumes, and images
|
||||
created by `up`. Only containers and networks are removed by default.
|
||||
|
||||
Usage: down [options]
|
||||
|
||||
Options:
|
||||
--rmi type Remove images, type may be one of: 'all' to remove
|
||||
all images, or 'local' to remove only images that
|
||||
don't have a custom name set by the `image` field
|
||||
-v, --volumes Remove data volumes
|
||||
|
||||
--remove-orphans Remove containers for services not defined in the
|
||||
Compose file
|
||||
```
|
||||
@@ -1,87 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "CLI Environment Variables"
|
||||
description = "CLI Environment Variables"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, reference"]
|
||||
[menu.main]
|
||||
parent = "smn_compose_cli"
|
||||
weight=-1
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# CLI Environment Variables
|
||||
|
||||
Several environment variables are available for you to configure the Docker Compose command-line behaviour.
|
||||
|
||||
Variables starting with `DOCKER_` are the same as those used to configure the
|
||||
Docker command-line client. If you're using `docker-machine`, then the `eval "$(docker-machine env my-docker-vm)"` command should set them to their correct values. (In this example, `my-docker-vm` is the name of a machine you created.)
|
||||
|
||||
> Note: Some of these variables can also be provided using an
|
||||
> [environment file](../env-file.md)
|
||||
|
||||
## COMPOSE\_PROJECT\_NAME
|
||||
|
||||
Sets the project name. This value is prepended along with the service name to the container's name on start up. For example, if your project name is `myapp` and it includes two services `db` and `web` then compose starts containers named `myapp_db_1` and `myapp_web_1` respectively.
|
||||
|
||||
Setting this is optional. If you do not set this, the `COMPOSE_PROJECT_NAME`
|
||||
defaults to the `basename` of the project directory. See also the `-p`
|
||||
[command-line option](overview.md).
|
||||
|
||||
## COMPOSE\_FILE
|
||||
|
||||
Specify the path to a Compose file. If not provided, Compose looks for a file named
|
||||
`docker-compose.yml` in the current directory and then each parent directory in
|
||||
succession until a file by that name is found.
|
||||
|
||||
This variable supports multiple compose files separated by a path separator (on
|
||||
Linux and OSX the path separator is `:`, on Windows it is `;`). For example:
|
||||
`COMPOSE_FILE=docker-compose.yml:docker-compose.prod.yml`
|
||||
|
||||
See also the `-f` [command-line option](overview.md).
|
||||
|
||||
## COMPOSE\_API\_VERSION
|
||||
|
||||
The Docker API only supports requests from clients which report a specific
|
||||
version. If you receive a `client and server don't have same version error` using
|
||||
`docker-compose`, you can workaround this error by setting this environment
|
||||
variable. Set the version value to match the server version.
|
||||
|
||||
Setting this variable is intended as a workaround for situations where you need
|
||||
to run temporarily with a mismatch between the client and server version. For
|
||||
example, if you can upgrade the client but need to wait to upgrade the server.
|
||||
|
||||
Running with this variable set and a known mismatch does prevent some Docker
|
||||
features from working properly. The exact features that fail would depend on the
|
||||
Docker client and server versions. For this reason, running with this variable
|
||||
set is only intended as a workaround and it is not officially supported.
|
||||
|
||||
If you run into problems running with this set, resolve the mismatch through
|
||||
upgrade and remove this setting to see if your problems resolve before notifying
|
||||
support.
|
||||
|
||||
## DOCKER\_HOST
|
||||
|
||||
Sets the URL of the `docker` daemon. As with the Docker client, defaults to `unix:///var/run/docker.sock`.
|
||||
|
||||
## DOCKER\_TLS\_VERIFY
|
||||
|
||||
When set to anything other than an empty string, enables TLS communication with
|
||||
the `docker` daemon.
|
||||
|
||||
## DOCKER\_CERT\_PATH
|
||||
|
||||
Configures the path to the `ca.pem`, `cert.pem`, and `key.pem` files used for TLS verification. Defaults to `~/.docker`.
|
||||
|
||||
## COMPOSE\_HTTP\_TIMEOUT
|
||||
|
||||
Configures the time (in seconds) a request to the Docker daemon is allowed to hang before Compose considers
|
||||
it failed. Defaults to 60 seconds.
|
||||
|
||||
|
||||
## Related Information
|
||||
|
||||
- [User guide](../index.md)
|
||||
- [Installing Compose](../install.md)
|
||||
- [Compose file reference](../compose-file.md)
|
||||
- [Environment file](../env-file.md)
|
||||
@@ -1,34 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "events"
|
||||
description = "Receive real time events from containers."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, events"]
|
||||
[menu.main]
|
||||
identifier="events.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# events
|
||||
|
||||
```
|
||||
Usage: events [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--json Output events as a stream of json objects
|
||||
```
|
||||
|
||||
Stream container events for every container in the project.
|
||||
|
||||
With the `--json` flag, a json object will be printed one per line with the
|
||||
format:
|
||||
|
||||
```
|
||||
{
|
||||
"service": "web",
|
||||
"event": "create",
|
||||
"container": "213cf75fc39a",
|
||||
"image": "alpine:edge",
|
||||
"time": "2015-11-20T18:01:03.615550",
|
||||
}
|
||||
```
|
||||
@@ -1,29 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "exec"
|
||||
description = "exec"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, exec"]
|
||||
[menu.main]
|
||||
identifier="exec.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# exec
|
||||
|
||||
```
|
||||
Usage: exec [options] SERVICE COMMAND [ARGS...]
|
||||
|
||||
Options:
|
||||
-d Detached mode: Run command in the background.
|
||||
--privileged Give extended privileges to the process.
|
||||
--user USER Run the command as this user.
|
||||
-T Disable pseudo-tty allocation. By default `docker-compose exec`
|
||||
allocates a TTY.
|
||||
--index=index index of the container if there are multiple
|
||||
instances of a service [default: 1]
|
||||
```
|
||||
|
||||
This is equivalent of `docker exec`. With this subcommand you can run arbitrary
|
||||
commands in your services. Commands are by default allocating a TTY, so you can
|
||||
do e.g. `docker-compose exec web sh` to get an interactive prompt.
|
||||
@@ -1,18 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "help"
|
||||
description = "help"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, help"]
|
||||
[menu.main]
|
||||
identifier="help.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# help
|
||||
|
||||
```
|
||||
Usage: help COMMAND
|
||||
```
|
||||
|
||||
Displays help and usage instructions for a command.
|
||||
@@ -1,42 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Command-line Reference"
|
||||
description = "Compose CLI reference"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, reference"]
|
||||
[menu.main]
|
||||
identifier = "smn_compose_cli"
|
||||
parent = "workw_compose"
|
||||
weight=80
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
## Compose command-line reference
|
||||
|
||||
The following pages describe the usage information for the [docker-compose](overview.md) subcommands. You can also see this information by running `docker-compose [SUBCOMMAND] --help` from the command line.
|
||||
|
||||
* [docker-compose](overview.md)
|
||||
* [build](build.md)
|
||||
* [config](config.md)
|
||||
* [create](create.md)
|
||||
* [down](down.md)
|
||||
* [events](events.md)
|
||||
* [help](help.md)
|
||||
* [kill](kill.md)
|
||||
* [logs](logs.md)
|
||||
* [pause](pause.md)
|
||||
* [port](port.md)
|
||||
* [ps](ps.md)
|
||||
* [pull](pull.md)
|
||||
* [restart](restart.md)
|
||||
* [rm](rm.md)
|
||||
* [run](run.md)
|
||||
* [scale](scale.md)
|
||||
* [start](start.md)
|
||||
* [stop](stop.md)
|
||||
* [unpause](unpause.md)
|
||||
* [up](up.md)
|
||||
|
||||
## Where to go next
|
||||
|
||||
* [CLI environment variables](envvars.md)
|
||||
* [docker-compose Command](overview.md)
|
||||
@@ -1,24 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "kill"
|
||||
description = "Forces running containers to stop."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, kill"]
|
||||
[menu.main]
|
||||
identifier="kill.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# kill
|
||||
|
||||
```
|
||||
Usage: kill [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-s SIGNAL SIGNAL to send to the container. Default signal is SIGKILL.
|
||||
```
|
||||
|
||||
Forces running containers to stop by sending a `SIGKILL` signal. Optionally the
|
||||
signal can be passed, for example:
|
||||
|
||||
$ docker-compose kill -s SIGINT
|
||||
@@ -1,25 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "logs"
|
||||
description = "Displays log output from services."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, logs"]
|
||||
[menu.main]
|
||||
identifier="logs.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# logs
|
||||
|
||||
```
|
||||
Usage: logs [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--no-color Produce monochrome output.
|
||||
-f, --follow Follow log output
|
||||
-t, --timestamps Show timestamps
|
||||
--tail Number of lines to show from the end of the logs
|
||||
for each container.
|
||||
```
|
||||
|
||||
Displays log output from services.
|
||||
@@ -1,127 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Overview of docker-compose CLI"
|
||||
description = "Overview of docker-compose CLI"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, docker-compose"]
|
||||
aliases = ["/compose/reference/docker-compose/"]
|
||||
[menu.main]
|
||||
parent = "smn_compose_cli"
|
||||
weight=-2
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Overview of docker-compose CLI
|
||||
|
||||
This page provides the usage information for the `docker-compose` Command.
|
||||
You can also see this information by running `docker-compose --help` from the
|
||||
command line.
|
||||
|
||||
```
|
||||
Define and run multi-container applications with Docker.
|
||||
|
||||
Usage:
|
||||
docker-compose [-f=<arg>...] [options] [COMMAND] [ARGS...]
|
||||
docker-compose -h|--help
|
||||
|
||||
Options:
|
||||
-f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
|
||||
-p, --project-name NAME Specify an alternate project name (default: directory name)
|
||||
--verbose Show more output
|
||||
-v, --version Print version and exit
|
||||
-H, --host HOST Daemon socket to connect to
|
||||
|
||||
--tls Use TLS; implied by --tlsverify
|
||||
--tlscacert CA_PATH Trust certs signed only by this CA
|
||||
--tlscert CLIENT_CERT_PATH Path to TLS certificate file
|
||||
--tlskey TLS_KEY_PATH Path to TLS key file
|
||||
--tlsverify Use TLS and verify the remote
|
||||
--skip-hostname-check Don't check the daemon's hostname against the name specified
|
||||
in the client certificate (for example if your docker host
|
||||
is an IP address)
|
||||
|
||||
Commands:
|
||||
build Build or rebuild services
|
||||
config Validate and view the compose file
|
||||
create Create services
|
||||
down Stop and remove containers, networks, images, and volumes
|
||||
events Receive real time events from containers
|
||||
help Get help on a command
|
||||
kill Kill containers
|
||||
logs View output from containers
|
||||
pause Pause services
|
||||
port Print the public port for a port binding
|
||||
ps List containers
|
||||
pull Pulls service images
|
||||
restart Restart services
|
||||
rm Remove stopped containers
|
||||
run Run a one-off command
|
||||
scale Set number of containers for a service
|
||||
start Start services
|
||||
stop Stop services
|
||||
unpause Unpause services
|
||||
up Create and start containers
|
||||
version Show the Docker-Compose version information
|
||||
|
||||
```
|
||||
|
||||
The Docker Compose binary. You use this command to build and manage multiple
|
||||
services in Docker containers.
|
||||
|
||||
Use the `-f` flag to specify the location of a Compose configuration file. You
|
||||
can supply multiple `-f` configuration files. When you supply multiple files,
|
||||
Compose combines them into a single configuration. Compose builds the
|
||||
configuration in the order you supply the files. Subsequent files override and
|
||||
add to their predecessors.
|
||||
|
||||
For example, consider this command line:
|
||||
|
||||
```
|
||||
$ docker-compose -f docker-compose.yml -f docker-compose.admin.yml run backup_db
|
||||
```
|
||||
|
||||
The `docker-compose.yml` file might specify a `webapp` service.
|
||||
|
||||
```
|
||||
webapp:
|
||||
image: examples/web
|
||||
ports:
|
||||
- "8000:8000"
|
||||
volumes:
|
||||
- "/data"
|
||||
```
|
||||
|
||||
If the `docker-compose.admin.yml` also specifies this same service, any matching
|
||||
fields will override the previous file. New values add to the `webapp` service
|
||||
configuration.
|
||||
|
||||
```
|
||||
webapp:
|
||||
build: .
|
||||
environment:
|
||||
- DEBUG=1
|
||||
```
|
||||
|
||||
Use a `-f` with `-` (dash) as the filename to read the configuration from
|
||||
stdin. When stdin is used all paths in the configuration are
|
||||
relative to the current working directory.
|
||||
|
||||
The `-f` flag is optional. If you don't provide this flag on the command line,
|
||||
Compose traverses the working directory and its parent directories looking for a
|
||||
`docker-compose.yml` and a `docker-compose.override.yml` file. You must
|
||||
supply at least the `docker-compose.yml` file. If both files are present on the
|
||||
same directory level, Compose combines the two files into a single configuration.
|
||||
The configuration in the `docker-compose.override.yml` file is applied over and
|
||||
in addition to the values in the `docker-compose.yml` file.
|
||||
|
||||
See also the `COMPOSE_FILE` [environment variable](envvars.md#compose-file).
|
||||
|
||||
Each configuration has a project name. If you supply a `-p` flag, you can
|
||||
specify a project name. If you don't specify the flag, Compose uses the current
|
||||
directory name. See also the `COMPOSE_PROJECT_NAME` [environment variable](
|
||||
envvars.md#compose-project-name)
|
||||
|
||||
|
||||
## Where to go next
|
||||
|
||||
* [CLI environment variables](envvars.md)
|
||||
@@ -1,18 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "pause"
|
||||
description = "Pauses running containers for a service."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, pause"]
|
||||
[menu.main]
|
||||
identifier="pause.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# pause
|
||||
|
||||
```
|
||||
Usage: pause [SERVICE...]
|
||||
```
|
||||
|
||||
Pauses running containers of a service. They can be unpaused with `docker-compose unpause`.
|
||||
@@ -1,23 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "port"
|
||||
description = "Prints the public port for a port binding.s"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, port"]
|
||||
[menu.main]
|
||||
identifier="port.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# port
|
||||
|
||||
```
|
||||
Usage: port [options] SERVICE PRIVATE_PORT
|
||||
|
||||
Options:
|
||||
--protocol=proto tcp or udp [default: tcp]
|
||||
--index=index index of the container if there are multiple
|
||||
instances of a service [default: 1]
|
||||
```
|
||||
|
||||
Prints the public port for a port binding.
|
||||
@@ -1,21 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "ps"
|
||||
description = "Lists containers."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, ps"]
|
||||
[menu.main]
|
||||
identifier="ps.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# ps
|
||||
|
||||
```
|
||||
Usage: ps [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-q Only display IDs
|
||||
```
|
||||
|
||||
Lists containers.
|
||||
@@ -1,21 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "pull"
|
||||
description = "Pulls service images."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, pull"]
|
||||
[menu.main]
|
||||
identifier="pull.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# pull
|
||||
|
||||
```
|
||||
Usage: pull [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--ignore-pull-failures Pull what it can and ignores images with pull failures.
|
||||
```
|
||||
|
||||
Pulls service images.
|
||||
@@ -1,21 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "restart"
|
||||
description = "Restarts Docker Compose services."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, restart"]
|
||||
[menu.main]
|
||||
identifier="restart.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# restart
|
||||
|
||||
```
|
||||
Usage: restart [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds. (default: 10)
|
||||
```
|
||||
|
||||
Restarts services.
|
||||
@@ -1,28 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "rm"
|
||||
description = "Removes stopped service containers."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, rm"]
|
||||
[menu.main]
|
||||
identifier="rm.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# rm
|
||||
|
||||
```
|
||||
Usage: rm [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-f, --force Don't ask to confirm removal
|
||||
-v Remove volumes associated with containers
|
||||
-a, --all Also remove one-off containers
|
||||
```
|
||||
|
||||
Removes stopped service containers.
|
||||
|
||||
By default, volumes attached to containers will not be removed. You can see all
|
||||
volumes with `docker volume ls`.
|
||||
|
||||
Any data which is not in a volume will be lost.
|
||||
@@ -1,56 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "run"
|
||||
description = "Runs a one-off command on a service."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, run"]
|
||||
[menu.main]
|
||||
identifier="run.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# run
|
||||
|
||||
```
|
||||
Usage: run [options] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
|
||||
|
||||
Options:
|
||||
-d Detached mode: Run container in the background, print
|
||||
new container name.
|
||||
--name NAME Assign a name to the container
|
||||
--entrypoint CMD Override the entrypoint of the image.
|
||||
-e KEY=VAL Set an environment variable (can be used multiple times)
|
||||
-u, --user="" Run as specified username or uid
|
||||
--no-deps Don't start linked services.
|
||||
--rm Remove container after run. Ignored in detached mode.
|
||||
-p, --publish=[] Publish a container's port(s) to the host
|
||||
--service-ports Run command with the service's ports enabled and mapped to the host.
|
||||
-T Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.
|
||||
-w, --workdir="" Working directory inside the container
|
||||
```
|
||||
|
||||
Runs a one-time command against a service. For example, the following command starts the `web` service and runs `bash` as its command.
|
||||
|
||||
$ docker-compose run web bash
|
||||
|
||||
Commands you use with `run` start in new containers with the same configuration as defined by the service's configuration. This means the container has the same volumes and links as defined in the configuration file. There are two differences, though.
|
||||
|
||||
First, the command passed by `run` overrides the command defined in the service configuration. For example, if the `web` service configuration is started with `bash`, then `docker-compose run web python app.py` overrides it with `python app.py`.
|
||||
|
||||
The second difference is the `docker-compose run` command does not create any of the ports specified in the service configuration. This prevents the port collisions with already open ports. If you *do want* the service's ports created and mapped to the host, specify the `--service-ports` flag:
|
||||
|
||||
$ docker-compose run --service-ports web python manage.py shell
|
||||
|
||||
Alternatively manual port mapping can be specified. Same as when running Docker's `run` command - using `--publish` or `-p` options:
|
||||
|
||||
$ docker-compose run --publish 8080:80 -p 2022:22 -p 127.0.0.1:2021:21 web python manage.py shell
|
||||
|
||||
If you start a service configured with links, the `run` command first checks to see if the linked service is running and starts the service if it is stopped. Once all the linked services are running, the `run` executes the command you passed it. So, for example, you could run:
|
||||
|
||||
$ docker-compose run db psql -h db -U docker
|
||||
|
||||
This would open up an interactive PostgreSQL shell for the linked `db` container.
|
||||
|
||||
If you do not want the `run` command to start linked containers, specify the `--no-deps` flag:
|
||||
|
||||
$ docker-compose run --no-deps web python manage.py shell
|
||||
@@ -1,21 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "scale"
|
||||
description = "Sets the number of containers to run for a service."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, scale"]
|
||||
[menu.main]
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# scale
|
||||
|
||||
```
|
||||
Usage: scale [SERVICE=NUM...]
|
||||
```
|
||||
|
||||
Sets the number of containers to run for a service.
|
||||
|
||||
Numbers are specified as arguments in the form `service=num`. For example:
|
||||
|
||||
$ docker-compose scale web=2 worker=3
|
||||
@@ -1,18 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "start"
|
||||
description = "Starts existing containers for a service."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, start"]
|
||||
[menu.main]
|
||||
identifier="start.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# start
|
||||
|
||||
```
|
||||
Usage: start [SERVICE...]
|
||||
```
|
||||
|
||||
Starts existing containers for a service.
|
||||
@@ -1,22 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "stop"
|
||||
description = "Stops running containers without removing them. "
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, stop"]
|
||||
[menu.main]
|
||||
identifier="stop.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# stop
|
||||
|
||||
```
|
||||
Usage: stop [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds (default: 10).
|
||||
```
|
||||
|
||||
Stops running containers without removing them. They can be started again with
|
||||
`docker-compose start`.
|
||||
@@ -1,18 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "unpause"
|
||||
description = "Unpauses paused containers for a service."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, unpause"]
|
||||
[menu.main]
|
||||
identifier="unpause.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# unpause
|
||||
|
||||
```
|
||||
Usage: unpause [SERVICE...]
|
||||
```
|
||||
|
||||
Unpauses paused containers of a service.
|
||||
@@ -1,55 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "up"
|
||||
description = "Builds, (re)creates, starts, and attaches to containers for a service."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, up"]
|
||||
[menu.main]
|
||||
identifier="up.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# up
|
||||
|
||||
```
|
||||
Usage: up [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-d Detached mode: Run containers in the background,
|
||||
print new container names.
|
||||
Incompatible with --abort-on-container-exit.
|
||||
--no-color Produce monochrome output.
|
||||
--no-deps Don't start linked services.
|
||||
--force-recreate Recreate containers even if their configuration
|
||||
and image haven't changed.
|
||||
Incompatible with --no-recreate.
|
||||
--no-recreate If containers already exist, don't recreate them.
|
||||
Incompatible with --force-recreate.
|
||||
--no-build Don't build an image, even if it's missing.
|
||||
--build Build images before starting containers.
|
||||
--abort-on-container-exit Stops all containers if any container was stopped.
|
||||
Incompatible with -d.
|
||||
-t, --timeout TIMEOUT Use this timeout in seconds for container shutdown
|
||||
when attached or when containers are already
|
||||
running. (default: 10)
|
||||
--remove-orphans Remove containers for services not defined in
|
||||
the Compose file
|
||||
|
||||
```
|
||||
|
||||
Builds, (re)creates, starts, and attaches to containers for a service.
|
||||
|
||||
Unless they are already running, this command also starts any linked services.
|
||||
|
||||
The `docker-compose up` command aggregates the output of each container. When
|
||||
the command exits, all containers are stopped. Running `docker-compose up -d`
|
||||
starts the containers in the background and leaves them running.
|
||||
|
||||
If there are existing containers for a service, and the service's configuration
|
||||
or image was changed after the container's creation, `docker-compose up` picks
|
||||
up the changes by stopping and recreating the containers (preserving mounted
|
||||
volumes). To prevent Compose from picking up changes, use the `--no-recreate`
|
||||
flag.
|
||||
|
||||
If you want to force Compose to stop and recreate all containers, use the
|
||||
`--force-recreate` flag.
|
||||
@@ -1,88 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Controlling startup order"
|
||||
description = "How to control service startup order in Docker Compose"
|
||||
keywords = "documentation, docs, docker, compose, startup, order"
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=90
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Controlling startup order in Compose
|
||||
|
||||
You can control the order of service startup with the
|
||||
[depends_on](compose-file.md#depends-on) option. Compose always starts
|
||||
containers in dependency order, where dependencies are determined by
|
||||
`depends_on`, `links`, `volumes_from` and `network_mode: "service:..."`.
|
||||
|
||||
However, Compose will not wait until a container is "ready" (whatever that means
|
||||
for your particular application) - only until it's running. There's a good
|
||||
reason for this.
|
||||
|
||||
The problem of waiting for a database (for example) to be ready is really just
|
||||
a subset of a much larger problem of distributed systems. In production, your
|
||||
database could become unavailable or move hosts at any time. Your application
|
||||
needs to be resilient to these types of failures.
|
||||
|
||||
To handle this, your application should attempt to re-establish a connection to
|
||||
the database after a failure. If the application retries the connection,
|
||||
it should eventually be able to connect to the database.
|
||||
|
||||
The best solution is to perform this check in your application code, both at
|
||||
startup and whenever a connection is lost for any reason. However, if you don't
|
||||
need this level of resilience, you can work around the problem with a wrapper
|
||||
script:
|
||||
|
||||
- Use a tool such as [wait-for-it](https://github.com/vishnubob/wait-for-it)
|
||||
or [dockerize](https://github.com/jwilder/dockerize). These are small
|
||||
wrapper scripts which you can include in your application's image and will
|
||||
poll a given host and port until it's accepting TCP connections.
|
||||
|
||||
Supposing your application's image has a `CMD` set in its Dockerfile, you
|
||||
can wrap it by setting the entrypoint in `docker-compose.yml`:
|
||||
|
||||
version: "2"
|
||||
services:
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "80:8000"
|
||||
depends_on:
|
||||
- "db"
|
||||
entrypoint: ./wait-for-it.sh db:5432
|
||||
db:
|
||||
image: postgres
|
||||
|
||||
- Write your own wrapper script to perform a more application-specific health
|
||||
check. For example, you might want to wait until Postgres is definitely
|
||||
ready to accept commands:
|
||||
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
host="$1"
|
||||
shift
|
||||
cmd="$@"
|
||||
|
||||
until psql -h "$host" -U "postgres" -c '\l'; do
|
||||
>&2 echo "Postgres is unavailable - sleeping"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
>&2 echo "Postgres is up - executing command"
|
||||
exec $cmd
|
||||
|
||||
You can use this as a wrapper script as in the previous example, by setting
|
||||
`entrypoint: ./wait-for-postgres.sh db`.
|
||||
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [Installing Compose](install.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with WordPress](wordpress.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
181
docs/swarm.md
181
docs/swarm.md
@@ -1,181 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Using Compose with Swarm"
|
||||
description = "How to use Compose and Swarm together to deploy apps to multi-host clusters"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers, swarm"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Using Compose with Swarm
|
||||
|
||||
Docker Compose and [Docker Swarm](/swarm/overview) aim to have full integration, meaning
|
||||
you can point a Compose app at a Swarm cluster and have it all just work as if
|
||||
you were using a single Docker host.
|
||||
|
||||
The actual extent of integration depends on which version of the [Compose file
|
||||
format](compose-file.md#versioning) you are using:
|
||||
|
||||
1. If you're using version 1 along with `links`, your app will work, but Swarm
|
||||
will schedule all containers on one host, because links between containers
|
||||
do not work across hosts with the old networking system.
|
||||
|
||||
2. If you're using version 2, your app should work with no changes:
|
||||
|
||||
- subject to the [limitations](#limitations) described below,
|
||||
|
||||
- as long as the Swarm cluster is configured to use the [overlay driver](https://docs.docker.com/engine/userguide/networking/dockernetworks/#an-overlay-network),
|
||||
or a custom driver which supports multi-host networking.
|
||||
|
||||
Read [Get started with multi-host networking](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) to see how to
|
||||
set up a Swarm cluster with [Docker Machine](/machine/overview) and the overlay driver. Once you've got it running, deploying your app to it should be as simple as:
|
||||
|
||||
$ eval "$(docker-machine env --swarm <name of swarm master machine>)"
|
||||
$ docker-compose up
|
||||
|
||||
|
||||
## Limitations
|
||||
|
||||
### Building images
|
||||
|
||||
Swarm can build an image from a Dockerfile just like a single-host Docker
|
||||
instance can, but the resulting image will only live on a single node and won't
|
||||
be distributed to other nodes.
|
||||
|
||||
If you want to use Compose to scale the service in question to multiple nodes,
|
||||
you'll have to build it yourself, push it to a registry (e.g. the Docker Hub)
|
||||
and reference it from `docker-compose.yml`:
|
||||
|
||||
$ docker build -t myusername/web .
|
||||
$ docker push myusername/web
|
||||
|
||||
$ cat docker-compose.yml
|
||||
web:
|
||||
image: myusername/web
|
||||
|
||||
$ docker-compose up -d
|
||||
$ docker-compose scale web=3
|
||||
|
||||
### Multiple dependencies
|
||||
|
||||
If a service has multiple dependencies of the type which force co-scheduling
|
||||
(see [Automatic scheduling](#automatic-scheduling) below), it's possible that
|
||||
Swarm will schedule the dependencies on different nodes, making the dependent
|
||||
service impossible to schedule. For example, here `foo` needs to be co-scheduled
|
||||
with `bar` and `baz`:
|
||||
|
||||
version: "2"
|
||||
services:
|
||||
foo:
|
||||
image: foo
|
||||
volumes_from: ["bar"]
|
||||
network_mode: "service:baz"
|
||||
bar:
|
||||
image: bar
|
||||
baz:
|
||||
image: baz
|
||||
|
||||
The problem is that Swarm might first schedule `bar` and `baz` on different
|
||||
nodes (since they're not dependent on one another), making it impossible to
|
||||
pick an appropriate node for `foo`.
|
||||
|
||||
To work around this, use [manual scheduling](#manual-scheduling) to ensure that
|
||||
all three services end up on the same node:
|
||||
|
||||
version: "2"
|
||||
services:
|
||||
foo:
|
||||
image: foo
|
||||
volumes_from: ["bar"]
|
||||
network_mode: "service:baz"
|
||||
environment:
|
||||
- "constraint:node==node-1"
|
||||
bar:
|
||||
image: bar
|
||||
environment:
|
||||
- "constraint:node==node-1"
|
||||
baz:
|
||||
image: baz
|
||||
environment:
|
||||
- "constraint:node==node-1"
|
||||
|
||||
### Host ports and recreating containers
|
||||
|
||||
If a service maps a port from the host, e.g. `80:8000`, then you may get an
|
||||
error like this when running `docker-compose up` on it after the first time:
|
||||
|
||||
docker: Error response from daemon: unable to find a node that satisfies
|
||||
container==6ab2dfe36615ae786ef3fc35d641a260e3ea9663d6e69c5b70ce0ca6cb373c02.
|
||||
|
||||
The usual cause of this error is that the container has a volume (defined either
|
||||
in its image or in the Compose file) without an explicit mapping, and so in
|
||||
order to preserve its data, Compose has directed Swarm to schedule the new
|
||||
container on the same node as the old container. This results in a port clash.
|
||||
|
||||
There are two viable workarounds for this problem:
|
||||
|
||||
- Specify a named volume, and use a volume driver which is capable of mounting
|
||||
the volume into the container regardless of what node it's scheduled on.
|
||||
|
||||
Compose does not give Swarm any specific scheduling instructions if a
|
||||
service uses only named volumes.
|
||||
|
||||
version: "2"
|
||||
|
||||
services:
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "80:8000"
|
||||
volumes:
|
||||
- web-logs:/var/log/web
|
||||
|
||||
volumes:
|
||||
web-logs:
|
||||
driver: custom-volume-driver
|
||||
|
||||
- Remove the old container before creating the new one. You will lose any data
|
||||
in the volume.
|
||||
|
||||
$ docker-compose stop web
|
||||
$ docker-compose rm -f web
|
||||
$ docker-compose up web
|
||||
|
||||
|
||||
## Scheduling containers
|
||||
|
||||
### Automatic scheduling
|
||||
|
||||
Some configuration options will result in containers being automatically
|
||||
scheduled on the same Swarm node to ensure that they work correctly. These are:
|
||||
|
||||
- `network_mode: "service:..."` and `network_mode: "container:..."` (and
|
||||
`net: "container:..."` in the version 1 file format).
|
||||
|
||||
- `volumes_from`
|
||||
|
||||
- `links`
|
||||
|
||||
### Manual scheduling
|
||||
|
||||
Swarm offers a rich set of scheduling and affinity hints, enabling you to
|
||||
control where containers are located. They are specified via container
|
||||
environment variables, so you can use Compose's `environment` option to set
|
||||
them.
|
||||
|
||||
# Schedule containers on a specific node
|
||||
environment:
|
||||
- "constraint:node==node-1"
|
||||
|
||||
# Schedule containers on a node that has the 'storage' label set to 'ssd'
|
||||
environment:
|
||||
- "constraint:storage==ssd"
|
||||
|
||||
# Schedule containers where the 'redis' image is already pulled
|
||||
environment:
|
||||
- "affinity:image==redis"
|
||||
|
||||
For the full set of available filters and expressions, see the [Swarm
|
||||
documentation](/swarm/scheduler/filter.md).
|
||||
@@ -1,149 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Quickstart: Compose and WordPress"
|
||||
description = "Getting started with Compose and WordPress"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers"]
|
||||
[menu.main]
|
||||
parent="workw_compose"
|
||||
weight=6
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Quickstart: Docker Compose and WordPress
|
||||
|
||||
You can use Docker Compose to easily run WordPress in an isolated environment built
|
||||
with Docker containers. This quick-start guide demonstrates how to use Compose to set up and run WordPress. Before starting, you'll need to have
|
||||
[Compose installed](install.md).
|
||||
|
||||
## Define the project
|
||||
|
||||
1. Create an empty project directory.
|
||||
|
||||
You can name the directory something easy for you to remember. This directory is the context for your application image. The directory should only contain resources to build that image.
|
||||
|
||||
This project directory will contain a `Dockerfile`, a `docker-compose.yaml` file, along with a downloaded `wordpress` directory and a custom `wp-config.php`, all of which you will create in the following steps.
|
||||
|
||||
2. Change directories into your project directory.
|
||||
|
||||
For example, if you named your directory `my_wordpress`:
|
||||
|
||||
$ cd my-wordpress/
|
||||
|
||||
3. Create a `Dockerfile`, a file that defines the environment in which your application will run.
|
||||
|
||||
For more information on how to write Dockerfiles, see the [Docker Engine user guide](https://docs.docker.com/engine/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](https://docs.docker.com/engine/reference/builder/).
|
||||
|
||||
In this case, your Dockerfile should include these two lines:
|
||||
|
||||
FROM orchardup/php5
|
||||
ADD . /code
|
||||
|
||||
This tells the Docker Engine daemon how to build an image defining a container that contains PHP and WordPress.
|
||||
|
||||
4. Create a `docker-compose.yml` file that will start your web service and a separate MySQL instance:
|
||||
|
||||
version: '2'
|
||||
services:
|
||||
web:
|
||||
build: .
|
||||
command: php -S 0.0.0.0:8000 -t /code/wordpress/
|
||||
ports:
|
||||
- "8000:8000"
|
||||
depends_on:
|
||||
- db
|
||||
volumes:
|
||||
- .:/code
|
||||
db:
|
||||
image: orchardup/mysql
|
||||
environment:
|
||||
MYSQL_DATABASE: wordpress
|
||||
|
||||
5. Download WordPress into the current directory:
|
||||
|
||||
$ curl https://wordpress.org/latest.tar.gz | tar -xvzf -
|
||||
|
||||
This creates a directory called `wordpress` in your project directory.
|
||||
|
||||
6. Create a `wp-config.php` file within the `wordpress` directory.
|
||||
|
||||
A supporting file is needed to get this working. At the top level of the wordpress directory, add a new file called `wp-config.php` as shown. This is the standard WordPress config file with a single change to point the database configuration at the `db` container:
|
||||
|
||||
<?php
|
||||
define('DB_NAME', 'wordpress');
|
||||
define('DB_USER', 'root');
|
||||
define('DB_PASSWORD', '');
|
||||
define('DB_HOST', "db:3306");
|
||||
define('DB_CHARSET', 'utf8');
|
||||
define('DB_COLLATE', '');
|
||||
|
||||
define('AUTH_KEY', 'put your unique phrase here');
|
||||
define('SECURE_AUTH_KEY', 'put your unique phrase here');
|
||||
define('LOGGED_IN_KEY', 'put your unique phrase here');
|
||||
define('NONCE_KEY', 'put your unique phrase here');
|
||||
define('AUTH_SALT', 'put your unique phrase here');
|
||||
define('SECURE_AUTH_SALT', 'put your unique phrase here');
|
||||
define('LOGGED_IN_SALT', 'put your unique phrase here');
|
||||
define('NONCE_SALT', 'put your unique phrase here');
|
||||
|
||||
$table_prefix = 'wp_';
|
||||
define('WPLANG', '');
|
||||
define('WP_DEBUG', false);
|
||||
|
||||
if ( !defined('ABSPATH') )
|
||||
define('ABSPATH', dirname(__FILE__) . '/');
|
||||
|
||||
require_once(ABSPATH . 'wp-settings.php');
|
||||
?>
|
||||
|
||||
7. Verify the contents and structure of your project directory.
|
||||
<!--
|
||||
Dockerfile
|
||||
docker-compose.yaml
|
||||
wordpress/
|
||||
index.php
|
||||
license.txt
|
||||
readme.html
|
||||
wp-activate.php
|
||||
wp-admin/
|
||||
wp-blog-header.php
|
||||
wp-comments-post.php
|
||||
wp-config-sample.php
|
||||
wp-config.php
|
||||
wp-content/
|
||||
wp-cron.php
|
||||
wp-includes/
|
||||
wp-links-opml.php
|
||||
wp-load.php
|
||||
wp-login.php
|
||||
wp-mail.php
|
||||
wp-settings.php
|
||||
wp-signup.php
|
||||
wp-trackback.php
|
||||
xmlrpc.php
|
||||
-->
|
||||
|
||||

|
||||
|
||||
### Build the project
|
||||
|
||||
With those four new files in place, run `docker-compose up` from your project directory. This will pull and build the needed images, and then start the web and database containers.
|
||||
|
||||
If you're using [Docker Machine](https://docs.docker.com/machine/), then `docker-machine ip MACHINE_VM` gives you the machine address and you can open `http://MACHINE_VM_IP:8000` in a browser.
|
||||
|
||||
At this point, WordPress should be running on port `8000` of your Docker Host, and you can complete the "famous five-minute installation" as a WordPress administrator.
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
|
||||
## More Compose documentation
|
||||
|
||||
- [User guide](index.md)
|
||||
- [Installing Compose](install.md)
|
||||
- [Getting Started](gettingstarted.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Command line reference](./reference/index.md)
|
||||
- [Compose file reference](compose-file.md)
|
||||
@@ -1,183 +1,5 @@
|
||||
# Experimental: Compose, Swarm and Multi-Host Networking
|
||||
|
||||
The [experimental build of Docker](https://github.com/docker/docker/tree/master/experimental) has an entirely new networking system, which enables secure communication between containers on multiple hosts. In combination with Docker Swarm and Docker Compose, you can now run multi-container apps on multi-host clusters with the same tooling and configuration format you use to develop them locally.
|
||||
Compose now supports multi-host networking as standard. Read more here:
|
||||
|
||||
> Note: This functionality is in the experimental stage, and contains some hacks and workarounds which will be removed as it matures.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you start, you’ll need to install the experimental build of Docker, and the latest versions of Machine and Compose.
|
||||
|
||||
- To install the experimental Docker build on a Linux machine, follow the instructions [here](https://github.com/docker/docker/tree/master/experimental#install-docker-experimental).
|
||||
|
||||
- To install the experimental Docker build on a Mac, run these commands:
|
||||
|
||||
$ curl -L https://experimental.docker.com/builds/Darwin/x86_64/docker-latest > /usr/local/bin/docker
|
||||
$ chmod +x /usr/local/bin/docker
|
||||
|
||||
- To install Machine, follow the instructions [here](https://docs.docker.com/machine/install-machine/).
|
||||
|
||||
- To install Compose, follow the instructions [here](https://docs.docker.com/compose/install/).
|
||||
|
||||
You’ll also need a [Docker Hub](https://hub.docker.com/account/signup/) account and a [Digital Ocean](https://www.digitalocean.com/) account.
|
||||
|
||||
## Set up a swarm with multi-host networking
|
||||
|
||||
Set the `DIGITALOCEAN_ACCESS_TOKEN` environment variable to a valid Digital Ocean API token, which you can generate in the [API panel](https://cloud.digitalocean.com/settings/applications).
|
||||
|
||||
DIGITALOCEAN_ACCESS_TOKEN=abc12345
|
||||
|
||||
Start a consul server:
|
||||
|
||||
docker-machine create -d digitalocean --engine-install-url https://experimental.docker.com consul
|
||||
docker $(docker-machine config consul) run -d -p 8500:8500 -h consul progrium/consul -server -bootstrap
|
||||
|
||||
(In a real world setting you’d set up a distributed consul, but that’s beyond the scope of this guide!)
|
||||
|
||||
Create a Swarm token:
|
||||
|
||||
SWARM_TOKEN=$(docker run swarm create)
|
||||
|
||||
Create a Swarm master:
|
||||
|
||||
docker-machine create -d digitalocean --swarm --swarm-master --swarm-discovery=token://$SWARM_TOKEN --engine-install-url="https://experimental.docker.com" --digitalocean-image "ubuntu-14-10-x64" --engine-opt=default-network=overlay:multihost --engine-label=com.docker.network.driver.overlay.bind_interface=eth0 --engine-opt=kv-store=consul:$(docker-machine ip consul):8500 swarm-0
|
||||
|
||||
Create a Swarm node:
|
||||
|
||||
docker-machine create -d digitalocean --swarm --swarm-discovery=token://$SWARM_TOKEN --engine-install-url="https://experimental.docker.com" --digitalocean-image "ubuntu-14-10-x64" --engine-opt=default-network=overlay:multihost --engine-label=com.docker.network.driver.overlay.bind_interface=eth0 --engine-opt=kv-store=consul:$(docker-machine ip consul):8500 --engine-label com.docker.network.driver.overlay.neighbor_ip=$(docker-machine ip swarm-0) swarm-1
|
||||
|
||||
You can create more Swarm nodes if you want - it’s best to give them sensible names (swarm-2, swarm-3, etc).
|
||||
|
||||
Finally, point Docker at your swarm:
|
||||
|
||||
eval "$(docker-machine env --swarm swarm-0)"
|
||||
|
||||
## Run containers and get them communicating
|
||||
|
||||
Now that you’ve got a swarm up and running, you can create containers on it just like a single Docker instance:
|
||||
|
||||
$ docker run busybox echo hello world
|
||||
hello world
|
||||
|
||||
If you run `docker ps -a`, you can see what node that container was started on by looking at its name (here it’s swarm-3):
|
||||
|
||||
$ docker ps -a
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
41f59749737b busybox "echo hello world" 15 seconds ago Exited (0) 13 seconds ago swarm-3/trusting_leakey
|
||||
|
||||
As you start more containers, they’ll be placed on different nodes across the cluster, thanks to Swarm’s default “spread” scheduling strategy.
|
||||
|
||||
Every container started on this swarm will use the “overlay:multihost” network by default, meaning they can all intercommunicate. Each container gets an IP address on that network, and an `/etc/hosts` file which will be updated on-the-fly with every other container’s IP address and name. That means that if you have a running container named ‘foo’, other containers can access it at the hostname ‘foo’.
|
||||
|
||||
Let’s verify that multi-host networking is functioning. Start a long-running container:
|
||||
|
||||
$ docker run -d --name long-running busybox top
|
||||
<container id>
|
||||
|
||||
If you start a new container and inspect its /etc/hosts file, you’ll see the long-running container in there:
|
||||
|
||||
$ docker run busybox cat /etc/hosts
|
||||
...
|
||||
172.21.0.6 long-running
|
||||
|
||||
Verify that connectivity works between containers:
|
||||
|
||||
$ docker run busybox ping long-running
|
||||
PING long-running (172.21.0.6): 56 data bytes
|
||||
64 bytes from 172.21.0.6: seq=0 ttl=64 time=7.975 ms
|
||||
64 bytes from 172.21.0.6: seq=1 ttl=64 time=1.378 ms
|
||||
64 bytes from 172.21.0.6: seq=2 ttl=64 time=1.348 ms
|
||||
^C
|
||||
--- long-running ping statistics ---
|
||||
3 packets transmitted, 3 packets received, 0% packet loss
|
||||
round-trip min/avg/max = 1.140/2.099/7.975 ms
|
||||
|
||||
## Run a Compose application
|
||||
|
||||
Here’s an example of a simple Python + Redis app using multi-host networking on a swarm.
|
||||
|
||||
Create a directory for the app:
|
||||
|
||||
$ mkdir composetest
|
||||
$ cd composetest
|
||||
|
||||
Inside this directory, create 2 files.
|
||||
|
||||
First, create `app.py` - a simple web app that uses the Flask framework and increments a value in Redis:
|
||||
|
||||
from flask import Flask
|
||||
from redis import Redis
|
||||
import os
|
||||
app = Flask(__name__)
|
||||
redis = Redis(host='composetest_redis_1', port=6379)
|
||||
|
||||
@app.route('/')
|
||||
def hello():
|
||||
redis.incr('hits')
|
||||
return 'Hello World! I have been seen %s times.' % redis.get('hits')
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(host="0.0.0.0", debug=True)
|
||||
|
||||
Note that we’re connecting to a host called `composetest_redis_1` - this is the name of the Redis container that Compose will start.
|
||||
|
||||
Second, create a Dockerfile for the app container:
|
||||
|
||||
FROM python:2.7
|
||||
RUN pip install flask redis
|
||||
ADD . /code
|
||||
WORKDIR /code
|
||||
CMD ["python", "app.py"]
|
||||
|
||||
Build the Docker image and push it to the Hub (you’ll need a Hub account). Replace `<username>` with your Docker Hub username:
|
||||
|
||||
$ docker build -t <username>/counter .
|
||||
$ docker push <username>/counter
|
||||
|
||||
Next, create a `docker-compose.yml`, which defines the configuration for the web and redis containers. Once again, replace `<username>` with your Hub username:
|
||||
|
||||
web:
|
||||
image: <username>/counter
|
||||
ports:
|
||||
- "80:5000"
|
||||
redis:
|
||||
image: redis
|
||||
|
||||
Now start the app:
|
||||
|
||||
$ docker-compose up -d
|
||||
Pulling web (username/counter:latest)...
|
||||
swarm-0: Pulling username/counter:latest... : downloaded
|
||||
swarm-2: Pulling username/counter:latest... : downloaded
|
||||
swarm-1: Pulling username/counter:latest... : downloaded
|
||||
swarm-3: Pulling username/counter:latest... : downloaded
|
||||
swarm-4: Pulling username/counter:latest... : downloaded
|
||||
Creating composetest_web_1...
|
||||
Pulling redis (redis:latest)...
|
||||
swarm-2: Pulling redis:latest... : downloaded
|
||||
swarm-1: Pulling redis:latest... : downloaded
|
||||
swarm-3: Pulling redis:latest... : downloaded
|
||||
swarm-4: Pulling redis:latest... : downloaded
|
||||
swarm-0: Pulling redis:latest... : downloaded
|
||||
Creating composetest_redis_1...
|
||||
|
||||
Swarm has created containers for both web and redis, and placed them on different nodes, which you can check with `docker ps`:
|
||||
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
92faad2135c9 redis "/entrypoint.sh redi 43 seconds ago Up 42 seconds swarm-2/composetest_redis_1
|
||||
adb809e5cdac username/counter "/bin/sh -c 'python 55 seconds ago Up 54 seconds 45.67.8.9:80->5000/tcp swarm-1/composetest_web_1
|
||||
|
||||
You can also see that the web container has exposed port 80 on its swarm node. If you curl that IP, you’ll get a response from the container:
|
||||
|
||||
$ curl http://45.67.8.9
|
||||
Hello World! I have been seen 1 times.
|
||||
|
||||
If you hit it repeatedly, the counter will increment, demonstrating that the web and redis container are communicating:
|
||||
|
||||
$ curl http://45.67.8.9
|
||||
Hello World! I have been seen 2 times.
|
||||
$ curl http://45.67.8.9
|
||||
Hello World! I have been seen 3 times.
|
||||
$ curl http://45.67.8.9
|
||||
Hello World! I have been seen 4 times.
|
||||
https://docs.docker.com/compose/networking
|
||||
|
||||
@@ -1,11 +1,15 @@
|
||||
PyYAML==3.11
|
||||
backports.ssl-match-hostname==3.5.0.1; python_version < '3'
|
||||
cached-property==1.2.0
|
||||
docker-py==1.8.0
|
||||
docker-py==1.10.6
|
||||
dockerpty==0.4.1
|
||||
docopt==0.6.1
|
||||
enum34==1.0.4
|
||||
enum34==1.0.4; python_version < '3.4'
|
||||
functools32==3.2.3.post2; python_version < '3.2'
|
||||
ipaddress==1.0.16
|
||||
jsonschema==2.5.1
|
||||
requests==2.7.0
|
||||
six==1.7.3
|
||||
pypiwin32==219; sys_platform == 'win32'
|
||||
requests==2.11.1
|
||||
six==1.10.0
|
||||
texttable==0.8.4
|
||||
websocket-client==0.32.0
|
||||
|
||||
@@ -15,10 +15,10 @@ EOM
|
||||
|
||||
[[ -n "$1" ]] || usage
|
||||
PREV_RELEASE=$1
|
||||
VERSION=HEAD
|
||||
BRANCH="$(git rev-parse --abbrev-ref HEAD)"
|
||||
URL="https://api.github.com/repos/docker/compose/compare"
|
||||
|
||||
contribs=$(curl -sf "$URL/$PREV_RELEASE...$VERSION" | \
|
||||
contribs=$(curl -sf "$URL/$PREV_RELEASE...$BRANCH" | \
|
||||
jq -r '.commits[].author.login' | \
|
||||
sort | \
|
||||
uniq -c | \
|
||||
|
||||
@@ -65,8 +65,8 @@ git config "branch.${BRANCH}.release" $VERSION
|
||||
|
||||
editor=${EDITOR:-vim}
|
||||
|
||||
echo "Update versions in docs/install.md, compose/__init__.py, script/run/run.sh"
|
||||
$editor docs/install.md
|
||||
echo "Update versions in compose/__init__.py, script/run/run.sh"
|
||||
# $editor docs/install.md
|
||||
$editor compose/__init__.py
|
||||
$editor script/run/run.sh
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
|
||||
set -e
|
||||
|
||||
VERSION="1.7.0"
|
||||
VERSION="1.9.0"
|
||||
IMAGE="docker/compose:$VERSION"
|
||||
|
||||
|
||||
|
||||
@@ -10,13 +10,13 @@ openssl_version() {
|
||||
python -c "import ssl; print ssl.OPENSSL_VERSION"
|
||||
}
|
||||
|
||||
desired_python_version="2.7.9"
|
||||
desired_python_brew_version="2.7.9"
|
||||
python_formula="https://raw.githubusercontent.com/Homebrew/homebrew/1681e193e4d91c9620c4901efd4458d9b6fcda8e/Library/Formula/python.rb"
|
||||
desired_python_version="2.7.12"
|
||||
desired_python_brew_version="2.7.12"
|
||||
python_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/737a2e34a89b213c1f0a2a24fc1a3c06635eed04/Formula/python.rb"
|
||||
|
||||
desired_openssl_version="1.0.1j"
|
||||
desired_openssl_brew_version="1.0.1j_1"
|
||||
openssl_formula="https://raw.githubusercontent.com/Homebrew/homebrew/62fc2a1a65e83ba9dbb30b2e0a2b7355831c714b/Library/Formula/openssl.rb"
|
||||
desired_openssl_version="1.0.2j"
|
||||
desired_openssl_brew_version="1.0.2j"
|
||||
openssl_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/30d3766453347f6e22b3ed6c74bb926d6def2eb5/Formula/openssl.rb"
|
||||
|
||||
PATH="/usr/local/bin:$PATH"
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ fi
|
||||
|
||||
|
||||
BUILD_NUMBER=${BUILD_NUMBER-$USER}
|
||||
PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py34}
|
||||
|
||||
for version in $DOCKER_VERSIONS; do
|
||||
>&2 echo "Running tests against Docker $version"
|
||||
@@ -58,6 +59,6 @@ for version in $DOCKER_VERSIONS; do
|
||||
--env="DOCKER_VERSION=$version" \
|
||||
--entrypoint="tox" \
|
||||
"$TAG" \
|
||||
-e py27,py34 -- "$@"
|
||||
-e "$PY_TEST_VERSIONS" -- "$@"
|
||||
|
||||
done
|
||||
|
||||
@@ -28,6 +28,7 @@ from __future__ import unicode_literals
|
||||
import argparse
|
||||
import itertools
|
||||
import operator
|
||||
import sys
|
||||
from collections import namedtuple
|
||||
|
||||
import requests
|
||||
@@ -103,6 +104,14 @@ def get_default(versions):
|
||||
return version
|
||||
|
||||
|
||||
def get_versions(tags):
|
||||
for tag in tags:
|
||||
try:
|
||||
yield Version.parse(tag['name'])
|
||||
except ValueError:
|
||||
print("Skipping invalid tag: {name}".format(**tag), file=sys.stderr)
|
||||
|
||||
|
||||
def get_github_releases(project):
|
||||
"""Query the Github API for a list of version tags and return them in
|
||||
sorted order.
|
||||
@@ -112,7 +121,7 @@ def get_github_releases(project):
|
||||
url = '{}/{}/tags'.format(GITHUB_API, project)
|
||||
response = requests.get(url)
|
||||
response.raise_for_status()
|
||||
versions = [Version.parse(tag['name']) for tag in response.json()]
|
||||
versions = get_versions(response.json())
|
||||
return sorted(versions, reverse=True, key=operator.attrgetter('order'))
|
||||
|
||||
|
||||
|
||||
@@ -6,5 +6,5 @@ if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
|
||||
tox -e py27,py34 -- tests/unit
|
||||
else
|
||||
# TODO: we could also install py34 and test against it
|
||||
python -m tox -e py27 -- tests/unit
|
||||
tox -e py27 -- tests/unit
|
||||
fi
|
||||
|
||||
@@ -5,5 +5,6 @@ set -ex
|
||||
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
|
||||
pip install tox==2.1.1
|
||||
else
|
||||
pip install --user tox==2.1.1
|
||||
sudo pip install --upgrade pip tox==2.1.1 virtualenv
|
||||
pip --version
|
||||
fi
|
||||
|
||||
4
setup.py
4
setup.py
@@ -31,10 +31,10 @@ install_requires = [
|
||||
'cached-property >= 1.2.0, < 2',
|
||||
'docopt >= 0.6.1, < 0.7',
|
||||
'PyYAML >= 3.10, < 4',
|
||||
'requests >= 2.6.1, < 2.8',
|
||||
'requests >= 2.6.1, != 2.11.0, < 2.12',
|
||||
'texttable >= 0.8.1, < 0.9',
|
||||
'websocket-client >= 0.32.0, < 1.0',
|
||||
'docker-py > 1.7.2, < 2',
|
||||
'docker-py >= 1.10.6, < 2.0',
|
||||
'dockerpty >= 0.4.1, < 0.5',
|
||||
'six >= 1.3.0, < 2',
|
||||
'jsonschema >= 2.5.1, < 3',
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
import shlex
|
||||
import signal
|
||||
import subprocess
|
||||
import time
|
||||
@@ -12,6 +12,8 @@ from collections import Counter
|
||||
from collections import namedtuple
|
||||
from operator import attrgetter
|
||||
|
||||
import py
|
||||
import six
|
||||
import yaml
|
||||
from docker import errors
|
||||
|
||||
@@ -22,6 +24,7 @@ from compose.project import OneOffFilter
|
||||
from tests.integration.testcases import DockerClientTestCase
|
||||
from tests.integration.testcases import get_links
|
||||
from tests.integration.testcases import pull_busybox
|
||||
from tests.integration.testcases import v2_1_only
|
||||
from tests.integration.testcases import v2_only
|
||||
|
||||
|
||||
@@ -113,6 +116,8 @@ class CLITestCase(DockerClientTestCase):
|
||||
for n in networks:
|
||||
if n['Name'].startswith('{}_'.format(self.project.name)):
|
||||
self.client.remove_network(n['Name'])
|
||||
if hasattr(self, '_project'):
|
||||
del self._project
|
||||
|
||||
super(CLITestCase, self).tearDown()
|
||||
|
||||
@@ -140,20 +145,36 @@ class CLITestCase(DockerClientTestCase):
|
||||
|
||||
def test_help(self):
|
||||
self.base_dir = 'tests/fixtures/no-composefile'
|
||||
result = self.dispatch(['help', 'up'], returncode=1)
|
||||
assert 'Usage: up [options] [SERVICE...]' in result.stderr
|
||||
result = self.dispatch(['help', 'up'], returncode=0)
|
||||
assert 'Usage: up [options] [SERVICE...]' in result.stdout
|
||||
# Prevent tearDown from trying to create a project
|
||||
self.base_dir = None
|
||||
|
||||
# TODO: this shouldn't be v2-dependent
|
||||
@v2_only()
|
||||
def test_shorthand_host_opt(self):
|
||||
self.dispatch(
|
||||
['-H={0}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
|
||||
'up', '-d'],
|
||||
returncode=0
|
||||
)
|
||||
|
||||
def test_host_not_reachable(self):
|
||||
result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
|
||||
assert "Couldn't connect to Docker daemon" in result.stderr
|
||||
|
||||
def test_host_not_reachable_volumes_from_container(self):
|
||||
self.base_dir = 'tests/fixtures/volumes-from-container'
|
||||
|
||||
container = self.client.create_container('busybox', 'true', name='composetest_data_container')
|
||||
self.addCleanup(self.client.remove_container, container)
|
||||
|
||||
result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
|
||||
assert "Couldn't connect to Docker daemon" in result.stderr
|
||||
|
||||
def test_config_list_services(self):
|
||||
self.base_dir = 'tests/fixtures/v2-full'
|
||||
result = self.dispatch(['config', '--services'])
|
||||
assert set(result.stdout.rstrip().split('\n')) == {'web', 'other'}
|
||||
|
||||
# TODO: this shouldn't be v2-dependent
|
||||
@v2_only()
|
||||
def test_config_quiet_with_error(self):
|
||||
self.base_dir = None
|
||||
result = self.dispatch([
|
||||
@@ -162,14 +183,10 @@ class CLITestCase(DockerClientTestCase):
|
||||
], returncode=1)
|
||||
assert "'notaservice' must be a mapping" in result.stderr
|
||||
|
||||
# TODO: this shouldn't be v2-dependent
|
||||
@v2_only()
|
||||
def test_config_quiet(self):
|
||||
self.base_dir = 'tests/fixtures/v2-full'
|
||||
assert self.dispatch(['config', '-q']).stdout == ''
|
||||
|
||||
# TODO: this shouldn't be v2-dependent
|
||||
@v2_only()
|
||||
def test_config_default(self):
|
||||
self.base_dir = 'tests/fixtures/v2-full'
|
||||
result = self.dispatch(['config'])
|
||||
@@ -198,6 +215,76 @@ class CLITestCase(DockerClientTestCase):
|
||||
}
|
||||
assert output == expected
|
||||
|
||||
def test_config_restart(self):
|
||||
self.base_dir = 'tests/fixtures/restart'
|
||||
result = self.dispatch(['config'])
|
||||
assert yaml.load(result.stdout) == {
|
||||
'version': '2.0',
|
||||
'services': {
|
||||
'never': {
|
||||
'image': 'busybox',
|
||||
'restart': 'no',
|
||||
},
|
||||
'always': {
|
||||
'image': 'busybox',
|
||||
'restart': 'always',
|
||||
},
|
||||
'on-failure': {
|
||||
'image': 'busybox',
|
||||
'restart': 'on-failure',
|
||||
},
|
||||
'on-failure-5': {
|
||||
'image': 'busybox',
|
||||
'restart': 'on-failure:5',
|
||||
},
|
||||
'restart-null': {
|
||||
'image': 'busybox',
|
||||
'restart': ''
|
||||
},
|
||||
},
|
||||
'networks': {},
|
||||
'volumes': {},
|
||||
}
|
||||
|
||||
def test_config_external_network(self):
|
||||
self.base_dir = 'tests/fixtures/networks'
|
||||
result = self.dispatch(['-f', 'external-networks.yml', 'config'])
|
||||
json_result = yaml.load(result.stdout)
|
||||
assert 'networks' in json_result
|
||||
assert json_result['networks'] == {
|
||||
'networks_foo': {
|
||||
'external': True # {'name': 'networks_foo'}
|
||||
},
|
||||
'bar': {
|
||||
'external': {'name': 'networks_bar'}
|
||||
}
|
||||
}
|
||||
|
||||
def test_config_v1(self):
|
||||
self.base_dir = 'tests/fixtures/v1-config'
|
||||
result = self.dispatch(['config'])
|
||||
assert yaml.load(result.stdout) == {
|
||||
'version': '2.1',
|
||||
'services': {
|
||||
'net': {
|
||||
'image': 'busybox',
|
||||
'network_mode': 'bridge',
|
||||
},
|
||||
'volume': {
|
||||
'image': 'busybox',
|
||||
'volumes': ['/data:rw'],
|
||||
'network_mode': 'bridge',
|
||||
},
|
||||
'app': {
|
||||
'image': 'busybox',
|
||||
'volumes_from': ['service:volume:rw'],
|
||||
'network_mode': 'service:net',
|
||||
},
|
||||
},
|
||||
'networks': {},
|
||||
'volumes': {},
|
||||
}
|
||||
|
||||
def test_ps(self):
|
||||
self.project.get_service('simple').create_container()
|
||||
result = self.dispatch(['ps'])
|
||||
@@ -243,12 +330,13 @@ class CLITestCase(DockerClientTestCase):
|
||||
def test_pull_with_ignore_pull_failures(self):
|
||||
result = self.dispatch([
|
||||
'-f', 'ignore-pull-failures.yml',
|
||||
'pull', '--ignore-pull-failures'])
|
||||
'pull', '--ignore-pull-failures']
|
||||
)
|
||||
|
||||
assert 'Pulling simple (busybox:latest)...' in result.stderr
|
||||
assert 'Pulling another (nonexisting-image:latest)...' in result.stderr
|
||||
assert 'Error: image library/nonexisting-image' in result.stderr
|
||||
assert 'not found' in result.stderr
|
||||
assert ('repository nonexisting-image not found' in result.stderr or
|
||||
'image library/nonexisting-image:latest not found' in result.stderr)
|
||||
|
||||
def test_build_plain(self):
|
||||
self.base_dir = 'tests/fixtures/simple-dockerfile'
|
||||
@@ -313,6 +401,32 @@ class CLITestCase(DockerClientTestCase):
|
||||
]
|
||||
assert not containers
|
||||
|
||||
def test_bundle_with_digests(self):
|
||||
self.base_dir = 'tests/fixtures/bundle-with-digests/'
|
||||
tmpdir = py.test.ensuretemp('cli_test_bundle')
|
||||
self.addCleanup(tmpdir.remove)
|
||||
filename = str(tmpdir.join('example.dab'))
|
||||
|
||||
self.dispatch(['bundle', '--output', filename])
|
||||
with open(filename, 'r') as fh:
|
||||
bundle = json.load(fh)
|
||||
|
||||
assert bundle == {
|
||||
'Version': '0.1',
|
||||
'Services': {
|
||||
'web': {
|
||||
'Image': ('dockercloud/hello-world@sha256:fe79a2cfbd17eefc3'
|
||||
'44fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d'),
|
||||
'Networks': ['default'],
|
||||
},
|
||||
'redis': {
|
||||
'Image': ('redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d'
|
||||
'374b2b7392de1e7d77be26ef8f7b'),
|
||||
'Networks': ['default'],
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
def test_create(self):
|
||||
self.dispatch(['create'])
|
||||
service = self.project.get_service('simple')
|
||||
@@ -484,6 +598,24 @@ class CLITestCase(DockerClientTestCase):
|
||||
assert 'forward_facing' in front_aliases
|
||||
assert 'ahead' in front_aliases
|
||||
|
||||
@v2_only()
|
||||
def test_up_with_network_internal(self):
|
||||
self.require_api_version('1.23')
|
||||
filename = 'network-internal.yml'
|
||||
self.base_dir = 'tests/fixtures/networks'
|
||||
self.dispatch(['-f', filename, 'up', '-d'], None)
|
||||
internal_net = '{}_internal'.format(self.project.name)
|
||||
|
||||
networks = [
|
||||
n for n in self.client.networks()
|
||||
if n['Name'].startswith('{}_'.format(self.project.name))
|
||||
]
|
||||
|
||||
# One network was created: internal
|
||||
assert sorted(n['Name'] for n in networks) == [internal_net]
|
||||
|
||||
assert networks[0]['Internal'] is True
|
||||
|
||||
@v2_only()
|
||||
def test_up_with_network_static_addresses(self):
|
||||
filename = 'network-static-addresses.yml'
|
||||
@@ -643,6 +775,46 @@ class CLITestCase(DockerClientTestCase):
|
||||
container = self.project.containers()[0]
|
||||
assert list(container.get('NetworkSettings.Networks')) == [network_name]
|
||||
|
||||
@v2_1_only()
|
||||
def test_up_with_network_labels(self):
|
||||
filename = 'network-label.yml'
|
||||
|
||||
self.base_dir = 'tests/fixtures/networks'
|
||||
self._project = get_project(self.base_dir, [filename])
|
||||
|
||||
self.dispatch(['-f', filename, 'up', '-d'], returncode=0)
|
||||
|
||||
network_with_label = '{}_network_with_label'.format(self.project.name)
|
||||
|
||||
networks = [
|
||||
n for n in self.client.networks()
|
||||
if n['Name'].startswith('{}_'.format(self.project.name))
|
||||
]
|
||||
|
||||
assert [n['Name'] for n in networks] == [network_with_label]
|
||||
|
||||
assert networks[0]['Labels'] == {'label_key': 'label_val'}
|
||||
|
||||
@v2_1_only()
|
||||
def test_up_with_volume_labels(self):
|
||||
filename = 'volume-label.yml'
|
||||
|
||||
self.base_dir = 'tests/fixtures/volumes'
|
||||
self._project = get_project(self.base_dir, [filename])
|
||||
|
||||
self.dispatch(['-f', filename, 'up', '-d'], returncode=0)
|
||||
|
||||
volume_with_label = '{}_volume_with_label'.format(self.project.name)
|
||||
|
||||
volumes = [
|
||||
v for v in self.client.volumes().get('Volumes', [])
|
||||
if v['Name'].startswith('{}_'.format(self.project.name))
|
||||
]
|
||||
|
||||
assert [v['Name'] for v in volumes] == [volume_with_label]
|
||||
|
||||
assert volumes[0]['Labels'] == {'label_key': 'label_val'}
|
||||
|
||||
@v2_only()
|
||||
def test_up_no_services(self):
|
||||
self.base_dir = 'tests/fixtures/no-services'
|
||||
@@ -683,9 +855,7 @@ class CLITestCase(DockerClientTestCase):
|
||||
['-f', 'v2-invalid.yml', 'up', '-d'],
|
||||
returncode=1)
|
||||
|
||||
# TODO: fix validation error messages for v2 files
|
||||
# assert "Unsupported config option for service 'web': 'net'" in exc.exconly()
|
||||
assert "Unsupported config option" in result.stderr
|
||||
assert "Unsupported config option for services.bar: 'net'" in result.stderr
|
||||
|
||||
def test_up_with_net_v1(self):
|
||||
self.base_dir = 'tests/fixtures/net-container'
|
||||
@@ -875,16 +1045,54 @@ class CLITestCase(DockerClientTestCase):
|
||||
[u'/bin/true'],
|
||||
)
|
||||
|
||||
def test_run_service_with_entrypoint_overridden(self):
|
||||
self.base_dir = 'tests/fixtures/dockerfile_with_entrypoint'
|
||||
name = 'service'
|
||||
self.dispatch(['run', '--entrypoint', '/bin/echo', name, 'helloworld'])
|
||||
service = self.project.get_service(name)
|
||||
container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
self.assertEqual(
|
||||
shlex.split(container.human_readable_command),
|
||||
[u'/bin/echo', u'helloworld'],
|
||||
)
|
||||
def test_run_service_with_dockerfile_entrypoint(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
|
||||
self.dispatch(['run', 'test'])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['printf']
|
||||
assert container.get('Config.Cmd') == ['default', 'args']
|
||||
|
||||
def test_run_service_with_dockerfile_entrypoint_overridden(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
|
||||
self.dispatch(['run', '--entrypoint', 'echo', 'test'])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['echo']
|
||||
assert not container.get('Config.Cmd')
|
||||
|
||||
def test_run_service_with_dockerfile_entrypoint_and_command_overridden(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
|
||||
self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['echo']
|
||||
assert container.get('Config.Cmd') == ['foo']
|
||||
|
||||
def test_run_service_with_compose_file_entrypoint(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-composefile'
|
||||
self.dispatch(['run', 'test'])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['printf']
|
||||
assert container.get('Config.Cmd') == ['default', 'args']
|
||||
|
||||
def test_run_service_with_compose_file_entrypoint_overridden(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-composefile'
|
||||
self.dispatch(['run', '--entrypoint', 'echo', 'test'])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['echo']
|
||||
assert not container.get('Config.Cmd')
|
||||
|
||||
def test_run_service_with_compose_file_entrypoint_and_command_overridden(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-composefile'
|
||||
self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['echo']
|
||||
assert container.get('Config.Cmd') == ['foo']
|
||||
|
||||
def test_run_service_with_compose_file_entrypoint_and_empty_string_command(self):
|
||||
self.base_dir = 'tests/fixtures/entrypoint-composefile'
|
||||
self.dispatch(['run', '--entrypoint', 'echo', 'test', ''])
|
||||
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
|
||||
assert container.get('Config.Entrypoint') == ['echo']
|
||||
assert container.get('Config.Cmd') == ['']
|
||||
|
||||
def test_run_service_with_user_overridden(self):
|
||||
self.base_dir = 'tests/fixtures/user-composefile'
|
||||
@@ -1072,7 +1280,10 @@ class CLITestCase(DockerClientTestCase):
|
||||
]
|
||||
|
||||
for _, config in networks.items():
|
||||
assert not config['Aliases']
|
||||
# TODO: once we drop support for API <1.24, this can be changed to:
|
||||
# assert config['Aliases'] == [container.short_id]
|
||||
aliases = set(config['Aliases'] or []) - set([container.short_id])
|
||||
assert not aliases
|
||||
|
||||
@v2_only()
|
||||
def test_run_detached_connects_to_network(self):
|
||||
@@ -1089,7 +1300,10 @@ class CLITestCase(DockerClientTestCase):
|
||||
]
|
||||
|
||||
for _, config in networks.items():
|
||||
assert not config['Aliases']
|
||||
# TODO: once we drop support for API <1.24, this can be changed to:
|
||||
# assert config['Aliases'] == [container.short_id]
|
||||
aliases = set(config['Aliases'] or []) - set([container.short_id])
|
||||
assert not aliases
|
||||
|
||||
assert self.lookup(container, 'app')
|
||||
assert self.lookup(container, 'db')
|
||||
@@ -1120,6 +1334,35 @@ class CLITestCase(DockerClientTestCase):
|
||||
'simplecomposefile_simple_run_1',
|
||||
'exited'))
|
||||
|
||||
@mock.patch.dict(os.environ)
|
||||
def test_run_unicode_env_values_from_system(self):
|
||||
value = 'ą, ć, ę, ł, ń, ó, ś, ź, ż'
|
||||
if six.PY2: # os.environ doesn't support unicode values in Py2
|
||||
os.environ['BAR'] = value.encode('utf-8')
|
||||
else: # ... and doesn't support byte values in Py3
|
||||
os.environ['BAR'] = value
|
||||
self.base_dir = 'tests/fixtures/unicode-environment'
|
||||
result = self.dispatch(['run', 'simple'])
|
||||
|
||||
if six.PY2: # Can't retrieve output on Py3. See issue #3670
|
||||
assert value == result.stdout.strip()
|
||||
|
||||
container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
|
||||
environment = container.get('Config.Env')
|
||||
assert 'FOO={}'.format(value) in environment
|
||||
|
||||
@mock.patch.dict(os.environ)
|
||||
def test_run_env_values_from_system(self):
|
||||
os.environ['FOO'] = 'bar'
|
||||
os.environ['BAR'] = 'baz'
|
||||
|
||||
self.dispatch(['run', '-e', 'FOO', 'simple', 'true'], None)
|
||||
|
||||
container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
|
||||
environment = container.get('Config.Env')
|
||||
assert 'FOO=bar' in environment
|
||||
assert 'BAR=baz' not in environment
|
||||
|
||||
def test_rm(self):
|
||||
service = self.project.get_service('simple')
|
||||
service.create_container()
|
||||
@@ -1143,8 +1386,6 @@ class CLITestCase(DockerClientTestCase):
|
||||
self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
|
||||
self.dispatch(['rm', '-f'], None)
|
||||
self.assertEqual(len(service.containers(stopped=True)), 0)
|
||||
self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
|
||||
self.dispatch(['rm', '-f', '-a'], None)
|
||||
self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
|
||||
|
||||
service.create_container(one_off=False)
|
||||
@@ -1257,13 +1498,14 @@ class CLITestCase(DockerClientTestCase):
|
||||
'logscomposefile_another_1',
|
||||
'exited'))
|
||||
|
||||
# sleep for a short period to allow the tailing thread to receive the
|
||||
# event. This is not great, but there isn't an easy way to do this
|
||||
# without being able to stream stdout from the process.
|
||||
time.sleep(0.5)
|
||||
os.kill(proc.pid, signal.SIGINT)
|
||||
result = wait_on_process(proc, returncode=1)
|
||||
self.dispatch(['kill', 'simple'])
|
||||
|
||||
result = wait_on_process(proc)
|
||||
|
||||
assert 'hello' in result.stdout
|
||||
assert 'test' in result.stdout
|
||||
assert 'logscomposefile_another_1 exited with code 0' in result.stdout
|
||||
assert 'logscomposefile_simple_1 exited with code 137' in result.stdout
|
||||
|
||||
def test_logs_default(self):
|
||||
self.base_dir = 'tests/fixtures/logs-composefile'
|
||||
@@ -1425,6 +1667,17 @@ class CLITestCase(DockerClientTestCase):
|
||||
assert Counter(e['action'] for e in lines) == {'create': 2, 'start': 2}
|
||||
|
||||
def test_events_human_readable(self):
|
||||
|
||||
def has_timestamp(string):
|
||||
str_iso_date, str_iso_time, container_info = string.split(' ', 2)
|
||||
try:
|
||||
return isinstance(datetime.datetime.strptime(
|
||||
'%s %s' % (str_iso_date, str_iso_time),
|
||||
'%Y-%m-%d %H:%M:%S.%f'),
|
||||
datetime.datetime)
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
events_proc = start_process(self.base_dir, ['events'])
|
||||
self.dispatch(['up', '-d', 'simple'])
|
||||
wait_on_condition(ContainerCountCondition(self.project, 1))
|
||||
@@ -1441,7 +1694,8 @@ class CLITestCase(DockerClientTestCase):
|
||||
|
||||
assert expected_template.format('create', container.id) in lines[0]
|
||||
assert expected_template.format('start', container.id) in lines[1]
|
||||
assert lines[0].startswith(datetime.date.today().isoformat())
|
||||
|
||||
assert has_timestamp(lines[0])
|
||||
|
||||
def test_env_file_relative_to_compose_file(self):
|
||||
config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml')
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user