mirror of
https://github.com/docker/compose.git
synced 2026-02-11 02:59:25 +08:00
Compare commits
482 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b24ca75914 | ||
|
|
2b75741e5a | ||
|
|
7ff8c2b224 | ||
|
|
db31adc208 | ||
|
|
805f6a7683 | ||
|
|
d92f323e6d | ||
|
|
cf2dbf55b8 | ||
|
|
8d4c724c2d | ||
|
|
9cb2770da4 | ||
|
|
6a23491fa9 | ||
|
|
294b9742be | ||
|
|
a9b1f15f92 | ||
|
|
28139ab90d | ||
|
|
d0792b49fa | ||
|
|
5548aa5c79 | ||
|
|
16440ff055 | ||
|
|
7850d6de45 | ||
|
|
74b4fb89bb | ||
|
|
22ccf35fa1 | ||
|
|
7ad1fe24bd | ||
|
|
450ba978c1 | ||
|
|
3d6946417d | ||
|
|
31cf63b374 | ||
|
|
5c853c4a2c | ||
|
|
ad922cd7a1 | ||
|
|
49bafdc4cd | ||
|
|
989b2491b9 | ||
|
|
ca2ce3a034 | ||
|
|
dfe9dccab8 | ||
|
|
d456c3909d | ||
|
|
e832e86f09 | ||
|
|
33a108f9bb | ||
|
|
65cc84140a | ||
|
|
227435f613 | ||
|
|
70c7d27c4e | ||
|
|
04a773f1c8 | ||
|
|
f4dac02947 | ||
|
|
2c8aade13e | ||
|
|
119901c19b | ||
|
|
ef44c46c72 | ||
|
|
fc32ccefca | ||
|
|
1739448402 | ||
|
|
620dac53f6 | ||
|
|
dc62279d02 | ||
|
|
4035a203fa | ||
|
|
233c509f71 | ||
|
|
e1c1a4c0aa | ||
|
|
41406cdd68 | ||
|
|
cc0bfea8a1 | ||
|
|
090879b67a | ||
|
|
3ec7124539 | ||
|
|
f721389447 | ||
|
|
da650e9cfd | ||
|
|
38a6209acd | ||
|
|
04b7490ef2 | ||
|
|
d1fdf1b809 | ||
|
|
5c29ded6ac | ||
|
|
85c90daa18 | ||
|
|
4ba9d9dac2 | ||
|
|
4ffae4a1ac | ||
|
|
5f6d0d0824 | ||
|
|
05d8daa8e0 | ||
|
|
9df2be80a8 | ||
|
|
9d9b865796 | ||
|
|
61787fecea | ||
|
|
949dd5b2c7 | ||
|
|
4ca210edd7 | ||
|
|
4b4c4f37bd | ||
|
|
2407193594 | ||
|
|
98eab03c61 | ||
|
|
faa7da6eff | ||
|
|
89f6caf871 | ||
|
|
a3191ab90f | ||
|
|
35092f1d5e | ||
|
|
445fe89fce | ||
|
|
9ffe69a572 | ||
|
|
6ca781416e | ||
|
|
36f4d413fd | ||
|
|
c8643828d2 | ||
|
|
ced94a3504 | ||
|
|
b2cb5a48d2 | ||
|
|
9d0bbdf8dd | ||
|
|
ca298309e0 | ||
|
|
29f51f8216 | ||
|
|
da6cbd4535 | ||
|
|
29ceef6d93 | ||
|
|
8cff440800 | ||
|
|
e5f6ae767d | ||
|
|
c7dccccd1f | ||
|
|
3a775388b2 | ||
|
|
947bce043e | ||
|
|
cd44179305 | ||
|
|
7b9664be8e | ||
|
|
0c5c8d1f00 | ||
|
|
5a46230555 | ||
|
|
ec8bf066ba | ||
|
|
f03dec766d | ||
|
|
35aef1aee0 | ||
|
|
6c7c63ce34 | ||
|
|
e675d67774 | ||
|
|
200b408843 | ||
|
|
03d34336a8 | ||
|
|
847f839cab | ||
|
|
a68ca199a2 | ||
|
|
7937611366 | ||
|
|
b7edb3ca9d | ||
|
|
c3c5b354b8 | ||
|
|
95cf195dbd | ||
|
|
a80afd67ab | ||
|
|
4bc4d273ac | ||
|
|
4911c77134 | ||
|
|
c1b9a76a54 | ||
|
|
c31e25af72 | ||
|
|
c8295d36cc | ||
|
|
b12c29479e | ||
|
|
1e71eebc74 | ||
|
|
fdc34a187e | ||
|
|
2db0a377e2 | ||
|
|
81707ef1ad | ||
|
|
a8ad13734e | ||
|
|
8b033d3946 | ||
|
|
ef8ae07145 | ||
|
|
6f31e8ebe9 | ||
|
|
2bc10db545 | ||
|
|
531265bc84 | ||
|
|
c1223bfd3a | ||
|
|
e9da790f76 | ||
|
|
fc26982132 | ||
|
|
e98caf5cf9 | ||
|
|
20218394b9 | ||
|
|
fc8f564558 | ||
|
|
0fa5808389 | ||
|
|
2fc7cd6e03 | ||
|
|
d90202399a | ||
|
|
f42fd6a3ad | ||
|
|
d85688892c | ||
|
|
bd554a6fea | ||
|
|
317bbec98c | ||
|
|
6e6dbdad95 | ||
|
|
0b887b841f | ||
|
|
c6e03d739d | ||
|
|
6a6e7934bd | ||
|
|
ba71e2a549 | ||
|
|
c51d53afba | ||
|
|
bd7fcd1123 | ||
|
|
254bc4908c | ||
|
|
24c1d95869 | ||
|
|
5e2d43843c | ||
|
|
6e4a954dbd | ||
|
|
31dedd8bdd | ||
|
|
62b47224f0 | ||
|
|
db7e5124f3 | ||
|
|
a6b9982a1e | ||
|
|
40b8c3c892 | ||
|
|
ae96fc0071 | ||
|
|
8709dc3c24 | ||
|
|
a8ea82f78f | ||
|
|
cd2cdb25e3 | ||
|
|
5185c9f70a | ||
|
|
f0dd63d5bc | ||
|
|
9549bd0539 | ||
|
|
f33f673b49 | ||
|
|
8cf84ea552 | ||
|
|
8a47791161 | ||
|
|
ab03f2310b | ||
|
|
94ecc515d3 | ||
|
|
098ec3dcaa | ||
|
|
4e0f4aa20a | ||
|
|
c45c16cead | ||
|
|
d0c499329e | ||
|
|
4c31741ac9 | ||
|
|
a7a0888446 | ||
|
|
4d69a57edd | ||
|
|
3906bd067e | ||
|
|
63941b8f6c | ||
|
|
75600e37ad | ||
|
|
8197d0e261 | ||
|
|
745e838673 | ||
|
|
8346186469 | ||
|
|
9a8020d1bf | ||
|
|
bee65e8354 | ||
|
|
104568b27b | ||
|
|
52975eca6f | ||
|
|
cd47829f3d | ||
|
|
4647875408 | ||
|
|
16213dd493 | ||
|
|
4d4ef4e0b3 | ||
|
|
882ef2ccd8 | ||
|
|
d6cd76c3c1 | ||
|
|
bd0be2cdc7 | ||
|
|
c8751980f9 | ||
|
|
4f83a18912 | ||
|
|
52c19bf96c | ||
|
|
81cbf558c3 | ||
|
|
511fc4a05c | ||
|
|
911cd60360 | ||
|
|
93372dd665 | ||
|
|
c22cc02df5 | ||
|
|
37ee6b0c19 | ||
|
|
efee2df310 | ||
|
|
d0102f0761 | ||
|
|
c3df62472b | ||
|
|
bef0926c58 | ||
|
|
c7c88bb4ff | ||
|
|
5aa82a5519 | ||
|
|
a8d7ebd987 | ||
|
|
00f61196a4 | ||
|
|
c21d6706b6 | ||
|
|
c3c5d91c47 | ||
|
|
8ffeaf2a54 | ||
|
|
ac56ef3d65 | ||
|
|
ae96e1af16 | ||
|
|
4e73e86d94 | ||
|
|
c26b1c8ee9 | ||
|
|
77c939b256 | ||
|
|
b76ac6e633 | ||
|
|
26ea27172e | ||
|
|
7fa4cd1214 | ||
|
|
bc7161b475 | ||
|
|
e0af1a44ea | ||
|
|
bc14c473c9 | ||
|
|
464ab3d727 | ||
|
|
f3df2a9fec | ||
|
|
c421d23c34 | ||
|
|
f353d9fbc0 | ||
|
|
09018855ce | ||
|
|
719954b02f | ||
|
|
67bc3fabe4 | ||
|
|
e724a346c7 | ||
|
|
87b4545b44 | ||
|
|
58a7844129 | ||
|
|
4353f7b9f9 | ||
|
|
8f8693e13e | ||
|
|
363a6563c7 | ||
|
|
59d6af73fa | ||
|
|
cd7f67018e | ||
|
|
b7e8770c4f | ||
|
|
ad4cc5d6df | ||
|
|
ca14ed68f7 | ||
|
|
71514cb380 | ||
|
|
5b2a0cc73d | ||
|
|
acd8dce595 | ||
|
|
b3b44b8e4c | ||
|
|
aaccd12d3d | ||
|
|
b4c49ed805 | ||
|
|
4e108e377e | ||
|
|
5231288b4e | ||
|
|
e40fc02561 | ||
|
|
06db577105 | ||
|
|
c24d5380e6 | ||
|
|
60351a8e07 | ||
|
|
d827809ffb | ||
|
|
77e594dc94 | ||
|
|
08bc4b830b | ||
|
|
4c2112dbfd | ||
|
|
ac222140e7 | ||
|
|
e3525d64b5 | ||
|
|
bd246fb011 | ||
|
|
f31d4c8a93 | ||
|
|
7995fc2ed2 | ||
|
|
a5fd91c705 | ||
|
|
0e9ccd36f3 | ||
|
|
4fd5d58076 | ||
|
|
17e03b29f9 | ||
|
|
e3ba302627 | ||
|
|
87c30ae6e4 | ||
|
|
8212f1bd45 | ||
|
|
95b2eaac04 | ||
|
|
dca3bbdea3 | ||
|
|
8ed7dfef6f | ||
|
|
631f5be02f | ||
|
|
4f4ea2a402 | ||
|
|
5a5bffebd1 | ||
|
|
ff151c8ea0 | ||
|
|
66af37b135 | ||
|
|
c59c9dd951 | ||
|
|
ce880af821 | ||
|
|
5578ccbb01 | ||
|
|
38a11c4c28 | ||
|
|
e2b790f732 | ||
|
|
b6a7db787f | ||
|
|
db2d02dc0b | ||
|
|
f59b43ac27 | ||
|
|
2527ef8055 | ||
|
|
417e6ce0c9 | ||
|
|
7d2a89427c | ||
|
|
036a4c4258 | ||
|
|
cfcc12692f | ||
|
|
be92b79b42 | ||
|
|
417e8d80c3 | ||
|
|
8749bc0844 | ||
|
|
f3d0c63db2 | ||
|
|
93a846db31 | ||
|
|
77409737ce | ||
|
|
2594282082 | ||
|
|
8ad11c0bc8 | ||
|
|
c571bb485d | ||
|
|
8eb65ed946 | ||
|
|
9a0bb325f2 | ||
|
|
d64bf88e26 | ||
|
|
c8e096e089 | ||
|
|
b638728d6c | ||
|
|
1d5526c71d | ||
|
|
bc8d5923e7 | ||
|
|
a6bd1d22a0 | ||
|
|
c128e881c1 | ||
|
|
a15f996744 | ||
|
|
b3c1c9c954 | ||
|
|
ec437313a7 | ||
|
|
5945db0fa8 | ||
|
|
8574cb67a4 | ||
|
|
ae9d619d86 | ||
|
|
686c25d50f | ||
|
|
7d9aa8e0a9 | ||
|
|
d0e87929a1 | ||
|
|
ae63d35660 | ||
|
|
b9c502531d | ||
|
|
ef6555f084 | ||
|
|
1344099e29 | ||
|
|
48f3d41947 | ||
|
|
7da8e6be3b | ||
|
|
4795fd874f | ||
|
|
276fee105b | ||
|
|
8af4ae7935 | ||
|
|
91ceb33d5a | ||
|
|
0b4d9401ee | ||
|
|
889d3636f4 | ||
|
|
b0f945d2da | ||
|
|
93c529182e | ||
|
|
412034a023 | ||
|
|
30c9e7323a | ||
|
|
051f56a1e6 | ||
|
|
b5ce23885b | ||
|
|
0fdb8bf814 | ||
|
|
e538923545 | ||
|
|
c0f65a9f4c | ||
|
|
b0cb31c186 | ||
|
|
3080244c0b | ||
|
|
b183a66db1 | ||
|
|
022f81711e | ||
|
|
4f40d0c168 | ||
|
|
f5ac1fa073 | ||
|
|
f79eb7b9ad | ||
|
|
b0b6ed31c4 | ||
|
|
ea7ee301c0 | ||
|
|
41315b32cb | ||
|
|
80eaf4cc9f | ||
|
|
ef4eb66723 | ||
|
|
82bc7cd5ba | ||
|
|
3304c68891 | ||
|
|
1e6d912fbc | ||
|
|
4ef3bbcdf2 | ||
|
|
62059d55e6 | ||
|
|
ed50a0a3a0 | ||
|
|
28d2aff8b8 | ||
|
|
862971cffa | ||
|
|
c8022457eb | ||
|
|
9bbf1a33d1 | ||
|
|
0ac8c3cb03 | ||
|
|
d5c9626040 | ||
|
|
ad9c5ad938 | ||
|
|
70d2e64dfe | ||
|
|
1dccd58209 | ||
|
|
e0103ac0d4 | ||
|
|
4d745ab87a | ||
|
|
417d9c2d51 | ||
|
|
4997facbb4 | ||
|
|
df87bd91c8 | ||
|
|
1748b0f81a | ||
|
|
6829efd4d3 | ||
|
|
99f2a3a583 | ||
|
|
0f2f9db6d8 | ||
|
|
d6223371d6 | ||
|
|
4817d5944c | ||
|
|
f626fc5ce8 | ||
|
|
1579a125a3 | ||
|
|
7fb9ec29c4 | ||
|
|
f78e89f265 | ||
|
|
b06294399a | ||
|
|
b8e0aed21c | ||
|
|
4bce388b51 | ||
|
|
6c95eed781 | ||
|
|
4f366d8355 | ||
|
|
878d90febf | ||
|
|
1a77feea3f | ||
|
|
7e0ab0714f | ||
|
|
2e6bc078fb | ||
|
|
3dd860f0ba | ||
|
|
de800dea0f | ||
|
|
fed4377ef6 | ||
|
|
021bf46557 | ||
|
|
b7e5116267 | ||
|
|
9532e5a4f2 | ||
|
|
e5a118e3ce | ||
|
|
a631c1eddb | ||
|
|
855855a0e6 | ||
|
|
b808674132 | ||
|
|
7e574fca71 | ||
|
|
7d617d60bc | ||
|
|
da71e01d30 | ||
|
|
a89bc304f6 | ||
|
|
240495f07f | ||
|
|
2e19887bf1 | ||
|
|
a982e516fc | ||
|
|
3af56e1602 | ||
|
|
16f8106149 | ||
|
|
86a08c00f2 | ||
|
|
0ca9fa8b2b | ||
|
|
688f82c1cf | ||
|
|
9a44708081 | ||
|
|
89789c54ad | ||
|
|
d17c4d27fa | ||
|
|
25ee3f0033 | ||
|
|
8098b65576 | ||
|
|
fb81c37ca6 | ||
|
|
e6ec76161d | ||
|
|
b317071cf3 | ||
|
|
bb922d63f5 | ||
|
|
2291fa2d45 | ||
|
|
3c6652c101 | ||
|
|
43af1684c1 | ||
|
|
2cdde099fa | ||
|
|
310c7623f9 | ||
|
|
6e64802545 | ||
|
|
8b5015c10f | ||
|
|
ed549155b3 | ||
|
|
24a6c240fc | ||
|
|
15b763acdb | ||
|
|
3cd116b99d | ||
|
|
b559653c8c | ||
|
|
5f17423d3e | ||
|
|
2a442ec6d9 | ||
|
|
ceff5cb9ca | ||
|
|
4926f8aef6 | ||
|
|
927115c3d4 | ||
|
|
1d7247b67e | ||
|
|
a1cd00e3f0 | ||
|
|
fd568b389d | ||
|
|
4f95e81c6d | ||
|
|
619e783a05 | ||
|
|
f3f7f000fe | ||
|
|
219751abc7 | ||
|
|
0b48e137e8 | ||
|
|
947742852e | ||
|
|
94277a3eb0 | ||
|
|
11a2100d53 | ||
|
|
530d7af5cf | ||
|
|
502d58abe6 | ||
|
|
eb073c53f4 | ||
|
|
d866415b9a | ||
|
|
dd40658f87 | ||
|
|
b3382ffd4f | ||
|
|
15a0fac939 | ||
|
|
e3cff5d17d | ||
|
|
0f70b8638f | ||
|
|
8584525e8d | ||
|
|
e3e2247159 | ||
|
|
0650c4485a | ||
|
|
e708f4f59d | ||
|
|
907918b492 | ||
|
|
6dbe321a45 | ||
|
|
2a415ede08 | ||
|
|
43369cda9c | ||
|
|
a2557a3354 | ||
|
|
1a14449fe6 | ||
|
|
0b89ae6f20 | ||
|
|
cec6dc28bb | ||
|
|
853ce255ea | ||
|
|
db852e14e4 | ||
|
|
98dd0cd1f8 | ||
|
|
c441ac90d6 | ||
|
|
9d7b54d8fd | ||
|
|
59f04c6e29 | ||
|
|
367ae0c848 | ||
|
|
4e0f555c58 | ||
|
|
baf18decae | ||
|
|
826b8ca4d3 | ||
|
|
fa2fb6bd38 | ||
|
|
f9ea5ecf40 | ||
|
|
99f7eba930 | ||
|
|
e1b27acd02 | ||
|
|
f393447ac9 |
@@ -1,2 +1,4 @@
|
||||
.git
|
||||
build
|
||||
dist
|
||||
venv
|
||||
|
||||
132
CHANGES.md
132
CHANGES.md
@@ -1,6 +1,138 @@
|
||||
Change log
|
||||
==========
|
||||
|
||||
1.4.1 (2015-09-10)
|
||||
------------------
|
||||
|
||||
The following bugs have been fixed:
|
||||
|
||||
- Some configuration changes (notably changes to `links`, `volumes_from`, and
|
||||
`net`) were not properly triggering a container recreate as part of
|
||||
`docker-compose up`.
|
||||
- `docker-compose up <service>` was showing logs for all services instead of
|
||||
just the specified services.
|
||||
- Containers with custom container names were showing up in logs as
|
||||
`service_number` instead of their custom container name.
|
||||
- When scaling a service sometimes containers would be recreated even when
|
||||
the configuration had not changed.
|
||||
|
||||
|
||||
1.4.0 (2015-08-04)
|
||||
------------------
|
||||
|
||||
- By default, `docker-compose up` now only recreates containers for services whose configuration has changed since they were created. This should result in a dramatic speed-up for many applications.
|
||||
|
||||
The experimental `--x-smart-recreate` flag which introduced this feature in Compose 1.3.0 has been removed, and a `--force-recreate` flag has been added for when you want to recreate everything.
|
||||
|
||||
- Several of Compose's commands - `scale`, `stop`, `kill` and `rm` - now perform actions on multiple containers in parallel, rather than in sequence, which will run much faster on larger applications.
|
||||
|
||||
- You can now specify a custom name for a service's container with `container_name`. Because Docker container names must be unique, this means you can't scale the service beyond one container.
|
||||
|
||||
- You no longer have to specify a `file` option when using `extends` - it will default to the current file.
|
||||
|
||||
- Service names can now contain dots, dashes and underscores.
|
||||
|
||||
- Compose can now read YAML configuration from standard input, rather than from a file, by specifying `-` as the filename. This makes it easier to generate configuration dynamically:
|
||||
|
||||
$ echo 'redis: {"image": "redis"}' | docker-compose --file - up
|
||||
|
||||
- There's a new `docker-compose version` command which prints extended information about Compose's bundled dependencies.
|
||||
|
||||
- `docker-compose.yml` now supports `log_opt` as well as `log_driver`, allowing you to pass extra configuration to a service's logging driver.
|
||||
|
||||
- `docker-compose.yml` now supports `memswap_limit`, similar to `docker run --memory-swap`.
|
||||
|
||||
- When mounting volumes with the `volumes` option, you can now pass in any mode supported by the daemon, not just `:ro` or `:rw`. For example, SELinux users can pass `:z` or `:Z`.
|
||||
|
||||
- You can now specify a custom volume driver with the `volume_driver` option in `docker-compose.yml`, much like `docker run --volume-driver`.
|
||||
|
||||
- A bug has been fixed where Compose would fail to pull images from private registries serving plain (unsecured) HTTP. The `--allow-insecure-ssl` flag, which was previously used to work around this issue, has been deprecated and now has no effect.
|
||||
|
||||
- A bug has been fixed where `docker-compose build` would fail if the build depended on a private Hub image or an image from a private registry.
|
||||
|
||||
- A bug has been fixed where Compose would crash if there were containers which the Docker daemon had not finished removing.
|
||||
|
||||
- Two bugs have been fixed where Compose would sometimes fail with a "Duplicate bind mount" error, or fail to attach volumes to a container, if there was a volume path specified in `docker-compose.yml` with a trailing slash.
|
||||
|
||||
Thanks @mnowster, @dnephin, @ekristen, @funkyfuture, @jeffk and @lukemarsden!
|
||||
|
||||
1.3.3 (2015-07-15)
|
||||
------------------
|
||||
|
||||
Two regressions have been fixed:
|
||||
|
||||
- When stopping containers gracefully, Compose was setting the timeout to 0, effectively forcing a SIGKILL every time.
|
||||
- Compose would sometimes crash depending on the formatting of container data returned from the Docker API.
|
||||
|
||||
1.3.2 (2015-07-14)
|
||||
------------------
|
||||
|
||||
The following bugs have been fixed:
|
||||
|
||||
- When there were one-off containers created by running `docker-compose run` on an older version of Compose, `docker-compose run` would fail with a name collision. Compose now shows an error if you have leftover containers of this type lying around, and tells you how to remove them.
|
||||
- Compose was not reading Docker authentication config files created in the new location, `~/docker/config.json`, and authentication against private registries would therefore fail.
|
||||
- When a container had a pseudo-TTY attached, its output in `docker-compose up` would be truncated.
|
||||
- `docker-compose up --x-smart-recreate` would sometimes fail when an image tag was updated.
|
||||
- `docker-compose up` would sometimes create two containers with the same numeric suffix.
|
||||
- `docker-compose rm` and `docker-compose ps` would sometimes list services that aren't part of the current project (though no containers were erroneously removed).
|
||||
- Some `docker-compose` commands would not show an error if invalid service names were passed in.
|
||||
|
||||
Thanks @dano, @josephpage, @kevinsimper, @lieryan, @phemmer, @soulrebel and @sschepens!
|
||||
|
||||
1.3.1 (2015-06-21)
|
||||
------------------
|
||||
|
||||
The following bugs have been fixed:
|
||||
|
||||
- `docker-compose build` would always attempt to pull the base image before building.
|
||||
- `docker-compose help migrate-to-labels` failed with an error.
|
||||
- If no network mode was specified, Compose would set it to "bridge", rather than allowing the Docker daemon to use its configured default network mode.
|
||||
|
||||
1.3.0 (2015-06-18)
|
||||
------------------
|
||||
|
||||
Firstly, two important notes:
|
||||
|
||||
- **This release contains breaking changes, and you will need to either remove or migrate your existing containers before running your app** - see the [upgrading section of the install docs](https://github.com/docker/compose/blob/1.3.0rc1/docs/install.md#upgrading) for details.
|
||||
|
||||
- Compose now requires Docker 1.6.0 or later.
|
||||
|
||||
We've done a lot of work in this release to remove hacks and make Compose more stable:
|
||||
|
||||
- Compose now uses container labels, rather than names, to keep track of containers. This makes Compose both faster and easier to integrate with your own tools.
|
||||
|
||||
- Compose no longer uses "intermediate containers" when recreating containers for a service. This makes `docker-compose up` less complex and more resilient to failure.
|
||||
|
||||
There are some new features:
|
||||
|
||||
- `docker-compose up` has an **experimental** new behaviour: it will only recreate containers for services whose configuration has changed in `docker-compose.yml`. This will eventually become the default, but for now you can take it for a spin:
|
||||
|
||||
$ docker-compose up --x-smart-recreate
|
||||
|
||||
- When invoked in a subdirectory of a project, `docker-compose` will now climb up through parent directories until it finds a `docker-compose.yml`.
|
||||
|
||||
Several new configuration keys have been added to `docker-compose.yml`:
|
||||
|
||||
- `dockerfile`, like `docker build --file`, lets you specify an alternate Dockerfile to use with `build`.
|
||||
- `labels`, like `docker run --labels`, lets you add custom metadata to containers.
|
||||
- `extra_hosts`, like `docker run --add-host`, lets you add entries to a container's `/etc/hosts` file.
|
||||
- `pid: host`, like `docker run --pid=host`, lets you reuse the same PID namespace as the host machine.
|
||||
- `cpuset`, like `docker run --cpuset-cpus`, lets you specify which CPUs to allow execution in.
|
||||
- `read_only`, like `docker run --read-only`, lets you mount a container's filesystem as read-only.
|
||||
- `security_opt`, like `docker run --security-opt`, lets you specify [security options](https://docs.docker.com/reference/run/#security-configuration).
|
||||
- `log_driver`, like `docker run --log-driver`, lets you specify a [log driver](https://docs.docker.com/reference/run/#logging-drivers-log-driver).
|
||||
|
||||
Many bugs have been fixed, including the following:
|
||||
|
||||
- The output of `docker-compose run` was sometimes truncated, especially when running under Jenkins.
|
||||
- A service's volumes would sometimes not update after volume configuration was changed in `docker-compose.yml`.
|
||||
- Authenticating against third-party registries would sometimes fail.
|
||||
- `docker-compose run --rm` would fail to remove the container if the service had a `restart` policy in place.
|
||||
- `docker-compose scale` would refuse to scale a service beyond 1 container if it exposed a specific port number on the host.
|
||||
- Compose would refuse to create multiple volume entries with the same host path.
|
||||
|
||||
Thanks @ahromis, @albers, @aleksandr-vin, @antoineco, @ccverak, @chernjie, @dnephin, @edmorley, @fordhurley, @josephpage, @KyleJamesWalker, @lsowen, @mchasal, @noironetworks, @sdake, @sdurrheimer, @sherter, @stephenlawrence, @thaJeztah, @thieman, @turtlemonvh, @twhiteman, @vdemeester, @xuxinkun and @zwily!
|
||||
|
||||
1.2.0 (2015-04-16)
|
||||
------------------
|
||||
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
# Contributing to Compose
|
||||
|
||||
Compose is a part of the Docker project, and follows the same rules and principles. Take a read of [Docker's contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) to get an overview.
|
||||
Compose is a part of the Docker project, and follows the same rules and
|
||||
principles. Take a read of [Docker's contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md)
|
||||
to get an overview.
|
||||
|
||||
## TL;DR
|
||||
|
||||
@@ -17,60 +19,45 @@ If you're looking contribute to Compose
|
||||
but you're new to the project or maybe even to Python, here are the steps
|
||||
that should get you started.
|
||||
|
||||
1. Fork [https://github.com/docker/compose](https://github.com/docker/compose) to your username.
|
||||
1. Clone your forked repository locally `git clone git@github.com:yourusername/compose.git`.
|
||||
1. Enter the local directory `cd compose`.
|
||||
1. Set up a development environment by running `python setup.py develop`. This will install the dependencies and set up a symlink from your `docker-compose` executable to the checkout of the repository. When you now run `docker-compose` from anywhere on your machine, it will run your development version of Compose.
|
||||
1. Fork [https://github.com/docker/compose](https://github.com/docker/compose)
|
||||
to your username.
|
||||
2. Clone your forked repository locally `git clone git@github.com:yourusername/compose.git`.
|
||||
3. You must [configure a remote](https://help.github.com/articles/configuring-a-remote-for-a-fork/) for your fork so that you can [sync changes you make](https://help.github.com/articles/syncing-a-fork/) with the original repository.
|
||||
4. Enter the local directory `cd compose`.
|
||||
5. Set up a development environment by running `python setup.py develop`. This
|
||||
will install the dependencies and set up a symlink from your `docker-compose`
|
||||
executable to the checkout of the repository. When you now run
|
||||
`docker-compose` from anywhere on your machine, it will run your development
|
||||
version of Compose.
|
||||
|
||||
## Submitting a pull request
|
||||
|
||||
See Docker's [basic contribution workflow](https://docs.docker.com/project/make-a-contribution/#the-basic-contribution-workflow) for a guide on how to submit a pull request for code or documentation.
|
||||
|
||||
## Running the test suite
|
||||
|
||||
Use the test script to run linting checks and then the full test suite:
|
||||
Use the test script to run linting checks and then the full test suite against
|
||||
different Python interpreters:
|
||||
|
||||
$ script/test
|
||||
|
||||
Tests are run against a Docker daemon inside a container, so that we can test against multiple Docker versions. By default they'll run against only the latest Docker version - set the `DOCKER_VERSIONS` environment variable to "all" to run against all supported versions:
|
||||
Tests are run against a Docker daemon inside a container, so that we can test
|
||||
against multiple Docker versions. By default they'll run against only the latest
|
||||
Docker version - set the `DOCKER_VERSIONS` environment variable to "all" to run
|
||||
against all supported versions:
|
||||
|
||||
$ DOCKER_VERSIONS=all script/test
|
||||
|
||||
Arguments to `script/test` are passed through to the `nosetests` executable, so you can specify a test directory, file, module, class or method:
|
||||
Arguments to `script/test` are passed through to the `nosetests` executable, so
|
||||
you can specify a test directory, file, module, class or method:
|
||||
|
||||
$ script/test tests/unit
|
||||
$ script/test tests/unit/cli_test.py
|
||||
$ script/test tests.integration.service_test
|
||||
$ script/test tests.integration.service_test:ServiceTest.test_containers
|
||||
|
||||
## Building binaries
|
||||
## Finding things to work on
|
||||
|
||||
Linux:
|
||||
We use a [Waffle.io board](https://waffle.io/docker/compose) to keep track of specific things we are working on and planning to work on. If you're looking for things to work on, stuff in the backlog is a great place to start.
|
||||
|
||||
$ script/build-linux
|
||||
|
||||
OS X:
|
||||
|
||||
$ script/build-osx
|
||||
|
||||
Note that this only works on Mountain Lion, not Mavericks, due to a [bug in PyInstaller](http://www.pyinstaller.org/ticket/807).
|
||||
|
||||
## Release process
|
||||
|
||||
1. Open pull request that:
|
||||
|
||||
- Updates the version in `compose/__init__.py`
|
||||
- Updates the binary URL in `docs/install.md`
|
||||
- Updates the script URL in `docs/completion.md`
|
||||
- Adds release notes to `CHANGES.md`
|
||||
|
||||
2. Create unpublished GitHub release with release notes
|
||||
|
||||
3. Build Linux version on any Docker host with `script/build-linux` and attach to release
|
||||
|
||||
4. Build OS X version on Mountain Lion with `script/build-osx` and attach to release as `docker-compose-Darwin-x86_64` and `docker-compose-Linux-x86_64`.
|
||||
|
||||
5. Publish GitHub release, creating tag
|
||||
|
||||
6. Update website with `script/deploy-docs`
|
||||
|
||||
7. Upload PyPi package
|
||||
|
||||
$ git checkout $VERSION
|
||||
$ python setup.py sdist upload
|
||||
For more information about our project planning, take a look at our [GitHub wiki](https://github.com/docker/compose/wiki).
|
||||
|
||||
51
Dockerfile
51
Dockerfile
@@ -3,9 +3,11 @@ FROM debian:wheezy
|
||||
RUN set -ex; \
|
||||
apt-get update -qq; \
|
||||
apt-get install -y \
|
||||
python \
|
||||
python-pip \
|
||||
python-dev \
|
||||
gcc \
|
||||
make \
|
||||
zlib1g \
|
||||
zlib1g-dev \
|
||||
libssl-dev \
|
||||
git \
|
||||
apt-transport-https \
|
||||
ca-certificates \
|
||||
@@ -15,16 +17,47 @@ RUN set -ex; \
|
||||
; \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV ALL_DOCKER_VERSIONS 1.3.3 1.4.1 1.5.0
|
||||
# Build Python 2.7.9 from source
|
||||
RUN set -ex; \
|
||||
curl -LO https://www.python.org/ftp/python/2.7.9/Python-2.7.9.tgz; \
|
||||
tar -xzf Python-2.7.9.tgz; \
|
||||
cd Python-2.7.9; \
|
||||
./configure --enable-shared; \
|
||||
make; \
|
||||
make install; \
|
||||
cd ..; \
|
||||
rm -rf /Python-2.7.9; \
|
||||
rm Python-2.7.9.tgz
|
||||
|
||||
# Make libpython findable
|
||||
ENV LD_LIBRARY_PATH /usr/local/lib
|
||||
|
||||
# Install setuptools
|
||||
RUN set -ex; \
|
||||
curl -LO https://bootstrap.pypa.io/ez_setup.py; \
|
||||
python ez_setup.py; \
|
||||
rm ez_setup.py
|
||||
|
||||
# Install pip
|
||||
RUN set -ex; \
|
||||
curl -LO https://pypi.python.org/packages/source/p/pip/pip-7.0.1.tar.gz; \
|
||||
tar -xzf pip-7.0.1.tar.gz; \
|
||||
cd pip-7.0.1; \
|
||||
python setup.py install; \
|
||||
cd ..; \
|
||||
rm -rf pip-7.0.1; \
|
||||
rm pip-7.0.1.tar.gz
|
||||
|
||||
ENV ALL_DOCKER_VERSIONS 1.7.1 1.8.0-rc3
|
||||
|
||||
RUN set -ex; \
|
||||
for v in ${ALL_DOCKER_VERSIONS}; do \
|
||||
curl https://get.docker.com/builds/Linux/x86_64/docker-$v -o /usr/local/bin/docker-$v; \
|
||||
chmod +x /usr/local/bin/docker-$v; \
|
||||
done
|
||||
curl https://get.docker.com/builds/Linux/x86_64/docker-1.7.1 -o /usr/local/bin/docker-1.7.1; \
|
||||
chmod +x /usr/local/bin/docker-1.7.1; \
|
||||
curl https://test.docker.com/builds/Linux/x86_64/docker-1.8.0-rc3 -o /usr/local/bin/docker-1.8.0-rc3; \
|
||||
chmod +x /usr/local/bin/docker-1.8.0-rc3
|
||||
|
||||
# Set the default Docker to be run
|
||||
RUN ln -s /usr/local/bin/docker-1.3.3 /usr/local/bin/docker
|
||||
RUN ln -s /usr/local/bin/docker-1.7.1 /usr/local/bin/docker
|
||||
|
||||
RUN useradd -d /home/user -m -s /bin/bash user
|
||||
WORKDIR /code/
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
Aanand Prasad <aanand.prasad@gmail.com> (@aanand)
|
||||
Ben Firshman <ben@firshman.co.uk> (@bfirsh)
|
||||
Daniel Nephin <dnephin@gmail.com> (@dnephin)
|
||||
Mazz Mosley <mazz@houseofmnowster.com> (@mnowster)
|
||||
|
||||
@@ -4,7 +4,7 @@ include requirements.txt
|
||||
include requirements-dev.txt
|
||||
include tox.ini
|
||||
include *.md
|
||||
include contrib/completion/bash/docker-compose
|
||||
recursive-include contrib/completion *
|
||||
recursive-include tests *
|
||||
global-exclude *.pyc
|
||||
global-exclude *.pyo
|
||||
|
||||
62
README.md
62
README.md
@@ -1,45 +1,35 @@
|
||||
Docker Compose
|
||||
==============
|
||||
[](http://jenkins.dockerproject.com/job/Compose%20Master/)
|
||||
*(Previously known as Fig)*
|
||||
|
||||
Compose is a tool for defining and running complex applications with Docker.
|
||||
With Compose, you define a multi-container application in a single file, then
|
||||
spin your application up in a single command which does everything that needs to
|
||||
be done to get it running.
|
||||
Compose is a tool for defining and running multi-container applications with
|
||||
Docker. With Compose, you define a multi-container application in a single
|
||||
file, then spin your application up in a single command which does everything
|
||||
that needs to be done to get it running.
|
||||
|
||||
Compose is great for development environments, staging servers, and CI. We don't
|
||||
recommend that you use it in production yet.
|
||||
|
||||
Using Compose is basically a three-step process.
|
||||
|
||||
First, you define your app's environment with a `Dockerfile` so it can be
|
||||
reproduced anywhere:
|
||||
|
||||
```Dockerfile
|
||||
FROM python:2.7
|
||||
WORKDIR /code
|
||||
ADD requirements.txt /code/
|
||||
RUN pip install -r requirements.txt
|
||||
ADD . /code
|
||||
CMD python app.py
|
||||
```
|
||||
|
||||
Next, you define the services that make up your app in `docker-compose.yml` so
|
||||
1. Define your app's environment with a `Dockerfile` so it can be
|
||||
reproduced anywhere.
|
||||
2. Define the services that make up your app in `docker-compose.yml` so
|
||||
they can be run together in an isolated environment:
|
||||
3. Lastly, run `docker-compose up` and Compose will start and run your entire app.
|
||||
|
||||
```yaml
|
||||
web:
|
||||
build: .
|
||||
links:
|
||||
- db
|
||||
ports:
|
||||
- "8000:8000"
|
||||
db:
|
||||
image: postgres
|
||||
```
|
||||
A `docker-compose.yml` looks like this:
|
||||
|
||||
Lastly, run `docker-compose up` and Compose will start and run your entire app.
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "5000:5000"
|
||||
volumes:
|
||||
- .:/code
|
||||
links:
|
||||
- redis
|
||||
redis:
|
||||
image: redis
|
||||
|
||||
Compose has commands for managing the whole lifecycle of your application:
|
||||
|
||||
@@ -52,4 +42,16 @@ Installation and documentation
|
||||
------------------------------
|
||||
|
||||
- Full documentation is available on [Docker's website](http://docs.docker.com/compose/).
|
||||
- Hop into #docker-compose on Freenode if you have any questions.
|
||||
- If you have any questions, you can talk in real-time with other developers in the #docker-compose IRC channel on Freenode. [Click here to join using IRCCloud.](https://www.irccloud.com/invite?hostname=irc.freenode.net&channel=%23docker-compose)
|
||||
|
||||
Contributing
|
||||
------------
|
||||
|
||||
[](http://jenkins.dockerproject.org/job/Compose%20Master/)
|
||||
|
||||
Want to help build Compose? Check out our [contributing documentation](https://github.com/docker/compose/blob/master/CONTRIBUTING.md).
|
||||
|
||||
Releasing
|
||||
---------
|
||||
|
||||
Releases are built by maintainers, following an outline of the [release process](https://github.com/docker/compose/blob/master/RELEASE_PROCESS.md).
|
||||
36
RELEASE_PROCESS.md
Normal file
36
RELEASE_PROCESS.md
Normal file
@@ -0,0 +1,36 @@
|
||||
# Building a Compose release
|
||||
|
||||
## Building binaries
|
||||
|
||||
`script/build-linux` builds the Linux binary inside a Docker container:
|
||||
|
||||
$ script/build-linux
|
||||
|
||||
`script/build-osx` builds the Mac OS X binary inside a virtualenv:
|
||||
|
||||
$ script/build-osx
|
||||
|
||||
For official releases, you should build inside a Mountain Lion VM for proper
|
||||
compatibility. Run the this script first to prepare the environment before
|
||||
building - it will use Homebrew to make sure Python is installed and
|
||||
up-to-date.
|
||||
|
||||
$ script/prepare-osx
|
||||
|
||||
## Release process
|
||||
|
||||
1. Open pull request that:
|
||||
- Updates the version in `compose/__init__.py`
|
||||
- Updates the binary URL in `docs/install.md`
|
||||
- Adds release notes to `CHANGES.md`
|
||||
2. Create unpublished GitHub release with release notes
|
||||
3. Build Linux version on any Docker host with `script/build-linux` and attach
|
||||
to release
|
||||
4. Build OS X version on Mountain Lion with `script/build-osx` and attach to
|
||||
release as `docker-compose-Darwin-x86_64` and `docker-compose-Linux-x86_64`.
|
||||
5. Publish GitHub release, creating tag
|
||||
6. Update website with `script/deploy-docs`
|
||||
7. Upload PyPi package
|
||||
|
||||
$ git checkout $VERSION
|
||||
$ python setup.py sdist upload
|
||||
@@ -4,9 +4,12 @@
|
||||
|
||||
Over time we will extend Compose's remit to cover test, staging and production environments. This is not a simple task, and will take many incremental improvements such as:
|
||||
|
||||
- Compose’s brute-force “delete and recreate everything” approach is great for dev and testing, but it not sufficient for production environments. You should be able to define a "desired" state that Compose will intelligently converge to.
|
||||
- It should be possible to partially modify the config file for different environments (dev/test/staging/prod), passing in e.g. custom ports or volume mount paths. ([#426](https://github.com/docker/fig/issues/426))
|
||||
- Compose currently will attempt to get your application into the correct state when running `up`, but it has a number of shortcomings:
|
||||
- It should roll back to a known good state if it fails.
|
||||
- It should allow a user to check the actions it is about to perform before running them.
|
||||
- It should be possible to partially modify the config file for different environments (dev/test/staging/prod), passing in e.g. custom ports or volume mount paths. ([#1377](https://github.com/docker/compose/issues/1377))
|
||||
- Compose should recommend a technique for zero-downtime deploys.
|
||||
- It should be possible to continuously attempt to keep an application in the correct state, instead of just performing `up` a single time.
|
||||
|
||||
## Integration with Swarm
|
||||
|
||||
|
||||
56
SWARM.md
56
SWARM.md
@@ -3,49 +3,37 @@ Docker Compose/Swarm integration
|
||||
|
||||
Eventually, Compose and Swarm aim to have full integration, meaning you can point a Compose app at a Swarm cluster and have it all just work as if you were using a single Docker host.
|
||||
|
||||
However, the current extent of integration is minimal: Compose can create containers on a Swarm cluster, but the majority of Compose apps won’t work out of the box unless all containers are scheduled on one host, defeating much of the purpose of using Swarm in the first place.
|
||||
However, integration is currently incomplete: Compose can create containers on a Swarm cluster, but the majority of Compose apps won’t work out of the box unless all containers are scheduled on one host, because links between containers do not work across hosts.
|
||||
|
||||
Still, Compose and Swarm can be useful in a “batch processing” scenario (where a large number of containers need to be spun up and down to do independent computation) or a “shared cluster” scenario (where multiple teams want to deploy apps on a cluster without worrying about where to put them).
|
||||
Docker networking is [getting overhauled](https://github.com/docker/libnetwork) in such a way that it’ll fit the multi-host model much better. For now, linked containers are automatically scheduled on the same host.
|
||||
|
||||
A number of things need to happen before full integration is achieved, which are documented below.
|
||||
Building
|
||||
--------
|
||||
|
||||
Re-deploying containers with `docker-compose up`
|
||||
------------------------------------------------
|
||||
Swarm can build an image from a Dockerfile just like a single-host Docker instance can, but the resulting image will only live on a single node and won't be distributed to other nodes.
|
||||
|
||||
Repeated invocations of `docker-compose up` will not work reliably when used against a Swarm cluster because of an under-the-hood design problem; [this will be fixed](https://github.com/docker/fig/pull/972) in the next version of Compose. For now, containers must be completely removed and re-created:
|
||||
If you want to use Compose to scale the service in question to multiple nodes, you'll have to build it yourself, push it to a registry (e.g. the Docker Hub) and reference it from `docker-compose.yml`:
|
||||
|
||||
$ docker-compose kill
|
||||
$ docker-compose rm --force
|
||||
$ docker-compose up
|
||||
$ docker build -t myusername/web .
|
||||
$ docker push myusername/web
|
||||
|
||||
Links and networking
|
||||
--------------------
|
||||
$ cat docker-compose.yml
|
||||
web:
|
||||
image: myusername/web
|
||||
|
||||
The primary thing stopping multi-container apps from working seamlessly on Swarm is getting them to talk to one another: enabling private communication between containers on different hosts hasn’t been solved in a non-hacky way.
|
||||
$ docker-compose up -d
|
||||
$ docker-compose scale web=3
|
||||
|
||||
Long-term, networking is [getting overhauled](https://github.com/docker/docker/issues/9983) in such a way that it’ll fit the multi-host model much better. For now, containers on different hosts cannot be linked. In the next version of Compose, linked services will be automatically scheduled on the same host; for now, this must be done manually (see “Co-scheduling containers” below).
|
||||
Scheduling
|
||||
----------
|
||||
|
||||
`volumes_from` and `net: container`
|
||||
-----------------------------------
|
||||
Swarm offers a rich set of scheduling and affinity hints, enabling you to control where containers are located. They are specified via container environment variables, so you can use Compose's `environment` option to set them.
|
||||
|
||||
For containers to share volumes or a network namespace, they must be scheduled on the same host - this is, after all, inherent to how both volumes and network namespaces work. In the next version of Compose, this co-scheduling will be automatic whenever `volumes_from` or `net: "container:..."` is specified; for now, containers which share volumes or a network namespace must be co-scheduled manually (see “Co-scheduling containers” below).
|
||||
environment:
|
||||
# Schedule containers on a node that has the 'storage' label set to 'ssd'
|
||||
- "constraint:storage==ssd"
|
||||
|
||||
Co-scheduling containers
|
||||
------------------------
|
||||
# Schedule containers where the 'redis' image is already pulled
|
||||
- "affinity:image==redis"
|
||||
|
||||
For now, containers can be manually scheduled on the same host using Swarm’s [affinity filters](https://github.com/docker/swarm/blob/master/scheduler/filter/README.md#affinity-filter). Here’s a simple example:
|
||||
|
||||
```yaml
|
||||
web:
|
||||
image: my-web-image
|
||||
links: ["db"]
|
||||
environment:
|
||||
- "affinity:container==myproject_db_*"
|
||||
db:
|
||||
image: postgres
|
||||
```
|
||||
|
||||
Here, we express an affinity filter on all web containers, saying that each one must run alongside a container whose name begins with `myproject_db_`.
|
||||
|
||||
- `myproject` is the common prefix Compose gives to all containers in your project, which is either generated from the name of the current directory or specified with `-p` or the `DOCKER_COMPOSE_PROJECT_NAME` environment variable.
|
||||
- `*` is a wildcard, which works just like filename wildcards in a Unix shell.
|
||||
For the full set of available filters and expressions, see the [Swarm documentation](https://docs.docker.com/swarm/scheduler/filter/).
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
from __future__ import unicode_literals
|
||||
from .service import Service # noqa:flake8
|
||||
|
||||
__version__ = '1.2.0'
|
||||
__version__ = '1.4.1'
|
||||
|
||||
@@ -41,7 +41,7 @@ class Command(DocoptCommand):
|
||||
raise errors.ConnectionErrorGeneric(self.get_client().base_url)
|
||||
|
||||
def perform_command(self, options, handler, command_options):
|
||||
if options['COMMAND'] == 'help':
|
||||
if options['COMMAND'] in ('help', 'version'):
|
||||
# Skip looking up the compose file.
|
||||
handler(None, command_options)
|
||||
return
|
||||
@@ -52,7 +52,7 @@ class Command(DocoptCommand):
|
||||
|
||||
explicit_config_path = options.get('--file') or os.environ.get('COMPOSE_FILE') or os.environ.get('FIG_FILE')
|
||||
project = self.get_project(
|
||||
self.get_config_path(explicit_config_path),
|
||||
explicit_config_path,
|
||||
project_name=options.get('--project-name'),
|
||||
verbose=options.get('--verbose'))
|
||||
|
||||
@@ -69,16 +69,18 @@ class Command(DocoptCommand):
|
||||
return verbose_proxy.VerboseProxy('docker', client)
|
||||
return client
|
||||
|
||||
def get_project(self, config_path, project_name=None, verbose=False):
|
||||
def get_project(self, config_path=None, project_name=None, verbose=False):
|
||||
config_details = config.find(self.base_dir, config_path)
|
||||
|
||||
try:
|
||||
return Project.from_dicts(
|
||||
self.get_project_name(config_path, project_name),
|
||||
config.load(config_path),
|
||||
self.get_project_name(config_details.working_dir, project_name),
|
||||
config.load(config_details),
|
||||
self.get_client(verbose=verbose))
|
||||
except ConfigError as e:
|
||||
raise errors.UserError(six.text_type(e))
|
||||
|
||||
def get_project_name(self, config_path, project_name=None):
|
||||
def get_project_name(self, working_dir, project_name=None):
|
||||
def normalize_name(name):
|
||||
return re.sub(r'[^a-z0-9]', '', name.lower())
|
||||
|
||||
@@ -86,48 +88,15 @@ class Command(DocoptCommand):
|
||||
log.warn('The FIG_PROJECT_NAME environment variable is deprecated.')
|
||||
log.warn('Please use COMPOSE_PROJECT_NAME instead.')
|
||||
|
||||
project_name = project_name or os.environ.get('COMPOSE_PROJECT_NAME') or os.environ.get('FIG_PROJECT_NAME')
|
||||
project_name = (
|
||||
project_name or
|
||||
os.environ.get('COMPOSE_PROJECT_NAME') or
|
||||
os.environ.get('FIG_PROJECT_NAME'))
|
||||
if project_name is not None:
|
||||
return normalize_name(project_name)
|
||||
|
||||
project = os.path.basename(os.path.dirname(os.path.abspath(config_path)))
|
||||
project = os.path.basename(os.path.abspath(working_dir))
|
||||
if project:
|
||||
return normalize_name(project)
|
||||
|
||||
return 'default'
|
||||
|
||||
def get_config_path(self, file_path=None):
|
||||
if file_path:
|
||||
return os.path.join(self.base_dir, file_path)
|
||||
|
||||
supported_filenames = [
|
||||
'docker-compose.yml',
|
||||
'docker-compose.yaml',
|
||||
'fig.yml',
|
||||
'fig.yaml',
|
||||
]
|
||||
|
||||
def expand(filename):
|
||||
return os.path.join(self.base_dir, filename)
|
||||
|
||||
candidates = [filename for filename in supported_filenames if os.path.exists(expand(filename))]
|
||||
|
||||
if len(candidates) == 0:
|
||||
raise errors.ComposeFileNotFound(supported_filenames)
|
||||
|
||||
winner = candidates[0]
|
||||
|
||||
if len(candidates) > 1:
|
||||
log.warning("Found multiple config files with supported names: %s", ", ".join(candidates))
|
||||
log.warning("Using %s\n", winner)
|
||||
|
||||
if winner == 'docker-compose.yaml':
|
||||
log.warning("Please be aware that .yml is the expected extension "
|
||||
"in most cases, and using .yaml can cause compatibility "
|
||||
"issues in future.\n")
|
||||
|
||||
if winner.startswith("fig."):
|
||||
log.warning("%s is deprecated and will not be supported in future. "
|
||||
"Please rename your config file to docker-compose.yml\n" % winner)
|
||||
|
||||
return expand(winner)
|
||||
|
||||
@@ -14,6 +14,8 @@ def docker_client():
|
||||
cert_path = os.path.join(os.environ.get('HOME', ''), '.docker')
|
||||
|
||||
base_url = os.environ.get('DOCKER_HOST')
|
||||
api_version = os.environ.get('COMPOSE_API_VERSION', '1.19')
|
||||
|
||||
tls_config = None
|
||||
|
||||
if os.environ.get('DOCKER_TLS_VERIFY', '') != '':
|
||||
@@ -32,4 +34,4 @@ def docker_client():
|
||||
)
|
||||
|
||||
timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
|
||||
return Client(base_url=base_url, tls=tls_config, version='1.15', timeout=timeout)
|
||||
return Client(base_url=base_url, tls=tls_config, version=api_version, timeout=timeout)
|
||||
|
||||
@@ -33,10 +33,7 @@ class DocoptCommand(object):
|
||||
if command is None:
|
||||
raise SystemExit(getdoc(self))
|
||||
|
||||
if not hasattr(self, command):
|
||||
raise NoSuchCommand(command, self)
|
||||
|
||||
handler = getattr(self, command)
|
||||
handler = self.get_handler(command)
|
||||
docstring = getdoc(handler)
|
||||
|
||||
if docstring is None:
|
||||
@@ -45,6 +42,14 @@ class DocoptCommand(object):
|
||||
command_options = docopt_full_help(docstring, options['ARGS'], options_first=True)
|
||||
return options, handler, command_options
|
||||
|
||||
def get_handler(self, command):
|
||||
command = command.replace('-', '_')
|
||||
|
||||
if not hasattr(self, command):
|
||||
raise NoSuchCommand(command, self)
|
||||
|
||||
return getattr(self, command)
|
||||
|
||||
|
||||
class NoSuchCommand(Exception):
|
||||
def __init__(self, command, supercommand):
|
||||
|
||||
@@ -53,12 +53,3 @@ class ConnectionErrorGeneric(UserError):
|
||||
|
||||
If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable.
|
||||
""" % url)
|
||||
|
||||
|
||||
class ComposeFileNotFound(UserError):
|
||||
def __init__(self, supported_filenames):
|
||||
super(ComposeFileNotFound, self).__init__("""
|
||||
Can't find a suitable configuration file. Are you in the right directory?
|
||||
|
||||
Supported filenames: %s
|
||||
""" % ", ".join(supported_filenames))
|
||||
|
||||
@@ -7,7 +7,7 @@ import texttable
|
||||
def get_tty_width():
|
||||
tty_size = os.popen('stty size', 'r').read().split()
|
||||
if len(tty_size) != 2:
|
||||
return 80
|
||||
return 0
|
||||
_, width = tty_size
|
||||
return int(width)
|
||||
|
||||
|
||||
@@ -11,18 +11,26 @@ from docker.errors import APIError
|
||||
import dockerpty
|
||||
|
||||
from .. import __version__
|
||||
from .. import legacy
|
||||
from ..const import DEFAULT_TIMEOUT
|
||||
from ..project import NoSuchService, ConfigurationError
|
||||
from ..service import BuildError, CannotBeScaledError
|
||||
from ..service import BuildError, NeedsBuildError
|
||||
from ..config import parse_environment
|
||||
from ..progress_stream import StreamOutputError
|
||||
from .command import Command
|
||||
from .docopt_command import NoSuchCommand
|
||||
from .errors import UserError
|
||||
from .formatter import Formatter
|
||||
from .log_printer import LogPrinter
|
||||
from .utils import yesno
|
||||
from .utils import yesno, get_version_info
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
INSECURE_SSL_WARNING = """
|
||||
Warning: --allow-insecure-ssl is deprecated and has no effect.
|
||||
It will be removed in a future version of Compose.
|
||||
"""
|
||||
|
||||
|
||||
def main():
|
||||
setup_logging()
|
||||
@@ -32,7 +40,7 @@ def main():
|
||||
except KeyboardInterrupt:
|
||||
log.error("\nAborting.")
|
||||
sys.exit(1)
|
||||
except (UserError, NoSuchService, ConfigurationError) as e:
|
||||
except (UserError, NoSuchService, ConfigurationError, legacy.LegacyError) as e:
|
||||
log.error(e.msg)
|
||||
sys.exit(1)
|
||||
except NoSuchCommand as e:
|
||||
@@ -46,6 +54,12 @@ def main():
|
||||
except BuildError as e:
|
||||
log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
|
||||
sys.exit(1)
|
||||
except StreamOutputError as e:
|
||||
log.error(e)
|
||||
sys.exit(1)
|
||||
except NeedsBuildError as e:
|
||||
log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def setup_logging():
|
||||
@@ -68,38 +82,40 @@ def parse_doc_section(name, source):
|
||||
|
||||
|
||||
class TopLevelCommand(Command):
|
||||
"""Fast, isolated development environments using Docker.
|
||||
"""Define and run multi-container applications with Docker.
|
||||
|
||||
Usage:
|
||||
docker-compose [options] [COMMAND] [ARGS...]
|
||||
docker-compose -h|--help
|
||||
|
||||
Options:
|
||||
--verbose Show more output
|
||||
--version Print version and exit
|
||||
-f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
|
||||
-p, --project-name NAME Specify an alternate project name (default: directory name)
|
||||
--verbose Show more output
|
||||
-v, --version Print version and exit
|
||||
|
||||
Commands:
|
||||
build Build or rebuild services
|
||||
help Get help on a command
|
||||
kill Kill containers
|
||||
logs View output from containers
|
||||
port Print the public port for a port binding
|
||||
ps List containers
|
||||
pull Pulls service images
|
||||
rm Remove stopped containers
|
||||
run Run a one-off command
|
||||
scale Set number of containers for a service
|
||||
start Start services
|
||||
stop Stop services
|
||||
restart Restart services
|
||||
up Create and start containers
|
||||
build Build or rebuild services
|
||||
help Get help on a command
|
||||
kill Kill containers
|
||||
logs View output from containers
|
||||
port Print the public port for a port binding
|
||||
ps List containers
|
||||
pull Pulls service images
|
||||
restart Restart services
|
||||
rm Remove stopped containers
|
||||
run Run a one-off command
|
||||
scale Set number of containers for a service
|
||||
start Start services
|
||||
stop Stop services
|
||||
up Create and start containers
|
||||
migrate-to-labels Recreate containers to add labels
|
||||
version Show the Docker-Compose version information
|
||||
|
||||
"""
|
||||
def docopt_options(self):
|
||||
options = super(TopLevelCommand, self).docopt_options()
|
||||
options['version'] = "docker-compose %s" % __version__
|
||||
options['version'] = get_version_info('compose')
|
||||
return options
|
||||
|
||||
def build(self, project, options):
|
||||
@@ -108,7 +124,7 @@ class TopLevelCommand(Command):
|
||||
|
||||
Services are built once and then tagged as `project_service`,
|
||||
e.g. `composetest_db`. If you change a service's `Dockerfile` or the
|
||||
contents of its build directory, you can run `compose build` to rebuild it.
|
||||
contents of its build directory, you can run `docker-compose build` to rebuild it.
|
||||
|
||||
Usage: build [options] [SERVICE...]
|
||||
|
||||
@@ -124,10 +140,8 @@ class TopLevelCommand(Command):
|
||||
|
||||
Usage: help COMMAND
|
||||
"""
|
||||
command = options['COMMAND']
|
||||
if not hasattr(self, command):
|
||||
raise NoSuchCommand(command, self)
|
||||
raise SystemExit(getdoc(getattr(self, command)))
|
||||
handler = self.get_handler(options['COMMAND'])
|
||||
raise SystemExit(getdoc(handler))
|
||||
|
||||
def kill(self, project, options):
|
||||
"""
|
||||
@@ -165,13 +179,14 @@ class TopLevelCommand(Command):
|
||||
Usage: port [options] SERVICE PRIVATE_PORT
|
||||
|
||||
Options:
|
||||
--protocol=proto tcp or udp (defaults to tcp)
|
||||
--protocol=proto tcp or udp [default: tcp]
|
||||
--index=index index of the container if there are multiple
|
||||
instances of a service (defaults to 1)
|
||||
instances of a service [default: 1]
|
||||
"""
|
||||
index = int(options.get('--index'))
|
||||
service = project.get_service(options['SERVICE'])
|
||||
try:
|
||||
container = service.get_container(number=options.get('--index') or 1)
|
||||
container = service.get_container(number=index)
|
||||
except ValueError as e:
|
||||
raise UserError(str(e))
|
||||
print(container.get_local_port(
|
||||
@@ -222,13 +237,13 @@ class TopLevelCommand(Command):
|
||||
Usage: pull [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--allow-insecure-ssl Allow insecure connections to the docker
|
||||
registry
|
||||
--allow-insecure-ssl Deprecated - no effect.
|
||||
"""
|
||||
insecure_registry = options['--allow-insecure-ssl']
|
||||
if options['--allow-insecure-ssl']:
|
||||
log.warn(INSECURE_SSL_WARNING)
|
||||
|
||||
project.pull(
|
||||
service_names=options['SERVICE'],
|
||||
insecure_registry=insecure_registry
|
||||
)
|
||||
|
||||
def rm(self, project, options):
|
||||
@@ -270,8 +285,7 @@ class TopLevelCommand(Command):
|
||||
Usage: run [options] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
|
||||
|
||||
Options:
|
||||
--allow-insecure-ssl Allow insecure connections to the docker
|
||||
registry
|
||||
--allow-insecure-ssl Deprecated - no effect.
|
||||
-d Detached mode: Run container in the background, print
|
||||
new container name.
|
||||
--entrypoint CMD Override the entrypoint of the image.
|
||||
@@ -286,18 +300,17 @@ class TopLevelCommand(Command):
|
||||
"""
|
||||
service = project.get_service(options['SERVICE'])
|
||||
|
||||
insecure_registry = options['--allow-insecure-ssl']
|
||||
if options['--allow-insecure-ssl']:
|
||||
log.warn(INSECURE_SSL_WARNING)
|
||||
|
||||
if not options['--no-deps']:
|
||||
deps = service.get_linked_names()
|
||||
deps = service.get_linked_service_names()
|
||||
|
||||
if len(deps) > 0:
|
||||
project.up(
|
||||
service_names=deps,
|
||||
start_deps=True,
|
||||
recreate=False,
|
||||
insecure_registry=insecure_registry,
|
||||
detach=options['-d']
|
||||
allow_recreate=False,
|
||||
)
|
||||
|
||||
tty = True
|
||||
@@ -317,35 +330,43 @@ class TopLevelCommand(Command):
|
||||
}
|
||||
|
||||
if options['-e']:
|
||||
# Merge environment from config with -e command line
|
||||
container_options['environment'] = dict(
|
||||
parse_environment(service.options.get('environment')),
|
||||
**parse_environment(options['-e']))
|
||||
container_options['environment'] = parse_environment(options['-e'])
|
||||
|
||||
if options['--entrypoint']:
|
||||
container_options['entrypoint'] = options.get('--entrypoint')
|
||||
|
||||
if options['--rm']:
|
||||
container_options['restart'] = None
|
||||
|
||||
if options['--user']:
|
||||
container_options['user'] = options.get('--user')
|
||||
|
||||
if not options['--service-ports']:
|
||||
container_options['ports'] = []
|
||||
|
||||
container = service.create_container(
|
||||
one_off=True,
|
||||
insecure_registry=insecure_registry,
|
||||
**container_options
|
||||
)
|
||||
try:
|
||||
container = service.create_container(
|
||||
quiet=True,
|
||||
one_off=True,
|
||||
**container_options
|
||||
)
|
||||
except APIError as e:
|
||||
legacy.check_for_legacy_containers(
|
||||
project.client,
|
||||
project.name,
|
||||
[service.name],
|
||||
allow_one_off=False,
|
||||
)
|
||||
|
||||
raise e
|
||||
|
||||
if options['-d']:
|
||||
service.start_container(container)
|
||||
print(container.name)
|
||||
else:
|
||||
service.start_container(container)
|
||||
dockerpty.start(project.client, container.id, interactive=not options['-T'])
|
||||
exit_code = container.wait()
|
||||
if options['--rm']:
|
||||
log.info("Removing %s..." % container.name)
|
||||
project.client.remove_container(container.id)
|
||||
sys.exit(exit_code)
|
||||
|
||||
@@ -358,8 +379,14 @@ class TopLevelCommand(Command):
|
||||
|
||||
$ docker-compose scale web=2 worker=3
|
||||
|
||||
Usage: scale [SERVICE=NUM...]
|
||||
Usage: scale [options] [SERVICE=NUM...]
|
||||
|
||||
Options:
|
||||
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
|
||||
(default: 10)
|
||||
"""
|
||||
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
|
||||
|
||||
for s in options['SERVICE=NUM']:
|
||||
if '=' not in s:
|
||||
raise UserError('Arguments to scale should be in the form service=num')
|
||||
@@ -369,15 +396,7 @@ class TopLevelCommand(Command):
|
||||
except ValueError:
|
||||
raise UserError('Number of containers for service "%s" is not a '
|
||||
'number' % service_name)
|
||||
try:
|
||||
project.get_service(service_name).scale(num)
|
||||
except CannotBeScaledError:
|
||||
raise UserError(
|
||||
'Service "%s" cannot be scaled because it specifies a port '
|
||||
'on the host. If multiple containers for this service were '
|
||||
'created, the port would clash.\n\nRemove the ":" from the '
|
||||
'port definition in docker-compose.yml so Docker can choose a random '
|
||||
'port for each container.' % service_name)
|
||||
project.get_service(service_name).scale(num, timeout=timeout)
|
||||
|
||||
def start(self, project, options):
|
||||
"""
|
||||
@@ -399,9 +418,8 @@ class TopLevelCommand(Command):
|
||||
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
|
||||
(default: 10)
|
||||
"""
|
||||
timeout = options.get('--timeout')
|
||||
params = {} if timeout is None else {'timeout': int(timeout)}
|
||||
project.stop(service_names=options['SERVICE'], **params)
|
||||
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
|
||||
project.stop(service_names=options['SERVICE'], timeout=timeout)
|
||||
|
||||
def restart(self, project, options):
|
||||
"""
|
||||
@@ -413,75 +431,134 @@ class TopLevelCommand(Command):
|
||||
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
|
||||
(default: 10)
|
||||
"""
|
||||
timeout = options.get('--timeout')
|
||||
params = {} if timeout is None else {'timeout': int(timeout)}
|
||||
project.restart(service_names=options['SERVICE'], **params)
|
||||
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
|
||||
project.restart(service_names=options['SERVICE'], timeout=timeout)
|
||||
|
||||
def up(self, project, options):
|
||||
"""
|
||||
Build, (re)create, start and attach to containers for a service.
|
||||
Builds, (re)creates, starts, and attaches to containers for a service.
|
||||
|
||||
By default, `docker-compose up` will aggregate the output of each container, and
|
||||
when it exits, all containers will be stopped. If you run `docker-compose up -d`,
|
||||
it'll start the containers in the background and leave them running.
|
||||
Unless they are already running, this command also starts any linked services.
|
||||
|
||||
If there are existing containers for a service, `docker-compose up` will stop
|
||||
and recreate them (preserving mounted volumes with volumes-from),
|
||||
so that changes in `docker-compose.yml` are picked up. If you do not want existing
|
||||
containers to be recreated, `docker-compose up --no-recreate` will re-use existing
|
||||
containers.
|
||||
The `docker-compose up` command aggregates the output of each container. When
|
||||
the command exits, all containers are stopped. Running `docker-compose up -d`
|
||||
starts the containers in the background and leaves them running.
|
||||
|
||||
If there are existing containers for a service, and the service's configuration
|
||||
or image was changed after the container's creation, `docker-compose up` picks
|
||||
up the changes by stopping and recreating the containers (preserving mounted
|
||||
volumes). To prevent Compose from picking up changes, use the `--no-recreate`
|
||||
flag.
|
||||
|
||||
If you want to force Compose to stop and recreate all containers, use the
|
||||
`--force-recreate` flag.
|
||||
|
||||
Usage: up [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--allow-insecure-ssl Allow insecure connections to the docker
|
||||
registry
|
||||
--allow-insecure-ssl Deprecated - no effect.
|
||||
-d Detached mode: Run containers in the background,
|
||||
print new container names.
|
||||
--no-color Produce monochrome output.
|
||||
--no-deps Don't start linked services.
|
||||
--force-recreate Recreate containers even if their configuration and
|
||||
image haven't changed. Incompatible with --no-recreate.
|
||||
--no-recreate If containers already exist, don't recreate them.
|
||||
Incompatible with --force-recreate.
|
||||
--no-build Don't build an image, even if it's missing
|
||||
-t, --timeout TIMEOUT When attached, use this timeout in seconds
|
||||
for the shutdown. (default: 10)
|
||||
|
||||
-t, --timeout TIMEOUT Use this timeout in seconds for container shutdown
|
||||
when attached or when containers are already
|
||||
running. (default: 10)
|
||||
"""
|
||||
insecure_registry = options['--allow-insecure-ssl']
|
||||
if options['--allow-insecure-ssl']:
|
||||
log.warn(INSECURE_SSL_WARNING)
|
||||
|
||||
detached = options['-d']
|
||||
|
||||
monochrome = options['--no-color']
|
||||
|
||||
start_deps = not options['--no-deps']
|
||||
recreate = not options['--no-recreate']
|
||||
allow_recreate = not options['--no-recreate']
|
||||
force_recreate = options['--force-recreate']
|
||||
service_names = options['SERVICE']
|
||||
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
|
||||
|
||||
project.up(
|
||||
if force_recreate and not allow_recreate:
|
||||
raise UserError("--force-recreate and --no-recreate cannot be combined.")
|
||||
|
||||
to_attach = project.up(
|
||||
service_names=service_names,
|
||||
start_deps=start_deps,
|
||||
recreate=recreate,
|
||||
insecure_registry=insecure_registry,
|
||||
detach=detached,
|
||||
allow_recreate=allow_recreate,
|
||||
force_recreate=force_recreate,
|
||||
do_build=not options['--no-build'],
|
||||
timeout=timeout
|
||||
)
|
||||
|
||||
to_attach = [c for s in project.get_services(service_names) for c in s.containers()]
|
||||
|
||||
if not detached:
|
||||
print("Attaching to", list_containers(to_attach))
|
||||
log_printer = LogPrinter(to_attach, attach_params={"logs": True}, monochrome=monochrome)
|
||||
log_printer = build_log_printer(to_attach, service_names, monochrome)
|
||||
attach_to_logs(project, log_printer, service_names, timeout)
|
||||
|
||||
try:
|
||||
log_printer.run()
|
||||
finally:
|
||||
def handler(signal, frame):
|
||||
project.kill(service_names=service_names)
|
||||
sys.exit(0)
|
||||
signal.signal(signal.SIGINT, handler)
|
||||
def migrate_to_labels(self, project, _options):
|
||||
"""
|
||||
Recreate containers to add labels
|
||||
|
||||
print("Gracefully stopping... (press Ctrl+C again to force)")
|
||||
timeout = options.get('--timeout')
|
||||
params = {} if timeout is None else {'timeout': int(timeout)}
|
||||
project.stop(service_names=service_names, **params)
|
||||
If you're coming from Compose 1.2 or earlier, you'll need to remove or
|
||||
migrate your existing containers after upgrading Compose. This is
|
||||
because, as of version 1.3, Compose uses Docker labels to keep track
|
||||
of containers, and so they need to be recreated with labels added.
|
||||
|
||||
If Compose detects containers that were created without labels, it
|
||||
will refuse to run so that you don't end up with two sets of them. If
|
||||
you want to keep using your existing containers (for example, because
|
||||
they have data volumes you want to preserve) you can migrate them with
|
||||
the following command:
|
||||
|
||||
docker-compose migrate-to-labels
|
||||
|
||||
Alternatively, if you're not worried about keeping them, you can
|
||||
remove them - Compose will just create new ones.
|
||||
|
||||
docker rm -f myapp_web_1 myapp_db_1 ...
|
||||
|
||||
Usage: migrate-to-labels
|
||||
"""
|
||||
legacy.migrate_project_to_labels(project)
|
||||
|
||||
def version(self, project, options):
|
||||
"""
|
||||
Show version informations
|
||||
|
||||
Usage: version [--short]
|
||||
|
||||
Options:
|
||||
--short Shows only Compose's version number.
|
||||
"""
|
||||
if options['--short']:
|
||||
print(__version__)
|
||||
else:
|
||||
print(get_version_info('full'))
|
||||
|
||||
|
||||
def build_log_printer(containers, service_names, monochrome):
|
||||
return LogPrinter(
|
||||
[c for c in containers if c.service in service_names],
|
||||
attach_params={"logs": True},
|
||||
monochrome=monochrome)
|
||||
|
||||
|
||||
def attach_to_logs(project, log_printer, service_names, timeout):
|
||||
print("Attaching to", list_containers(log_printer.containers))
|
||||
try:
|
||||
log_printer.run()
|
||||
finally:
|
||||
def handler(signal, frame):
|
||||
project.kill(service_names=service_names)
|
||||
sys.exit(0)
|
||||
signal.signal(signal.SIGINT, handler)
|
||||
|
||||
print("Gracefully stopping... (press Ctrl+C again to force)")
|
||||
project.stop(service_names=service_names, timeout=timeout)
|
||||
|
||||
|
||||
def list_containers(containers):
|
||||
|
||||
@@ -1,10 +1,14 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
|
||||
from .. import __version__
|
||||
import datetime
|
||||
from docker import version as docker_py_version
|
||||
import os
|
||||
import subprocess
|
||||
import platform
|
||||
import subprocess
|
||||
import ssl
|
||||
|
||||
|
||||
def yesno(prompt, default=None):
|
||||
@@ -62,6 +66,25 @@ def mkdir(path, permissions=0o700):
|
||||
return path
|
||||
|
||||
|
||||
def find_candidates_in_parent_dirs(filenames, path):
|
||||
"""
|
||||
Given a directory path to start, looks for filenames in the
|
||||
directory, and then each parent directory successively,
|
||||
until found.
|
||||
|
||||
Returns tuple (candidates, path).
|
||||
"""
|
||||
candidates = [filename for filename in filenames
|
||||
if os.path.exists(os.path.join(path, filename))]
|
||||
|
||||
if len(candidates) == 0:
|
||||
parent_dir = os.path.join(path, '..')
|
||||
if os.path.abspath(parent_dir) != os.path.abspath(path):
|
||||
return find_candidates_in_parent_dirs(filenames, parent_dir)
|
||||
|
||||
return (candidates, path)
|
||||
|
||||
|
||||
def split_buffer(reader, separator):
|
||||
"""
|
||||
Given a generator which yields strings and a separator string,
|
||||
@@ -101,3 +124,16 @@ def is_mac():
|
||||
|
||||
def is_ubuntu():
|
||||
return platform.system() == 'Linux' and platform.linux_distribution()[0] == 'Ubuntu'
|
||||
|
||||
|
||||
def get_version_info(scope):
|
||||
versioninfo = 'docker-compose version: %s' % __version__
|
||||
if scope == 'compose':
|
||||
return versioninfo
|
||||
elif scope == 'full':
|
||||
return versioninfo + '\n' \
|
||||
+ "docker-py version: %s\n" % docker_py_version \
|
||||
+ "%s version: %s\n" % (platform.python_implementation(), platform.python_version()) \
|
||||
+ "OpenSSL version: %s" % ssl.OPENSSL_VERSION
|
||||
else:
|
||||
raise RuntimeError('passed unallowed value to `cli.utils.get_version_info`')
|
||||
|
||||
@@ -1,31 +1,49 @@
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import yaml
|
||||
from collections import namedtuple
|
||||
|
||||
import six
|
||||
|
||||
from compose.cli.utils import find_candidates_in_parent_dirs
|
||||
|
||||
|
||||
DOCKER_CONFIG_KEYS = [
|
||||
'cap_add',
|
||||
'cap_drop',
|
||||
'cpu_shares',
|
||||
'command',
|
||||
'cpu_shares',
|
||||
'cpuset',
|
||||
'detach',
|
||||
'devices',
|
||||
'dns',
|
||||
'dns_search',
|
||||
'domainname',
|
||||
'entrypoint',
|
||||
'env_file',
|
||||
'environment',
|
||||
'extra_hosts',
|
||||
'hostname',
|
||||
'image',
|
||||
'labels',
|
||||
'links',
|
||||
'log_driver',
|
||||
'log_opt',
|
||||
'mac_address',
|
||||
'mem_limit',
|
||||
'memswap_limit',
|
||||
'net',
|
||||
'pid',
|
||||
'ports',
|
||||
'privileged',
|
||||
'read_only',
|
||||
'restart',
|
||||
'security_opt',
|
||||
'stdin_open',
|
||||
'tty',
|
||||
'user',
|
||||
'volume_driver',
|
||||
'volumes',
|
||||
'volumes_from',
|
||||
'working_dir',
|
||||
@@ -33,29 +51,88 @@ DOCKER_CONFIG_KEYS = [
|
||||
|
||||
ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
|
||||
'build',
|
||||
'container_name',
|
||||
'dockerfile',
|
||||
'expose',
|
||||
'external_links',
|
||||
'name',
|
||||
]
|
||||
|
||||
DOCKER_CONFIG_HINTS = {
|
||||
'cpu_share' : 'cpu_shares',
|
||||
'link' : 'links',
|
||||
'port' : 'ports',
|
||||
'privilege' : 'privileged',
|
||||
'cpu_share': 'cpu_shares',
|
||||
'add_host': 'extra_hosts',
|
||||
'hosts': 'extra_hosts',
|
||||
'extra_host': 'extra_hosts',
|
||||
'device': 'devices',
|
||||
'link': 'links',
|
||||
'memory_swap': 'memswap_limit',
|
||||
'port': 'ports',
|
||||
'privilege': 'privileged',
|
||||
'priviliged': 'privileged',
|
||||
'privilige' : 'privileged',
|
||||
'volume' : 'volumes',
|
||||
'workdir' : 'working_dir',
|
||||
'privilige': 'privileged',
|
||||
'volume': 'volumes',
|
||||
'workdir': 'working_dir',
|
||||
}
|
||||
|
||||
|
||||
def load(filename):
|
||||
working_dir = os.path.dirname(filename)
|
||||
return from_dictionary(load_yaml(filename), working_dir=working_dir, filename=filename)
|
||||
SUPPORTED_FILENAMES = [
|
||||
'docker-compose.yml',
|
||||
'docker-compose.yaml',
|
||||
'fig.yml',
|
||||
'fig.yaml',
|
||||
]
|
||||
|
||||
|
||||
def from_dictionary(dictionary, working_dir=None, filename=None):
|
||||
PATH_START_CHARS = [
|
||||
'/',
|
||||
'.',
|
||||
'~',
|
||||
]
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
ConfigDetails = namedtuple('ConfigDetails', 'config working_dir filename')
|
||||
|
||||
|
||||
def find(base_dir, filename):
|
||||
if filename == '-':
|
||||
return ConfigDetails(yaml.safe_load(sys.stdin), os.getcwd(), None)
|
||||
|
||||
if filename:
|
||||
filename = os.path.join(base_dir, filename)
|
||||
else:
|
||||
filename = get_config_path(base_dir)
|
||||
return ConfigDetails(load_yaml(filename), os.path.dirname(filename), filename)
|
||||
|
||||
|
||||
def get_config_path(base_dir):
|
||||
(candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir)
|
||||
|
||||
if len(candidates) == 0:
|
||||
raise ComposeFileNotFound(SUPPORTED_FILENAMES)
|
||||
|
||||
winner = candidates[0]
|
||||
|
||||
if len(candidates) > 1:
|
||||
log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
|
||||
log.warn("Using %s\n", winner)
|
||||
|
||||
if winner == 'docker-compose.yaml':
|
||||
log.warn("Please be aware that .yml is the expected extension "
|
||||
"in most cases, and using .yaml can cause compatibility "
|
||||
"issues in future.\n")
|
||||
|
||||
if winner.startswith("fig."):
|
||||
log.warn("%s is deprecated and will not be supported in future. "
|
||||
"Please rename your config file to docker-compose.yml\n" % winner)
|
||||
|
||||
return os.path.join(path, winner)
|
||||
|
||||
|
||||
def load(config_details):
|
||||
dictionary, working_dir, filename = config_details
|
||||
service_dicts = []
|
||||
|
||||
for service_name, service_dict in list(dictionary.items()):
|
||||
@@ -63,25 +140,26 @@ def from_dictionary(dictionary, working_dir=None, filename=None):
|
||||
raise ConfigurationError('Service "%s" doesn\'t have any configuration options. All top level keys in your docker-compose.yml must map to a dictionary of configuration options.' % service_name)
|
||||
loader = ServiceLoader(working_dir=working_dir, filename=filename)
|
||||
service_dict = loader.make_service_dict(service_name, service_dict)
|
||||
validate_paths(service_dict)
|
||||
service_dicts.append(service_dict)
|
||||
|
||||
return service_dicts
|
||||
|
||||
|
||||
def make_service_dict(name, service_dict, working_dir=None):
|
||||
return ServiceLoader(working_dir=working_dir).make_service_dict(name, service_dict)
|
||||
|
||||
|
||||
class ServiceLoader(object):
|
||||
def __init__(self, working_dir, filename=None, already_seen=None):
|
||||
self.working_dir = working_dir
|
||||
self.filename = filename
|
||||
self.working_dir = os.path.abspath(working_dir)
|
||||
if filename:
|
||||
self.filename = os.path.abspath(filename)
|
||||
else:
|
||||
self.filename = filename
|
||||
self.already_seen = already_seen or []
|
||||
|
||||
def make_service_dict(self, name, service_dict):
|
||||
def detect_cycle(self, name):
|
||||
if self.signature(name) in self.already_seen:
|
||||
raise CircularReference(self.already_seen)
|
||||
raise CircularReference(self.already_seen + [self.signature(name)])
|
||||
|
||||
def make_service_dict(self, name, service_dict):
|
||||
service_dict = service_dict.copy()
|
||||
service_dict['name'] = name
|
||||
service_dict = resolve_environment(service_dict, working_dir=self.working_dir)
|
||||
@@ -92,12 +170,17 @@ class ServiceLoader(object):
|
||||
if 'extends' not in service_dict:
|
||||
return service_dict
|
||||
|
||||
extends_options = process_extends_options(service_dict['name'], service_dict['extends'])
|
||||
extends_options = self.validate_extends_options(service_dict['name'], service_dict['extends'])
|
||||
|
||||
if self.working_dir is None:
|
||||
raise Exception("No working_dir passed to ServiceLoader()")
|
||||
|
||||
other_config_path = expand_path(self.working_dir, extends_options['file'])
|
||||
if 'file' in extends_options:
|
||||
extends_from_filename = extends_options['file']
|
||||
other_config_path = expand_path(self.working_dir, extends_from_filename)
|
||||
else:
|
||||
other_config_path = self.filename
|
||||
|
||||
other_working_dir = os.path.dirname(other_config_path)
|
||||
other_already_seen = self.already_seen + [self.signature(service_dict['name'])]
|
||||
other_loader = ServiceLoader(
|
||||
@@ -108,6 +191,7 @@ class ServiceLoader(object):
|
||||
|
||||
other_config = load_yaml(other_config_path)
|
||||
other_service_dict = other_config[extends_options['service']]
|
||||
other_loader.detect_cycle(extends_options['service'])
|
||||
other_service_dict = other_loader.make_service_dict(
|
||||
service_dict['name'],
|
||||
other_service_dict,
|
||||
@@ -123,25 +207,29 @@ class ServiceLoader(object):
|
||||
def signature(self, name):
|
||||
return (self.filename, name)
|
||||
|
||||
def validate_extends_options(self, service_name, extends_options):
|
||||
error_prefix = "Invalid 'extends' configuration for %s:" % service_name
|
||||
|
||||
def process_extends_options(service_name, extends_options):
|
||||
error_prefix = "Invalid 'extends' configuration for %s:" % service_name
|
||||
if not isinstance(extends_options, dict):
|
||||
raise ConfigurationError("%s must be a dictionary" % error_prefix)
|
||||
|
||||
if not isinstance(extends_options, dict):
|
||||
raise ConfigurationError("%s must be a dictionary" % error_prefix)
|
||||
|
||||
if 'service' not in extends_options:
|
||||
raise ConfigurationError(
|
||||
"%s you need to specify a service, e.g. 'service: web'" % error_prefix
|
||||
)
|
||||
|
||||
for k, _ in extends_options.items():
|
||||
if k not in ['file', 'service']:
|
||||
if 'service' not in extends_options:
|
||||
raise ConfigurationError(
|
||||
"%s unsupported configuration option '%s'" % (error_prefix, k)
|
||||
"%s you need to specify a service, e.g. 'service: web'" % error_prefix
|
||||
)
|
||||
|
||||
return extends_options
|
||||
if 'file' not in extends_options and self.filename is None:
|
||||
raise ConfigurationError(
|
||||
"%s you need to specify a 'file', e.g. 'file: something.yml'" % error_prefix
|
||||
)
|
||||
|
||||
for k, _ in extends_options.items():
|
||||
if k not in ['file', 'service']:
|
||||
raise ConfigurationError(
|
||||
"%s unsupported configuration option '%s'" % (error_prefix, k)
|
||||
)
|
||||
|
||||
return extends_options
|
||||
|
||||
|
||||
def validate_extended_service_dict(service_dict, filename, service):
|
||||
@@ -168,12 +256,18 @@ def process_container_options(service_dict, working_dir=None):
|
||||
|
||||
service_dict = service_dict.copy()
|
||||
|
||||
if 'volumes' in service_dict:
|
||||
service_dict['volumes'] = resolve_host_paths(service_dict['volumes'], working_dir=working_dir)
|
||||
if 'memswap_limit' in service_dict and 'mem_limit' not in service_dict:
|
||||
raise ConfigurationError("Invalid 'memswap_limit' configuration for %s service: when defining 'memswap_limit' you must set 'mem_limit' as well" % service_dict['name'])
|
||||
|
||||
if 'volumes' in service_dict and service_dict.get('volume_driver') is None:
|
||||
service_dict['volumes'] = resolve_volume_paths(service_dict, working_dir=working_dir)
|
||||
|
||||
if 'build' in service_dict:
|
||||
service_dict['build'] = resolve_build_path(service_dict['build'], working_dir=working_dir)
|
||||
|
||||
if 'labels' in service_dict:
|
||||
service_dict['labels'] = parse_labels(service_dict['labels'])
|
||||
|
||||
return service_dict
|
||||
|
||||
|
||||
@@ -186,10 +280,19 @@ def merge_service_dicts(base, override):
|
||||
override.get('environment'),
|
||||
)
|
||||
|
||||
if 'volumes' in base or 'volumes' in override:
|
||||
d['volumes'] = merge_volumes(
|
||||
base.get('volumes'),
|
||||
override.get('volumes'),
|
||||
path_mapping_keys = ['volumes', 'devices']
|
||||
|
||||
for key in path_mapping_keys:
|
||||
if key in base or key in override:
|
||||
d[key] = merge_path_mappings(
|
||||
base.get(key),
|
||||
override.get(key),
|
||||
)
|
||||
|
||||
if 'labels' in base or 'labels' in override:
|
||||
d['labels'] = merge_labels(
|
||||
base.get('labels'),
|
||||
override.get('labels'),
|
||||
)
|
||||
|
||||
if 'image' in override and 'build' in d:
|
||||
@@ -210,7 +313,7 @@ def merge_service_dicts(base, override):
|
||||
if key in base or key in override:
|
||||
d[key] = to_list(base.get(key)) + to_list(override.get(key))
|
||||
|
||||
already_merged_keys = ['environment', 'volumes'] + list_keys + list_or_string_keys
|
||||
already_merged_keys = ['environment', 'labels'] + path_mapping_keys + list_keys + list_or_string_keys
|
||||
|
||||
for k in set(ALLOWED_KEYS) - set(already_merged_keys):
|
||||
if k in override:
|
||||
@@ -279,7 +382,7 @@ def parse_environment(environment):
|
||||
return dict(split_env(e) for e in environment)
|
||||
|
||||
if isinstance(environment, dict):
|
||||
return environment
|
||||
return dict(environment)
|
||||
|
||||
raise ConfigurationError(
|
||||
"environment \"%s\" must be a list or mapping," %
|
||||
@@ -318,18 +421,33 @@ def env_vars_from_file(filename):
|
||||
return env
|
||||
|
||||
|
||||
def resolve_host_paths(volumes, working_dir=None):
|
||||
def resolve_volume_paths(service_dict, working_dir=None):
|
||||
if working_dir is None:
|
||||
raise Exception("No working_dir passed to resolve_host_paths()")
|
||||
raise Exception("No working_dir passed to resolve_volume_paths()")
|
||||
|
||||
return [resolve_host_path(v, working_dir) for v in volumes]
|
||||
return [
|
||||
resolve_volume_path(v, working_dir, service_dict['name'])
|
||||
for v in service_dict['volumes']
|
||||
]
|
||||
|
||||
|
||||
def resolve_host_path(volume, working_dir):
|
||||
container_path, host_path = split_volume(volume)
|
||||
def resolve_volume_path(volume, working_dir, service_name):
|
||||
container_path, host_path = split_path_mapping(volume)
|
||||
container_path = os.path.expanduser(os.path.expandvars(container_path))
|
||||
|
||||
if host_path is not None:
|
||||
host_path = os.path.expanduser(host_path)
|
||||
host_path = os.path.expandvars(host_path)
|
||||
host_path = os.path.expanduser(os.path.expandvars(host_path))
|
||||
|
||||
if not any(host_path.startswith(c) for c in PATH_START_CHARS):
|
||||
log.warn(
|
||||
'Warning: the mapping "{0}:{1}" in the volumes config for '
|
||||
'service "{2}" is ambiguous. In a future version of Docker, '
|
||||
'it will designate a "named" volume '
|
||||
'(see https://github.com/docker/docker/pull/14242). '
|
||||
'To prevent unexpected behaviour, change it to "./{0}:{1}"'
|
||||
.format(host_path, container_path, service_name)
|
||||
)
|
||||
|
||||
return "%s:%s" % (expand_path(working_dir, host_path), container_path)
|
||||
else:
|
||||
return container_path
|
||||
@@ -338,32 +456,34 @@ def resolve_host_path(volume, working_dir):
|
||||
def resolve_build_path(build_path, working_dir=None):
|
||||
if working_dir is None:
|
||||
raise Exception("No working_dir passed to resolve_build_path")
|
||||
|
||||
_path = expand_path(working_dir, build_path)
|
||||
if not os.path.exists(_path) or not os.access(_path, os.R_OK):
|
||||
raise ConfigurationError("build path %s either does not exist or is not accessible." % _path)
|
||||
else:
|
||||
return _path
|
||||
return expand_path(working_dir, build_path)
|
||||
|
||||
|
||||
def merge_volumes(base, override):
|
||||
d = dict_from_volumes(base)
|
||||
d.update(dict_from_volumes(override))
|
||||
return volumes_from_dict(d)
|
||||
def validate_paths(service_dict):
|
||||
if 'build' in service_dict:
|
||||
build_path = service_dict['build']
|
||||
if not os.path.exists(build_path) or not os.access(build_path, os.R_OK):
|
||||
raise ConfigurationError("build path %s either does not exist or is not accessible." % build_path)
|
||||
|
||||
|
||||
def dict_from_volumes(volumes):
|
||||
if volumes:
|
||||
return dict(split_volume(v) for v in volumes)
|
||||
def merge_path_mappings(base, override):
|
||||
d = dict_from_path_mappings(base)
|
||||
d.update(dict_from_path_mappings(override))
|
||||
return path_mappings_from_dict(d)
|
||||
|
||||
|
||||
def dict_from_path_mappings(path_mappings):
|
||||
if path_mappings:
|
||||
return dict(split_path_mapping(v) for v in path_mappings)
|
||||
else:
|
||||
return {}
|
||||
|
||||
|
||||
def volumes_from_dict(d):
|
||||
return [join_volume(v) for v in d.items()]
|
||||
def path_mappings_from_dict(d):
|
||||
return [join_path_mapping(v) for v in d.items()]
|
||||
|
||||
|
||||
def split_volume(string):
|
||||
def split_path_mapping(string):
|
||||
if ':' in string:
|
||||
(host, container) = string.split(':', 1)
|
||||
return (container, host)
|
||||
@@ -371,7 +491,7 @@ def split_volume(string):
|
||||
return (string, None)
|
||||
|
||||
|
||||
def join_volume(pair):
|
||||
def join_path_mapping(pair):
|
||||
(container, host) = pair
|
||||
if host is None:
|
||||
return container
|
||||
@@ -379,6 +499,35 @@ def join_volume(pair):
|
||||
return ":".join((host, container))
|
||||
|
||||
|
||||
def merge_labels(base, override):
|
||||
labels = parse_labels(base)
|
||||
labels.update(parse_labels(override))
|
||||
return labels
|
||||
|
||||
|
||||
def parse_labels(labels):
|
||||
if not labels:
|
||||
return {}
|
||||
|
||||
if isinstance(labels, list):
|
||||
return dict(split_label(e) for e in labels)
|
||||
|
||||
if isinstance(labels, dict):
|
||||
return labels
|
||||
|
||||
raise ConfigurationError(
|
||||
"labels \"%s\" must be a list or mapping" %
|
||||
labels
|
||||
)
|
||||
|
||||
|
||||
def split_label(label):
|
||||
if '=' in label:
|
||||
return label.split('=', 1)
|
||||
else:
|
||||
return label, ''
|
||||
|
||||
|
||||
def expand_path(working_dir, path):
|
||||
return os.path.abspath(os.path.join(working_dir, path))
|
||||
|
||||
@@ -430,3 +579,12 @@ class CircularReference(ConfigurationError):
|
||||
for (filename, service_name) in self.trail
|
||||
]
|
||||
return "Circular reference:\n {}".format("\n extends ".join(lines))
|
||||
|
||||
|
||||
class ComposeFileNotFound(ConfigurationError):
|
||||
def __init__(self, supported_filenames):
|
||||
super(ComposeFileNotFound, self).__init__("""
|
||||
Can't find a suitable configuration file in this directory or any parent. Are you in the right directory?
|
||||
|
||||
Supported filenames: %s
|
||||
""" % ", ".join(supported_filenames))
|
||||
|
||||
8
compose/const.py
Normal file
8
compose/const.py
Normal file
@@ -0,0 +1,8 @@
|
||||
|
||||
DEFAULT_TIMEOUT = 10
|
||||
LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
|
||||
LABEL_ONE_OFF = 'com.docker.compose.oneoff'
|
||||
LABEL_PROJECT = 'com.docker.compose.project'
|
||||
LABEL_SERVICE = 'com.docker.compose.service'
|
||||
LABEL_VERSION = 'com.docker.compose.version'
|
||||
LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
|
||||
@@ -4,6 +4,8 @@ from __future__ import absolute_import
|
||||
import six
|
||||
from functools import reduce
|
||||
|
||||
from .const import LABEL_CONTAINER_NUMBER, LABEL_SERVICE
|
||||
|
||||
|
||||
class Container(object):
|
||||
"""
|
||||
@@ -20,10 +22,14 @@ class Container(object):
|
||||
"""
|
||||
Construct a container object from the output of GET /containers/json.
|
||||
"""
|
||||
name = get_container_name(dictionary)
|
||||
if name is None:
|
||||
return None
|
||||
|
||||
new_dictionary = {
|
||||
'Id': dictionary['Id'],
|
||||
'Image': dictionary['Image'],
|
||||
'Name': '/' + get_container_name(dictionary),
|
||||
'Name': '/' + name,
|
||||
}
|
||||
return cls(client, new_dictionary, **kwargs)
|
||||
|
||||
@@ -44,6 +50,10 @@ class Container(object):
|
||||
def image(self):
|
||||
return self.dictionary['Image']
|
||||
|
||||
@property
|
||||
def image_config(self):
|
||||
return self.client.inspect_image(self.image)
|
||||
|
||||
@property
|
||||
def short_id(self):
|
||||
return self.id[:10]
|
||||
@@ -52,16 +62,21 @@ class Container(object):
|
||||
def name(self):
|
||||
return self.dictionary['Name'][1:]
|
||||
|
||||
@property
|
||||
def service(self):
|
||||
return self.labels.get(LABEL_SERVICE)
|
||||
|
||||
@property
|
||||
def name_without_project(self):
|
||||
return '_'.join(self.dictionary['Name'].split('_')[1:])
|
||||
return '{0}_{1}'.format(self.service, self.number)
|
||||
|
||||
@property
|
||||
def number(self):
|
||||
try:
|
||||
return int(self.name.split('_')[-1])
|
||||
except ValueError:
|
||||
return None
|
||||
number = self.labels.get(LABEL_CONTAINER_NUMBER)
|
||||
if not number:
|
||||
raise ValueError("Container {0} does not have a {1} label".format(
|
||||
self.short_id, LABEL_CONTAINER_NUMBER))
|
||||
return int(number)
|
||||
|
||||
@property
|
||||
def ports(self):
|
||||
@@ -79,6 +94,14 @@ class Container(object):
|
||||
return ', '.join(format_port(*item)
|
||||
for item in sorted(six.iteritems(self.ports)))
|
||||
|
||||
@property
|
||||
def labels(self):
|
||||
return self.get('Config.Labels') or {}
|
||||
|
||||
@property
|
||||
def log_config(self):
|
||||
return self.get('HostConfig.LogConfig') or None
|
||||
|
||||
@property
|
||||
def human_readable_state(self):
|
||||
if self.is_running:
|
||||
@@ -126,8 +149,8 @@ class Container(object):
|
||||
def kill(self, **options):
|
||||
return self.client.kill(self.id, **options)
|
||||
|
||||
def restart(self):
|
||||
return self.client.restart(self.id)
|
||||
def restart(self, **options):
|
||||
return self.client.restart(self.id, **options)
|
||||
|
||||
def remove(self, **options):
|
||||
return self.client.remove_container(self.id, **options)
|
||||
@@ -147,6 +170,7 @@ class Container(object):
|
||||
self.has_been_inspected = True
|
||||
return self.dictionary
|
||||
|
||||
# TODO: only used by tests, move to test module
|
||||
def links(self):
|
||||
links = []
|
||||
for container in self.client.containers():
|
||||
@@ -163,13 +187,16 @@ class Container(object):
|
||||
return self.client.attach_socket(self.id, **kwargs)
|
||||
|
||||
def __repr__(self):
|
||||
return '<Container: %s>' % self.name
|
||||
return '<Container: %s (%s)>' % (self.name, self.id[:6])
|
||||
|
||||
def __eq__(self, other):
|
||||
if type(self) != type(other):
|
||||
return False
|
||||
return self.id == other.id
|
||||
|
||||
def __hash__(self):
|
||||
return self.id.__hash__()
|
||||
|
||||
|
||||
def get_container_name(container):
|
||||
if not container.get('Name') and not container.get('Names'):
|
||||
|
||||
180
compose/legacy.py
Normal file
180
compose/legacy.py
Normal file
@@ -0,0 +1,180 @@
|
||||
import logging
|
||||
import re
|
||||
|
||||
from .const import LABEL_VERSION
|
||||
from .container import get_container_name, Container
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# TODO: remove this section when migrate_project_to_labels is removed
|
||||
NAME_RE = re.compile(r'^([^_]+)_([^_]+)_(run_)?(\d+)$')
|
||||
|
||||
ERROR_MESSAGE_FORMAT = """
|
||||
Compose found the following containers without labels:
|
||||
|
||||
{names_list}
|
||||
|
||||
As of Compose 1.3.0, containers are identified with labels instead of naming convention. If you want to continue using these containers, run:
|
||||
|
||||
$ docker-compose migrate-to-labels
|
||||
|
||||
Alternatively, remove them:
|
||||
|
||||
$ docker rm -f {rm_args}
|
||||
"""
|
||||
|
||||
ONE_OFF_ADDENDUM_FORMAT = """
|
||||
You should also remove your one-off containers:
|
||||
|
||||
$ docker rm -f {rm_args}
|
||||
"""
|
||||
|
||||
ONE_OFF_ERROR_MESSAGE_FORMAT = """
|
||||
Compose found the following containers without labels:
|
||||
|
||||
{names_list}
|
||||
|
||||
As of Compose 1.3.0, containers are identified with labels instead of naming convention.
|
||||
|
||||
Remove them before continuing:
|
||||
|
||||
$ docker rm -f {rm_args}
|
||||
"""
|
||||
|
||||
|
||||
def check_for_legacy_containers(
|
||||
client,
|
||||
project,
|
||||
services,
|
||||
allow_one_off=True):
|
||||
"""Check if there are containers named using the old naming convention
|
||||
and warn the user that those containers may need to be migrated to
|
||||
using labels, so that compose can find them.
|
||||
"""
|
||||
containers = get_legacy_containers(client, project, services, one_off=False)
|
||||
|
||||
if containers:
|
||||
one_off_containers = get_legacy_containers(client, project, services, one_off=True)
|
||||
|
||||
raise LegacyContainersError(
|
||||
[c.name for c in containers],
|
||||
[c.name for c in one_off_containers],
|
||||
)
|
||||
|
||||
if not allow_one_off:
|
||||
one_off_containers = get_legacy_containers(client, project, services, one_off=True)
|
||||
|
||||
if one_off_containers:
|
||||
raise LegacyOneOffContainersError(
|
||||
[c.name for c in one_off_containers],
|
||||
)
|
||||
|
||||
|
||||
class LegacyError(Exception):
|
||||
def __unicode__(self):
|
||||
return self.msg
|
||||
|
||||
__str__ = __unicode__
|
||||
|
||||
|
||||
class LegacyContainersError(LegacyError):
|
||||
def __init__(self, names, one_off_names):
|
||||
self.names = names
|
||||
self.one_off_names = one_off_names
|
||||
|
||||
self.msg = ERROR_MESSAGE_FORMAT.format(
|
||||
names_list="\n".join(" {}".format(name) for name in names),
|
||||
rm_args=" ".join(names),
|
||||
)
|
||||
|
||||
if one_off_names:
|
||||
self.msg += ONE_OFF_ADDENDUM_FORMAT.format(rm_args=" ".join(one_off_names))
|
||||
|
||||
|
||||
class LegacyOneOffContainersError(LegacyError):
|
||||
def __init__(self, one_off_names):
|
||||
self.one_off_names = one_off_names
|
||||
|
||||
self.msg = ONE_OFF_ERROR_MESSAGE_FORMAT.format(
|
||||
names_list="\n".join(" {}".format(name) for name in one_off_names),
|
||||
rm_args=" ".join(one_off_names),
|
||||
)
|
||||
|
||||
|
||||
def add_labels(project, container):
|
||||
project_name, service_name, one_off, number = NAME_RE.match(container.name).groups()
|
||||
if project_name != project.name or service_name not in project.service_names:
|
||||
return
|
||||
service = project.get_service(service_name)
|
||||
service.recreate_container(container)
|
||||
|
||||
|
||||
def migrate_project_to_labels(project):
|
||||
log.info("Running migration to labels for project %s", project.name)
|
||||
|
||||
containers = get_legacy_containers(
|
||||
project.client,
|
||||
project.name,
|
||||
project.service_names,
|
||||
one_off=False,
|
||||
)
|
||||
|
||||
for container in containers:
|
||||
add_labels(project, container)
|
||||
|
||||
|
||||
def get_legacy_containers(
|
||||
client,
|
||||
project,
|
||||
services,
|
||||
one_off=False):
|
||||
|
||||
return list(_get_legacy_containers_iter(
|
||||
client,
|
||||
project,
|
||||
services,
|
||||
one_off=one_off,
|
||||
))
|
||||
|
||||
|
||||
def _get_legacy_containers_iter(
|
||||
client,
|
||||
project,
|
||||
services,
|
||||
one_off=False):
|
||||
|
||||
containers = client.containers(all=True)
|
||||
|
||||
for service in services:
|
||||
for container in containers:
|
||||
if LABEL_VERSION in (container.get('Labels') or {}):
|
||||
continue
|
||||
|
||||
name = get_container_name(container)
|
||||
if has_container(project, service, name, one_off=one_off):
|
||||
yield Container.from_ps(client, container)
|
||||
|
||||
|
||||
def has_container(project, service, name, one_off=False):
|
||||
if not name or not is_valid_name(name, one_off):
|
||||
return False
|
||||
container_project, container_service, _container_number = parse_name(name)
|
||||
return container_project == project and container_service == service
|
||||
|
||||
|
||||
def is_valid_name(name, one_off=False):
|
||||
match = NAME_RE.match(name)
|
||||
if match is None:
|
||||
return False
|
||||
if one_off:
|
||||
return match.group(3) == 'run_'
|
||||
else:
|
||||
return match.group(3) is None
|
||||
|
||||
|
||||
def parse_name(name):
|
||||
match = NAME_RE.match(name)
|
||||
(project, service_name, _, suffix) = match.groups()
|
||||
return (project, service_name, int(suffix))
|
||||
@@ -74,8 +74,9 @@ def print_output_event(event, stream, is_terminal):
|
||||
stream.write("%s %s%s" % (status, event['progress'], terminator))
|
||||
elif 'progressDetail' in event:
|
||||
detail = event['progressDetail']
|
||||
if 'current' in detail:
|
||||
percentage = float(detail['current']) / float(detail['total']) * 100
|
||||
total = detail.get('total')
|
||||
if 'current' in detail and total:
|
||||
percentage = float(detail['current']) / float(total) * 100
|
||||
stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
|
||||
else:
|
||||
stream.write('%s%s' % (status, terminator))
|
||||
|
||||
@@ -1,13 +1,20 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
from functools import reduce
|
||||
import logging
|
||||
|
||||
from functools import reduce
|
||||
from .config import get_service_name_from_net, ConfigurationError
|
||||
from .service import Service
|
||||
from .container import Container
|
||||
from docker.errors import APIError
|
||||
|
||||
from .config import get_service_name_from_net, ConfigurationError
|
||||
from .const import DEFAULT_TIMEOUT, LABEL_PROJECT, LABEL_SERVICE, LABEL_ONE_OFF
|
||||
from .container import Container
|
||||
from .legacy import check_for_legacy_containers
|
||||
from .service import ContainerNet
|
||||
from .service import Net
|
||||
from .service import Service
|
||||
from .service import ServiceNet
|
||||
from .utils import parallel_execute
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -60,6 +67,12 @@ class Project(object):
|
||||
self.services = services
|
||||
self.client = client
|
||||
|
||||
def labels(self, one_off=False):
|
||||
return [
|
||||
'{0}={1}'.format(LABEL_PROJECT, self.name),
|
||||
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def from_dicts(cls, name, service_dicts, client):
|
||||
"""
|
||||
@@ -71,10 +84,20 @@ class Project(object):
|
||||
volumes_from = project.get_volumes_from(service_dict)
|
||||
net = project.get_net(service_dict)
|
||||
|
||||
project.services.append(Service(client=client, project=name, links=links, net=net,
|
||||
volumes_from=volumes_from, **service_dict))
|
||||
project.services.append(
|
||||
Service(
|
||||
client=client,
|
||||
project=name,
|
||||
links=links,
|
||||
net=net,
|
||||
volumes_from=volumes_from,
|
||||
**service_dict))
|
||||
return project
|
||||
|
||||
@property
|
||||
def service_names(self):
|
||||
return [service.name for service in self.services]
|
||||
|
||||
def get_service(self, name):
|
||||
"""
|
||||
Retrieve a service by name. Raises NoSuchService
|
||||
@@ -86,6 +109,16 @@ class Project(object):
|
||||
|
||||
raise NoSuchService(name)
|
||||
|
||||
def validate_service_names(self, service_names):
|
||||
"""
|
||||
Validate that the given list of service names only contains valid
|
||||
services. Raises NoSuchService if one of the names is invalid.
|
||||
"""
|
||||
valid_names = self.service_names
|
||||
for name in service_names:
|
||||
if name not in valid_names:
|
||||
raise NoSuchService(name)
|
||||
|
||||
def get_services(self, service_names=None, include_deps=False):
|
||||
"""
|
||||
Returns a list of this project's services filtered
|
||||
@@ -102,7 +135,7 @@ class Project(object):
|
||||
"""
|
||||
if service_names is None or len(service_names) == 0:
|
||||
return self.get_services(
|
||||
service_names=[s.name for s in self.services],
|
||||
service_names=self.service_names,
|
||||
include_deps=include_deps
|
||||
)
|
||||
else:
|
||||
@@ -148,38 +181,56 @@ class Project(object):
|
||||
return volumes_from
|
||||
|
||||
def get_net(self, service_dict):
|
||||
if 'net' in service_dict:
|
||||
net_name = get_service_name_from_net(service_dict.get('net'))
|
||||
net = service_dict.pop('net', None)
|
||||
if not net:
|
||||
return Net(None)
|
||||
|
||||
if net_name:
|
||||
try:
|
||||
net = self.get_service(net_name)
|
||||
except NoSuchService:
|
||||
try:
|
||||
net = Container.from_id(self.client, net_name)
|
||||
except APIError:
|
||||
raise ConfigurationError('Serivce "%s" is trying to use the network of "%s", which is not the name of a service or container.' % (service_dict['name'], net_name))
|
||||
else:
|
||||
net = service_dict['net']
|
||||
net_name = get_service_name_from_net(net)
|
||||
if not net_name:
|
||||
return Net(net)
|
||||
|
||||
del service_dict['net']
|
||||
|
||||
else:
|
||||
net = 'bridge'
|
||||
|
||||
return net
|
||||
try:
|
||||
return ServiceNet(self.get_service(net_name))
|
||||
except NoSuchService:
|
||||
pass
|
||||
try:
|
||||
return ContainerNet(Container.from_id(self.client, net_name))
|
||||
except APIError:
|
||||
raise ConfigurationError(
|
||||
'Service "%s" is trying to use the network of "%s", '
|
||||
'which is not the name of a service or container.' % (
|
||||
service_dict['name'],
|
||||
net_name))
|
||||
|
||||
def start(self, service_names=None, **options):
|
||||
for service in self.get_services(service_names):
|
||||
service.start(**options)
|
||||
|
||||
def stop(self, service_names=None, **options):
|
||||
for service in reversed(self.get_services(service_names)):
|
||||
service.stop(**options)
|
||||
parallel_execute(
|
||||
objects=self.containers(service_names),
|
||||
obj_callable=lambda c: c.stop(**options),
|
||||
msg_index=lambda c: c.name,
|
||||
msg="Stopping"
|
||||
)
|
||||
|
||||
def kill(self, service_names=None, **options):
|
||||
for service in reversed(self.get_services(service_names)):
|
||||
service.kill(**options)
|
||||
parallel_execute(
|
||||
objects=self.containers(service_names),
|
||||
obj_callable=lambda c: c.kill(**options),
|
||||
msg_index=lambda c: c.name,
|
||||
msg="Killing"
|
||||
)
|
||||
|
||||
def remove_stopped(self, service_names=None, **options):
|
||||
all_containers = self.containers(service_names, stopped=True)
|
||||
stopped_containers = [c for c in all_containers if not c.is_running]
|
||||
parallel_execute(
|
||||
objects=stopped_containers,
|
||||
obj_callable=lambda c: c.remove(**options),
|
||||
msg_index=lambda c: c.name,
|
||||
msg="Removing"
|
||||
)
|
||||
|
||||
def restart(self, service_names=None, **options):
|
||||
for service in self.get_services(service_names):
|
||||
@@ -195,46 +246,99 @@ class Project(object):
|
||||
def up(self,
|
||||
service_names=None,
|
||||
start_deps=True,
|
||||
recreate=True,
|
||||
insecure_registry=False,
|
||||
detach=False,
|
||||
do_build=True):
|
||||
running_containers = []
|
||||
for service in self.get_services(service_names, include_deps=start_deps):
|
||||
if recreate:
|
||||
for (_, container) in service.recreate_containers(
|
||||
insecure_registry=insecure_registry,
|
||||
detach=detach,
|
||||
do_build=do_build):
|
||||
running_containers.append(container)
|
||||
allow_recreate=True,
|
||||
force_recreate=False,
|
||||
do_build=True,
|
||||
timeout=DEFAULT_TIMEOUT):
|
||||
|
||||
if force_recreate and not allow_recreate:
|
||||
raise ValueError("force_recreate and allow_recreate are in conflict")
|
||||
|
||||
services = self.get_services(service_names, include_deps=start_deps)
|
||||
|
||||
for service in services:
|
||||
service.remove_duplicate_containers()
|
||||
|
||||
plans = self._get_convergence_plans(
|
||||
services,
|
||||
allow_recreate=allow_recreate,
|
||||
force_recreate=force_recreate,
|
||||
)
|
||||
|
||||
return [
|
||||
container
|
||||
for service in services
|
||||
for container in service.execute_convergence_plan(
|
||||
plans[service.name],
|
||||
do_build=do_build,
|
||||
timeout=timeout
|
||||
)
|
||||
]
|
||||
|
||||
def _get_convergence_plans(self,
|
||||
services,
|
||||
allow_recreate=True,
|
||||
force_recreate=False):
|
||||
|
||||
plans = {}
|
||||
|
||||
for service in services:
|
||||
updated_dependencies = [
|
||||
name
|
||||
for name in service.get_dependency_names()
|
||||
if name in plans
|
||||
and plans[name].action == 'recreate'
|
||||
]
|
||||
|
||||
if updated_dependencies and allow_recreate:
|
||||
log.debug(
|
||||
'%s has upstream changes (%s)',
|
||||
service.name, ", ".join(updated_dependencies),
|
||||
)
|
||||
plan = service.convergence_plan(
|
||||
allow_recreate=allow_recreate,
|
||||
force_recreate=True,
|
||||
)
|
||||
else:
|
||||
for container in service.start_or_create_containers(
|
||||
insecure_registry=insecure_registry,
|
||||
detach=detach,
|
||||
do_build=do_build):
|
||||
running_containers.append(container)
|
||||
plan = service.convergence_plan(
|
||||
allow_recreate=allow_recreate,
|
||||
force_recreate=force_recreate,
|
||||
)
|
||||
|
||||
return running_containers
|
||||
plans[service.name] = plan
|
||||
|
||||
def pull(self, service_names=None, insecure_registry=False):
|
||||
return plans
|
||||
|
||||
def pull(self, service_names=None):
|
||||
for service in self.get_services(service_names, include_deps=True):
|
||||
service.pull(insecure_registry=insecure_registry)
|
||||
|
||||
def remove_stopped(self, service_names=None, **options):
|
||||
for service in self.get_services(service_names):
|
||||
service.remove_stopped(**options)
|
||||
service.pull()
|
||||
|
||||
def containers(self, service_names=None, stopped=False, one_off=False):
|
||||
return [Container.from_ps(self.client, container)
|
||||
for container in self.client.containers(all=stopped)
|
||||
for service in self.get_services(service_names)
|
||||
if service.has_container(container, one_off=one_off)]
|
||||
if service_names:
|
||||
self.validate_service_names(service_names)
|
||||
else:
|
||||
service_names = self.service_names
|
||||
|
||||
containers = filter(None, [
|
||||
Container.from_ps(self.client, container)
|
||||
for container in self.client.containers(
|
||||
all=stopped,
|
||||
filters={'label': self.labels(one_off=one_off)})])
|
||||
|
||||
def matches_service_names(container):
|
||||
return container.labels.get(LABEL_SERVICE) in service_names
|
||||
|
||||
if not containers:
|
||||
check_for_legacy_containers(
|
||||
self.client,
|
||||
self.name,
|
||||
self.service_names,
|
||||
)
|
||||
|
||||
return filter(matches_service_names, containers)
|
||||
|
||||
def _inject_deps(self, acc, service):
|
||||
net_name = service.get_net_name()
|
||||
dep_names = (service.get_linked_names() +
|
||||
service.get_volumes_from_names() +
|
||||
([net_name] if net_name else []))
|
||||
dep_names = service.get_dependency_names()
|
||||
|
||||
if len(dep_names) > 0:
|
||||
dep_services = self.get_services(
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
0
compose/state.py
Normal file
0
compose/state.py
Normal file
100
compose/utils.py
Normal file
100
compose/utils.py
Normal file
@@ -0,0 +1,100 @@
|
||||
import codecs
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from docker.errors import APIError
|
||||
from Queue import Queue, Empty
|
||||
from threading import Thread
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def parallel_execute(objects, obj_callable, msg_index, msg):
|
||||
"""
|
||||
For a given list of objects, call the callable passing in the first
|
||||
object we give it.
|
||||
"""
|
||||
stream = codecs.getwriter('utf-8')(sys.stdout)
|
||||
lines = []
|
||||
errors = {}
|
||||
|
||||
for obj in objects:
|
||||
write_out_msg(stream, lines, msg_index(obj), msg)
|
||||
|
||||
q = Queue()
|
||||
|
||||
def inner_execute_function(an_callable, parameter, msg_index):
|
||||
try:
|
||||
result = an_callable(parameter)
|
||||
except APIError as e:
|
||||
errors[msg_index] = e.explanation
|
||||
result = "error"
|
||||
except Exception as e:
|
||||
errors[msg_index] = e
|
||||
result = 'unexpected_exception'
|
||||
|
||||
q.put((msg_index, result))
|
||||
|
||||
for an_object in objects:
|
||||
t = Thread(
|
||||
target=inner_execute_function,
|
||||
args=(obj_callable, an_object, msg_index(an_object)),
|
||||
)
|
||||
t.daemon = True
|
||||
t.start()
|
||||
|
||||
done = 0
|
||||
total_to_execute = len(objects)
|
||||
|
||||
while done < total_to_execute:
|
||||
try:
|
||||
msg_index, result = q.get(timeout=1)
|
||||
|
||||
if result == 'unexpected_exception':
|
||||
raise errors[msg_index]
|
||||
if result == 'error':
|
||||
write_out_msg(stream, lines, msg_index, msg, status='error')
|
||||
else:
|
||||
write_out_msg(stream, lines, msg_index, msg)
|
||||
done += 1
|
||||
except Empty:
|
||||
pass
|
||||
|
||||
if errors:
|
||||
stream.write("\n")
|
||||
for error in errors:
|
||||
stream.write("ERROR: for {} {} \n".format(error, errors[error]))
|
||||
|
||||
|
||||
def write_out_msg(stream, lines, msg_index, msg, status="done"):
|
||||
"""
|
||||
Using special ANSI code characters we can write out the msg over the top of
|
||||
a previous status message, if it exists.
|
||||
"""
|
||||
obj_index = msg_index
|
||||
if msg_index in lines:
|
||||
position = lines.index(obj_index)
|
||||
diff = len(lines) - position
|
||||
# move up
|
||||
stream.write("%c[%dA" % (27, diff))
|
||||
# erase
|
||||
stream.write("%c[2K\r" % 27)
|
||||
stream.write("{} {}... {}\n".format(msg, obj_index, status))
|
||||
# move back down
|
||||
stream.write("%c[%dB" % (27, diff))
|
||||
else:
|
||||
diff = 0
|
||||
lines.append(obj_index)
|
||||
stream.write("{} {}... \r\n".format(msg, obj_index))
|
||||
|
||||
stream.flush()
|
||||
|
||||
|
||||
def json_hash(obj):
|
||||
dump = json.dumps(obj, sort_keys=True, separators=(',', ':'))
|
||||
h = hashlib.sha256()
|
||||
h.update(dump)
|
||||
return h.hexdigest()
|
||||
@@ -82,7 +82,7 @@ __docker-compose_services_stopped() {
|
||||
_docker-compose_build() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--no-cache" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--help --no-cache" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker-compose_services_from_build
|
||||
@@ -94,7 +94,7 @@ _docker-compose_build() {
|
||||
_docker-compose_docker-compose() {
|
||||
case "$prev" in
|
||||
--file|-f)
|
||||
_filedir y?(a)ml
|
||||
_filedir "y?(a)ml"
|
||||
return
|
||||
;;
|
||||
--project-name|-p)
|
||||
@@ -104,7 +104,7 @@ _docker-compose_docker-compose() {
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--help -h --verbose --version --file -f --project-name -p" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--help -h --verbose --version -v --file -f --project-name -p" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) )
|
||||
@@ -128,7 +128,7 @@ _docker-compose_kill() {
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "-s" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--help -s" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker-compose_services_running
|
||||
@@ -140,7 +140,7 @@ _docker-compose_kill() {
|
||||
_docker-compose_logs() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--no-color" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--help --no-color" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker-compose_services_all
|
||||
@@ -149,6 +149,15 @@ _docker-compose_logs() {
|
||||
}
|
||||
|
||||
|
||||
_docker-compose_migrate-to-labels() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
|
||||
_docker-compose_port() {
|
||||
case "$prev" in
|
||||
--protocol)
|
||||
@@ -162,7 +171,7 @@ _docker-compose_port() {
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--protocol --index" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--help --index --protocol" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker-compose_services_all
|
||||
@@ -174,7 +183,7 @@ _docker-compose_port() {
|
||||
_docker-compose_ps() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "-q" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--help -q" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker-compose_services_all
|
||||
@@ -186,7 +195,7 @@ _docker-compose_ps() {
|
||||
_docker-compose_pull() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--allow-insecure-ssl" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker-compose_services_from_image
|
||||
@@ -204,7 +213,7 @@ _docker-compose_restart() {
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "-t --timeout" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker-compose_services_running
|
||||
@@ -216,7 +225,7 @@ _docker-compose_restart() {
|
||||
_docker-compose_rm() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--force -f -v" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--force -f --help -v" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker-compose_services_stopped
|
||||
@@ -239,7 +248,7 @@ _docker-compose_run() {
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--allow-insecure-ssl -d --entrypoint -e --no-deps --rm --service-ports -T --user -u" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "-d --entrypoint -e --help --no-deps --rm --service-ports -T --user -u" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker-compose_services_all
|
||||
@@ -258,11 +267,24 @@ _docker-compose_scale() {
|
||||
compopt -o nospace
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
|
||||
_docker-compose_start() {
|
||||
__docker-compose_services_stopped
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker-compose_services_stopped
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
|
||||
@@ -275,7 +297,7 @@ _docker-compose_stop() {
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "-t --timeout" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker-compose_services_running
|
||||
@@ -293,7 +315,7 @@ _docker-compose_up() {
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--allow-insecure-ssl -d --no-build --no-color --no-deps --no-recreate -t --timeout" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "-d --help --no-build --no-color --no-deps --no-recreate --force-recreate --timeout -t" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker-compose_services_all
|
||||
@@ -302,12 +324,25 @@ _docker-compose_up() {
|
||||
}
|
||||
|
||||
|
||||
_docker-compose_version() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--short" -- "$cur" ) )
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
|
||||
_docker-compose() {
|
||||
local previous_extglob_setting=$(shopt -p extglob)
|
||||
shopt -s extglob
|
||||
|
||||
local commands=(
|
||||
build
|
||||
help
|
||||
kill
|
||||
logs
|
||||
migrate-to-labels
|
||||
port
|
||||
ps
|
||||
pull
|
||||
@@ -318,6 +353,7 @@ _docker-compose() {
|
||||
start
|
||||
stop
|
||||
up
|
||||
version
|
||||
)
|
||||
|
||||
COMPREPLY=()
|
||||
@@ -352,6 +388,7 @@ _docker-compose() {
|
||||
local completions_func=_docker-compose_${command}
|
||||
declare -F $completions_func >/dev/null && $completions_func
|
||||
|
||||
eval "$previous_extglob_setting"
|
||||
return 0
|
||||
}
|
||||
|
||||
|
||||
321
contrib/completion/zsh/_docker-compose
Normal file
321
contrib/completion/zsh/_docker-compose
Normal file
@@ -0,0 +1,321 @@
|
||||
#compdef docker-compose
|
||||
|
||||
# Description
|
||||
# -----------
|
||||
# zsh completion for docker-compose
|
||||
# https://github.com/sdurrheimer/docker-compose-zsh-completion
|
||||
# -------------------------------------------------------------------------
|
||||
# Version
|
||||
# -------
|
||||
# 0.1.0
|
||||
# -------------------------------------------------------------------------
|
||||
# Authors
|
||||
# -------
|
||||
# * Steve Durrheimer <s.durrheimer@gmail.com>
|
||||
# -------------------------------------------------------------------------
|
||||
# Inspiration
|
||||
# -----------
|
||||
# * @albers docker-compose bash completion script
|
||||
# * @felixr docker zsh completion script : https://github.com/felixr/docker-zsh-completion
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
# For compatibility reasons, Compose and therefore its completion supports several
|
||||
# stack compositon files as listed here, in descending priority.
|
||||
# Support for these filenames might be dropped in some future version.
|
||||
__docker-compose_compose_file() {
|
||||
local file
|
||||
for file in docker-compose.y{,a}ml fig.y{,a}ml ; do
|
||||
[ -e $file ] && {
|
||||
echo $file
|
||||
return
|
||||
}
|
||||
done
|
||||
echo docker-compose.yml
|
||||
}
|
||||
|
||||
# Extracts all service names from docker-compose.yml.
|
||||
___docker-compose_all_services_in_compose_file() {
|
||||
local already_selected
|
||||
local -a services
|
||||
already_selected=$(echo ${words[@]} | tr " " "|")
|
||||
awk -F: '/^[a-zA-Z0-9]/{print $1}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null | grep -Ev "$already_selected"
|
||||
}
|
||||
|
||||
# All services, even those without an existing container
|
||||
__docker-compose_services_all() {
|
||||
services=$(___docker-compose_all_services_in_compose_file)
|
||||
_alternative "args:services:($services)"
|
||||
}
|
||||
|
||||
# All services that have an entry with the given key in their docker-compose.yml section
|
||||
___docker-compose_services_with_key() {
|
||||
local already_selected
|
||||
local -a buildable
|
||||
already_selected=$(echo ${words[@]} | tr " " "|")
|
||||
# flatten sections to one line, then filter lines containing the key and return section name.
|
||||
awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null | awk -F: -v key=": +$1:" '$0 ~ key {print $1}' 2>/dev/null | grep -Ev "$already_selected"
|
||||
}
|
||||
|
||||
# All services that are defined by a Dockerfile reference
|
||||
__docker-compose_services_from_build() {
|
||||
buildable=$(___docker-compose_services_with_key build)
|
||||
_alternative "args:buildable services:($buildable)"
|
||||
}
|
||||
|
||||
# All services that are defined by an image
|
||||
__docker-compose_services_from_image() {
|
||||
pullable=$(___docker-compose_services_with_key image)
|
||||
_alternative "args:pullable services:($pullable)"
|
||||
}
|
||||
|
||||
__docker-compose_get_services() {
|
||||
local kind expl
|
||||
declare -a running stopped lines args services
|
||||
|
||||
docker_status=$(docker ps > /dev/null 2>&1)
|
||||
if [ $? -ne 0 ]; then
|
||||
_message "Error! Docker is not running."
|
||||
return 1
|
||||
fi
|
||||
|
||||
kind=$1
|
||||
shift
|
||||
[[ $kind = (stopped|all) ]] && args=($args -a)
|
||||
|
||||
lines=(${(f)"$(_call_program commands docker ps ${args})"})
|
||||
services=(${(f)"$(_call_program commands docker-compose 2>/dev/null ${compose_file:+-f $compose_file} ${compose_project:+-p $compose_project} ps -q)"})
|
||||
|
||||
# Parse header line to find columns
|
||||
local i=1 j=1 k header=${lines[1]}
|
||||
declare -A begin end
|
||||
while (( $j < ${#header} - 1 )) {
|
||||
i=$(( $j + ${${header[$j,-1]}[(i)[^ ]]} - 1))
|
||||
j=$(( $i + ${${header[$i,-1]}[(i) ]} - 1))
|
||||
k=$(( $j + ${${header[$j,-1]}[(i)[^ ]]} - 2))
|
||||
begin[${header[$i,$(($j-1))]}]=$i
|
||||
end[${header[$i,$(($j-1))]}]=$k
|
||||
}
|
||||
lines=(${lines[2,-1]})
|
||||
|
||||
# Container ID
|
||||
local line s name
|
||||
local -a names
|
||||
for line in $lines; do
|
||||
if [[ $services == *"${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"* ]]; then
|
||||
names=(${(ps:,:)${${line[${begin[NAMES]},-1]}%% *}})
|
||||
for name in $names; do
|
||||
s="${${name%_*}#*_}:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}"
|
||||
s="$s, ${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"
|
||||
s="$s, ${${${line[$begin[IMAGE],$end[IMAGE]]}/:/\\:}%% ##}"
|
||||
if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then
|
||||
stopped=($stopped $s)
|
||||
else
|
||||
running=($running $s)
|
||||
fi
|
||||
done
|
||||
fi
|
||||
done
|
||||
|
||||
[[ $kind = (running|all) ]] && _describe -t services-running "running services" running
|
||||
[[ $kind = (stopped|all) ]] && _describe -t services-stopped "stopped services" stopped
|
||||
}
|
||||
|
||||
__docker-compose_stoppedservices() {
|
||||
__docker-compose_get_services stopped "$@"
|
||||
}
|
||||
|
||||
__docker-compose_runningservices() {
|
||||
__docker-compose_get_services running "$@"
|
||||
}
|
||||
|
||||
__docker-compose_services () {
|
||||
__docker-compose_get_services all "$@"
|
||||
}
|
||||
|
||||
__docker-compose_caching_policy() {
|
||||
oldp=( "$1"(Nmh+1) ) # 1 hour
|
||||
(( $#oldp ))
|
||||
}
|
||||
|
||||
__docker-compose_commands () {
|
||||
local cache_policy
|
||||
|
||||
zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
|
||||
if [[ -z "$cache_policy" ]]; then
|
||||
zstyle ":completion:${curcontext}:" cache-policy __docker-compose_caching_policy
|
||||
fi
|
||||
|
||||
if ( [[ ${+_docker_compose_subcommands} -eq 0 ]] || _cache_invalid docker_compose_subcommands) \
|
||||
&& ! _retrieve_cache docker_compose_subcommands;
|
||||
then
|
||||
local -a lines
|
||||
lines=(${(f)"$(_call_program commands docker-compose 2>&1)"})
|
||||
_docker_compose_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:})
|
||||
_store_cache docker_compose_subcommands _docker_compose_subcommands
|
||||
fi
|
||||
_describe -t docker-compose-commands "docker-compose command" _docker_compose_subcommands
|
||||
}
|
||||
|
||||
__docker-compose_subcommand () {
|
||||
local -a _command_args
|
||||
integer ret=1
|
||||
case "$words[1]" in
|
||||
(build)
|
||||
_arguments \
|
||||
'--help[Print usage]' \
|
||||
'--no-cache[Do not use cache when building the image]' \
|
||||
'*:services:__docker-compose_services_from_build' && ret=0
|
||||
;;
|
||||
(help)
|
||||
_arguments ':subcommand:__docker-compose_commands' && ret=0
|
||||
;;
|
||||
(kill)
|
||||
_arguments \
|
||||
'--help[Print usage]' \
|
||||
'-s[SIGNAL to send to the container. Default signal is SIGKILL.]:signal:_signals' \
|
||||
'*:running services:__docker-compose_runningservices' && ret=0
|
||||
;;
|
||||
(logs)
|
||||
_arguments \
|
||||
'--help[Print usage]' \
|
||||
'--no-color[Produce monochrome output.]' \
|
||||
'*:services:__docker-compose_services_all' && ret=0
|
||||
;;
|
||||
(migrate-to-labels)
|
||||
_arguments -A '-*' \
|
||||
'--help[Print usage]' \
|
||||
'(-):Recreate containers to add labels' && ret=0
|
||||
;;
|
||||
(port)
|
||||
_arguments \
|
||||
'--help[Print usage]' \
|
||||
'--protocol=-[tcp or udap (defaults to tcp)]:protocol:(tcp udp)' \
|
||||
'--index=-[index of the container if there are mutiple instances of a service (defaults to 1)]:index: ' \
|
||||
'1:running services:__docker-compose_runningservices' \
|
||||
'2:port:_ports' && ret=0
|
||||
;;
|
||||
(ps)
|
||||
_arguments \
|
||||
'--help[Print usage]' \
|
||||
'-q[Only display IDs]' \
|
||||
'*:services:__docker-compose_services_all' && ret=0
|
||||
;;
|
||||
(pull)
|
||||
_arguments \
|
||||
'--help[Print usage]' \
|
||||
'*:services:__docker-compose_services_from_image' && ret=0
|
||||
;;
|
||||
(rm)
|
||||
_arguments \
|
||||
'(-f --force)'{-f,--force}"[Don't ask to confirm removal]" \
|
||||
'--help[Print usage]' \
|
||||
'-v[Remove volumes associated with containers]' \
|
||||
'*:stopped services:__docker-compose_stoppedservices' && ret=0
|
||||
;;
|
||||
(run)
|
||||
_arguments \
|
||||
'-d[Detached mode: Run container in the background, print new container name.]' \
|
||||
'--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \
|
||||
'*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
|
||||
'--help[Print usage]' \
|
||||
'(-u --user)'{-u,--user=-}'[Run as specified username or uid]:username or uid:_users' \
|
||||
"--no-deps[Don't start linked services.]" \
|
||||
'--rm[Remove container after run. Ignored in detached mode.]' \
|
||||
"--service-ports[Run command with the service's ports enabled and mapped to the host.]" \
|
||||
'-T[Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.]' \
|
||||
'(-):services:__docker-compose_services' \
|
||||
'(-):command: _command_names -e' \
|
||||
'*::arguments: _normal' && ret=0
|
||||
;;
|
||||
(scale)
|
||||
_arguments \
|
||||
'--help[Print usage]' \
|
||||
'*:running services:__docker-compose_runningservices' && ret=0
|
||||
;;
|
||||
(start)
|
||||
_arguments \
|
||||
'--help[Print usage]' \
|
||||
'*:stopped services:__docker-compose_stoppedservices' && ret=0
|
||||
;;
|
||||
(stop|restart)
|
||||
_arguments \
|
||||
'--help[Print usage]' \
|
||||
'(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \
|
||||
'*:running services:__docker-compose_runningservices' && ret=0
|
||||
;;
|
||||
(up)
|
||||
_arguments \
|
||||
'-d[Detached mode: Run containers in the background, print new container names.]' \
|
||||
'--help[Print usage]' \
|
||||
'--no-color[Produce monochrome output.]' \
|
||||
"--no-deps[Don't start linked services.]" \
|
||||
"--no-recreate[If containers already exist, don't recreate them.]" \
|
||||
"--force-recreate[Recreate containers even if their configuration and image haven't changed]" \
|
||||
"--no-build[Don't build an image, even if it's missing]" \
|
||||
'(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: " \
|
||||
'*:services:__docker-compose_services_all' && ret=0
|
||||
;;
|
||||
(version)
|
||||
_arguments \
|
||||
'--help[Print usage]' \
|
||||
"--short[Shows only Compose's version number.]" && ret=0
|
||||
;;
|
||||
(*)
|
||||
_message 'Unknown sub command'
|
||||
esac
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
_docker-compose () {
|
||||
# Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`.
|
||||
# Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`.
|
||||
if [[ $service != docker-compose ]]; then
|
||||
_call_function - _$service
|
||||
return
|
||||
fi
|
||||
|
||||
local curcontext="$curcontext" state line ret=1
|
||||
typeset -A opt_args
|
||||
|
||||
_arguments -C \
|
||||
'(- :)'{-h,--help}'[Get help]' \
|
||||
'--verbose[Show more output]' \
|
||||
'(- :)'{-v,--version}'[Print version and exit]' \
|
||||
'(-f --file)'{-f,--file}'[Specify an alternate docker-compose file (default: docker-compose.yml)]:file:_files -g "*.yml"' \
|
||||
'(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
|
||||
'(-): :->command' \
|
||||
'(-)*:: :->option-or-argument' && ret=0
|
||||
|
||||
local counter=1
|
||||
#local compose_file compose_project
|
||||
while [ $counter -lt ${#words[@]} ]; do
|
||||
case "${words[$counter]}" in
|
||||
-f|--file)
|
||||
(( counter++ ))
|
||||
compose_file="${words[$counter]}"
|
||||
;;
|
||||
-p|--project-name)
|
||||
(( counter++ ))
|
||||
compose_project="${words[$counter]}"
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
(( counter++ ))
|
||||
done
|
||||
|
||||
case $state in
|
||||
(command)
|
||||
__docker-compose_commands && ret=0
|
||||
;;
|
||||
(option-or-argument)
|
||||
curcontext=${curcontext%:*:*}:docker-compose-$words[1]:
|
||||
__docker-compose_subcommand && ret=0
|
||||
;;
|
||||
esac
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
_docker-compose "$@"
|
||||
@@ -1,15 +1,26 @@
|
||||
FROM docs/base:latest
|
||||
MAINTAINER Sven Dowideit <SvenDowideit@docker.com> (@SvenDowideit)
|
||||
MAINTAINER Mary Anthony <mary@docker.com> (@moxiegirl)
|
||||
|
||||
# to get the git info for this repo
|
||||
# To get the git info for this repo
|
||||
COPY . /src
|
||||
|
||||
# Reset the /docs dir so we can replace the theme meta with the new repo's git info
|
||||
RUN git reset --hard
|
||||
COPY . /docs/content/compose/
|
||||
|
||||
RUN grep "__version" /src/compose/__init__.py | sed "s/.*'\(.*\)'/\1/" > /docs/VERSION
|
||||
COPY docs/* /docs/sources/compose/
|
||||
COPY docs/mkdocs.yml /docs/mkdocs-compose.yml
|
||||
RUN svn checkout https://github.com/docker/docker/trunk/docs /docs/content/docker
|
||||
RUN svn checkout https://github.com/docker/swarm/trunk/docs /docs/content/swarm
|
||||
RUN svn checkout https://github.com/docker/machine/trunk/docs /docs/content/machine
|
||||
RUN svn checkout https://github.com/docker/distribution/trunk/docs /docs/content/registry
|
||||
RUN svn checkout https://github.com/docker/tutorials/trunk/docs /docs/content/tutorials
|
||||
RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content
|
||||
|
||||
# Then build everything together, ready for mkdocs
|
||||
RUN /docs/build.sh
|
||||
|
||||
# Sed to process GitHub Markdown
|
||||
# 1-2 Remove comment code from metadata block
|
||||
# 3 Change ](/word to ](/project/ in links
|
||||
# 4 Change ](word.md) to ](/project/word)
|
||||
# 5 Remove .md extension from link text
|
||||
# 6 Change ](../ to ](/project/word)
|
||||
# 7 Change ](../../ to ](/project/ --> not implemented
|
||||
#
|
||||
#
|
||||
RUN /src/pre-process.sh /docs
|
||||
|
||||
55
docs/Makefile
Normal file
55
docs/Makefile
Normal file
@@ -0,0 +1,55 @@
|
||||
.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate
|
||||
|
||||
# env vars passed through directly to Docker's build scripts
|
||||
# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
|
||||
# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these
|
||||
DOCKER_ENVS := \
|
||||
-e BUILDFLAGS \
|
||||
-e DOCKER_CLIENTONLY \
|
||||
-e DOCKER_EXECDRIVER \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TESTDIRS \
|
||||
-e TESTFLAGS \
|
||||
-e TIMEOUT
|
||||
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
|
||||
|
||||
# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs)
|
||||
DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
|
||||
|
||||
# to allow `make DOCSPORT=9000 docs`
|
||||
DOCSPORT := 8000
|
||||
|
||||
# Get the IP ADDRESS
|
||||
DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''")
|
||||
HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)")
|
||||
HUGO_BIND_IP=0.0.0.0
|
||||
|
||||
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
|
||||
DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
|
||||
DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH))
|
||||
|
||||
|
||||
DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE
|
||||
|
||||
# for some docs workarounds (see below in "docs-build" target)
|
||||
GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
|
||||
|
||||
default: docs
|
||||
|
||||
docs: docs-build
|
||||
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)
|
||||
|
||||
docs-draft: docs-build
|
||||
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)
|
||||
|
||||
|
||||
docs-shell: docs-build
|
||||
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
|
||||
|
||||
|
||||
docs-build:
|
||||
# ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files
|
||||
# echo "$(GIT_BRANCH)" > GIT_BRANCH
|
||||
# echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET
|
||||
# echo "$(GITCOMMIT)" > GITCOMMIT
|
||||
docker build -t "$(DOCKER_DOCS_IMAGE)" .
|
||||
77
docs/README.md
Normal file
77
docs/README.md
Normal file
@@ -0,0 +1,77 @@
|
||||
# Contributing to the Docker Compose documentation
|
||||
|
||||
The documentation in this directory is part of the [https://docs.docker.com](https://docs.docker.com) website. Docker uses [the Hugo static generator](http://gohugo.io/overview/introduction/) to convert project Markdown files to a static HTML site.
|
||||
|
||||
You don't need to be a Hugo expert to contribute to the compose documentation. If you are familiar with Markdown, you can modify the content in the `docs` files.
|
||||
|
||||
If you want to add a new file or change the location of the document in the menu, you do need to know a little more.
|
||||
|
||||
## Documentation contributing workflow
|
||||
|
||||
1. Edit a Markdown file in the tree.
|
||||
|
||||
2. Save your changes.
|
||||
|
||||
3. Make sure you are in the `docs` subdirectory.
|
||||
|
||||
4. Build the documentation.
|
||||
|
||||
$ make docs
|
||||
---> ffcf3f6c4e97
|
||||
Removing intermediate container a676414185e8
|
||||
Successfully built ffcf3f6c4e97
|
||||
docker run --rm -it -e AWS_S3_BUCKET -e NOCACHE -p 8000:8000 -e DOCKERHOST "docs-base:test-tooling" hugo server --port=8000 --baseUrl=192.168.59.103 --bind=0.0.0.0
|
||||
ERROR: 2015/06/13 MenuEntry's .Url is deprecated and will be removed in Hugo 0.15. Use .URL instead.
|
||||
0 of 4 drafts rendered
|
||||
0 future content
|
||||
12 pages created
|
||||
0 paginator pages created
|
||||
0 tags created
|
||||
0 categories created
|
||||
in 55 ms
|
||||
Serving pages from /docs/public
|
||||
Web Server is available at http://0.0.0.0:8000/
|
||||
Press Ctrl+C to stop
|
||||
|
||||
5. Open the available server in your browser.
|
||||
|
||||
The documentation server has the complete menu but only the Docker Compose
|
||||
documentation resolves. You can't access the other project docs from this
|
||||
localized build.
|
||||
|
||||
## Tips on Hugo metadata and menu positioning
|
||||
|
||||
The top of each Docker Compose documentation file contains TOML metadata. The metadata is commented out to prevent it from appearing in GitHub.
|
||||
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Extending services in Compose"
|
||||
description = "How to use Docker Compose's extends keyword to share configuration between files and projects"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, documentation, docs"]
|
||||
[menu.main]
|
||||
parent="smn_workw_compose"
|
||||
weight=2
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
The metadata alone has this structure:
|
||||
|
||||
+++
|
||||
title = "Extending services in Compose"
|
||||
description = "How to use Docker Compose's extends keyword to share configuration between files and projects"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, documentation, docs"]
|
||||
[menu.main]
|
||||
parent="smn_workw_compose"
|
||||
weight=2
|
||||
+++
|
||||
|
||||
The `[menu.main]` section refers to navigation defined [in the main Docker menu](https://github.com/docker/docs-base/blob/hugo/config.toml). This metadata says *add a menu item called* Extending services in Compose *to the menu with the* `smn_workw_compose` *identifier*. If you locate the menu in the configuration, you'll find *Create multi-container applications* is the menu title.
|
||||
|
||||
You can move an article in the tree by specifying a new parent. You can shift the location of the item by changing its weight. Higher numbers are heavier and shift the item to the bottom of menu. Low or no numbers shift it up.
|
||||
|
||||
|
||||
## Other key documentation repositories
|
||||
|
||||
The `docker/docs-base` repository contains [the Hugo theme and menu configuration](https://github.com/docker/docs-base). If you open the `Dockerfile` you'll see the `make docs` relies on this as a base image for building the Compose documentation.
|
||||
|
||||
The `docker/docs.docker.com` repository contains [build system for building the Docker documentation site](https://github.com/docker/docs.docker.com). Fork this repository to build the entire documentation site.
|
||||
181
docs/cli.md
181
docs/cli.md
@@ -1,181 +0,0 @@
|
||||
page_title: Compose CLI reference
|
||||
page_description: Compose CLI reference
|
||||
page_keywords: fig, composition, compose, docker, orchestration, cli, reference
|
||||
|
||||
|
||||
# CLI reference
|
||||
|
||||
Most Docker Compose commands are run against one or more services. If
|
||||
the service is not specified, the command will apply to all services.
|
||||
|
||||
For full usage information, run `docker-compose [COMMAND] --help`.
|
||||
|
||||
## Commands
|
||||
|
||||
### build
|
||||
|
||||
Builds or rebuilds services.
|
||||
|
||||
Services are built once and then tagged as `project_service`, e.g.,
|
||||
`composetest_db`. If you change a service's Dockerfile or the contents of its
|
||||
build directory, run `docker-compose build` to rebuild it.
|
||||
|
||||
### help
|
||||
|
||||
Displays help and usage instructions for a command.
|
||||
|
||||
### kill
|
||||
|
||||
Forces running containers to stop by sending a `SIGKILL` signal. Optionally the
|
||||
signal can be passed, for example:
|
||||
|
||||
$ docker-compose kill -s SIGINT
|
||||
|
||||
### logs
|
||||
|
||||
Displays log output from services.
|
||||
|
||||
### port
|
||||
|
||||
Prints the public port for a port binding
|
||||
|
||||
### ps
|
||||
|
||||
Lists containers.
|
||||
|
||||
### pull
|
||||
|
||||
Pulls service images.
|
||||
|
||||
### rm
|
||||
|
||||
Removes stopped service containers.
|
||||
|
||||
|
||||
### run
|
||||
|
||||
Runs a one-off command on a service.
|
||||
|
||||
For example,
|
||||
|
||||
$ docker-compose run web python manage.py shell
|
||||
|
||||
will start the `web` service and then run `manage.py shell` in python.
|
||||
Note that by default, linked services will also be started, unless they are
|
||||
already running.
|
||||
|
||||
One-off commands are started in new containers with the same configuration as a
|
||||
normal container for that service, so volumes, links, etc will all be created as
|
||||
expected. When using `run`, there are two differences from bringing up a
|
||||
container normally:
|
||||
|
||||
1. the command will be overridden with the one specified. So, if you run
|
||||
`docker-compose run web bash`, the container's web command (which could default
|
||||
to, e.g., `python app.py`) will be overridden to `bash`
|
||||
|
||||
2. by default no ports will be created in case they collide with already opened
|
||||
ports.
|
||||
|
||||
Links are also created between one-off commands and the other containers which
|
||||
are part of that service. So, for example, you could run:
|
||||
|
||||
$ docker-compose run db psql -h db -U docker
|
||||
|
||||
This would open up an interactive PostgreSQL shell for the linked `db` container
|
||||
(which would get created or started as needed).
|
||||
|
||||
If you do not want linked containers to start when running the one-off command,
|
||||
specify the `--no-deps` flag:
|
||||
|
||||
$ docker-compose run --no-deps web python manage.py shell
|
||||
|
||||
Similarly, if you do want the service's ports to be created and mapped to the
|
||||
host, specify the `--service-ports` flag:
|
||||
$ docker-compose run --service-ports web python manage.py shell
|
||||
|
||||
### scale
|
||||
|
||||
Sets the number of containers to run for a service.
|
||||
|
||||
Numbers are specified as arguments in the form `service=num`. For example:
|
||||
|
||||
$ docker-compose scale web=2 worker=3
|
||||
|
||||
### start
|
||||
|
||||
Starts existing containers for a service.
|
||||
|
||||
### stop
|
||||
|
||||
Stops running containers without removing them. They can be started again with
|
||||
`docker-compose start`.
|
||||
|
||||
### up
|
||||
|
||||
Builds, (re)creates, starts, and attaches to containers for a service.
|
||||
|
||||
Linked services will be started, unless they are already running.
|
||||
|
||||
By default, `docker-compose up` will aggregate the output of each container and,
|
||||
when it exits, all containers will be stopped. Running `docker-compose up -d`,
|
||||
will start the containers in the background and leave them running.
|
||||
|
||||
By default, if there are existing containers for a service, `docker-compose up` will stop and recreate them (preserving mounted volumes with [volumes-from]), so that changes in `docker-compose.yml` are picked up. If you do not want containers stopped and recreated, use `docker-compose up --no-recreate`. This will still start any stopped containers, if needed.
|
||||
|
||||
[volumes-from]: http://docs.docker.io/en/latest/use/working_with_volumes/
|
||||
|
||||
## Options
|
||||
|
||||
### --verbose
|
||||
|
||||
Shows more output
|
||||
|
||||
### --version
|
||||
|
||||
Prints version and exits
|
||||
|
||||
### -f, --file FILE
|
||||
|
||||
Specifies an alternate Compose yaml file (default: `docker-compose.yml`)
|
||||
|
||||
### -p, --project-name NAME
|
||||
|
||||
Specifies an alternate project name (default: current directory name)
|
||||
|
||||
|
||||
## Environment Variables
|
||||
|
||||
Several environment variables are available for you to configure Compose's behaviour.
|
||||
|
||||
Variables starting with `DOCKER_` are the same as those used to configure the
|
||||
Docker command-line client. If you're using boot2docker, `$(boot2docker shellinit)`
|
||||
will set them to their correct values.
|
||||
|
||||
### COMPOSE\_PROJECT\_NAME
|
||||
|
||||
Sets the project name, which is prepended to the name of every container started by Compose. Defaults to the `basename` of the current working directory.
|
||||
|
||||
### COMPOSE\_FILE
|
||||
|
||||
Sets the path to the `docker-compose.yml` to use. Defaults to `docker-compose.yml` in the current working directory.
|
||||
|
||||
### DOCKER\_HOST
|
||||
|
||||
Sets the URL of the docker daemon. As with the Docker client, defaults to `unix:///var/run/docker.sock`.
|
||||
|
||||
### DOCKER\_TLS\_VERIFY
|
||||
|
||||
When set to anything other than an empty string, enables TLS communication with
|
||||
the daemon.
|
||||
|
||||
### DOCKER\_CERT\_PATH
|
||||
|
||||
Configures the path to the `ca.pem`, `cert.pem`, and `key.pem` files used for TLS verification. Defaults to `~/.docker`.
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [Installing Compose](install.md)
|
||||
- [User guide](index.md)
|
||||
- [Yaml file reference](yml.md)
|
||||
- [Compose environment variables](env.md)
|
||||
- [Compose command line completion](completion.md)
|
||||
@@ -1,28 +1,53 @@
|
||||
---
|
||||
layout: default
|
||||
title: Command Completion
|
||||
---
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Command Completion"
|
||||
description = "Compose CLI reference"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, reference"]
|
||||
[menu.main]
|
||||
parent="smn_workw_compose"
|
||||
weight=3
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
Command Completion
|
||||
==================
|
||||
# Command Completion
|
||||
|
||||
Compose comes with [command completion](http://en.wikipedia.org/wiki/Command-line_completion)
|
||||
for the bash shell.
|
||||
for the bash and zsh shell.
|
||||
|
||||
Installing Command Completion
|
||||
-----------------------------
|
||||
## Installing Command Completion
|
||||
|
||||
### Bash
|
||||
|
||||
Make sure bash completion is installed. If you use a current Linux in a non-minimal installation, bash completion should be available.
|
||||
On a Mac, install with `brew install bash-completion`
|
||||
|
||||
Place the completion script in `/etc/bash_completion.d/` (`/usr/local/etc/bash_completion.d/` on a Mac), using e.g.
|
||||
|
||||
curl -L https://raw.githubusercontent.com/docker/compose/1.2.0/contrib/completion/bash/docker-compose > /etc/bash_completion.d/docker-compose
|
||||
|
||||
Place the completion script in `/etc/bash_completion.d/` (`/usr/local/etc/bash_completion.d/` on a Mac), using e.g.
|
||||
|
||||
curl -L https://raw.githubusercontent.com/docker/compose/$(docker-compose --version | awk 'NR==1{print $NF}')/contrib/completion/bash/docker-compose > /etc/bash_completion.d/docker-compose
|
||||
|
||||
Completion will be available upon next login.
|
||||
|
||||
Available completions
|
||||
---------------------
|
||||
### Zsh
|
||||
|
||||
Place the completion script in your `/path/to/zsh/completion`, using e.g. `~/.zsh/completion/`
|
||||
|
||||
mkdir -p ~/.zsh/completion
|
||||
curl -L https://raw.githubusercontent.com/docker/compose/$(docker-compose --version | awk 'NR==1{print $NF}')/contrib/completion/zsh/_docker-compose > ~/.zsh/completion/_docker-compose
|
||||
|
||||
Include the directory in your `$fpath`, e.g. by adding in `~/.zshrc`
|
||||
|
||||
fpath=(~/.zsh/completion $fpath)
|
||||
|
||||
Make sure `compinit` is loaded or do it by adding in `~/.zshrc`
|
||||
|
||||
autoload -Uz compinit && compinit -i
|
||||
|
||||
Then reload your shell
|
||||
|
||||
exec $SHELL -l
|
||||
|
||||
## Available completions
|
||||
|
||||
Depending on what you typed on the command line so far, it will complete
|
||||
|
||||
- available docker-compose commands
|
||||
@@ -34,8 +59,11 @@ Enjoy working with Compose faster and with less typos!
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [User guide](/)
|
||||
- [Installing Compose](install.md)
|
||||
- [User guide](index.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with Wordpress](wordpress.md)
|
||||
- [Command line reference](cli.md)
|
||||
- [Yaml file reference](yml.md)
|
||||
- [Compose environment variables](env.md)
|
||||
|
||||
@@ -1,10 +1,16 @@
|
||||
page_title: Quickstart Guide: Compose and Django
|
||||
page_description: Getting started with Docker Compose and Django
|
||||
page_keywords: documentation, docs, docker, compose, orchestration, containers,
|
||||
django
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Quickstart Guide: Compose and Django"
|
||||
description = "Getting started with Docker Compose and Django"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers"]
|
||||
[menu.main]
|
||||
parent="smn_workw_compose"
|
||||
weight=4
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
## Getting started with Compose and Django
|
||||
## Quickstart Guide: Compose and Django
|
||||
|
||||
|
||||
This Quick-start Guide will demonstrate how to use Compose to set up and run a
|
||||
@@ -55,7 +61,7 @@ mounted inside the containers, and what ports they expose.
|
||||
links:
|
||||
- db
|
||||
|
||||
See the [`docker-compose.yml` reference](yml.html) for more information on how
|
||||
See the [`docker-compose.yml` reference](yml.md) for more information on how
|
||||
this file works.
|
||||
|
||||
### Build the project
|
||||
@@ -109,8 +115,7 @@ Then, run `docker-compose up`:
|
||||
myapp_web_1 | Starting development server at http://0.0.0.0:8000/
|
||||
myapp_web_1 | Quit the server with CONTROL-C.
|
||||
|
||||
Your Django app should now be running at port 8000 on your Docker daemon (if
|
||||
you're using Boot2docker, `boot2docker ip` will tell you its address).
|
||||
Your Django app should now be running at port 8000 on your Docker daemon. If you are using a Docker Machine VM, you can use `docker-machine ip MACHINE_NAME` to get the IP address.
|
||||
|
||||
You can also run management commands with Docker. To set up your database, for
|
||||
example, run `docker-compose up` and in another terminal run:
|
||||
@@ -119,8 +124,11 @@ example, run `docker-compose up` and in another terminal run:
|
||||
|
||||
## More Compose documentation
|
||||
|
||||
- [User guide](/)
|
||||
- [Installing Compose](install.md)
|
||||
- [User guide](index.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with Wordpress](wordpress.md)
|
||||
- [Command line reference](cli.md)
|
||||
- [Yaml file reference](yml.md)
|
||||
- [Compose environment variables](env.md)
|
||||
|
||||
22
docs/env.md
22
docs/env.md
@@ -1,10 +1,15 @@
|
||||
---
|
||||
layout: default
|
||||
title: Compose environment variables reference
|
||||
---
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Compose environment variables reference"
|
||||
description = "Compose CLI reference"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, reference"]
|
||||
[menu.main]
|
||||
parent="smn_compose_ref"
|
||||
weight=3
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
Environment variables reference
|
||||
===============================
|
||||
# Compose environment variables reference
|
||||
|
||||
**Note:** Environment variables are no longer the recommended method for connecting to linked services. Instead, you should use the link name (by default, the name of the linked service) as the hostname to connect to. See the [docker-compose.yml documentation](yml.md#links) for details.
|
||||
|
||||
@@ -34,8 +39,11 @@ Fully qualified container name, e.g. `DB_1_NAME=/myapp_web_1/myapp_db_1`
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [User guide](/)
|
||||
- [Installing Compose](install.md)
|
||||
- [User guide](index.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with Wordpress](wordpress.md)
|
||||
- [Command line reference](cli.md)
|
||||
- [Yaml file reference](yml.md)
|
||||
- [Compose command line completion](completion.md)
|
||||
|
||||
363
docs/extends.md
Normal file
363
docs/extends.md
Normal file
@@ -0,0 +1,363 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Extending services in Compose"
|
||||
description = "How to use Docker Compose's extends keyword to share configuration between files and projects"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, documentation, docs"]
|
||||
[menu.main]
|
||||
parent="smn_workw_compose"
|
||||
weight=2
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
## Extending services in Compose
|
||||
|
||||
Docker Compose's `extends` keyword enables sharing of common configurations
|
||||
among different files, or even different projects entirely. Extending services
|
||||
is useful if you have several applications that reuse commonly-defined services.
|
||||
Using `extends` you can define a service in one place and refer to it from
|
||||
anywhere.
|
||||
|
||||
Alternatively, you can deploy the same application to multiple environments with
|
||||
a slightly different set of services in each case (or with changes to the
|
||||
configuration of some services). Moreover, you can do so without copy-pasting
|
||||
the configuration around.
|
||||
|
||||
### Understand the extends configuration
|
||||
|
||||
When defining any service in `docker-compose.yml`, you can declare that you are
|
||||
extending another service like this:
|
||||
|
||||
web:
|
||||
extends:
|
||||
file: common-services.yml
|
||||
service: webapp
|
||||
|
||||
This instructs Compose to re-use the configuration for the `webapp` service
|
||||
defined in the `common-services.yml` file. Suppose that `common-services.yml`
|
||||
looks like this:
|
||||
|
||||
webapp:
|
||||
build: .
|
||||
ports:
|
||||
- "8000:8000"
|
||||
volumes:
|
||||
- "/data"
|
||||
|
||||
In this case, you'll get exactly the same result as if you wrote
|
||||
`docker-compose.yml` with that `build`, `ports` and `volumes` configuration
|
||||
defined directly under `web`.
|
||||
|
||||
You can go further and define (or re-define) configuration locally in
|
||||
`docker-compose.yml`:
|
||||
|
||||
web:
|
||||
extends:
|
||||
file: common-services.yml
|
||||
service: webapp
|
||||
environment:
|
||||
- DEBUG=1
|
||||
cpu_shares: 5
|
||||
|
||||
You can also write other services and link your `web` service to them:
|
||||
|
||||
web:
|
||||
extends:
|
||||
file: common-services.yml
|
||||
service: webapp
|
||||
environment:
|
||||
- DEBUG=1
|
||||
cpu_shares: 5
|
||||
links:
|
||||
- db
|
||||
db:
|
||||
image: postgres
|
||||
|
||||
For full details on how to use `extends`, refer to the [reference](#reference).
|
||||
|
||||
### Example use case
|
||||
|
||||
In this example, you’ll repurpose the example app from the [quick start
|
||||
guide](index.md). (If you're not familiar with Compose, it's recommended that
|
||||
you go through the quick start first.) This example assumes you want to use
|
||||
Compose both to develop an application locally and then deploy it to a
|
||||
production environment.
|
||||
|
||||
The local and production environments are similar, but there are some
|
||||
differences. In development, you mount the application code as a volume so that
|
||||
it can pick up changes; in production, the code should be immutable from the
|
||||
outside. This ensures it’s not accidentally changed. The development environment
|
||||
uses a local Redis container, but in production another team manages the Redis
|
||||
service, which is listening at `redis-production.example.com`.
|
||||
|
||||
To configure with `extends` for this sample, you must:
|
||||
|
||||
1. Define the web application as a Docker image in `Dockerfile` and a Compose
|
||||
service in `common.yml`.
|
||||
|
||||
2. Define the development environment in the standard Compose file,
|
||||
`docker-compose.yml`.
|
||||
|
||||
- Use `extends` to pull in the web service.
|
||||
- Configure a volume to enable code reloading.
|
||||
- Create an additional Redis service for the application to use locally.
|
||||
|
||||
3. Define the production environment in a third Compose file, `production.yml`.
|
||||
|
||||
- Use `extends` to pull in the web service.
|
||||
- Configure the web service to talk to the external, production Redis service.
|
||||
|
||||
#### Define the web app
|
||||
|
||||
Defining the web application requires the following:
|
||||
|
||||
1. Create an `app.py` file.
|
||||
|
||||
This file contains a simple Python application that uses Flask to serve HTTP
|
||||
and increments a counter in Redis:
|
||||
|
||||
from flask import Flask
|
||||
from redis import Redis
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
redis = Redis(host=os.environ['REDIS_HOST'], port=6379)
|
||||
|
||||
@app.route('/')
|
||||
def hello():
|
||||
redis.incr('hits')
|
||||
return 'Hello World! I have been seen %s times.\n' % redis.get('hits')
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(host="0.0.0.0", debug=True)
|
||||
|
||||
This code uses a `REDIS_HOST` environment variable to determine where to
|
||||
find Redis.
|
||||
|
||||
2. Define the Python dependencies in a `requirements.txt` file:
|
||||
|
||||
flask
|
||||
redis
|
||||
|
||||
3. Create a `Dockerfile` to build an image containing the app:
|
||||
|
||||
FROM python:2.7
|
||||
ADD . /code
|
||||
WORKDIR /code
|
||||
RUN pip install -r requirements.txt
|
||||
CMD python app.py
|
||||
|
||||
4. Create a Compose configuration file called `common.yml`:
|
||||
|
||||
This configuration defines how to run the app.
|
||||
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "5000:5000"
|
||||
|
||||
Typically, you would have dropped this configuration into
|
||||
`docker-compose.yml` file, but in order to pull it into multiple files with
|
||||
`extends`, it needs to be in a separate file.
|
||||
|
||||
#### Define the development environment
|
||||
|
||||
1. Create a `docker-compose.yml` file.
|
||||
|
||||
The `extends` option pulls in the `web` service from the `common.yml` file
|
||||
you created in the previous section.
|
||||
|
||||
web:
|
||||
extends:
|
||||
file: common.yml
|
||||
service: web
|
||||
volumes:
|
||||
- .:/code
|
||||
links:
|
||||
- redis
|
||||
environment:
|
||||
- REDIS_HOST=redis
|
||||
redis:
|
||||
image: redis
|
||||
|
||||
The new addition defines a `web` service that:
|
||||
|
||||
- Fetches the base configuration for `web` out of `common.yml`.
|
||||
- Adds `volumes` and `links` configuration to the base (`common.yml`)
|
||||
configuration.
|
||||
- Sets the `REDIS_HOST` environment variable to point to the linked redis
|
||||
container. This environment uses a stock `redis` image from the Docker Hub.
|
||||
|
||||
2. Run `docker-compose up`.
|
||||
|
||||
Compose creates, links, and starts a web and redis container linked together.
|
||||
It mounts your application code inside the web container.
|
||||
|
||||
3. Verify that the code is mounted by changing the message in
|
||||
`app.py`—say, from `Hello world!` to `Hello from Compose!`.
|
||||
|
||||
Don't forget to refresh your browser to see the change!
|
||||
|
||||
#### Define the production environment
|
||||
|
||||
You are almost done. Now, define your production environment:
|
||||
|
||||
1. Create a `production.yml` file.
|
||||
|
||||
As with `docker-compose.yml`, the `extends` option pulls in the `web` service
|
||||
from `common.yml`.
|
||||
|
||||
web:
|
||||
extends:
|
||||
file: common.yml
|
||||
service: web
|
||||
environment:
|
||||
- REDIS_HOST=redis-production.example.com
|
||||
|
||||
2. Run `docker-compose -f production.yml up`.
|
||||
|
||||
Compose creates *just* a web container and configures the Redis connection via
|
||||
the `REDIS_HOST` environment variable. This variable points to the production
|
||||
Redis instance.
|
||||
|
||||
> **Note**: If you try to load up the webapp in your browser you'll get an
|
||||
> error—`redis-production.example.com` isn't actually a Redis server.
|
||||
|
||||
You've now done a basic `extends` configuration. As your application develops,
|
||||
you can make any necessary changes to the web service in `common.yml`. Compose
|
||||
picks up both the development and production environments when you next run
|
||||
`docker-compose`. You don't have to do any copy-and-paste, and you don't have to
|
||||
manually keep both environments in sync.
|
||||
|
||||
|
||||
### Reference
|
||||
|
||||
You can use `extends` on any service together with other configuration keys. It
|
||||
always expects a dictionary containing the key `service`, and optionally the `file` key.
|
||||
|
||||
The `file` key specifies the location of a Compose configuration file defining
|
||||
the extension. The `file` value can be an absolute or relative path. If you
|
||||
specify a relative path, Docker Compose treats it as relative to the location
|
||||
of the current file. If you don't specify a `file`, Compose looks in the
|
||||
current configuration file.
|
||||
|
||||
The `service` key specifies the name of the service to extend, for example `web`
|
||||
or `database`.
|
||||
|
||||
You can extend a service that itself extends another. You can extend
|
||||
indefinitely. Compose does not support circular references and `docker-compose`
|
||||
returns an error if it encounters them.
|
||||
|
||||
#### Adding and overriding configuration
|
||||
|
||||
Compose copies configurations from the original service over to the local one,
|
||||
**except** for `links` and `volumes_from`. These exceptions exist to avoid
|
||||
implicit dependencies—you always define `links` and `volumes_from`
|
||||
locally. This ensures dependencies between services are clearly visible when
|
||||
reading the current file. Defining these locally also ensures changes to the
|
||||
referenced file don't result in breakage.
|
||||
|
||||
If a configuration option is defined in both the original service and the local
|
||||
service, the local value either *override*s or *extend*s the definition of the
|
||||
original service. This works differently for other configuration options.
|
||||
|
||||
For single-value options like `image`, `command` or `mem_limit`, the new value
|
||||
replaces the old value. **This is the default behaviour - all exceptions are
|
||||
listed below.**
|
||||
|
||||
# original service
|
||||
command: python app.py
|
||||
|
||||
# local service
|
||||
command: python otherapp.py
|
||||
|
||||
# result
|
||||
command: python otherapp.py
|
||||
|
||||
In the case of `build` and `image`, using one in the local service causes
|
||||
Compose to discard the other, if it was defined in the original service.
|
||||
|
||||
# original service
|
||||
build: .
|
||||
|
||||
# local service
|
||||
image: redis
|
||||
|
||||
# result
|
||||
image: redis
|
||||
|
||||
# original service
|
||||
image: redis
|
||||
|
||||
# local service
|
||||
build: .
|
||||
|
||||
# result
|
||||
build: .
|
||||
|
||||
For the **multi-value options** `ports`, `expose`, `external_links`, `dns` and
|
||||
`dns_search`, Compose concatenates both sets of values:
|
||||
|
||||
# original service
|
||||
expose:
|
||||
- "3000"
|
||||
|
||||
# local service
|
||||
expose:
|
||||
- "4000"
|
||||
- "5000"
|
||||
|
||||
# result
|
||||
expose:
|
||||
- "3000"
|
||||
- "4000"
|
||||
- "5000"
|
||||
|
||||
In the case of `environment` and `labels`, Compose "merges" entries together
|
||||
with locally-defined values taking precedence:
|
||||
|
||||
# original service
|
||||
environment:
|
||||
- FOO=original
|
||||
- BAR=original
|
||||
|
||||
# local service
|
||||
environment:
|
||||
- BAR=local
|
||||
- BAZ=local
|
||||
|
||||
# result
|
||||
environment:
|
||||
- FOO=original
|
||||
- BAR=local
|
||||
- BAZ=local
|
||||
|
||||
Finally, for `volumes` and `devices`, Compose "merges" entries together with
|
||||
locally-defined bindings taking precedence:
|
||||
|
||||
# original service
|
||||
volumes:
|
||||
- /original-dir/foo:/foo
|
||||
- /original-dir/bar:/bar
|
||||
|
||||
# local service
|
||||
volumes:
|
||||
- /local-dir/bar:/bar
|
||||
      - /local-dir/baz:/baz
|
||||
|
||||
# result
|
||||
volumes:
|
||||
- /original-dir/foo:/foo
|
||||
- /local-dir/bar:/bar
|
||||
      - /local-dir/baz:/baz
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [User guide](/)
|
||||
- [Installing Compose](install.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with Wordpress](wordpress.md)
|
||||
- [Command line reference](cli.md)
|
||||
- [Yaml file reference](yml.md)
|
||||
- [Compose command line completion](completion.md)
|
||||
176
docs/index.md
176
docs/index.md
@@ -1,47 +1,44 @@
|
||||
page_title: Compose: Multi-container orchestration for Docker
|
||||
page_description: Introduction and Overview of Compose
|
||||
page_keywords: documentation, docs, docker, compose, orchestration, containers
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Overview of Docker Compose"
|
||||
description = "Introduction and Overview of Compose"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers"]
|
||||
[menu.main]
|
||||
parent="smn_workw_compose"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Docker Compose
|
||||
# Overview of Docker Compose
|
||||
|
||||
Compose is a tool for defining and running complex applications with Docker.
|
||||
With Compose, you define a multi-container application in a single file, then
|
||||
spin your application up in a single command which does everything that needs to
|
||||
be done to get it running.
|
||||
Compose is a tool for defining and running multi-container applications with
|
||||
Docker. With Compose, you define a multi-container application in a single
|
||||
file, then spin your application up in a single command which does everything
|
||||
that needs to be done to get it running.
|
||||
|
||||
Compose is great for development environments, staging servers, and CI. We don't
|
||||
recommend that you use it in production yet.
|
||||
|
||||
Using Compose is basically a three-step process.
|
||||
|
||||
First, you define your app's environment with a `Dockerfile` so it can be
|
||||
reproduced anywhere:
|
||||
|
||||
```Dockerfile
|
||||
FROM python:2.7
|
||||
WORKDIR /code
|
||||
ADD requirements.txt /code/
|
||||
RUN pip install -r requirements.txt
|
||||
ADD . /code
|
||||
CMD python app.py
|
||||
```
|
||||
|
||||
Next, you define the services that make up your app in `docker-compose.yml` so
|
||||
1. Define your app's environment with a `Dockerfile` so it can be
|
||||
reproduced anywhere.
|
||||
2. Define the services that make up your app in `docker-compose.yml` so
|
||||
they can be run together in an isolated environment:
|
||||
3. Lastly, run `docker-compose up` and Compose will start and run your entire app.
|
||||
|
||||
```yaml
|
||||
web:
|
||||
build: .
|
||||
links:
|
||||
- db
|
||||
ports:
|
||||
- "8000:8000"
|
||||
db:
|
||||
image: postgres
|
||||
```
|
||||
A `docker-compose.yml` looks like this:
|
||||
|
||||
Lastly, run `docker-compose up` and Compose will start and run your entire app.
|
||||
web:
|
||||
build: .
|
||||
ports:
|
||||
- "5000:5000"
|
||||
volumes:
|
||||
- .:/code
|
||||
links:
|
||||
- redis
|
||||
redis:
|
||||
image: redis
|
||||
|
||||
Compose has commands for managing the whole lifecycle of your application:
|
||||
|
||||
@@ -53,6 +50,9 @@ Compose has commands for managing the whole lifecycle of your application:
|
||||
## Compose documentation
|
||||
|
||||
- [Installing Compose](install.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with Wordpress](wordpress.md)
|
||||
- [Command line reference](cli.md)
|
||||
- [Yaml file reference](yml.md)
|
||||
- [Compose environment variables](env.md)
|
||||
@@ -75,23 +75,21 @@ Next, you'll want to make a directory for the project:
|
||||
$ cd composetest
|
||||
|
||||
Inside this directory, create `app.py`, a simple web app that uses the Flask
|
||||
framework and increments a value in Redis:
|
||||
framework and increments a value in Redis. Don't worry if you don't have Redis installed, docker is going to take care of that for you when we [define services](#define-services):
|
||||
|
||||
```python
|
||||
from flask import Flask
|
||||
from redis import Redis
|
||||
import os
|
||||
app = Flask(__name__)
|
||||
redis = Redis(host='redis', port=6379)
|
||||
from flask import Flask
|
||||
from redis import Redis
|
||||
|
||||
@app.route('/')
|
||||
def hello():
|
||||
redis.incr('hits')
|
||||
return 'Hello World! I have been seen %s times.' % redis.get('hits')
|
||||
app = Flask(__name__)
|
||||
redis = Redis(host='redis', port=6379)
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(host="0.0.0.0", debug=True)
|
||||
```
|
||||
@app.route('/')
|
||||
def hello():
|
||||
redis.incr('hits')
|
||||
return 'Hello World! I have been seen %s times.' % redis.get('hits')
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(host="0.0.0.0", debug=True)
|
||||
|
||||
Next, define the Python dependencies in a file called `requirements.txt`:
|
||||
|
||||
@@ -108,13 +106,19 @@ specify how to build the image using a file called
|
||||
ADD . /code
|
||||
WORKDIR /code
|
||||
RUN pip install -r requirements.txt
|
||||
CMD python app.py
|
||||
|
||||
This tells Docker to include Python, your code, and your Python dependencies in
|
||||
a Docker image. For more information on how to write Dockerfiles, see the
|
||||
[Docker user
|
||||
guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile)
|
||||
and the
|
||||
[Dockerfile reference](http://docs.docker.com/reference/builder/).
|
||||
This tells Docker to:
|
||||
|
||||
* Build an image starting with the Python 2.7 image.
|
||||
* Add the current directory `.` into the path `/code` in the image.
|
||||
* Set the working directory to `/code`.
|
||||
* Install your Python dependencies.
|
||||
* Set the default command for the container to `python app.py`
|
||||
|
||||
For more information on how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/).
|
||||
|
||||
You can test that this builds by running `docker build -t web .`.
|
||||
|
||||
### Define services
|
||||
|
||||
@@ -122,7 +126,6 @@ Next, define a set of services using `docker-compose.yml`:
|
||||
|
||||
web:
|
||||
build: .
|
||||
command: python app.py
|
||||
ports:
|
||||
- "5000:5000"
|
||||
volumes:
|
||||
@@ -134,19 +137,20 @@ Next, define a set of services using `docker-compose.yml`:
|
||||
|
||||
This defines two services:
|
||||
|
||||
- `web`, which is built from the `Dockerfile` in the current directory. It also
|
||||
says to run the command `python app.py` inside the image, forward the exposed
|
||||
port 5000 on the container to port 5000 on the host machine, connect up the
|
||||
Redis service, and mount the current directory inside the container so we can
|
||||
work on code without having to rebuild the image.
|
||||
- `redis`, which uses the public image
|
||||
[redis](https://registry.hub.docker.com/_/redis/), which gets pulled from the
|
||||
Docker Hub registry.
|
||||
#### web
|
||||
|
||||
* Builds from the `Dockerfile` in the current directory.
|
||||
* Forwards the exposed port 5000 on the container to port 5000 on the host machine.
|
||||
* Connects the web container to the Redis service via a link.
|
||||
* Mounts the current directory on the host to `/code` inside the container allowing you to modify the code without having to rebuild the image.
|
||||
|
||||
#### redis
|
||||
|
||||
* Uses the public [Redis](https://registry.hub.docker.com/_/redis/) image which gets pulled from the Docker Hub registry.
|
||||
|
||||
### Build and run your app with Compose
|
||||
|
||||
Now, when you run `docker-compose up`, Compose will pull a Redis image, build an
|
||||
image for your code, and start everything up:
|
||||
Now, when you run `docker-compose up`, Compose will pull a Redis image, build an image for your code, and start everything up:
|
||||
|
||||
$ docker-compose up
|
||||
Pulling image redis...
|
||||
@@ -155,13 +159,21 @@ image for your code, and start everything up:
|
||||
Starting composetest_web_1...
|
||||
redis_1 | [8] 02 Jan 18:43:35.576 # Server started, Redis version 2.8.3
|
||||
web_1 | * Running on http://0.0.0.0:5000/
|
||||
web_1 | * Restarting with stat
|
||||
|
||||
The web app should now be listening on port 5000 on your Docker daemon host (if
|
||||
you're using Boot2docker, `boot2docker ip` will tell you its address).
|
||||
If you're using [Docker Machine](https://docs.docker.com/machine), then `docker-machine ip MACHINE_VM` will tell you its address and you can open `http://MACHINE_VM_IP:5000` in a browser.
|
||||
|
||||
If you're not using Boot2docker and are on linux, then the web app should now be listening on port 5000 on your Docker daemon host. If http://0.0.0.0:5000 doesn't resolve, you can also try localhost:5000.
|
||||
|
||||
You should get a message in your browser saying:
|
||||
|
||||
`Hello World! I have been seen 1 times.`
|
||||
|
||||
Refreshing the page will increment the number.
|
||||
|
||||
If you want to run your services in the background, you can pass the `-d` flag
|
||||
(for daemon mode) to `docker-compose up` and use `docker-compose ps` to see what
|
||||
is currently running:
|
||||
(for "detached" mode) to `docker-compose up` and use `docker-compose ps` to
|
||||
see what is currently running:
|
||||
|
||||
$ docker-compose up -d
|
||||
Starting composetest_redis_1...
|
||||
@@ -178,7 +190,7 @@ services. For example, to see what environment variables are available to the
|
||||
|
||||
$ docker-compose run web env
|
||||
|
||||
See `docker-compose --help` to see other available commands.
|
||||
See `docker-compose --help` to see other available commands. You can also install [command completion](completion.md) for the bash and zsh shell, which will also show you available commands.
|
||||
|
||||
If you started Compose with `docker-compose up -d`, you'll probably want to stop
|
||||
your services once you've finished with them:
|
||||
@@ -191,3 +203,31 @@ At this point, you have seen the basics of how Compose works.
|
||||
[Rails](rails.md), or [Wordpress](wordpress.md).
|
||||
- See the reference guides for complete details on the [commands](cli.md), the
|
||||
[configuration file](yml.md) and [environment variables](env.md).
|
||||
|
||||
## Release Notes
|
||||
|
||||
### Version 1.2.0 (April 7, 2015)
|
||||
|
||||
For complete information on this release, see the [1.2.0 Milestone project page](https://github.com/docker/compose/wiki/1.2.0-Milestone-Project-Page).
|
||||
In addition to bug fixes and refinements, this release adds the following:
|
||||
|
||||
* The `extends` keyword, which adds the ability to extend services by sharing common configurations. For details, see
|
||||
[PR #1088](https://github.com/docker/compose/pull/1088).
|
||||
|
||||
* Better integration with Swarm. Swarm will now schedule inter-dependent
|
||||
containers on the same host. For details, see
|
||||
[PR #972](https://github.com/docker/compose/pull/972).
|
||||
|
||||
## Getting help
|
||||
|
||||
Docker Compose is still in its infancy and under active development. If you need
|
||||
help, would like to contribute, or simply want to talk about the project with
|
||||
like-minded individuals, we have a number of open channels for communication.
|
||||
|
||||
* To report bugs or file feature requests: please use the [issue tracker on Github](https://github.com/docker/compose/issues).
|
||||
|
||||
* To talk about the project with people in real time: please join the `#docker-compose` channel on IRC.
|
||||
|
||||
* To contribute code or documentation changes: please submit a [pull request on Github](https://github.com/docker/compose/pulls).
|
||||
|
||||
For more information and resources, please visit the [Getting Help project page](https://docs.docker.com/project/get-help/).
|
||||
|
||||
109
docs/install.md
109
docs/install.md
@@ -1,42 +1,103 @@
|
||||
page_title: Installing Compose
|
||||
page_description: How to install Docker Compose
|
||||
page_keywords: compose, orchestration, install, installation, docker, documentation
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Docker Compose"
|
||||
description = "How to install Docker Compose"
|
||||
keywords = ["compose, orchestration, install, installation, docker, documentation"]
|
||||
[menu.main]
|
||||
parent="mn_install"
|
||||
weight=4
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
## Installing Compose
|
||||
# Install Docker Compose
|
||||
|
||||
To install Compose, you'll need to install Docker first. You'll then install
|
||||
Compose with a `curl` command.
|
||||
You can run Compose on OS X and 64-bit Linux. It is currently not supported on
|
||||
the Windows operating system. To install Compose, you'll need to install Docker
|
||||
first.
|
||||
|
||||
### Install Docker
|
||||
Depending on how your system is configured, you may require `sudo` access to
|
||||
install Compose. If your system requires `sudo`, you will receive "Permission
|
||||
denied" errors when installing Compose. If this is the case for you, preface the
|
||||
install commands with `sudo` to install.
|
||||
|
||||
First, install Docker version 1.3 or greater:
|
||||
To install Compose, do the following:
|
||||
|
||||
- [Instructions for Mac OS X](http://docs.docker.com/installation/mac/)
|
||||
- [Instructions for Ubuntu](http://docs.docker.com/installation/ubuntulinux/)
|
||||
- [Instructions for other systems](http://docs.docker.com/installation/)
|
||||
1. Install Docker Engine version 1.7.1 or greater:
|
||||
|
||||
### Install Compose
|
||||
* <a href="https://docs.docker.com/installation/mac/" target="_blank">Mac OS X installation</a> (installs both Engine and Compose)
|
||||
|
||||
* <a href="https://docs.docker.com/installation/ubuntulinux/" target="_blank">Ubuntu installation</a>
|
||||
|
||||
* <a href="https://docs.docker.com/installation/" target="_blank">other system installations</a>
|
||||
|
||||
2. Mac OS X users are done installing. Others should continue to the next step.
|
||||
|
||||
3. Go to the <a href="https://github.com/docker/compose/releases" target="_blank">repository release page</a>.
|
||||
|
||||
To install Compose, run the following commands:
|
||||
4. Enter the `curl` command in your terminal.
|
||||
|
||||
curl -L https://github.com/docker/compose/releases/download/1.2.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
|
||||
chmod +x /usr/local/bin/docker-compose
|
||||
The command has the following format:
|
||||
|
||||
Optionally, you can also install [command completion](completion.md) for the
|
||||
bash shell.
|
||||
curl -L https://github.com/docker/compose/releases/download/VERSION_NUM/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
|
||||
|
||||
If you have problems installing with `curl`, you can use `pip` instead: `pip install -U docker-compose`
|
||||
|
||||
4. Apply executable permissions to the binary:
|
||||
|
||||
Compose is available for OS X and 64-bit Linux. If you're on another platform,
|
||||
Compose can also be installed as a Python package:
|
||||
$ chmod +x /usr/local/bin/docker-compose
|
||||
|
||||
$ sudo pip install -U docker-compose
|
||||
5. Optionally, install [command completion](completion.md) for the
|
||||
`bash` and `zsh` shell.
|
||||
|
||||
No further steps are required; Compose should now be successfully installed.
|
||||
You can test the installation by running `docker-compose --version`.
|
||||
6. Test the installation.
|
||||
|
||||
## Compose documentation
|
||||
$ docker-compose --version
|
||||
docker-compose version: 1.4.1
|
||||
|
||||
- [User guide](index.md)
|
||||
## Upgrading
|
||||
|
||||
If you're upgrading from Compose 1.2 or earlier, you'll need to remove or migrate
|
||||
your existing containers after upgrading Compose. This is because, as of version
|
||||
1.3, Compose uses Docker labels to keep track of containers, and so they need to
|
||||
be recreated with labels added.
|
||||
|
||||
If Compose detects containers that were created without labels, it will refuse
|
||||
to run so that you don't end up with two sets of them. If you want to keep using
|
||||
your existing containers (for example, because they have data volumes you want
|
||||
to preserve) you can migrate them with the following command:
|
||||
|
||||
$ docker-compose migrate-to-labels
|
||||
|
||||
Alternatively, if you're not worried about keeping them, you can remove them &ndash;
|
||||
Compose will just create new ones.
|
||||
|
||||
$ docker rm -f -v myapp_web_1 myapp_db_1 ...
|
||||
|
||||
|
||||
## Uninstallation
|
||||
|
||||
To uninstall Docker Compose if you installed using `curl`:
|
||||
|
||||
$ rm /usr/local/bin/docker-compose
|
||||
|
||||
|
||||
To uninstall Docker Compose if you installed using `pip`:
|
||||
|
||||
$ pip uninstall docker-compose
|
||||
|
||||
>**Note**: If you get a "Permission denied" error using either of the above
|
||||
>methods, you probably do not have the proper permissions to remove
|
||||
>`docker-compose`. To force the removal, prepend `sudo` to either of the above
|
||||
>commands and run again.
|
||||
|
||||
|
||||
## Where to go next
|
||||
|
||||
- [User guide](/)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with Wordpress](wordpress.md)
|
||||
- [Command line reference](cli.md)
|
||||
- [Yaml file reference](yml.md)
|
||||
- [Compose environment variables](env.md)
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
|
||||
- ['compose/index.md', 'User Guide', 'Docker Compose' ]
|
||||
- ['compose/install.md', 'Installation', 'Docker Compose']
|
||||
- ['compose/cli.md', 'Reference', 'Compose command line']
|
||||
- ['compose/yml.md', 'Reference', 'Compose yml']
|
||||
- ['compose/env.md', 'Reference', 'Compose ENV variables']
|
||||
- ['compose/completion.md', 'Reference', 'Compose commandline completion']
|
||||
- ['compose/django.md', 'Examples', 'Getting started with Compose and Django']
|
||||
- ['compose/rails.md', 'Examples', 'Getting started with Compose and Rails']
|
||||
- ['compose/wordpress.md', 'Examples', 'Getting started with Compose and Wordpress']
|
||||
61
docs/pre-process.sh
Executable file
61
docs/pre-process.sh
Executable file
@@ -0,0 +1,61 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
# Populate an array with just docker dirs and one with content dirs
|
||||
docker_dir=(`ls -d /docs/content/docker/*`)
|
||||
content_dir=(`ls -d /docs/content/*`)
|
||||
|
||||
# Loop content not of docker/
|
||||
#
|
||||
# Sed to process GitHub Markdown
|
||||
# 1-2 Remove comment code from metadata block
|
||||
# 3 Remove .md extension from link text
|
||||
# 4 Change ](/ to ](/project/ in links
|
||||
# 5 Change ](word) to ](/project/word)
|
||||
# 6 Change ](../../ to ](/project/
|
||||
# 7 Change ](../ to ](/project/word)
|
||||
#
|
||||
for i in "${content_dir[@]}"
|
||||
do
|
||||
:
|
||||
case $i in
|
||||
"/docs/content/windows")
|
||||
;;
|
||||
"/docs/content/mac")
|
||||
;;
|
||||
"/docs/content/linux")
|
||||
;;
|
||||
"/docs/content/docker")
|
||||
y=${i##*/}
|
||||
find $i -type f -name "*.md" -exec sed -i.old \
|
||||
-e '/^<!.*metadata]>/g' \
|
||||
-e '/^<!.*end-metadata.*>/g' {} \;
|
||||
;;
|
||||
*)
|
||||
y=${i##*/}
|
||||
find $i -type f -name "*.md" -exec sed -i.old \
|
||||
-e '/^<!.*metadata]>/g' \
|
||||
-e '/^<!.*end-metadata.*>/g' \
|
||||
-e 's/\(\]\)\([(]\)\(\/\)/\1\2\/'$y'\//g' \
|
||||
-e 's/\(\][(]\)\([A-z].*\)\(\.md\)/\1\/'$y'\/\2/g' \
|
||||
-e 's/\([(]\)\(.*\)\(\.md\)/\1\2/g' \
|
||||
-e 's/\(\][(]\)\(\.\/\)/\1\/'$y'\//g' \
|
||||
-e 's/\(\][(]\)\(\.\.\/\.\.\/\)/\1\/'$y'\//g' \
|
||||
-e 's/\(\][(]\)\(\.\.\/\)/\1\/'$y'\//g' {} \;
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
#
|
||||
# Move docker directories to content
|
||||
#
|
||||
for i in "${docker_dir[@]}"
|
||||
do
|
||||
:
|
||||
if [ -d $i ]
|
||||
then
|
||||
mv $i /docs/content/
|
||||
fi
|
||||
done
|
||||
|
||||
rm -rf /docs/content/docker
|
||||
|
||||
96
docs/production.md
Normal file
96
docs/production.md
Normal file
@@ -0,0 +1,96 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Using Compose in production"
|
||||
description = "Guide to using Docker Compose in production"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers, production"]
|
||||
[menu.main]
|
||||
parent="smn_workw_compose"
|
||||
weight=1
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
## Using Compose in production
|
||||
|
||||
While **Compose is not yet considered production-ready**, if you'd like to experiment and learn more about using it in production deployments, this guide
|
||||
can help.
|
||||
The project is actively working towards becoming
|
||||
production-ready; to learn more about the progress being made, check out the
|
||||
[roadmap](https://github.com/docker/compose/blob/master/ROADMAP.md) for details
|
||||
on how it's coming along and what still needs to be done.
|
||||
|
||||
When deploying to production, you'll almost certainly want to make changes to
|
||||
your app configuration that are more appropriate to a live environment. These
|
||||
changes may include:
|
||||
|
||||
- Removing any volume bindings for application code, so that code stays inside
|
||||
the container and can't be changed from outside
|
||||
- Binding to different ports on the host
|
||||
- Setting environment variables differently (e.g., to decrease the verbosity of
|
||||
logging, or to enable email sending)
|
||||
- Specifying a restart policy (e.g., `restart: always`) to avoid downtime
|
||||
- Adding extra services (e.g., a log aggregator)
|
||||
|
||||
For this reason, you'll probably want to define a separate Compose file, say
|
||||
`production.yml`, which specifies production-appropriate configuration.
|
||||
|
||||
> **Note:** The [extends](extends.md) keyword is useful for maintaining multiple
|
||||
> Compose files which re-use common services without having to manually copy and
|
||||
> paste.
|
||||
|
||||
Once you've got an alternate configuration file, make Compose use it
|
||||
by setting the `COMPOSE_FILE` environment variable:
|
||||
|
||||
$ COMPOSE_FILE=production.yml
|
||||
$ docker-compose up -d
|
||||
|
||||
> **Note:** You can also use the file for a one-off command without setting
|
||||
> an environment variable. You do this by passing the `-f` flag, e.g.,
|
||||
> `docker-compose -f production.yml up -d`.
|
||||
|
||||
### Deploying changes
|
||||
|
||||
When you make changes to your app code, you'll need to rebuild your image and
|
||||
recreate your app's containers. To redeploy a service called
|
||||
`web`, you would use:
|
||||
|
||||
$ docker-compose build web
|
||||
$ docker-compose up --no-deps -d web
|
||||
|
||||
This will first rebuild the image for `web` and then stop, destroy, and recreate
|
||||
*just* the `web` service. The `--no-deps` flag prevents Compose from also
|
||||
recreating any services which `web` depends on.
|
||||
|
||||
### Running Compose on a single server
|
||||
|
||||
You can use Compose to deploy an app to a remote Docker host by setting the
|
||||
`DOCKER_HOST`, `DOCKER_TLS_VERIFY`, and `DOCKER_CERT_PATH` environment variables
|
||||
appropriately. For tasks like this,
|
||||
[Docker Machine](https://docs.docker.com/machine) makes managing local and
|
||||
remote Docker hosts very easy, and is recommended even if you're not deploying
|
||||
remotely.
|
||||
|
||||
Once you've set up your environment variables, all the normal `docker-compose`
|
||||
commands will work with no further configuration.
|
||||
|
||||
### Running Compose on a Swarm cluster
|
||||
|
||||
[Docker Swarm](https://docs.docker.com/swarm), a Docker-native clustering
|
||||
system, exposes the same API as a single Docker host, which means you can use
|
||||
Compose against a Swarm instance and run your apps across multiple hosts.
|
||||
|
||||
Compose/Swarm integration is still in the experimental stage, and Swarm is still
|
||||
in beta, but if you'd like to explore and experiment, check out the
|
||||
[integration guide](https://github.com/docker/compose/blob/master/SWARM.md).
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [Installing Compose](install.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with Wordpress](wordpress.md)
|
||||
- [Command line reference](cli.md)
|
||||
- [Yaml file reference](yml.md)
|
||||
- [Compose environment variables](env.md)
|
||||
- [Compose command line completion](completion.md)
|
||||
|
||||
@@ -1,10 +1,15 @@
|
||||
page_title: Quickstart Guide: Compose and Rails
|
||||
page_description: Getting started with Docker Compose and Rails
|
||||
page_keywords: documentation, docs, docker, compose, orchestration, containers,
|
||||
rails
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Quickstart Guide: Compose and Rails"
|
||||
description = "Getting started with Docker Compose and Rails"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers"]
|
||||
[menu.main]
|
||||
parent="smn_workw_compose"
|
||||
weight=5
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
## Getting started with Compose and Rails
|
||||
## Quickstart Guide: Compose and Rails
|
||||
|
||||
This Quickstart guide will show you how to use Compose to set up and run a Rails/PostgreSQL app. Before starting, you'll need to have [Compose installed](install.md).
|
||||
|
||||
@@ -35,8 +40,6 @@ Finally, `docker-compose.yml` is where the magic happens. This file describes th
|
||||
|
||||
db:
|
||||
image: postgres
|
||||
ports:
|
||||
- "5432"
|
||||
web:
|
||||
build: .
|
||||
command: bundle exec rails s -p 3000 -b '0.0.0.0'
|
||||
@@ -114,13 +117,16 @@ Finally, you need to create the database. In another terminal, run:
|
||||
|
||||
$ docker-compose run web rake db:create
|
||||
|
||||
That's it. Your app should now be running on port 3000 on your Docker daemon (if
|
||||
you're using Boot2docker, `boot2docker ip` will tell you its address).
|
||||
That's it. Your app should now be running on port 3000 on your Docker daemon. If you're using [Docker Machine](https://docs.docker.com/machine), then `docker-machine ip MACHINE_VM` returns the Docker host IP address.
|
||||
|
||||
|
||||
## More Compose documentation
|
||||
|
||||
- [User guide](/)
|
||||
- [Installing Compose](install.md)
|
||||
- [User guide](index.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with Wordpress](wordpress.md)
|
||||
- [Command line reference](cli.md)
|
||||
- [Yaml file reference](yml.md)
|
||||
- [Compose environment variables](env.md)
|
||||
|
||||
23
docs/reference/build.md
Normal file
23
docs/reference/build.md
Normal file
@@ -0,0 +1,23 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "build"
|
||||
description = "build"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, build"]
|
||||
[menu.main]
|
||||
identifier="build.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# build
|
||||
|
||||
```
|
||||
Usage: build [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--no-cache Do not use cache when building the image.
|
||||
```
|
||||
|
||||
Services are built once and then tagged as `project_service`, e.g.,
|
||||
`composetest_db`. If you change a service's Dockerfile or the contents of its
|
||||
build directory, run `docker-compose build` to rebuild it.
|
||||
55
docs/reference/docker-compose.md
Normal file
55
docs/reference/docker-compose.md
Normal file
@@ -0,0 +1,55 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "docker-compose"
|
||||
description = "docker-compose Command Binary"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, docker-compose"]
|
||||
[menu.main]
|
||||
parent = "smn_compose_cli"
|
||||
weight=-2
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# docker-compose Command
|
||||
|
||||
```
|
||||
Usage:
|
||||
docker-compose [options] [COMMAND] [ARGS...]
|
||||
docker-compose -h|--help
|
||||
|
||||
Options:
|
||||
-f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
|
||||
-p, --project-name NAME Specify an alternate project name (default: directory name)
|
||||
--verbose Show more output
|
||||
-v, --version Print version and exit
|
||||
|
||||
Commands:
|
||||
build Build or rebuild services
|
||||
help Get help on a command
|
||||
kill Kill containers
|
||||
logs View output from containers
|
||||
port Print the public port for a port binding
|
||||
ps List containers
|
||||
pull Pulls service images
|
||||
restart Restart services
|
||||
rm Remove stopped containers
|
||||
run Run a one-off command
|
||||
scale Set number of containers for a service
|
||||
start Start services
|
||||
stop Stop services
|
||||
up Create and start containers
|
||||
migrate-to-labels Recreate containers to add labels
|
||||
```
|
||||
|
||||
The Docker Compose binary. You use this command to build and manage multiple services in Docker containers.
|
||||
|
||||
Use the `-f` flag to specify the location of a Compose configuration file. This
|
||||
flag is optional. If you don't provide this flag, Compose looks for a file named
|
||||
`docker-compose.yml` in the working directory. If the file is not found,
|
||||
Compose looks in each parent directory successively, until it finds the file.
|
||||
|
||||
Use a `-` as the filename to read the configuration file from stdin. When stdin is
|
||||
used all paths in the configuration are relative to the current working
|
||||
directory.
|
||||
|
||||
Each configuration can have a project name. If you supply a `-p` flag, you can specify a project name. If you don't specify the flag, Compose uses the current directory name.
|
||||
18
docs/reference/help.md
Normal file
18
docs/reference/help.md
Normal file
@@ -0,0 +1,18 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "help"
|
||||
description = "help"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, help"]
|
||||
[menu.main]
|
||||
identifier="help.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# help
|
||||
|
||||
```
|
||||
Usage: help COMMAND
|
||||
```
|
||||
|
||||
Displays help and usage instructions for a command.
|
||||
29
docs/reference/index.md
Normal file
29
docs/reference/index.md
Normal file
@@ -0,0 +1,29 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Compose CLI reference"
|
||||
description = "Compose CLI reference"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, reference"]
|
||||
[menu.main]
|
||||
identifier = "smn_compose_cli"
|
||||
parent = "smn_compose_ref"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
## Compose CLI reference
|
||||
|
||||
The following pages describe the usage information for the [docker-compose](/reference/docker-compose.md) subcommands. You can also see this information by running `docker-compose [SUBCOMMAND] --help` from the command line.
|
||||
|
||||
* [build](/reference/reference/build.md)
|
||||
* [help](/reference/help.md)
|
||||
* [kill](/reference/kill.md)
|
||||
* [ps](/reference/ps.md)
|
||||
* [restart](/reference/restart.md)
|
||||
* [run](/reference/run.md)
|
||||
* [start](/reference/start.md)
|
||||
* [up](/reference/up.md)
|
||||
* [logs](/reference/logs.md)
|
||||
* [port](/reference/port.md)
|
||||
* [pull](/reference/pull.md)
|
||||
* [rm](/reference/rm.md)
|
||||
* [scale](/reference/scale.md)
|
||||
* [stop](/reference/stop.md)
|
||||
24
docs/reference/kill.md
Normal file
24
docs/reference/kill.md
Normal file
@@ -0,0 +1,24 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "kill"
|
||||
description = "Forces running containers to stop."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, kill"]
|
||||
[menu.main]
|
||||
identifier="kill.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# kill
|
||||
|
||||
```
|
||||
Usage: kill [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-s SIGNAL SIGNAL to send to the container. Default signal is SIGKILL.
|
||||
```
|
||||
|
||||
Forces running containers to stop by sending a `SIGKILL` signal. Optionally the
|
||||
signal can be passed, for example:
|
||||
|
||||
$ docker-compose kill -s SIGINT
|
||||
21
docs/reference/logs.md
Normal file
21
docs/reference/logs.md
Normal file
@@ -0,0 +1,21 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "logs"
|
||||
description = "Displays log output from services."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, logs"]
|
||||
[menu.main]
|
||||
identifier="logs.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# logs
|
||||
|
||||
```
|
||||
Usage: logs [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
--no-color Produce monochrome output.
|
||||
```
|
||||
|
||||
Displays log output from services.
|
||||
62
docs/reference/overview.md
Normal file
62
docs/reference/overview.md
Normal file
@@ -0,0 +1,62 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Introduction to the CLI"
|
||||
description = "Introduction to the CLI"
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, reference"]
|
||||
[menu.main]
|
||||
parent = "smn_compose_cli"
|
||||
weight=-2
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# Introduction to the CLI
|
||||
|
||||
This section describes the subcommands you can use with the `docker-compose` command. You can run a subcommand against one or more services. To run against a specific service, you supply the service name from your compose configuration. If you do not specify the service name, the command runs against all the services in your configuration.
|
||||
|
||||
## Environment Variables
|
||||
|
||||
Several environment variables are available for you to configure the Docker Compose command-line behaviour.
|
||||
|
||||
Variables starting with `DOCKER_` are the same as those used to configure the
|
||||
Docker command-line client. If you're using `docker-machine`, then the `eval "$(docker-machine env my-docker-vm)"` command should set them to their correct values. (In this example, `my-docker-vm` is the name of a machine you created.)
|
||||
|
||||
### COMPOSE\_PROJECT\_NAME
|
||||
|
||||
Sets the project name. This value is prepended along with the service name to the container name on start up. For example, if your project name is `myapp` and it includes two services `db` and `web` then compose starts containers named `myapp_db_1` and `myapp_web_1` respectively.
|
||||
|
||||
Setting this is optional. If you do not set this, the `COMPOSE_PROJECT_NAME` defaults to the `basename` of the current working directory.
|
||||
|
||||
### COMPOSE\_FILE
|
||||
|
||||
Specify the file containing the compose configuration. If not provided, Compose looks for a file named `docker-compose.yml` in the current directory and then each parent directory in succession until a file by that name is found.
|
||||
|
||||
### DOCKER\_HOST
|
||||
|
||||
Sets the URL of the `docker` daemon. As with the Docker client, defaults to `unix:///var/run/docker.sock`.
|
||||
|
||||
### DOCKER\_TLS\_VERIFY
|
||||
|
||||
When set to anything other than an empty string, enables TLS communication with
|
||||
the `docker` daemon.
|
||||
|
||||
### DOCKER\_CERT\_PATH
|
||||
|
||||
Configures the path to the `ca.pem`, `cert.pem`, and `key.pem` files used for TLS verification. Defaults to `~/.docker`.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [User guide](/)
|
||||
- [Installing Compose](install.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with Wordpress](wordpress.md)
|
||||
- [Yaml file reference](yml.md)
|
||||
- [Compose environment variables](env.md)
|
||||
- [Compose command line completion](completion.md)
|
||||
23
docs/reference/port.md
Normal file
23
docs/reference/port.md
Normal file
@@ -0,0 +1,23 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "port"
|
||||
description = "Prints the public port for a port binding."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, port"]
|
||||
[menu.main]
|
||||
identifier="port.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# port
|
||||
|
||||
```
|
||||
Usage: port [options] SERVICE PRIVATE_PORT
|
||||
|
||||
Options:
|
||||
--protocol=proto tcp or udp [default: tcp]
|
||||
--index=index index of the container if there are multiple
|
||||
instances of a service [default: 1]
|
||||
```
|
||||
|
||||
Prints the public port for a port binding.
|
||||
21
docs/reference/ps.md
Normal file
21
docs/reference/ps.md
Normal file
@@ -0,0 +1,21 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "ps"
|
||||
description = "Lists containers."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, ps"]
|
||||
[menu.main]
|
||||
identifier="ps.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# ps
|
||||
|
||||
```
|
||||
Usage: ps [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-q Only display IDs
|
||||
```
|
||||
|
||||
Lists containers.
|
||||
18
docs/reference/pull.md
Normal file
18
docs/reference/pull.md
Normal file
@@ -0,0 +1,18 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "pull"
|
||||
description = "Pulls service images."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, pull"]
|
||||
[menu.main]
|
||||
identifier="pull.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# pull
|
||||
|
||||
```
|
||||
Usage: pull [options] [SERVICE...]
|
||||
```
|
||||
|
||||
Pulls service images.
|
||||
21
docs/reference/restart.md
Normal file
21
docs/reference/restart.md
Normal file
@@ -0,0 +1,21 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "restart"
|
||||
description = "Restarts Docker Compose services."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, restart"]
|
||||
[menu.main]
|
||||
identifier="restart.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# restart
|
||||
|
||||
```
|
||||
Usage: restart [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds. (default: 10)
|
||||
```
|
||||
|
||||
Restarts services.
|
||||
22
docs/reference/rm.md
Normal file
22
docs/reference/rm.md
Normal file
@@ -0,0 +1,22 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "rm"
|
||||
description = "Removes stopped service containers."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, rm"]
|
||||
[menu.main]
|
||||
identifier="rm.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# rm
|
||||
|
||||
```
|
||||
Usage: rm [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-f, --force Don't ask to confirm removal
|
||||
-v Remove volumes associated with containers
|
||||
```
|
||||
|
||||
Removes stopped service containers.
|
||||
53
docs/reference/run.md
Normal file
53
docs/reference/run.md
Normal file
@@ -0,0 +1,53 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "run"
|
||||
description = "Runs a one-off command on a service."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, run"]
|
||||
[menu.main]
|
||||
identifier="run.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# run
|
||||
|
||||
```
|
||||
Usage: run [options] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
|
||||
|
||||
Options:
|
||||
-d Detached mode: Run container in the background, print
|
||||
new container name.
|
||||
--entrypoint CMD Override the entrypoint of the image.
|
||||
-e KEY=VAL Set an environment variable (can be used multiple times)
|
||||
-u, --user="" Run as specified username or uid
|
||||
--no-deps Don't start linked services.
|
||||
--rm Remove container after run. Ignored in detached mode.
|
||||
--service-ports Run command with the service's ports enabled and mapped to the host.
|
||||
-T Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.
|
||||
```
|
||||
|
||||
Runs a one-time command against a service. For example, the following command starts the `web` service and runs `bash` as its command.
|
||||
|
||||
$ docker-compose run web bash
|
||||
|
||||
Commands you use with `run` start in new containers with the same configuration as defined by the service's configuration. This means the container has the same volumes and links as defined in the configuration file. There are two differences, though.
|
||||
|
||||
First, the command passed by `run` overrides the command defined in the service configuration. For example, if the `web` service configuration is started with `bash`, then `docker-compose run web python app.py` overrides it with `python app.py`.
|
||||
|
||||
The second difference is that the `docker-compose run` command does not create any of the ports specified in the service configuration. This prevents port collisions with already-open ports. If you *do want* the service's ports created and mapped to the host, specify the `--service-ports` flag:
|
||||
|
||||
$ docker-compose run --service-ports web python manage.py shell
|
||||
|
||||
If you start a service configured with links, the `run` command first checks to see if the linked service is running and starts the service if it is stopped. Once all the linked services are running, the `run` executes the command you passed it. So, for example, you could run:
|
||||
|
||||
$ docker-compose run db psql -h db -U docker
|
||||
|
||||
This would open up an interactive PostgreSQL shell for the linked `db` container.
|
||||
|
||||
If you do not want the `run` command to start linked containers, specify the `--no-deps` flag:
|
||||
|
||||
$ docker-compose run --no-deps web python manage.py shell
|
||||
|
||||
|
||||
|
||||
|
||||
21
docs/reference/scale.md
Normal file
21
docs/reference/scale.md
Normal file
@@ -0,0 +1,21 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "scale"
|
||||
description = "Sets the number of containers to run for a service."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, scale"]
|
||||
[menu.main]
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# scale
|
||||
|
||||
```
|
||||
Usage: scale [SERVICE=NUM...]
|
||||
```
|
||||
|
||||
Sets the number of containers to run for a service.
|
||||
|
||||
Numbers are specified as arguments in the form `service=num`. For example:
|
||||
|
||||
$ docker-compose scale web=2 worker=3
|
||||
18
docs/reference/start.md
Normal file
18
docs/reference/start.md
Normal file
@@ -0,0 +1,18 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "start"
|
||||
description = "Starts existing containers for a service."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, start"]
|
||||
[menu.main]
|
||||
identifier="start.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# start
|
||||
|
||||
```
|
||||
Usage: start [SERVICE...]
|
||||
```
|
||||
|
||||
Starts existing containers for a service.
|
||||
22
docs/reference/stop.md
Normal file
22
docs/reference/stop.md
Normal file
@@ -0,0 +1,22 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "stop"
|
||||
description = "Stops running containers without removing them. "
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, stop"]
|
||||
[menu.main]
|
||||
identifier="stop.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# stop
|
||||
|
||||
```
|
||||
Usage: stop [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds (default: 10).
|
||||
```
|
||||
|
||||
Stops running containers without removing them. They can be started again with
|
||||
`docker-compose start`.
|
||||
47
docs/reference/up.md
Normal file
47
docs/reference/up.md
Normal file
@@ -0,0 +1,47 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "up"
|
||||
description = "Builds, (re)creates, starts, and attaches to containers for a service."
|
||||
keywords = ["fig, composition, compose, docker, orchestration, cli, up"]
|
||||
[menu.main]
|
||||
identifier="up.compose"
|
||||
parent = "smn_compose_cli"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# up
|
||||
|
||||
```
|
||||
Usage: up [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-d Detached mode: Run containers in the background,
|
||||
print new container names.
|
||||
--no-color Produce monochrome output.
|
||||
--no-deps Don't start linked services.
|
||||
--force-recreate Recreate containers even if their configuration and
|
||||
image haven't changed. Incompatible with --no-recreate.
|
||||
--no-recreate If containers already exist, don't recreate them.
|
||||
Incompatible with --force-recreate.
|
||||
--no-build Don't build an image, even if it's missing
|
||||
-t, --timeout TIMEOUT Use this timeout in seconds for container shutdown
|
||||
when attached or when containers are already
|
||||
running. (default: 10)
|
||||
```
|
||||
|
||||
Builds, (re)creates, starts, and attaches to containers for a service.
|
||||
|
||||
Unless they are already running, this command also starts any linked services.
|
||||
|
||||
The `docker-compose up` command aggregates the output of each container. When
|
||||
the command exits, all containers are stopped. Running `docker-compose up -d`
|
||||
starts the containers in the background and leaves them running.
|
||||
|
||||
If there are existing containers for a service, and the service's configuration
|
||||
or image was changed after the container's creation, `docker-compose up` picks
|
||||
up the changes by stopping and recreating the containers (preserving mounted
|
||||
volumes). To prevent Compose from picking up changes, use the `--no-recreate`
|
||||
flag.
|
||||
|
||||
If you want to force Compose to stop and recreate all containers, use the
|
||||
`--force-recreate` flag.
|
||||
@@ -1,14 +1,21 @@
|
||||
page_title: Quickstart Guide: Compose and Wordpress
|
||||
page_description: Getting started with Docker Compose and Wordpress
|
||||
page_keywords: documentation, docs, docker, compose, orchestration, containers,
|
||||
wordpress
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Quickstart Guide: Compose and Wordpress"
|
||||
description = "Getting started with Compose and Wordpress"
|
||||
keywords = ["documentation, docs, docker, compose, orchestration, containers"]
|
||||
[menu.main]
|
||||
parent="smn_workw_compose"
|
||||
weight=6
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
## Getting started with Compose and Wordpress
|
||||
|
||||
# Quickstart Guide: Compose and Wordpress
|
||||
|
||||
You can use Compose to easily run Wordpress in an isolated environment built
|
||||
with Docker containers.
|
||||
|
||||
### Define the project
|
||||
## Define the project
|
||||
|
||||
First, [Install Compose](install.md) and then download Wordpress into the
|
||||
current directory:
|
||||
@@ -25,10 +32,8 @@ Dockerfiles, see the
|
||||
[Dockerfile reference](http://docs.docker.com/reference/builder/). In this case,
|
||||
your Dockerfile should be:
|
||||
|
||||
```
|
||||
FROM orchardup/php5
|
||||
ADD . /code
|
||||
```
|
||||
FROM orchardup/php5
|
||||
ADD . /code
|
||||
|
||||
This tells Docker how to build an image defining a container that contains PHP
|
||||
and Wordpress.
|
||||
@@ -36,86 +41,82 @@ and Wordpress.
|
||||
Next you'll create a `docker-compose.yml` file that will start your web service
|
||||
and a separate MySQL instance:
|
||||
|
||||
```
|
||||
web:
|
||||
build: .
|
||||
command: php -S 0.0.0.0:8000 -t /code
|
||||
ports:
|
||||
- "8000:8000"
|
||||
links:
|
||||
- db
|
||||
volumes:
|
||||
- .:/code
|
||||
db:
|
||||
image: orchardup/mysql
|
||||
environment:
|
||||
MYSQL_DATABASE: wordpress
|
||||
```
|
||||
web:
|
||||
build: .
|
||||
command: php -S 0.0.0.0:8000 -t /code
|
||||
ports:
|
||||
- "8000:8000"
|
||||
links:
|
||||
- db
|
||||
volumes:
|
||||
- .:/code
|
||||
db:
|
||||
image: orchardup/mysql
|
||||
environment:
|
||||
MYSQL_DATABASE: wordpress
|
||||
|
||||
Two supporting files are needed to get this working - first, `wp-config.php` is
|
||||
the standard Wordpress config file with a single change to point the database
|
||||
configuration at the `db` container:
|
||||
|
||||
```
|
||||
<?php
|
||||
define('DB_NAME', 'wordpress');
|
||||
define('DB_USER', 'root');
|
||||
define('DB_PASSWORD', '');
|
||||
define('DB_HOST', "db:3306");
|
||||
define('DB_CHARSET', 'utf8');
|
||||
define('DB_COLLATE', '');
|
||||
<?php
|
||||
define('DB_NAME', 'wordpress');
|
||||
define('DB_USER', 'root');
|
||||
define('DB_PASSWORD', '');
|
||||
define('DB_HOST', "db:3306");
|
||||
define('DB_CHARSET', 'utf8');
|
||||
define('DB_COLLATE', '');
|
||||
|
||||
define('AUTH_KEY', 'put your unique phrase here');
|
||||
define('SECURE_AUTH_KEY', 'put your unique phrase here');
|
||||
define('LOGGED_IN_KEY', 'put your unique phrase here');
|
||||
define('NONCE_KEY', 'put your unique phrase here');
|
||||
define('AUTH_SALT', 'put your unique phrase here');
|
||||
define('SECURE_AUTH_SALT', 'put your unique phrase here');
|
||||
define('LOGGED_IN_SALT', 'put your unique phrase here');
|
||||
define('NONCE_SALT', 'put your unique phrase here');
|
||||
define('AUTH_KEY', 'put your unique phrase here');
|
||||
define('SECURE_AUTH_KEY', 'put your unique phrase here');
|
||||
define('LOGGED_IN_KEY', 'put your unique phrase here');
|
||||
define('NONCE_KEY', 'put your unique phrase here');
|
||||
define('AUTH_SALT', 'put your unique phrase here');
|
||||
define('SECURE_AUTH_SALT', 'put your unique phrase here');
|
||||
define('LOGGED_IN_SALT', 'put your unique phrase here');
|
||||
define('NONCE_SALT', 'put your unique phrase here');
|
||||
|
||||
$table_prefix = 'wp_';
|
||||
define('WPLANG', '');
|
||||
define('WP_DEBUG', false);
|
||||
$table_prefix = 'wp_';
|
||||
define('WPLANG', '');
|
||||
define('WP_DEBUG', false);
|
||||
|
||||
if ( !defined('ABSPATH') )
|
||||
define('ABSPATH', dirname(__FILE__) . '/');
|
||||
if ( !defined('ABSPATH') )
|
||||
define('ABSPATH', dirname(__FILE__) . '/');
|
||||
|
||||
require_once(ABSPATH . 'wp-settings.php');
|
||||
```
|
||||
require_once(ABSPATH . 'wp-settings.php');
|
||||
|
||||
Second, `router.php` tells PHP's built-in web server how to run Wordpress:
|
||||
|
||||
```
|
||||
<?php
|
||||
<?php
|
||||
|
||||
$root = $_SERVER['DOCUMENT_ROOT'];
|
||||
chdir($root);
|
||||
$path = '/'.ltrim(parse_url($_SERVER['REQUEST_URI'])['path'],'/');
|
||||
set_include_path(get_include_path().':'.__DIR__);
|
||||
if(file_exists($root.$path))
|
||||
{
|
||||
if(is_dir($root.$path) && substr($path,strlen($path) - 1, 1) !== '/')
|
||||
$path = rtrim($path,'/').'/index.php';
|
||||
if(strpos($path,'.php') === false) return false;
|
||||
else {
|
||||
chdir(dirname($root.$path));
|
||||
require_once $root.$path;
|
||||
}
|
||||
}else include_once 'index.php';
|
||||
|
||||
$root = $_SERVER['DOCUMENT_ROOT'];
|
||||
chdir($root);
|
||||
$path = '/'.ltrim(parse_url($_SERVER['REQUEST_URI'])['path'],'/');
|
||||
set_include_path(get_include_path().':'.__DIR__);
|
||||
if(file_exists($root.$path))
|
||||
{
|
||||
if(is_dir($root.$path) && substr($path,strlen($path) - 1, 1) !== '/')
|
||||
$path = rtrim($path,'/').'/index.php';
|
||||
if(strpos($path,'.php') === false) return false;
|
||||
else {
|
||||
chdir(dirname($root.$path));
|
||||
require_once $root.$path;
|
||||
}
|
||||
}else include_once 'index.php';
|
||||
```
|
||||
### Build the project
|
||||
|
||||
With those four files in place, run `docker-compose up` inside your Wordpress
|
||||
directory and it'll pull and build the needed images, and then start the web and
|
||||
database containers. You'll then be able to visit Wordpress at port 8000 on your
|
||||
Docker daemon (if you're using Boot2docker, `boot2docker ip` will tell you its
|
||||
address).
|
||||
database containers. If you're using [Docker Machine](https://docs.docker.com/machine), then `docker-machine ip MACHINE_VM` gives you the machine address and you can open `http://MACHINE_VM_IP:8000` in a browser.
|
||||
|
||||
## More Compose documentation
|
||||
|
||||
- [User guide](/)
|
||||
- [Installing Compose](install.md)
|
||||
- [User guide](index.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with Wordpress](wordpress.md)
|
||||
- [Command line reference](cli.md)
|
||||
- [Yaml file reference](yml.md)
|
||||
- [Compose environment variables](env.md)
|
||||
|
||||
388
docs/yml.md
388
docs/yml.md
@@ -1,10 +1,13 @@
|
||||
---
|
||||
layout: default
|
||||
title: docker-compose.yml reference
|
||||
page_title: docker-compose.yml reference
|
||||
page_description: docker-compose.yml reference
|
||||
page_keywords: fig, composition, compose, docker
|
||||
---
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "docker-compose.yml reference"
|
||||
description = "docker-compose.yml reference"
|
||||
keywords = ["fig, composition, compose, docker"]
|
||||
[menu.main]
|
||||
parent="smn_compose_ref"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
|
||||
# docker-compose.yml reference
|
||||
|
||||
@@ -21,31 +24,33 @@ specify them again in `docker-compose.yml`.
|
||||
Tag or partial image ID. Can be local or remote - Compose will attempt to
|
||||
pull if it doesn't exist locally.
|
||||
|
||||
```
|
||||
image: ubuntu
|
||||
image: orchardup/postgresql
|
||||
image: a4bc65fd
|
||||
```
|
||||
image: ubuntu
|
||||
image: orchardup/postgresql
|
||||
image: a4bc65fd
|
||||
|
||||
### build
|
||||
|
||||
Path to a directory containing a Dockerfile. When the value supplied is a
|
||||
relative path, it is interpreted as relative to the location of the yml file
|
||||
Path to a directory containing a Dockerfile. When the value supplied is a
|
||||
relative path, it is interpreted as relative to the location of the yml file
|
||||
itself. This directory is also the build context that is sent to the Docker daemon.
|
||||
|
||||
Compose will build and tag it with a generated name, and use that image thereafter.
|
||||
|
||||
```
|
||||
build: /path/to/build/dir
|
||||
```
|
||||
build: /path/to/build/dir
|
||||
|
||||
### dockerfile
|
||||
|
||||
Alternate Dockerfile.
|
||||
|
||||
Compose will use an alternate file to build with.
|
||||
|
||||
dockerfile: Dockerfile-alternate
|
||||
|
||||
### command
|
||||
|
||||
Override the default command.
|
||||
|
||||
```
|
||||
command: bundle exec thin -p 3000
|
||||
```
|
||||
command: bundle exec thin -p 3000
|
||||
|
||||
<a name="links"></a>
|
||||
### links
|
||||
@@ -54,21 +59,17 @@ Link to containers in another service. Either specify both the service name and
|
||||
the link alias (`SERVICE:ALIAS`), or just the service name (which will also be
|
||||
used for the alias).
|
||||
|
||||
```
|
||||
links:
|
||||
- db
|
||||
- db:database
|
||||
- redis
|
||||
```
|
||||
links:
|
||||
- db
|
||||
- db:database
|
||||
- redis
|
||||
|
||||
An entry with the alias' name will be created in `/etc/hosts` inside containers
|
||||
for this service, e.g:
|
||||
|
||||
```
|
||||
172.17.2.186 db
|
||||
172.17.2.186 database
|
||||
172.17.2.187 redis
|
||||
```
|
||||
172.17.2.186 db
|
||||
172.17.2.186 database
|
||||
172.17.2.187 redis
|
||||
|
||||
Environment variables will also be created - see the [environment variable
|
||||
reference](env.md) for details.
|
||||
@@ -80,12 +81,23 @@ of Compose, especially for containers that provide shared or common services.
|
||||
`external_links` follow semantics similar to `links` when specifying both the
|
||||
container name and the link alias (`CONTAINER:ALIAS`).
|
||||
|
||||
```
|
||||
external_links:
|
||||
- redis_1
|
||||
- project_db_1:mysql
|
||||
- project_db_1:postgresql
|
||||
```
|
||||
external_links:
|
||||
- redis_1
|
||||
- project_db_1:mysql
|
||||
- project_db_1:postgresql
|
||||
|
||||
### extra_hosts
|
||||
|
||||
Add hostname mappings. Use the same values as the docker client `--add-host` parameter.
|
||||
|
||||
extra_hosts:
|
||||
- "somehost:162.242.195.82"
|
||||
- "otherhost:50.31.209.229"
|
||||
|
||||
An entry with the ip address and hostname will be created in `/etc/hosts` inside containers for this service, e.g:
|
||||
|
||||
162.242.195.82 somehost
|
||||
50.31.209.229 otherhost
|
||||
|
||||
### ports
|
||||
|
||||
@@ -97,46 +109,45 @@ port (a random host port will be chosen).
|
||||
> parse numbers in the format `xx:yy` as sexagesimal (base 60). For this reason,
|
||||
> we recommend always explicitly specifying your port mappings as strings.
|
||||
|
||||
```
|
||||
ports:
|
||||
- "3000"
|
||||
- "8000:8000"
|
||||
- "49100:22"
|
||||
- "127.0.0.1:8001:8001"
|
||||
```
|
||||
ports:
|
||||
- "3000"
|
||||
- "8000:8000"
|
||||
- "49100:22"
|
||||
- "127.0.0.1:8001:8001"
|
||||
|
||||
### expose
|
||||
|
||||
Expose ports without publishing them to the host machine - they'll only be
|
||||
accessible to linked services. Only the internal port can be specified.
|
||||
|
||||
```
|
||||
expose:
|
||||
- "3000"
|
||||
- "8000"
|
||||
```
|
||||
expose:
|
||||
- "3000"
|
||||
- "8000"
|
||||
|
||||
### volumes
|
||||
|
||||
Mount paths as volumes, optionally specifying a path on the host machine
|
||||
(`HOST:CONTAINER`), or an access mode (`HOST:CONTAINER:ro`).
|
||||
|
||||
```
|
||||
volumes:
|
||||
- /var/lib/mysql
|
||||
- cache/:/tmp/cache
|
||||
- ~/configs:/etc/configs/:ro
|
||||
```
|
||||
volumes:
|
||||
- /var/lib/mysql
|
||||
- ./cache:/tmp/cache
|
||||
- ~/configs:/etc/configs/:ro
|
||||
|
||||
You can mount a relative path on the host, which will expand relative to
|
||||
the directory of the Compose configuration file being used. Relative paths
|
||||
should always begin with `.` or `..`.
|
||||
|
||||
> Note: No path expansion will be done if you have also specified a
|
||||
> `volume_driver`.
|
||||
|
||||
### volumes_from
|
||||
|
||||
Mount all of the volumes from another service or container.
|
||||
|
||||
```
|
||||
volumes_from:
|
||||
- service_name
|
||||
- container_name
|
||||
```
|
||||
volumes_from:
|
||||
- service_name
|
||||
- container_name
|
||||
|
||||
### environment
|
||||
|
||||
@@ -145,15 +156,13 @@ Add environment variables. You can use either an array or a dictionary.
|
||||
Environment variables with only a key are resolved to their values on the
|
||||
machine Compose is running on, which can be helpful for secret or host-specific values.
|
||||
|
||||
```
|
||||
environment:
|
||||
RACK_ENV: development
|
||||
SESSION_SECRET:
|
||||
environment:
|
||||
RACK_ENV: development
|
||||
SESSION_SECRET:
|
||||
|
||||
environment:
|
||||
- RACK_ENV=development
|
||||
- SESSION_SECRET
|
||||
```
|
||||
environment:
|
||||
- RACK_ENV=development
|
||||
- SESSION_SECRET
|
||||
|
||||
### env_file
|
||||
|
||||
@@ -164,18 +173,18 @@ If you have specified a Compose file with `docker-compose -f FILE`, paths in
|
||||
|
||||
Environment variables specified in `environment` override these values.
|
||||
|
||||
```
|
||||
env_file: .env
|
||||
env_file: .env
|
||||
|
||||
env_file:
|
||||
- ./common.env
|
||||
- ./apps/web.env
|
||||
- /opt/secrets.env
|
||||
```
|
||||
env_file:
|
||||
- ./common.env
|
||||
- ./apps/web.env
|
||||
- /opt/secrets.env
|
||||
|
||||
```
|
||||
RACK_ENV: development
|
||||
```
|
||||
Compose expects each line in an env file to be in `VAR=VAL` format. Lines
|
||||
beginning with `#` (i.e. comments) are ignored, as are blank lines.
|
||||
|
||||
# Set Rails/Rack environment
|
||||
RACK_ENV=development
|
||||
|
||||
### extends
|
||||
|
||||
@@ -188,147 +197,186 @@ Here's a simple example. Suppose we have 2 files - **common.yml** and
|
||||
|
||||
**common.yml**
|
||||
|
||||
```
|
||||
webapp:
|
||||
build: ./webapp
|
||||
environment:
|
||||
- DEBUG=false
|
||||
- SEND_EMAILS=false
|
||||
```
|
||||
webapp:
|
||||
build: ./webapp
|
||||
environment:
|
||||
- DEBUG=false
|
||||
- SEND_EMAILS=false
|
||||
|
||||
**development.yml**
|
||||
|
||||
```
|
||||
web:
|
||||
extends:
|
||||
file: common.yml
|
||||
service: webapp
|
||||
ports:
|
||||
- "8000:8000"
|
||||
links:
|
||||
- db
|
||||
environment:
|
||||
- DEBUG=true
|
||||
db:
|
||||
image: postgres
|
||||
```
|
||||
web:
|
||||
extends:
|
||||
file: common.yml
|
||||
service: webapp
|
||||
ports:
|
||||
- "8000:8000"
|
||||
links:
|
||||
- db
|
||||
environment:
|
||||
- DEBUG=true
|
||||
db:
|
||||
image: postgres
|
||||
|
||||
Here, the `web` service in **development.yml** inherits the configuration of
|
||||
the `webapp` service in **common.yml** - the `build` and `environment` keys -
|
||||
and adds `ports` and `links` configuration. It overrides one of the defined
|
||||
environment variables (DEBUG) with a new value, and the other one
|
||||
(SEND_EMAILS) is left untouched. It's exactly as if you defined `web` like
|
||||
this:
|
||||
(SEND_EMAILS) is left untouched.
|
||||
|
||||
```yaml
|
||||
web:
|
||||
build: ./webapp
|
||||
ports:
|
||||
- "8000:8000"
|
||||
links:
|
||||
- db
|
||||
environment:
|
||||
- DEBUG=true
|
||||
- SEND_EMAILS=false
|
||||
```
|
||||
The `file` key is optional, if it is not set then Compose will look for the
|
||||
service within the current file.
|
||||
|
||||
The `extends` option is great for sharing configuration between different
|
||||
apps, or for configuring the same app differently for different environments.
|
||||
You could write a new file for a staging environment, **staging.yml**, which
|
||||
binds to a different port and doesn't turn on debugging:
|
||||
For more on `extends`, see the [tutorial](extends.md#example) and
|
||||
[reference](extends.md#reference).
|
||||
|
||||
```
|
||||
web:
|
||||
extends:
|
||||
file: common.yml
|
||||
service: webapp
|
||||
ports:
|
||||
- "80:8000"
|
||||
links:
|
||||
- db
|
||||
db:
|
||||
image: postgres
|
||||
```
|
||||
### labels
|
||||
|
||||
> **Note:** When you extend a service, `links` and `volumes_from`
|
||||
> configuration options are **not** inherited - you will have to define
|
||||
> those manually each time you extend it.
|
||||
Add metadata to containers using [Docker labels](http://docs.docker.com/userguide/labels-custom-metadata/). You can use either an array or a dictionary.
|
||||
|
||||
It's recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.
|
||||
|
||||
labels:
|
||||
com.example.description: "Accounting webapp"
|
||||
com.example.department: "Finance"
|
||||
com.example.label-with-empty-value: ""
|
||||
|
||||
labels:
|
||||
- "com.example.description=Accounting webapp"
|
||||
- "com.example.department=Finance"
|
||||
- "com.example.label-with-empty-value"
|
||||
|
||||
### container_name
|
||||
|
||||
Specify a custom container name, rather than a generated default name.
|
||||
|
||||
container_name: my-web-container
|
||||
|
||||
Because Docker container names must be unique, you cannot scale a service
|
||||
beyond 1 container if you have specified a custom name. Attempting to do so
|
||||
results in an error.
|
||||
|
||||
### log driver
|
||||
|
||||
Specify a logging driver for the service's containers, as with the ``--log-driver`` option for docker run ([documented here](http://docs.docker.com/reference/run/#logging-drivers-log-driver)).
|
||||
|
||||
Allowed values are currently ``json-file``, ``syslog`` and ``none``. The list will change over time as more drivers are added to the Docker engine.
|
||||
|
||||
The default value is json-file.
|
||||
|
||||
log_driver: "json-file"
|
||||
log_driver: "syslog"
|
||||
log_driver: "none"
|
||||
|
||||
Specify logging options with `log_opt` for the logging driver, as with the ``--log-opt`` option for `docker run`.
|
||||
|
||||
Logging options are key value pairs. An example of `syslog` options:
|
||||
|
||||
log_driver: "syslog"
|
||||
log_opt:
|
||||
address: "tcp://192.168.0.42:123"
|
||||
|
||||
### net
|
||||
|
||||
Networking mode. Use the same values as the docker client `--net` parameter.
|
||||
|
||||
```
|
||||
net: "bridge"
|
||||
net: "none"
|
||||
net: "container:[name or id]"
|
||||
net: "host"
|
||||
```
|
||||
net: "bridge"
|
||||
net: "none"
|
||||
net: "container:[name or id]"
|
||||
net: "host"
|
||||
|
||||
### pid
|
||||
|
||||
pid: "host"
|
||||
|
||||
Sets the PID mode to the host PID mode. This turns on sharing of the PID
|
||||
address space between the container and the host operating system. Containers
|
||||
launched with this flag will be able to access and manipulate other
|
||||
containers in the bare-metal machine's namespace, and vice versa.
|
||||
|
||||
### dns
|
||||
|
||||
Custom DNS servers. Can be a single value or a list.
|
||||
|
||||
```
|
||||
dns: 8.8.8.8
|
||||
dns:
|
||||
- 8.8.8.8
|
||||
- 9.9.9.9
|
||||
```
|
||||
dns: 8.8.8.8
|
||||
dns:
|
||||
- 8.8.8.8
|
||||
- 9.9.9.9
|
||||
|
||||
### cap_add, cap_drop
|
||||
|
||||
Add or drop container capabilities.
|
||||
See `man 7 capabilities` for a full list.
|
||||
|
||||
```
|
||||
cap_add:
|
||||
- ALL
|
||||
cap_add:
|
||||
- ALL
|
||||
|
||||
cap_drop:
|
||||
- NET_ADMIN
|
||||
- SYS_ADMIN
|
||||
```
|
||||
cap_drop:
|
||||
- NET_ADMIN
|
||||
- SYS_ADMIN
|
||||
|
||||
### dns_search
|
||||
|
||||
Custom DNS search domains. Can be a single value or a list.
|
||||
|
||||
```
|
||||
dns_search: example.com
|
||||
dns_search:
|
||||
- dc1.example.com
|
||||
- dc2.example.com
|
||||
```
|
||||
dns_search: example.com
|
||||
dns_search:
|
||||
- dc1.example.com
|
||||
- dc2.example.com
|
||||
|
||||
### working\_dir, entrypoint, user, hostname, domainname, mem\_limit, privileged, restart, stdin\_open, tty, cpu\_shares
|
||||
### devices
|
||||
|
||||
List of device mappings. Uses the same format as the `--device` docker
|
||||
client create option.
|
||||
|
||||
devices:
|
||||
- "/dev/ttyUSB0:/dev/ttyUSB0"
|
||||
|
||||
### security_opt
|
||||
|
||||
Override the default labeling scheme for each container.
|
||||
|
||||
security_opt:
|
||||
- label:user:USER
|
||||
- label:role:ROLE
|
||||
|
||||
### working\_dir, entrypoint, user, hostname, domainname, mac\_address, mem\_limit, memswap\_limit, privileged, restart, stdin\_open, tty, cpu\_shares, cpuset, read\_only, volume\_driver
|
||||
|
||||
Each of these is a single value, analogous to its
|
||||
[docker run](https://docs.docker.com/reference/run/) counterpart.
|
||||
|
||||
```
|
||||
cpu_shares: 73
|
||||
cpu_shares: 73
|
||||
cpuset: 0,1
|
||||
|
||||
working_dir: /code
|
||||
entrypoint: /code/entrypoint.sh
|
||||
user: postgresql
|
||||
working_dir: /code
|
||||
entrypoint: /code/entrypoint.sh
|
||||
user: postgresql
|
||||
|
||||
hostname: foo
|
||||
domainname: foo.com
|
||||
hostname: foo
|
||||
domainname: foo.com
|
||||
|
||||
mem_limit: 1000000000
|
||||
privileged: true
|
||||
mac_address: 02:42:ac:11:65:43
|
||||
|
||||
restart: always
|
||||
mem_limit: 1000000000
|
||||
memswap_limit: 2000000000
|
||||
privileged: true
|
||||
|
||||
stdin_open: true
|
||||
tty: true
|
||||
restart: always
|
||||
|
||||
stdin_open: true
|
||||
tty: true
|
||||
read_only: true
|
||||
|
||||
volume_driver: mydriver
|
||||
```
|
||||
|
||||
## Compose documentation
|
||||
|
||||
- [User guide](/)
|
||||
- [Installing Compose](install.md)
|
||||
- [User guide](index.md)
|
||||
- [Get started with Django](django.md)
|
||||
- [Get started with Rails](rails.md)
|
||||
- [Get started with Wordpress](wordpress.md)
|
||||
- [Command line reference](cli.md)
|
||||
- [Compose environment variables](env.md)
|
||||
- [Compose command line completion](completion.md)
|
||||
|
||||
183
experimental/compose_swarm_networking.md
Normal file
183
experimental/compose_swarm_networking.md
Normal file
@@ -0,0 +1,183 @@
|
||||
# Experimental: Compose, Swarm and Multi-Host Networking
|
||||
|
||||
The [experimental build of Docker](https://github.com/docker/docker/tree/master/experimental) has an entirely new networking system, which enables secure communication between containers on multiple hosts. In combination with Docker Swarm and Docker Compose, you can now run multi-container apps on multi-host clusters with the same tooling and configuration format you use to develop them locally.
|
||||
|
||||
> Note: This functionality is in the experimental stage, and contains some hacks and workarounds which will be removed as it matures.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you start, you’ll need to install the experimental build of Docker, and the latest versions of Machine and Compose.
|
||||
|
||||
- To install the experimental Docker build on a Linux machine, follow the instructions [here](https://github.com/docker/docker/tree/master/experimental#install-docker-experimental).
|
||||
|
||||
- To install the experimental Docker build on a Mac, run these commands:
|
||||
|
||||
$ curl -L https://experimental.docker.com/builds/Darwin/x86_64/docker-latest > /usr/local/bin/docker
|
||||
$ chmod +x /usr/local/bin/docker
|
||||
|
||||
- To install Machine, follow the instructions [here](http://docs.docker.com/machine/).
|
||||
|
||||
- To install Compose, follow the instructions [here](http://docs.docker.com/compose/install/).
|
||||
|
||||
You’ll also need a [Docker Hub](https://hub.docker.com/account/signup/) account and a [Digital Ocean](https://www.digitalocean.com/) account.
|
||||
|
||||
## Set up a swarm with multi-host networking
|
||||
|
||||
Set the `DIGITALOCEAN_ACCESS_TOKEN` environment variable to a valid Digital Ocean API token, which you can generate in the [API panel](https://cloud.digitalocean.com/settings/applications).
|
||||
|
||||
DIGITALOCEAN_ACCESS_TOKEN=abc12345
|
||||
|
||||
Start a consul server:
|
||||
|
||||
docker-machine create -d digitalocean --engine-install-url https://experimental.docker.com consul
|
||||
docker $(docker-machine config consul) run -d -p 8500:8500 -h consul progrium/consul -server -bootstrap
|
||||
|
||||
(In a real world setting you’d set up a distributed consul, but that’s beyond the scope of this guide!)
|
||||
|
||||
Create a Swarm token:
|
||||
|
||||
SWARM_TOKEN=$(docker run swarm create)
|
||||
|
||||
Create a Swarm master:
|
||||
|
||||
docker-machine create -d digitalocean --swarm --swarm-master --swarm-discovery=token://$SWARM_TOKEN --engine-install-url="https://experimental.docker.com" --digitalocean-image "ubuntu-14-10-x64" --engine-opt=default-network=overlay:multihost --engine-label=com.docker.network.driver.overlay.bind_interface=eth0 --engine-opt=kv-store=consul:$(docker-machine ip consul):8500 swarm-0
|
||||
|
||||
Create a Swarm node:
|
||||
|
||||
docker-machine create -d digitalocean --swarm --swarm-discovery=token://$SWARM_TOKEN --engine-install-url="https://experimental.docker.com" --digitalocean-image "ubuntu-14-10-x64" --engine-opt=default-network=overlay:multihost --engine-label=com.docker.network.driver.overlay.bind_interface=eth0 --engine-opt=kv-store=consul:$(docker-machine ip consul):8500 --engine-label com.docker.network.driver.overlay.neighbor_ip=$(docker-machine ip swarm-0) swarm-1
|
||||
|
||||
You can create more Swarm nodes if you want - it’s best to give them sensible names (swarm-2, swarm-3, etc).
|
||||
|
||||
Finally, point Docker at your swarm:
|
||||
|
||||
eval "$(docker-machine env --swarm swarm-0)"
|
||||
|
||||
## Run containers and get them communicating
|
||||
|
||||
Now that you’ve got a swarm up and running, you can create containers on it just like a single Docker instance:
|
||||
|
||||
$ docker run busybox echo hello world
|
||||
hello world
|
||||
|
||||
If you run `docker ps -a`, you can see what node that container was started on by looking at its name (here it’s swarm-3):
|
||||
|
||||
$ docker ps -a
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
41f59749737b busybox "echo hello world" 15 seconds ago Exited (0) 13 seconds ago swarm-3/trusting_leakey
|
||||
|
||||
As you start more containers, they’ll be placed on different nodes across the cluster, thanks to Swarm’s default “spread” scheduling strategy.
|
||||
|
||||
Every container started on this swarm will use the “overlay:multihost” network by default, meaning they can all intercommunicate. Each container gets an IP address on that network, and an `/etc/hosts` file which will be updated on-the-fly with every other container’s IP address and name. That means that if you have a running container named ‘foo’, other containers can access it at the hostname ‘foo’.
|
||||
|
||||
Let’s verify that multi-host networking is functioning. Start a long-running container:
|
||||
|
||||
$ docker run -d --name long-running busybox top
|
||||
<container id>
|
||||
|
||||
If you start a new container and inspect its /etc/hosts file, you’ll see the long-running container in there:
|
||||
|
||||
$ docker run busybox cat /etc/hosts
|
||||
...
|
||||
172.21.0.6 long-running
|
||||
|
||||
Verify that connectivity works between containers:
|
||||
|
||||
$ docker run busybox ping long-running
|
||||
PING long-running (172.21.0.6): 56 data bytes
|
||||
64 bytes from 172.21.0.6: seq=0 ttl=64 time=7.975 ms
|
||||
64 bytes from 172.21.0.6: seq=1 ttl=64 time=1.378 ms
|
||||
64 bytes from 172.21.0.6: seq=2 ttl=64 time=1.348 ms
|
||||
^C
|
||||
--- long-running ping statistics ---
|
||||
3 packets transmitted, 3 packets received, 0% packet loss
|
||||
round-trip min/avg/max = 1.140/2.099/7.975 ms
|
||||
|
||||
## Run a Compose application
|
||||
|
||||
Here’s an example of a simple Python + Redis app using multi-host networking on a swarm.
|
||||
|
||||
Create a directory for the app:
|
||||
|
||||
$ mkdir composetest
|
||||
$ cd composetest
|
||||
|
||||
Inside this directory, create 2 files.
|
||||
|
||||
First, create `app.py` - a simple web app that uses the Flask framework and increments a value in Redis:
|
||||
|
||||
from flask import Flask
|
||||
from redis import Redis
|
||||
import os
|
||||
app = Flask(__name__)
|
||||
redis = Redis(host='composetest_redis_1', port=6379)
|
||||
|
||||
@app.route('/')
|
||||
def hello():
|
||||
redis.incr('hits')
|
||||
return 'Hello World! I have been seen %s times.' % redis.get('hits')
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(host="0.0.0.0", debug=True)
|
||||
|
||||
Note that we’re connecting to a host called `composetest_redis_1` - this is the name of the Redis container that Compose will start.
|
||||
|
||||
Second, create a Dockerfile for the app container:
|
||||
|
||||
FROM python:2.7
|
||||
RUN pip install flask redis
|
||||
ADD . /code
|
||||
WORKDIR /code
|
||||
CMD ["python", "app.py"]
|
||||
|
||||
Build the Docker image and push it to the Hub (you’ll need a Hub account). Replace `<username>` with your Docker Hub username:
|
||||
|
||||
$ docker build -t <username>/counter .
|
||||
$ docker push <username>/counter
|
||||
|
||||
Next, create a `docker-compose.yml`, which defines the configuration for the web and redis containers. Once again, replace `<username>` with your Hub username:
|
||||
|
||||
web:
|
||||
image: <username>/counter
|
||||
ports:
|
||||
- "80:5000"
|
||||
redis:
|
||||
image: redis
|
||||
|
||||
Now start the app:
|
||||
|
||||
$ docker-compose up -d
|
||||
Pulling web (username/counter:latest)...
|
||||
swarm-0: Pulling username/counter:latest... : downloaded
|
||||
swarm-2: Pulling username/counter:latest... : downloaded
|
||||
swarm-1: Pulling username/counter:latest... : downloaded
|
||||
swarm-3: Pulling username/counter:latest... : downloaded
|
||||
swarm-4: Pulling username/counter:latest... : downloaded
|
||||
Creating composetest_web_1...
|
||||
Pulling redis (redis:latest)...
|
||||
swarm-2: Pulling redis:latest... : downloaded
|
||||
swarm-1: Pulling redis:latest... : downloaded
|
||||
swarm-3: Pulling redis:latest... : downloaded
|
||||
swarm-4: Pulling redis:latest... : downloaded
|
||||
swarm-0: Pulling redis:latest... : downloaded
|
||||
Creating composetest_redis_1...
|
||||
|
||||
Swarm has created containers for both web and redis, and placed them on different nodes, which you can check with `docker ps`:
|
||||
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
92faad2135c9 redis "/entrypoint.sh redi 43 seconds ago Up 42 seconds swarm-2/composetest_redis_1
|
||||
adb809e5cdac username/counter "/bin/sh -c 'python 55 seconds ago Up 54 seconds 45.67.8.9:80->5000/tcp swarm-1/composetest_web_1
|
||||
|
||||
You can also see that the web container has exposed port 80 on its swarm node. If you curl that IP, you’ll get a response from the container:
|
||||
|
||||
$ curl http://45.67.8.9
|
||||
Hello World! I have been seen 1 times.
|
||||
|
||||
If you hit it repeatedly, the counter will increment, demonstrating that the web and redis container are communicating:
|
||||
|
||||
$ curl http://45.67.8.9
|
||||
Hello World! I have been seen 2 times.
|
||||
$ curl http://45.67.8.9
|
||||
Hello World! I have been seen 3 times.
|
||||
$ curl http://45.67.8.9
|
||||
Hello World! I have been seen 4 times.
|
||||
@@ -1,8 +1,8 @@
|
||||
PyYAML==3.10
|
||||
docker-py==1.0.0
|
||||
dockerpty==0.3.2
|
||||
docker-py==1.3.1
|
||||
dockerpty==0.3.4
|
||||
docopt==0.6.1
|
||||
requests==2.2.1
|
||||
requests==2.6.1
|
||||
six==1.7.3
|
||||
texttable==0.8.2
|
||||
websocket-client==0.11.0
|
||||
websocket-client==0.32.0
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ -z "$VALIDATE_UPSTREAM" ]; then
|
||||
# this is kind of an expensive check, so let's not do this twice if we
|
||||
# are running more than one validate bundlescript
|
||||
|
||||
VALIDATE_REPO='https://github.com/docker/fig.git'
|
||||
VALIDATE_BRANCH='master'
|
||||
|
||||
if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
|
||||
VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
|
||||
VALIDATE_BRANCH="${TRAVIS_BRANCH}"
|
||||
fi
|
||||
|
||||
VALIDATE_HEAD="$(git rev-parse --verify HEAD)"
|
||||
|
||||
git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
|
||||
VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
|
||||
|
||||
VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD"
|
||||
VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"
|
||||
|
||||
validate_diff() {
|
||||
if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
|
||||
git diff "$VALIDATE_COMMIT_DIFF" "$@"
|
||||
fi
|
||||
}
|
||||
validate_log() {
|
||||
if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
|
||||
git log "$VALIDATE_COMMIT_LOG" "$@"
|
||||
fi
|
||||
}
|
||||
fi
|
||||
@@ -7,4 +7,4 @@ chmod 777 `pwd`/dist
|
||||
|
||||
pyinstaller -F bin/docker-compose
|
||||
mv dist/docker-compose dist/docker-compose-Linux-x86_64
|
||||
dist/docker-compose-Linux-x86_64 --version
|
||||
dist/docker-compose-Linux-x86_64 version
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
PATH="/usr/local/bin:$PATH"
|
||||
|
||||
rm -rf venv
|
||||
virtualenv venv
|
||||
virtualenv -p /usr/local/bin/python venv
|
||||
venv/bin/pip install -r requirements.txt
|
||||
venv/bin/pip install -r requirements-dev.txt
|
||||
venv/bin/pip install .
|
||||
venv/bin/pyinstaller -F bin/docker-compose
|
||||
mv dist/docker-compose dist/docker-compose-Darwin-x86_64
|
||||
dist/docker-compose-Darwin-x86_64 --version
|
||||
dist/docker-compose-Darwin-x86_64 version
|
||||
|
||||
@@ -8,9 +8,6 @@
|
||||
|
||||
set -e
|
||||
|
||||
>&2 echo "Validating DCO"
|
||||
script/validate-dco
|
||||
|
||||
export DOCKER_VERSIONS=all
|
||||
. script/test-versions
|
||||
|
||||
|
||||
53
script/prepare-osx
Executable file
53
script/prepare-osx
Executable file
@@ -0,0 +1,53 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
python_version() {
|
||||
python -V 2>&1
|
||||
}
|
||||
|
||||
openssl_version() {
|
||||
python -c "import ssl; print ssl.OPENSSL_VERSION"
|
||||
}
|
||||
|
||||
desired_python_version="2.7.9"
|
||||
desired_python_brew_version="2.7.9"
|
||||
python_formula="https://raw.githubusercontent.com/Homebrew/homebrew/1681e193e4d91c9620c4901efd4458d9b6fcda8e/Library/Formula/python.rb"
|
||||
|
||||
desired_openssl_version="1.0.1j"
|
||||
desired_openssl_brew_version="1.0.1j_1"
|
||||
openssl_formula="https://raw.githubusercontent.com/Homebrew/homebrew/62fc2a1a65e83ba9dbb30b2e0a2b7355831c714b/Library/Formula/openssl.rb"
|
||||
|
||||
PATH="/usr/local/bin:$PATH"
|
||||
|
||||
if !(which brew); then
|
||||
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
|
||||
fi
|
||||
|
||||
brew update
|
||||
|
||||
if !(python_version | grep "$desired_python_version"); then
|
||||
if brew list | grep python; then
|
||||
brew unlink python
|
||||
fi
|
||||
|
||||
brew install "$python_formula"
|
||||
brew switch python "$desired_python_brew_version"
|
||||
fi
|
||||
|
||||
if !(openssl_version | grep "$desired_openssl_version"); then
|
||||
if brew list | grep openssl; then
|
||||
brew unlink openssl
|
||||
fi
|
||||
|
||||
brew install "$openssl_formula"
|
||||
brew switch openssl "$desired_openssl_brew_version"
|
||||
fi
|
||||
|
||||
echo "*** Using $(python_version)"
|
||||
echo "*** Using $(openssl_version)"
|
||||
|
||||
if !(which virtualenv); then
|
||||
pip install virtualenv
|
||||
fi
|
||||
|
||||
@@ -9,9 +9,9 @@ docker build -t "$TAG" .
|
||||
docker run \
|
||||
--rm \
|
||||
--volume="/var/run/docker.sock:/var/run/docker.sock" \
|
||||
--volume="$(pwd):/code" \
|
||||
-e DOCKER_VERSIONS \
|
||||
-e "TAG=$TAG" \
|
||||
-e "affinity:image==$TAG" \
|
||||
--entrypoint="script/test-versions" \
|
||||
"$TAG" \
|
||||
"$@"
|
||||
|
||||
@@ -5,10 +5,10 @@
|
||||
set -e
|
||||
|
||||
>&2 echo "Running lint checks"
|
||||
flake8 compose
|
||||
flake8 compose tests setup.py
|
||||
|
||||
if [ "$DOCKER_VERSIONS" == "" ]; then
|
||||
DOCKER_VERSIONS="1.5.0"
|
||||
DOCKER_VERSIONS="default"
|
||||
elif [ "$DOCKER_VERSIONS" == "all" ]; then
|
||||
DOCKER_VERSIONS="$ALL_DOCKER_VERSIONS"
|
||||
fi
|
||||
|
||||
@@ -1,58 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
source "$(dirname "$BASH_SOURCE")/.validate"
|
||||
|
||||
adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }')
|
||||
dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }')
|
||||
notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')"
|
||||
|
||||
: ${adds:=0}
|
||||
: ${dels:=0}
|
||||
|
||||
# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash"
|
||||
githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+'
|
||||
|
||||
# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work
|
||||
dcoPrefix='Signed-off-by:'
|
||||
dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$"
|
||||
|
||||
check_dco() {
|
||||
grep -qE "$dcoRegex"
|
||||
}
|
||||
|
||||
if [ $adds -eq 0 -a $dels -eq 0 ]; then
|
||||
echo '0 adds, 0 deletions; nothing to validate! :)'
|
||||
elif [ -z "$notDocs" -a $adds -le 1 -a $dels -le 1 ]; then
|
||||
echo 'Congratulations! DCO small-patch-exception material!'
|
||||
else
|
||||
commits=( $(validate_log --format='format:%H%n') )
|
||||
badCommits=()
|
||||
for commit in "${commits[@]}"; do
|
||||
if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then
|
||||
# no content (ie, Merge commit, etc)
|
||||
continue
|
||||
fi
|
||||
if ! git log -1 --format='format:%B' "$commit" | check_dco; then
|
||||
badCommits+=( "$commit" )
|
||||
fi
|
||||
done
|
||||
if [ ${#badCommits[@]} -eq 0 ]; then
|
||||
echo "Congratulations! All commits are properly signed with the DCO!"
|
||||
else
|
||||
{
|
||||
echo "These commits do not have a proper '$dcoPrefix' marker:"
|
||||
for commit in "${badCommits[@]}"; do
|
||||
echo " - $commit"
|
||||
done
|
||||
echo
|
||||
echo 'Please amend each commit to include a properly formatted DCO marker.'
|
||||
echo
|
||||
echo 'Visit the following URL for information about the Docker DCO:'
|
||||
echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work'
|
||||
echo
|
||||
} >&2
|
||||
false
|
||||
fi
|
||||
fi
|
||||
@@ -1,18 +1,23 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ "$DOCKER_VERSION" == "" ]; then
|
||||
DOCKER_VERSION="1.5.0"
|
||||
if [ "$DOCKER_VERSION" != "" ] && [ "$DOCKER_VERSION" != "default" ]; then
|
||||
ln -fs "/usr/local/bin/docker-$DOCKER_VERSION" "/usr/local/bin/docker"
|
||||
fi
|
||||
|
||||
ln -fs "/usr/local/bin/docker-$DOCKER_VERSION" "/usr/local/bin/docker"
|
||||
|
||||
# If a pidfile is still around (for example after a container restart),
|
||||
# delete it so that docker can start.
|
||||
rm -rf /var/run/docker.pid
|
||||
docker -d $DOCKER_DAEMON_ARGS &>/var/log/docker.log &
|
||||
docker -d --storage-driver="overlay" &>/var/log/docker.log &
|
||||
docker_pid=$!
|
||||
|
||||
>&2 echo "Waiting for Docker to start..."
|
||||
while ! docker ps &>/dev/null; do
|
||||
if ! kill -0 "$docker_pid" &>/dev/null; then
|
||||
>&2 echo "Docker failed to start"
|
||||
cat /var/log/docker.log
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sleep 1
|
||||
done
|
||||
|
||||
|
||||
11
setup.py
11
setup.py
@@ -27,14 +27,15 @@ def find_version(*file_paths):
|
||||
install_requires = [
|
||||
'docopt >= 0.6.1, < 0.7',
|
||||
'PyYAML >= 3.10, < 4',
|
||||
'requests >= 2.2.1, < 2.6',
|
||||
'requests >= 2.6.1, < 2.7',
|
||||
'texttable >= 0.8.1, < 0.9',
|
||||
'websocket-client >= 0.11.0, < 1.0',
|
||||
'docker-py >= 1.0.0, < 1.2',
|
||||
'dockerpty >= 0.3.2, < 0.4',
|
||||
'websocket-client >= 0.32.0, < 1.0',
|
||||
'docker-py >= 1.3.1, < 1.4',
|
||||
'dockerpty >= 0.3.4, < 0.4',
|
||||
'six >= 1.3.0, < 2',
|
||||
]
|
||||
|
||||
|
||||
tests_require = [
|
||||
'mock >= 1.0.1',
|
||||
'nose',
|
||||
@@ -54,7 +55,7 @@ setup(
|
||||
url='https://www.docker.com/',
|
||||
author='Docker, Inc.',
|
||||
license='Apache License 2.0',
|
||||
packages=find_packages(exclude=[ 'tests.*', 'tests' ]),
|
||||
packages=find_packages(exclude=['tests.*', 'tests']),
|
||||
include_package_data=True,
|
||||
test_suite='nose.collector',
|
||||
install_requires=install_requires,
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
import sys
|
||||
|
||||
if sys.version_info >= (2,7):
|
||||
import unittest
|
||||
else:
|
||||
import unittest2 as unittest
|
||||
import mock # noqa
|
||||
|
||||
if sys.version_info >= (2, 7):
|
||||
import unittest # NOQA
|
||||
else:
|
||||
import unittest2 as unittest # NOQA
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
simple:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
command: top
|
||||
another:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
command: top
|
||||
|
||||
1
tests/fixtures/build-ctx/Dockerfile
vendored
1
tests/fixtures/build-ctx/Dockerfile
vendored
@@ -1,2 +1,3 @@
|
||||
FROM busybox:latest
|
||||
LABEL com.docker.compose.test_image=true
|
||||
CMD echo "success"
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
FROM busybox
|
||||
FROM busybox:latest
|
||||
LABEL com.docker.compose.test_image=true
|
||||
VOLUME /data
|
||||
CMD sleep 3000
|
||||
CMD top
|
||||
|
||||
@@ -1,2 +1,3 @@
|
||||
FROM busybox:latest
|
||||
LABEL com.docker.compose.test_image=true
|
||||
ENTRYPOINT echo "From prebuilt entrypoint"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
service:
|
||||
image: busybox:latest
|
||||
command: sleep 5
|
||||
command: top
|
||||
|
||||
environment:
|
||||
foo: bar
|
||||
|
||||
4
tests/fixtures/extends/docker-compose.yml
vendored
4
tests/fixtures/extends/docker-compose.yml
vendored
@@ -2,7 +2,7 @@ myweb:
|
||||
extends:
|
||||
file: common.yml
|
||||
service: web
|
||||
command: sleep 300
|
||||
command: top
|
||||
links:
|
||||
- "mydb:db"
|
||||
environment:
|
||||
@@ -13,4 +13,4 @@ myweb:
|
||||
BAZ: "2"
|
||||
mydb:
|
||||
image: busybox
|
||||
command: sleep 300
|
||||
command: top
|
||||
|
||||
9
tests/fixtures/extends/no-file-specified.yml
vendored
Normal file
9
tests/fixtures/extends/no-file-specified.yml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
myweb:
|
||||
extends:
|
||||
service: web
|
||||
environment:
|
||||
- "BAR=1"
|
||||
web:
|
||||
image: busybox
|
||||
environment:
|
||||
- "BAZ=3"
|
||||
6
tests/fixtures/extends/nonexistent-path-base.yml
vendored
Normal file
6
tests/fixtures/extends/nonexistent-path-base.yml
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
dnebase:
|
||||
build: nonexistent.path
|
||||
command: /bin/true
|
||||
environment:
|
||||
- FOO=1
|
||||
- BAR=1
|
||||
8
tests/fixtures/extends/nonexistent-path-child.yml
vendored
Normal file
8
tests/fixtures/extends/nonexistent-path-child.yml
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
dnechild:
|
||||
extends:
|
||||
file: nonexistent-path-base.yml
|
||||
service: dnebase
|
||||
image: busybox
|
||||
command: /bin/true
|
||||
environment:
|
||||
- BAR=2
|
||||
16
tests/fixtures/extends/specify-file-as-self.yml
vendored
Normal file
16
tests/fixtures/extends/specify-file-as-self.yml
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
myweb:
|
||||
extends:
|
||||
file: specify-file-as-self.yml
|
||||
service: web
|
||||
environment:
|
||||
- "BAR=1"
|
||||
web:
|
||||
extends:
|
||||
file: specify-file-as-self.yml
|
||||
service: otherweb
|
||||
image: busybox
|
||||
environment:
|
||||
- "BAZ=3"
|
||||
otherweb:
|
||||
environment:
|
||||
- "YEP=1"
|
||||
@@ -1,11 +1,11 @@
|
||||
db:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
command: top
|
||||
web:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
command: top
|
||||
links:
|
||||
- db:db
|
||||
console:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
command: top
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
definedinyamlnotyml:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
command: top
|
||||
@@ -1,3 +1,3 @@
|
||||
yetanother:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
command: top
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
simple:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
command: top
|
||||
another:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
command: top
|
||||
|
||||
6
tests/fixtures/ports-composefile-scale/docker-compose.yml
vendored
Normal file
6
tests/fixtures/ports-composefile-scale/docker-compose.yml
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
|
||||
simple:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
ports:
|
||||
- '3000'
|
||||
@@ -1,7 +1,7 @@
|
||||
|
||||
simple:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
command: top
|
||||
ports:
|
||||
- '3000'
|
||||
- '49152:3001'
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
simple:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
command: top
|
||||
another:
|
||||
image: busybox:latest
|
||||
command: /bin/sleep 300
|
||||
command: top
|
||||
|
||||
1
tests/fixtures/simple-dockerfile/Dockerfile
vendored
1
tests/fixtures/simple-dockerfile/Dockerfile
vendored
@@ -1,2 +1,3 @@
|
||||
FROM busybox:latest
|
||||
LABEL com.docker.compose.test_image=true
|
||||
CMD echo "success"
|
||||
|
||||
@@ -1,12 +1,16 @@
|
||||
from __future__ import absolute_import
|
||||
from operator import attrgetter
|
||||
import sys
|
||||
import os
|
||||
import shlex
|
||||
|
||||
from six import StringIO
|
||||
from mock import patch
|
||||
|
||||
from .testcases import DockerClientTestCase
|
||||
from compose.cli.main import TopLevelCommand
|
||||
from compose.cli.errors import UserError
|
||||
from compose.project import NoSuchService
|
||||
|
||||
|
||||
class CLITestCase(DockerClientTestCase):
|
||||
@@ -21,6 +25,9 @@ class CLITestCase(DockerClientTestCase):
|
||||
sys.exit = self.old_sys_exit
|
||||
self.project.kill()
|
||||
self.project.remove_stopped()
|
||||
for container in self.project.containers(stopped=True, one_off=True):
|
||||
container.remove(force=True)
|
||||
super(CLITestCase, self).tearDown()
|
||||
|
||||
@property
|
||||
def project(self):
|
||||
@@ -30,7 +37,7 @@ class CLITestCase(DockerClientTestCase):
|
||||
if hasattr(self, '_project'):
|
||||
return self._project
|
||||
|
||||
return self.command.get_project(self.command.get_config_path())
|
||||
return self.command.get_project()
|
||||
|
||||
def test_help(self):
|
||||
old_base_dir = self.command.base_dir
|
||||
@@ -62,6 +69,10 @@ class CLITestCase(DockerClientTestCase):
|
||||
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def test_ps_alternate_composefile(self, mock_stdout):
|
||||
config_path = os.path.abspath(
|
||||
'tests/fixtures/multiple-composefiles/compose2.yml')
|
||||
self._project = self.command.get_project(config_path)
|
||||
|
||||
self.command.base_dir = 'tests/fixtures/multiple-composefiles'
|
||||
self.command.dispatch(['-f', 'compose2.yml', 'up', '-d'], None)
|
||||
self.command.dispatch(['-f', 'compose2.yml', 'ps'], None)
|
||||
@@ -126,21 +137,21 @@ class CLITestCase(DockerClientTestCase):
|
||||
self.assertEqual(len(db.containers()), 0)
|
||||
self.assertEqual(len(console.containers()), 0)
|
||||
|
||||
def test_up_with_recreate(self):
|
||||
def test_up_with_force_recreate(self):
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
service = self.project.get_service('simple')
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
|
||||
old_ids = [c.id for c in service.containers()]
|
||||
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
self.command.dispatch(['up', '-d', '--force-recreate'], None)
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
|
||||
new_ids = [c.id for c in service.containers()]
|
||||
|
||||
self.assertNotEqual(old_ids, new_ids)
|
||||
|
||||
def test_up_with_keep_old(self):
|
||||
def test_up_with_no_recreate(self):
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
service = self.project.get_service('simple')
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
@@ -154,6 +165,23 @@ class CLITestCase(DockerClientTestCase):
|
||||
|
||||
self.assertEqual(old_ids, new_ids)
|
||||
|
||||
def test_up_with_force_recreate_and_no_recreate(self):
|
||||
with self.assertRaises(UserError):
|
||||
self.command.dispatch(['up', '-d', '--force-recreate', '--no-recreate'], None)
|
||||
|
||||
def test_up_with_timeout(self):
|
||||
self.command.dispatch(['up', '-d', '-t', '1'], None)
|
||||
service = self.project.get_service('simple')
|
||||
another = self.project.get_service('another')
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
self.assertEqual(len(another.containers()), 1)
|
||||
|
||||
# Ensure containers don't have stdin and stdout connected in -d mode
|
||||
config = service.containers()[0].inspect()['Config']
|
||||
self.assertFalse(config['AttachStderr'])
|
||||
self.assertFalse(config['AttachStdout'])
|
||||
self.assertFalse(config['AttachStdin'])
|
||||
|
||||
@patch('dockerpty.start')
|
||||
def test_run_service_without_links(self, mock_stdout):
|
||||
self.command.base_dir = 'tests/fixtures/links-composefile'
|
||||
@@ -200,13 +228,10 @@ class CLITestCase(DockerClientTestCase):
|
||||
self.assertEqual(old_ids, new_ids)
|
||||
|
||||
@patch('dockerpty.start')
|
||||
def test_run_without_command(self, __):
|
||||
def test_run_without_command(self, _):
|
||||
self.command.base_dir = 'tests/fixtures/commands-composefile'
|
||||
self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
|
||||
|
||||
for c in self.project.containers(stopped=True, one_off=True):
|
||||
c.remove()
|
||||
|
||||
self.command.dispatch(['run', 'implicit'], None)
|
||||
service = self.project.get_service('implicit')
|
||||
containers = service.containers(stopped=True, one_off=True)
|
||||
@@ -234,8 +259,8 @@ class CLITestCase(DockerClientTestCase):
|
||||
service = self.project.get_service(name)
|
||||
container = service.containers(stopped=True, one_off=True)[0]
|
||||
self.assertEqual(
|
||||
container.human_readable_command,
|
||||
u'/bin/echo helloworld'
|
||||
shlex.split(container.human_readable_command),
|
||||
[u'/bin/echo', u'helloworld'],
|
||||
)
|
||||
|
||||
@patch('dockerpty.start')
|
||||
@@ -332,6 +357,21 @@ class CLITestCase(DockerClientTestCase):
|
||||
self.command.dispatch(['rm', '-f'], None)
|
||||
self.assertEqual(len(service.containers(stopped=True)), 0)
|
||||
|
||||
def test_stop(self):
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
service = self.project.get_service('simple')
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
self.assertTrue(service.containers()[0].is_running)
|
||||
|
||||
self.command.dispatch(['stop', '-t', '1'], None)
|
||||
|
||||
self.assertEqual(len(service.containers(stopped=True)), 1)
|
||||
self.assertFalse(service.containers(stopped=True)[0].is_running)
|
||||
|
||||
def test_logs_invalid_service_name(self):
|
||||
with self.assertRaises(NoSuchService):
|
||||
self.command.dispatch(['logs', 'madeupname'], None)
|
||||
|
||||
def test_kill(self):
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
service = self.project.get_service('simple')
|
||||
@@ -343,22 +383,22 @@ class CLITestCase(DockerClientTestCase):
|
||||
self.assertEqual(len(service.containers(stopped=True)), 1)
|
||||
self.assertFalse(service.containers(stopped=True)[0].is_running)
|
||||
|
||||
def test_kill_signal_sigint(self):
|
||||
def test_kill_signal_sigstop(self):
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
service = self.project.get_service('simple')
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
self.assertTrue(service.containers()[0].is_running)
|
||||
|
||||
self.command.dispatch(['kill', '-s', 'SIGINT'], None)
|
||||
self.command.dispatch(['kill', '-s', 'SIGSTOP'], None)
|
||||
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
# The container is still running. It has been only interrupted
|
||||
# The container is still running. It has only been paused
|
||||
self.assertTrue(service.containers()[0].is_running)
|
||||
|
||||
def test_kill_interrupted_service(self):
|
||||
def test_kill_stopped_service(self):
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
service = self.project.get_service('simple')
|
||||
self.command.dispatch(['kill', '-s', 'SIGINT'], None)
|
||||
self.command.dispatch(['kill', '-s', 'SIGSTOP'], None)
|
||||
self.assertTrue(service.containers()[0].is_running)
|
||||
|
||||
self.command.dispatch(['kill', '-s', 'SIGKILL'], None)
|
||||
@@ -371,7 +411,7 @@ class CLITestCase(DockerClientTestCase):
|
||||
container = service.create_container()
|
||||
service.start_container(container)
|
||||
started_at = container.dictionary['State']['StartedAt']
|
||||
self.command.dispatch(['restart'], None)
|
||||
self.command.dispatch(['restart', '-t', '1'], None)
|
||||
container.inspect()
|
||||
self.assertNotEqual(
|
||||
container.dictionary['State']['FinishedAt'],
|
||||
@@ -405,7 +445,6 @@ class CLITestCase(DockerClientTestCase):
|
||||
self.assertEqual(len(project.get_service('another').containers()), 0)
|
||||
|
||||
def test_port(self):
|
||||
|
||||
self.command.base_dir = 'tests/fixtures/ports-composefile'
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
container = self.project.get_service('simple').get_container()
|
||||
@@ -419,6 +458,27 @@ class CLITestCase(DockerClientTestCase):
|
||||
self.assertEqual(get_port(3001), "0.0.0.0:49152")
|
||||
self.assertEqual(get_port(3002), "")
|
||||
|
||||
def test_port_with_scale(self):
|
||||
|
||||
self.command.base_dir = 'tests/fixtures/ports-composefile-scale'
|
||||
self.command.dispatch(['scale', 'simple=2'], None)
|
||||
containers = sorted(
|
||||
self.project.containers(service_names=['simple']),
|
||||
key=attrgetter('name'))
|
||||
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def get_port(number, mock_stdout, index=None):
|
||||
if index is None:
|
||||
self.command.dispatch(['port', 'simple', str(number)], None)
|
||||
else:
|
||||
self.command.dispatch(['port', '--index=' + str(index), 'simple', str(number)], None)
|
||||
return mock_stdout.getvalue().rstrip()
|
||||
|
||||
self.assertEqual(get_port(3000), containers[0].get_local_port(3000))
|
||||
self.assertEqual(get_port(3000, index=1), containers[0].get_local_port(3000))
|
||||
self.assertEqual(get_port(3000, index=2), containers[1].get_local_port(3000))
|
||||
self.assertEqual(get_port(3002), "")
|
||||
|
||||
def test_env_file_relative_to_compose_file(self):
|
||||
config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml')
|
||||
self.command.dispatch(['-f', config_path, 'up', '-d'], None)
|
||||
|
||||
218
tests/integration/legacy_test.py
Normal file
218
tests/integration/legacy_test.py
Normal file
@@ -0,0 +1,218 @@
|
||||
import unittest
|
||||
from mock import Mock
|
||||
|
||||
from docker.errors import APIError
|
||||
|
||||
from compose import legacy
|
||||
from compose.project import Project
|
||||
from .testcases import DockerClientTestCase
|
||||
|
||||
|
||||
class UtilitiesTestCase(unittest.TestCase):
|
||||
def test_has_container(self):
|
||||
self.assertTrue(
|
||||
legacy.has_container("composetest", "web", "composetest_web_1", one_off=False),
|
||||
)
|
||||
self.assertFalse(
|
||||
legacy.has_container("composetest", "web", "composetest_web_run_1", one_off=False),
|
||||
)
|
||||
|
||||
def test_has_container_one_off(self):
|
||||
self.assertFalse(
|
||||
legacy.has_container("composetest", "web", "composetest_web_1", one_off=True),
|
||||
)
|
||||
self.assertTrue(
|
||||
legacy.has_container("composetest", "web", "composetest_web_run_1", one_off=True),
|
||||
)
|
||||
|
||||
def test_has_container_different_project(self):
|
||||
self.assertFalse(
|
||||
legacy.has_container("composetest", "web", "otherapp_web_1", one_off=False),
|
||||
)
|
||||
self.assertFalse(
|
||||
legacy.has_container("composetest", "web", "otherapp_web_run_1", one_off=True),
|
||||
)
|
||||
|
||||
def test_has_container_different_service(self):
|
||||
self.assertFalse(
|
||||
legacy.has_container("composetest", "web", "composetest_db_1", one_off=False),
|
||||
)
|
||||
self.assertFalse(
|
||||
legacy.has_container("composetest", "web", "composetest_db_run_1", one_off=True),
|
||||
)
|
||||
|
||||
def test_is_valid_name(self):
|
||||
self.assertTrue(
|
||||
legacy.is_valid_name("composetest_web_1", one_off=False),
|
||||
)
|
||||
self.assertFalse(
|
||||
legacy.is_valid_name("composetest_web_run_1", one_off=False),
|
||||
)
|
||||
|
||||
def test_is_valid_name_one_off(self):
|
||||
self.assertFalse(
|
||||
legacy.is_valid_name("composetest_web_1", one_off=True),
|
||||
)
|
||||
self.assertTrue(
|
||||
legacy.is_valid_name("composetest_web_run_1", one_off=True),
|
||||
)
|
||||
|
||||
def test_is_valid_name_invalid(self):
|
||||
self.assertFalse(
|
||||
legacy.is_valid_name("foo"),
|
||||
)
|
||||
self.assertFalse(
|
||||
legacy.is_valid_name("composetest_web_lol_1", one_off=True),
|
||||
)
|
||||
|
||||
def test_get_legacy_containers(self):
|
||||
client = Mock()
|
||||
client.containers.return_value = [
|
||||
{
|
||||
"Id": "abc123",
|
||||
"Image": "def456",
|
||||
"Name": "composetest_web_1",
|
||||
"Labels": None,
|
||||
},
|
||||
{
|
||||
"Id": "ghi789",
|
||||
"Image": "def456",
|
||||
"Name": None,
|
||||
"Labels": None,
|
||||
},
|
||||
{
|
||||
"Id": "jkl012",
|
||||
"Image": "def456",
|
||||
"Labels": None,
|
||||
},
|
||||
]
|
||||
|
||||
containers = legacy.get_legacy_containers(client, "composetest", ["web"])
|
||||
|
||||
self.assertEqual(len(containers), 1)
|
||||
self.assertEqual(containers[0].id, 'abc123')
|
||||
|
||||
|
||||
class LegacyTestCase(DockerClientTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(LegacyTestCase, self).setUp()
|
||||
self.containers = []
|
||||
|
||||
db = self.create_service('db')
|
||||
web = self.create_service('web', links=[(db, 'db')])
|
||||
nginx = self.create_service('nginx', links=[(web, 'web')])
|
||||
|
||||
self.services = [db, web, nginx]
|
||||
self.project = Project('composetest', self.services, self.client)
|
||||
|
||||
# Create a legacy container for each service
|
||||
for service in self.services:
|
||||
service.ensure_image_exists()
|
||||
container = self.client.create_container(
|
||||
name='{}_{}_1'.format(self.project.name, service.name),
|
||||
**service.options
|
||||
)
|
||||
self.client.start(container)
|
||||
self.containers.append(container)
|
||||
|
||||
# Create a single one-off legacy container
|
||||
self.containers.append(self.client.create_container(
|
||||
name='{}_{}_run_1'.format(self.project.name, db.name),
|
||||
**self.services[0].options
|
||||
))
|
||||
|
||||
def tearDown(self):
|
||||
super(LegacyTestCase, self).tearDown()
|
||||
for container in self.containers:
|
||||
try:
|
||||
self.client.kill(container)
|
||||
except APIError:
|
||||
pass
|
||||
try:
|
||||
self.client.remove_container(container)
|
||||
except APIError:
|
||||
pass
|
||||
|
||||
def get_legacy_containers(self, **kwargs):
|
||||
return legacy.get_legacy_containers(
|
||||
self.client,
|
||||
self.project.name,
|
||||
[s.name for s in self.services],
|
||||
**kwargs
|
||||
)
|
||||
|
||||
def test_get_legacy_container_names(self):
|
||||
self.assertEqual(len(self.get_legacy_containers()), len(self.services))
|
||||
|
||||
def test_get_legacy_container_names_one_off(self):
|
||||
self.assertEqual(len(self.get_legacy_containers(one_off=True)), 1)
|
||||
|
||||
def test_migration_to_labels(self):
|
||||
# Trying to get the container list raises an exception
|
||||
|
||||
with self.assertRaises(legacy.LegacyContainersError) as cm:
|
||||
self.project.containers(stopped=True)
|
||||
|
||||
self.assertEqual(
|
||||
set(cm.exception.names),
|
||||
set(['composetest_db_1', 'composetest_web_1', 'composetest_nginx_1']),
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
set(cm.exception.one_off_names),
|
||||
set(['composetest_db_run_1']),
|
||||
)
|
||||
|
||||
# Migrate the containers
|
||||
|
||||
legacy.migrate_project_to_labels(self.project)
|
||||
|
||||
# Getting the list no longer raises an exception
|
||||
|
||||
containers = self.project.containers(stopped=True)
|
||||
self.assertEqual(len(containers), len(self.services))
|
||||
|
||||
def test_migration_one_off(self):
|
||||
# We've already migrated
|
||||
|
||||
legacy.migrate_project_to_labels(self.project)
|
||||
|
||||
# Trying to create a one-off container results in a Docker API error
|
||||
|
||||
with self.assertRaises(APIError) as cm:
|
||||
self.project.get_service('db').create_container(one_off=True)
|
||||
|
||||
# Checking for legacy one-off containers raises an exception
|
||||
|
||||
with self.assertRaises(legacy.LegacyOneOffContainersError) as cm:
|
||||
legacy.check_for_legacy_containers(
|
||||
self.client,
|
||||
self.project.name,
|
||||
['db'],
|
||||
allow_one_off=False,
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
set(cm.exception.one_off_names),
|
||||
set(['composetest_db_run_1']),
|
||||
)
|
||||
|
||||
# Remove the old one-off container
|
||||
|
||||
c = self.client.inspect_container('composetest_db_run_1')
|
||||
self.client.remove_container(c)
|
||||
|
||||
# Checking no longer raises an exception
|
||||
|
||||
legacy.check_for_legacy_containers(
|
||||
self.client,
|
||||
self.project.name,
|
||||
['db'],
|
||||
allow_one_off=False,
|
||||
)
|
||||
|
||||
# Creating a one-off container no longer results in an API error
|
||||
|
||||
self.project.get_service('db').create_container(one_off=True)
|
||||
self.assertIsInstance(self.client.inspect_container('composetest_db_run_1'), dict)
|
||||
@@ -1,13 +1,57 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from compose import config
|
||||
from compose.const import LABEL_PROJECT
|
||||
from compose.project import Project
|
||||
from compose.container import Container
|
||||
from .testcases import DockerClientTestCase
|
||||
|
||||
|
||||
def build_service_dicts(service_config):
|
||||
return config.load(config.ConfigDetails(service_config, 'working_dir', None))
|
||||
|
||||
|
||||
class ProjectTest(DockerClientTestCase):
|
||||
|
||||
def test_containers(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db')
|
||||
project = Project('composetest', [web, db], self.client)
|
||||
|
||||
project.up()
|
||||
|
||||
containers = project.containers()
|
||||
self.assertEqual(len(containers), 2)
|
||||
|
||||
def test_containers_with_service_names(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db')
|
||||
project = Project('composetest', [web, db], self.client)
|
||||
|
||||
project.up()
|
||||
|
||||
containers = project.containers(['web'])
|
||||
self.assertEqual(
|
||||
[c.name for c in containers],
|
||||
['composetest_web_1'])
|
||||
|
||||
def test_containers_with_extra_service(self):
|
||||
web = self.create_service('web')
|
||||
web_1 = web.create_container()
|
||||
|
||||
db = self.create_service('db')
|
||||
db_1 = db.create_container()
|
||||
|
||||
self.create_service('extra').create_container()
|
||||
|
||||
project = Project('composetest', [web, db], self.client)
|
||||
self.assertEqual(
|
||||
set(project.containers(stopped=True)),
|
||||
set([web_1, db_1]),
|
||||
)
|
||||
|
||||
def test_volumes_from_service(self):
|
||||
service_dicts = config.from_dictionary({
|
||||
service_dicts = build_service_dicts({
|
||||
'data': {
|
||||
'image': 'busybox:latest',
|
||||
'volumes': ['/var/data'],
|
||||
@@ -16,7 +60,7 @@ class ProjectTest(DockerClientTestCase):
|
||||
'image': 'busybox:latest',
|
||||
'volumes_from': ['data'],
|
||||
},
|
||||
}, working_dir='.')
|
||||
})
|
||||
project = Project.from_dicts(
|
||||
name='composetest',
|
||||
service_dicts=service_dicts,
|
||||
@@ -32,10 +76,11 @@ class ProjectTest(DockerClientTestCase):
|
||||
image='busybox:latest',
|
||||
volumes=['/var/data'],
|
||||
name='composetest_data_container',
|
||||
labels={LABEL_PROJECT: 'composetest'},
|
||||
)
|
||||
project = Project.from_dicts(
|
||||
name='composetest',
|
||||
service_dicts=config.from_dictionary({
|
||||
service_dicts=build_service_dicts({
|
||||
'db': {
|
||||
'image': 'busybox:latest',
|
||||
'volumes_from': ['composetest_data_container'],
|
||||
@@ -46,21 +91,18 @@ class ProjectTest(DockerClientTestCase):
|
||||
db = project.get_service('db')
|
||||
self.assertEqual(db.volumes_from, [data_container])
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_net_from_service(self):
|
||||
project = Project.from_dicts(
|
||||
name='composetest',
|
||||
service_dicts=config.from_dictionary({
|
||||
service_dicts=build_service_dicts({
|
||||
'net': {
|
||||
'image': 'busybox:latest',
|
||||
'command': ["/bin/sleep", "300"]
|
||||
'command': ["top"]
|
||||
},
|
||||
'web': {
|
||||
'image': 'busybox:latest',
|
||||
'net': 'container:net',
|
||||
'command': ["/bin/sleep", "300"]
|
||||
'command': ["top"]
|
||||
},
|
||||
}),
|
||||
client=self.client,
|
||||
@@ -70,23 +112,21 @@ class ProjectTest(DockerClientTestCase):
|
||||
|
||||
web = project.get_service('web')
|
||||
net = project.get_service('net')
|
||||
self.assertEqual(web._get_net(), 'container:'+net.containers()[0].id)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
self.assertEqual(web.net.mode, 'container:' + net.containers()[0].id)
|
||||
|
||||
def test_net_from_container(self):
|
||||
net_container = Container.create(
|
||||
self.client,
|
||||
image='busybox:latest',
|
||||
name='composetest_net_container',
|
||||
command='/bin/sleep 300'
|
||||
command='top',
|
||||
labels={LABEL_PROJECT: 'composetest'},
|
||||
)
|
||||
net_container.start()
|
||||
|
||||
project = Project.from_dicts(
|
||||
name='composetest',
|
||||
service_dicts=config.from_dictionary({
|
||||
service_dicts=build_service_dicts({
|
||||
'web': {
|
||||
'image': 'busybox:latest',
|
||||
'net': 'container:composetest_net_container'
|
||||
@@ -98,10 +138,7 @@ class ProjectTest(DockerClientTestCase):
|
||||
project.up()
|
||||
|
||||
web = project.get_service('web')
|
||||
self.assertEqual(web._get_net(), 'container:'+net_container.id)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
self.assertEqual(web.net.mode, 'container:' + net_container.id)
|
||||
|
||||
def test_start_stop_kill_remove(self):
|
||||
web = self.create_service('web')
|
||||
@@ -148,10 +185,19 @@ class ProjectTest(DockerClientTestCase):
|
||||
self.assertEqual(len(db.containers()), 1)
|
||||
self.assertEqual(len(web.containers()), 0)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
def test_project_up_starts_uncreated_services(self):
|
||||
db = self.create_service('db')
|
||||
web = self.create_service('web', links=[(db, 'db')])
|
||||
project = Project('composetest', [db, web], self.client)
|
||||
project.up(['db'])
|
||||
self.assertEqual(len(project.containers()), 1)
|
||||
|
||||
def test_project_up_recreates_containers(self):
|
||||
project.up()
|
||||
self.assertEqual(len(project.containers()), 2)
|
||||
self.assertEqual(len(db.containers()), 1)
|
||||
self.assertEqual(len(web.containers()), 1)
|
||||
|
||||
def test_recreate_preserves_volumes(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db', volumes=['/etc'])
|
||||
project = Project('composetest', [web, db], self.client)
|
||||
@@ -163,16 +209,13 @@ class ProjectTest(DockerClientTestCase):
|
||||
old_db_id = project.containers()[0].id
|
||||
db_volume_path = project.containers()[0].get('Volumes./etc')
|
||||
|
||||
project.up()
|
||||
project.up(force_recreate=True)
|
||||
self.assertEqual(len(project.containers()), 2)
|
||||
|
||||
db_container = [c for c in project.containers() if 'db' in c.name][0]
|
||||
self.assertNotEqual(db_container.id, old_db_id)
|
||||
self.assertEqual(db_container.get('Volumes./etc'), db_volume_path)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_project_up_with_no_recreate_running(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db', volumes=['/var/db'])
|
||||
@@ -185,7 +228,7 @@ class ProjectTest(DockerClientTestCase):
|
||||
old_db_id = project.containers()[0].id
|
||||
db_volume_path = project.containers()[0].inspect()['Volumes']['/var/db']
|
||||
|
||||
project.up(recreate=False)
|
||||
project.up(allow_recreate=False)
|
||||
self.assertEqual(len(project.containers()), 2)
|
||||
|
||||
db_container = [c for c in project.containers() if 'db' in c.name][0]
|
||||
@@ -193,9 +236,6 @@ class ProjectTest(DockerClientTestCase):
|
||||
self.assertEqual(db_container.inspect()['Volumes']['/var/db'],
|
||||
db_volume_path)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_project_up_with_no_recreate_stopped(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db', volumes=['/var/db'])
|
||||
@@ -204,7 +244,7 @@ class ProjectTest(DockerClientTestCase):
|
||||
self.assertEqual(len(project.containers()), 0)
|
||||
|
||||
project.up(['db'])
|
||||
project.stop()
|
||||
project.kill()
|
||||
|
||||
old_containers = project.containers(stopped=True)
|
||||
|
||||
@@ -212,19 +252,17 @@ class ProjectTest(DockerClientTestCase):
|
||||
old_db_id = old_containers[0].id
|
||||
db_volume_path = old_containers[0].inspect()['Volumes']['/var/db']
|
||||
|
||||
project.up(recreate=False)
|
||||
project.up(allow_recreate=False)
|
||||
|
||||
new_containers = project.containers(stopped=True)
|
||||
self.assertEqual(len(new_containers), 2)
|
||||
self.assertEqual([c.is_running for c in new_containers], [True, True])
|
||||
|
||||
db_container = [c for c in new_containers if 'db' in c.name][0]
|
||||
self.assertEqual(db_container.id, old_db_id)
|
||||
self.assertEqual(db_container.inspect()['Volumes']['/var/db'],
|
||||
db_volume_path)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_project_up_without_all_services(self):
|
||||
console = self.create_service('console')
|
||||
db = self.create_service('db')
|
||||
@@ -237,9 +275,6 @@ class ProjectTest(DockerClientTestCase):
|
||||
self.assertEqual(len(db.containers()), 1)
|
||||
self.assertEqual(len(console.containers()), 1)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_project_up_starts_links(self):
|
||||
console = self.create_service('console')
|
||||
db = self.create_service('db', volumes=['/var/db'])
|
||||
@@ -255,29 +290,26 @@ class ProjectTest(DockerClientTestCase):
|
||||
self.assertEqual(len(db.containers()), 1)
|
||||
self.assertEqual(len(console.containers()), 0)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_project_up_starts_depends(self):
|
||||
project = Project.from_dicts(
|
||||
name='composetest',
|
||||
service_dicts=config.from_dictionary({
|
||||
service_dicts=build_service_dicts({
|
||||
'console': {
|
||||
'image': 'busybox:latest',
|
||||
'command': ["/bin/sleep", "300"],
|
||||
'command': ["top"],
|
||||
},
|
||||
'data' : {
|
||||
'data': {
|
||||
'image': 'busybox:latest',
|
||||
'command': ["/bin/sleep", "300"]
|
||||
'command': ["top"]
|
||||
},
|
||||
'db': {
|
||||
'image': 'busybox:latest',
|
||||
'command': ["/bin/sleep", "300"],
|
||||
'command': ["top"],
|
||||
'volumes_from': ['data'],
|
||||
},
|
||||
'web': {
|
||||
'image': 'busybox:latest',
|
||||
'command': ["/bin/sleep", "300"],
|
||||
'command': ["top"],
|
||||
'links': ['db'],
|
||||
},
|
||||
}),
|
||||
@@ -293,29 +325,26 @@ class ProjectTest(DockerClientTestCase):
|
||||
self.assertEqual(len(project.get_service('data').containers()), 1)
|
||||
self.assertEqual(len(project.get_service('console').containers()), 0)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_project_up_with_no_deps(self):
|
||||
project = Project.from_dicts(
|
||||
name='composetest',
|
||||
service_dicts=config.from_dictionary({
|
||||
service_dicts=build_service_dicts({
|
||||
'console': {
|
||||
'image': 'busybox:latest',
|
||||
'command': ["/bin/sleep", "300"],
|
||||
'command': ["top"],
|
||||
},
|
||||
'data' : {
|
||||
'data': {
|
||||
'image': 'busybox:latest',
|
||||
'command': ["/bin/sleep", "300"]
|
||||
'command': ["top"]
|
||||
},
|
||||
'db': {
|
||||
'image': 'busybox:latest',
|
||||
'command': ["/bin/sleep", "300"],
|
||||
'command': ["top"],
|
||||
'volumes_from': ['data'],
|
||||
},
|
||||
'web': {
|
||||
'image': 'busybox:latest',
|
||||
'command': ["/bin/sleep", "300"],
|
||||
'command': ["top"],
|
||||
'links': ['db'],
|
||||
},
|
||||
}),
|
||||
@@ -332,9 +361,6 @@ class ProjectTest(DockerClientTestCase):
|
||||
self.assertEqual(len(project.get_service('data').containers(stopped=True)), 1)
|
||||
self.assertEqual(len(project.get_service('console').containers()), 0)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_unscale_after_restart(self):
|
||||
web = self.create_service('web')
|
||||
project = Project('composetest', [web], self.client)
|
||||
@@ -359,5 +385,3 @@ class ProjectTest(DockerClientTestCase):
|
||||
project.up()
|
||||
service = project.get_service('web')
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
48
tests/integration/resilience_test.py
Normal file
48
tests/integration/resilience_test.py
Normal file
@@ -0,0 +1,48 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
|
||||
import mock
|
||||
|
||||
from compose.project import Project
|
||||
from .testcases import DockerClientTestCase
|
||||
|
||||
|
||||
class ResilienceTest(DockerClientTestCase):
|
||||
def setUp(self):
|
||||
self.db = self.create_service('db', volumes=['/var/db'], command='top')
|
||||
self.project = Project('composetest', [self.db], self.client)
|
||||
|
||||
container = self.db.create_container()
|
||||
self.db.start_container(container)
|
||||
self.host_path = container.get('Volumes')['/var/db']
|
||||
|
||||
def test_successful_recreate(self):
|
||||
self.project.up(force_recreate=True)
|
||||
container = self.db.containers()[0]
|
||||
self.assertEqual(container.get('Volumes')['/var/db'], self.host_path)
|
||||
|
||||
def test_create_failure(self):
|
||||
with mock.patch('compose.service.Service.create_container', crash):
|
||||
with self.assertRaises(Crash):
|
||||
self.project.up(force_recreate=True)
|
||||
|
||||
self.project.up()
|
||||
container = self.db.containers()[0]
|
||||
self.assertEqual(container.get('Volumes')['/var/db'], self.host_path)
|
||||
|
||||
def test_start_failure(self):
|
||||
with mock.patch('compose.service.Service.start_container', crash):
|
||||
with self.assertRaises(Crash):
|
||||
self.project.up(force_recreate=True)
|
||||
|
||||
self.project.up()
|
||||
container = self.db.containers()[0]
|
||||
self.assertEqual(container.get('Volumes')['/var/db'], self.host_path)
|
||||
|
||||
|
||||
class Crash(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def crash(*args, **kwargs):
|
||||
raise Crash()
|
||||
@@ -2,13 +2,28 @@ from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
from os import path
|
||||
import mock
|
||||
|
||||
from compose import Service
|
||||
from compose.service import CannotBeScaledError
|
||||
from compose.container import Container
|
||||
from docker.errors import APIError
|
||||
from mock import patch
|
||||
import tempfile
|
||||
import shutil
|
||||
from six import StringIO, text_type
|
||||
|
||||
from .testcases import DockerClientTestCase
|
||||
from compose import __version__
|
||||
from compose.const import (
|
||||
LABEL_CONTAINER_NUMBER,
|
||||
LABEL_ONE_OFF,
|
||||
LABEL_PROJECT,
|
||||
LABEL_SERVICE,
|
||||
LABEL_VERSION,
|
||||
)
|
||||
from compose.container import Container
|
||||
from compose.service import build_extra_hosts
|
||||
from compose.service import ConfigError
|
||||
from compose.service import ConvergencePlan
|
||||
from compose.service import Net
|
||||
from compose.service import Service
|
||||
|
||||
|
||||
def create_and_start_container(service, **override_options):
|
||||
@@ -99,13 +114,101 @@ class ServiceTest(DockerClientTestCase):
|
||||
service = self.create_service('db', volumes=['/var/db'])
|
||||
container = service.create_container()
|
||||
service.start_container(container)
|
||||
self.assertIn('/var/db', container.inspect()['Volumes'])
|
||||
self.assertIn('/var/db', container.get('Volumes'))
|
||||
|
||||
def test_create_container_with_volume_driver(self):
|
||||
service = self.create_service('db', volume_driver='foodriver')
|
||||
container = service.create_container()
|
||||
service.start_container(container)
|
||||
self.assertEqual('foodriver', container.get('Config.VolumeDriver'))
|
||||
|
||||
def test_create_container_with_cpu_shares(self):
|
||||
service = self.create_service('db', cpu_shares=73)
|
||||
container = service.create_container()
|
||||
service.start_container(container)
|
||||
self.assertEqual(container.inspect()['Config']['CpuShares'], 73)
|
||||
self.assertEqual(container.get('HostConfig.CpuShares'), 73)
|
||||
|
||||
def test_build_extra_hosts(self):
|
||||
# string
|
||||
self.assertRaises(ConfigError, lambda: build_extra_hosts("www.example.com: 192.168.0.17"))
|
||||
|
||||
# list of strings
|
||||
self.assertEqual(build_extra_hosts(
|
||||
["www.example.com:192.168.0.17"]),
|
||||
{'www.example.com': '192.168.0.17'})
|
||||
self.assertEqual(build_extra_hosts(
|
||||
["www.example.com: 192.168.0.17"]),
|
||||
{'www.example.com': '192.168.0.17'})
|
||||
self.assertEqual(build_extra_hosts(
|
||||
["www.example.com: 192.168.0.17",
|
||||
"static.example.com:192.168.0.19",
|
||||
"api.example.com: 192.168.0.18"]),
|
||||
{'www.example.com': '192.168.0.17',
|
||||
'static.example.com': '192.168.0.19',
|
||||
'api.example.com': '192.168.0.18'})
|
||||
|
||||
# list of dictionaries
|
||||
self.assertRaises(ConfigError, lambda: build_extra_hosts(
|
||||
[{'www.example.com': '192.168.0.17'},
|
||||
{'api.example.com': '192.168.0.18'}]))
|
||||
|
||||
# dictionaries
|
||||
self.assertEqual(build_extra_hosts(
|
||||
{'www.example.com': '192.168.0.17',
|
||||
'api.example.com': '192.168.0.18'}),
|
||||
{'www.example.com': '192.168.0.17',
|
||||
'api.example.com': '192.168.0.18'})
|
||||
|
||||
def test_create_container_with_extra_hosts_list(self):
|
||||
extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
|
||||
service = self.create_service('db', extra_hosts=extra_hosts)
|
||||
container = service.create_container()
|
||||
service.start_container(container)
|
||||
self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts))
|
||||
|
||||
def test_create_container_with_extra_hosts_string(self):
|
||||
extra_hosts = 'somehost:162.242.195.82'
|
||||
service = self.create_service('db', extra_hosts=extra_hosts)
|
||||
self.assertRaises(ConfigError, lambda: service.create_container())
|
||||
|
||||
def test_create_container_with_extra_hosts_list_of_dicts(self):
|
||||
extra_hosts = [{'somehost': '162.242.195.82'}, {'otherhost': '50.31.209.229'}]
|
||||
service = self.create_service('db', extra_hosts=extra_hosts)
|
||||
self.assertRaises(ConfigError, lambda: service.create_container())
|
||||
|
||||
def test_create_container_with_extra_hosts_dicts(self):
|
||||
extra_hosts = {'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'}
|
||||
extra_hosts_list = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
|
||||
service = self.create_service('db', extra_hosts=extra_hosts)
|
||||
container = service.create_container()
|
||||
service.start_container(container)
|
||||
self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts_list))
|
||||
|
||||
def test_create_container_with_cpu_set(self):
|
||||
service = self.create_service('db', cpuset='0')
|
||||
container = service.create_container()
|
||||
service.start_container(container)
|
||||
self.assertEqual(container.get('HostConfig.CpusetCpus'), '0')
|
||||
|
||||
def test_create_container_with_read_only_root_fs(self):
|
||||
read_only = True
|
||||
service = self.create_service('db', read_only=read_only)
|
||||
container = service.create_container()
|
||||
service.start_container(container)
|
||||
self.assertEqual(container.get('HostConfig.ReadonlyRootfs'), read_only, container.get('HostConfig'))
|
||||
|
||||
def test_create_container_with_security_opt(self):
|
||||
security_opt = ['label:disable']
|
||||
service = self.create_service('db', security_opt=security_opt)
|
||||
container = service.create_container()
|
||||
service.start_container(container)
|
||||
self.assertEqual(set(container.get('HostConfig.SecurityOpt')), set(security_opt))
|
||||
|
||||
def test_create_container_with_mac_address(self):
|
||||
service = self.create_service('db', mac_address='02:42:ac:11:65:43')
|
||||
container = service.create_container()
|
||||
service.start_container(container)
|
||||
self.assertEqual(container.inspect()['Config']['MacAddress'], '02:42:ac:11:65:43')
|
||||
|
||||
def test_create_container_with_specified_volume(self):
|
||||
host_path = '/tmp/host-path'
|
||||
@@ -121,9 +224,55 @@ class ServiceTest(DockerClientTestCase):
|
||||
# Match the last component ("host-path"), because boot2docker symlinks /tmp
|
||||
actual_host_path = volumes[container_path]
|
||||
self.assertTrue(path.basename(actual_host_path) == path.basename(host_path),
|
||||
msg=("Last component differs: %s, %s" % (actual_host_path, host_path)))
|
||||
msg=("Last component differs: %s, %s" % (actual_host_path, host_path)))
|
||||
|
||||
@mock.patch.dict(os.environ)
|
||||
def test_recreate_preserves_volume_with_trailing_slash(self):
|
||||
"""
|
||||
When the Compose file specifies a trailing slash in the container path, make
|
||||
sure we copy the volume over when recreating.
|
||||
"""
|
||||
service = self.create_service('data', volumes=['/data/'])
|
||||
old_container = create_and_start_container(service)
|
||||
volume_path = old_container.get('Volumes')['/data']
|
||||
|
||||
new_container = service.recreate_container(old_container)
|
||||
self.assertEqual(new_container.get('Volumes')['/data'], volume_path)
|
||||
|
||||
def test_duplicate_volume_trailing_slash(self):
|
||||
"""
|
||||
When an image specifies a volume, and the Compose file specifies a host path
|
||||
but adds a trailing slash, make sure that we don't create duplicate binds.
|
||||
"""
|
||||
host_path = '/tmp/data'
|
||||
container_path = '/data'
|
||||
volumes = ['{}:{}/'.format(host_path, container_path)]
|
||||
|
||||
tmp_container = self.client.create_container(
|
||||
'busybox', 'true',
|
||||
volumes={container_path: {}},
|
||||
labels={'com.docker.compose.test_image': 'true'},
|
||||
)
|
||||
image = self.client.commit(tmp_container)['Id']
|
||||
|
||||
service = self.create_service('db', image=image, volumes=volumes)
|
||||
old_container = create_and_start_container(service)
|
||||
|
||||
self.assertEqual(
|
||||
old_container.get('Config.Volumes'),
|
||||
{container_path: {}},
|
||||
)
|
||||
|
||||
service = self.create_service('db', image=image, volumes=volumes)
|
||||
new_container = service.recreate_container(old_container)
|
||||
|
||||
self.assertEqual(
|
||||
new_container.get('Config.Volumes'),
|
||||
{container_path: {}},
|
||||
)
|
||||
|
||||
self.assertEqual(service.containers(stopped=False), [new_container])
|
||||
|
||||
@patch.dict(os.environ)
|
||||
def test_create_container_with_home_and_env_var_in_volume_path(self):
|
||||
os.environ['VOLUME_NAME'] = 'my-volume'
|
||||
os.environ['HOME'] = '/tmp/home-dir'
|
||||
@@ -144,7 +293,12 @@ class ServiceTest(DockerClientTestCase):
|
||||
def test_create_container_with_volumes_from(self):
|
||||
volume_service = self.create_service('data')
|
||||
volume_container_1 = volume_service.create_container()
|
||||
volume_container_2 = Container.create(self.client, image='busybox:latest', command=["/bin/sleep", "300"])
|
||||
volume_container_2 = Container.create(
|
||||
self.client,
|
||||
image='busybox:latest',
|
||||
command=["top"],
|
||||
labels={LABEL_PROJECT: 'composetest'},
|
||||
)
|
||||
host_service = self.create_service('host', volumes_from=[volume_service, volume_container_2])
|
||||
host_container = host_service.create_container()
|
||||
host_service.start_container(host_container)
|
||||
@@ -153,60 +307,68 @@ class ServiceTest(DockerClientTestCase):
|
||||
self.assertIn(volume_container_2.id,
|
||||
host_container.get('HostConfig.VolumesFrom'))
|
||||
|
||||
def test_recreate_containers(self):
|
||||
def test_execute_convergence_plan_recreate(self):
|
||||
service = self.create_service(
|
||||
'db',
|
||||
environment={'FOO': '1'},
|
||||
volumes=['/etc'],
|
||||
entrypoint=['sleep'],
|
||||
command=['300']
|
||||
entrypoint=['top'],
|
||||
command=['-d', '1']
|
||||
)
|
||||
old_container = service.create_container()
|
||||
self.assertEqual(old_container.dictionary['Config']['Entrypoint'], ['sleep'])
|
||||
self.assertEqual(old_container.dictionary['Config']['Cmd'], ['300'])
|
||||
self.assertIn('FOO=1', old_container.dictionary['Config']['Env'])
|
||||
self.assertEqual(old_container.get('Config.Entrypoint'), ['top'])
|
||||
self.assertEqual(old_container.get('Config.Cmd'), ['-d', '1'])
|
||||
self.assertIn('FOO=1', old_container.get('Config.Env'))
|
||||
self.assertEqual(old_container.name, 'composetest_db_1')
|
||||
service.start_container(old_container)
|
||||
volume_path = old_container.inspect()['Volumes']['/etc']
|
||||
old_container.inspect() # reload volume data
|
||||
volume_path = old_container.get('Volumes')['/etc']
|
||||
|
||||
num_containers_before = len(self.client.containers(all=True))
|
||||
|
||||
service.options['environment']['FOO'] = '2'
|
||||
tuples = service.recreate_containers()
|
||||
self.assertEqual(len(tuples), 1)
|
||||
new_container, = service.execute_convergence_plan(
|
||||
ConvergencePlan('recreate', [old_container]))
|
||||
|
||||
intermediate_container = tuples[0][0]
|
||||
new_container = tuples[0][1]
|
||||
self.assertEqual(intermediate_container.dictionary['Config']['Entrypoint'], ['/bin/echo'])
|
||||
|
||||
self.assertEqual(new_container.dictionary['Config']['Entrypoint'], ['sleep'])
|
||||
self.assertEqual(new_container.dictionary['Config']['Cmd'], ['300'])
|
||||
self.assertIn('FOO=2', new_container.dictionary['Config']['Env'])
|
||||
self.assertEqual(new_container.get('Config.Entrypoint'), ['top'])
|
||||
self.assertEqual(new_container.get('Config.Cmd'), ['-d', '1'])
|
||||
self.assertIn('FOO=2', new_container.get('Config.Env'))
|
||||
self.assertEqual(new_container.name, 'composetest_db_1')
|
||||
self.assertEqual(new_container.inspect()['Volumes']['/etc'], volume_path)
|
||||
self.assertIn(intermediate_container.id, new_container.dictionary['HostConfig']['VolumesFrom'])
|
||||
self.assertEqual(new_container.get('Volumes')['/etc'], volume_path)
|
||||
self.assertIn(
|
||||
'affinity:container==%s' % old_container.id,
|
||||
new_container.get('Config.Env'))
|
||||
|
||||
self.assertEqual(len(self.client.containers(all=True)), num_containers_before)
|
||||
self.assertNotEqual(old_container.id, new_container.id)
|
||||
self.assertRaises(APIError,
|
||||
self.client.inspect_container,
|
||||
intermediate_container.id)
|
||||
old_container.id)
|
||||
|
||||
def test_recreate_containers_when_containers_are_stopped(self):
|
||||
def test_execute_convergence_plan_when_containers_are_stopped(self):
|
||||
service = self.create_service(
|
||||
'db',
|
||||
environment={'FOO': '1'},
|
||||
volumes=['/var/db'],
|
||||
entrypoint=['sleep'],
|
||||
command=['300']
|
||||
entrypoint=['top'],
|
||||
command=['-d', '1']
|
||||
)
|
||||
old_container = service.create_container()
|
||||
self.assertEqual(len(service.containers(stopped=True)), 1)
|
||||
service.recreate_containers()
|
||||
self.assertEqual(len(service.containers(stopped=True)), 1)
|
||||
service.create_container()
|
||||
|
||||
containers = service.containers(stopped=True)
|
||||
self.assertEqual(len(containers), 1)
|
||||
container, = containers
|
||||
self.assertFalse(container.is_running)
|
||||
|
||||
def test_recreate_containers_with_image_declared_volume(self):
|
||||
service.execute_convergence_plan(ConvergencePlan('start', [container]))
|
||||
|
||||
containers = service.containers()
|
||||
self.assertEqual(len(containers), 1)
|
||||
container.inspect()
|
||||
self.assertEqual(container, containers[0])
|
||||
self.assertTrue(container.is_running)
|
||||
|
||||
def test_execute_convergence_plan_with_image_declared_volume(self):
|
||||
service = Service(
|
||||
project='composetest',
|
||||
name='db',
|
||||
@@ -218,9 +380,8 @@ class ServiceTest(DockerClientTestCase):
|
||||
self.assertEqual(old_container.get('Volumes').keys(), ['/data'])
|
||||
volume_path = old_container.get('Volumes')['/data']
|
||||
|
||||
service.recreate_containers()
|
||||
new_container = service.containers()[0]
|
||||
service.start_container(new_container)
|
||||
new_container, = service.execute_convergence_plan(
|
||||
ConvergencePlan('recreate', [old_container]))
|
||||
self.assertEqual(new_container.get('Volumes').keys(), ['/data'])
|
||||
self.assertEqual(new_container.get('Volumes')['/data'], volume_path)
|
||||
|
||||
@@ -247,8 +408,7 @@ class ServiceTest(DockerClientTestCase):
|
||||
set([
|
||||
'composetest_db_1', 'db_1',
|
||||
'composetest_db_2', 'db_2',
|
||||
'db',
|
||||
]),
|
||||
'db'])
|
||||
)
|
||||
|
||||
def test_start_container_creates_links_with_names(self):
|
||||
@@ -264,8 +424,7 @@ class ServiceTest(DockerClientTestCase):
|
||||
set([
|
||||
'composetest_db_1', 'db_1',
|
||||
'composetest_db_2', 'db_2',
|
||||
'custom_link_name',
|
||||
]),
|
||||
'custom_link_name'])
|
||||
)
|
||||
|
||||
def test_start_container_with_external_links(self):
|
||||
@@ -283,8 +442,7 @@ class ServiceTest(DockerClientTestCase):
|
||||
set([
|
||||
'composetest_db_1',
|
||||
'composetest_db_2',
|
||||
'db_3',
|
||||
]),
|
||||
'db_3']),
|
||||
)
|
||||
|
||||
def test_start_normal_container_does_not_create_links_to_its_own_service(self):
|
||||
@@ -309,8 +467,7 @@ class ServiceTest(DockerClientTestCase):
|
||||
set([
|
||||
'composetest_db_1', 'db_1',
|
||||
'composetest_db_2', 'db_2',
|
||||
'db',
|
||||
]),
|
||||
'db'])
|
||||
)
|
||||
|
||||
def test_start_container_builds_images(self):
|
||||
@@ -326,7 +483,7 @@ class ServiceTest(DockerClientTestCase):
|
||||
self.assertEqual(len(self.client.images(name='composetest_test')), 1)
|
||||
|
||||
def test_start_container_uses_tagged_image_if_it_exists(self):
|
||||
self.client.build('tests/fixtures/simple-dockerfile', tag='composetest_test')
|
||||
self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
|
||||
service = Service(
|
||||
name='test',
|
||||
client=self.client,
|
||||
@@ -343,13 +500,36 @@ class ServiceTest(DockerClientTestCase):
|
||||
self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/tcp'])
|
||||
self.assertNotEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
|
||||
|
||||
def test_build(self):
|
||||
base_dir = tempfile.mkdtemp()
|
||||
self.addCleanup(shutil.rmtree, base_dir)
|
||||
|
||||
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
|
||||
f.write("FROM busybox\n")
|
||||
|
||||
self.create_service('web', build=base_dir).build()
|
||||
self.assertEqual(len(self.client.images(name='composetest_web')), 1)
|
||||
|
||||
def test_build_non_ascii_filename(self):
|
||||
base_dir = tempfile.mkdtemp()
|
||||
self.addCleanup(shutil.rmtree, base_dir)
|
||||
|
||||
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
|
||||
f.write("FROM busybox\n")
|
||||
|
||||
with open(os.path.join(base_dir, b'foo\xE2bar'), 'w') as f:
|
||||
f.write("hello world\n")
|
||||
|
||||
self.create_service('web', build=text_type(base_dir)).build()
|
||||
self.assertEqual(len(self.client.images(name='composetest_web')), 1)
|
||||
|
||||
def test_start_container_stays_unpriviliged(self):
|
||||
service = self.create_service('web')
|
||||
container = create_and_start_container(service).inspect()
|
||||
self.assertEqual(container['HostConfig']['Privileged'], False)
|
||||
|
||||
def test_start_container_becomes_priviliged(self):
|
||||
service = self.create_service('web', privileged = True)
|
||||
service = self.create_service('web', privileged=True)
|
||||
container = create_and_start_container(service).inspect()
|
||||
self.assertEqual(container['HostConfig']['Privileged'], True)
|
||||
|
||||
@@ -396,6 +576,11 @@ class ServiceTest(DockerClientTestCase):
|
||||
],
|
||||
})
|
||||
|
||||
def test_create_with_image_id(self):
|
||||
# Image id for the current busybox:latest
|
||||
service = self.create_service('foo', image='8c2e06607696')
|
||||
service.create_container()
|
||||
|
||||
def test_scale(self):
|
||||
service = self.create_service('web')
|
||||
service.scale(1)
|
||||
@@ -415,9 +600,138 @@ class ServiceTest(DockerClientTestCase):
|
||||
service.scale(0)
|
||||
self.assertEqual(len(service.containers()), 0)
|
||||
|
||||
def test_scale_on_service_that_cannot_be_scaled(self):
|
||||
service = self.create_service('web', ports=['8000:8000'])
|
||||
self.assertRaises(CannotBeScaledError, lambda: service.scale(1))
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def test_scale_with_stopped_containers(self, mock_stdout):
|
||||
"""
|
||||
Given there are some stopped containers and scale is called with a
|
||||
desired number that is the same as the number of stopped containers,
|
||||
test that those containers are restarted and not removed/recreated.
|
||||
"""
|
||||
service = self.create_service('web')
|
||||
next_number = service._next_container_number()
|
||||
valid_numbers = [next_number, next_number + 1]
|
||||
service.create_container(number=next_number, quiet=True)
|
||||
service.create_container(number=next_number + 1, quiet=True)
|
||||
|
||||
for container in service.containers():
|
||||
self.assertFalse(container.is_running)
|
||||
|
||||
service.scale(2)
|
||||
|
||||
self.assertEqual(len(service.containers()), 2)
|
||||
for container in service.containers():
|
||||
self.assertTrue(container.is_running)
|
||||
self.assertTrue(container.number in valid_numbers)
|
||||
|
||||
captured_output = mock_stdout.getvalue()
|
||||
self.assertNotIn('Creating', captured_output)
|
||||
self.assertIn('Starting', captured_output)
|
||||
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def test_scale_with_stopped_containers_and_needing_creation(self, mock_stdout):
|
||||
"""
|
||||
Given there are some stopped containers and scale is called with a
|
||||
desired number that is greater than the number of stopped containers,
|
||||
test that those containers are restarted and required number are created.
|
||||
"""
|
||||
service = self.create_service('web')
|
||||
next_number = service._next_container_number()
|
||||
service.create_container(number=next_number, quiet=True)
|
||||
|
||||
for container in service.containers():
|
||||
self.assertFalse(container.is_running)
|
||||
|
||||
service.scale(2)
|
||||
|
||||
self.assertEqual(len(service.containers()), 2)
|
||||
for container in service.containers():
|
||||
self.assertTrue(container.is_running)
|
||||
|
||||
captured_output = mock_stdout.getvalue()
|
||||
self.assertIn('Creating', captured_output)
|
||||
self.assertIn('Starting', captured_output)
|
||||
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def test_scale_with_api_returns_errors(self, mock_stdout):
|
||||
"""
|
||||
Test that when scaling if the API returns an error, that error is handled
|
||||
and the remaining threads continue.
|
||||
"""
|
||||
service = self.create_service('web')
|
||||
next_number = service._next_container_number()
|
||||
service.create_container(number=next_number, quiet=True)
|
||||
|
||||
with patch(
|
||||
'compose.container.Container.create',
|
||||
side_effect=APIError(message="testing", response={}, explanation="Boom")):
|
||||
|
||||
service.scale(3)
|
||||
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
self.assertTrue(service.containers()[0].is_running)
|
||||
self.assertIn("ERROR: for 2 Boom", mock_stdout.getvalue())
|
||||
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def test_scale_with_api_returns_unexpected_exception(self, mock_stdout):
|
||||
"""
|
||||
Test that when scaling if the API returns an error, that is not of type
|
||||
APIError, that error is re-raised.
|
||||
"""
|
||||
service = self.create_service('web')
|
||||
next_number = service._next_container_number()
|
||||
service.create_container(number=next_number, quiet=True)
|
||||
|
||||
with patch(
|
||||
'compose.container.Container.create',
|
||||
side_effect=ValueError("BOOM")):
|
||||
with self.assertRaises(ValueError):
|
||||
service.scale(3)
|
||||
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
self.assertTrue(service.containers()[0].is_running)
|
||||
|
||||
@patch('compose.service.log')
|
||||
def test_scale_with_desired_number_already_achieved(self, mock_log):
|
||||
"""
|
||||
Test that calling scale with a desired number that is equal to the
|
||||
number of containers already running results in no change.
|
||||
"""
|
||||
service = self.create_service('web')
|
||||
next_number = service._next_container_number()
|
||||
container = service.create_container(number=next_number, quiet=True)
|
||||
container.start()
|
||||
|
||||
self.assertTrue(container.is_running)
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
|
||||
service.scale(1)
|
||||
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
container.inspect()
|
||||
self.assertTrue(container.is_running)
|
||||
|
||||
captured_output = mock_log.info.call_args[0]
|
||||
self.assertIn('Desired container number already achieved', captured_output)
|
||||
|
||||
@patch('compose.service.log')
|
||||
def test_scale_with_custom_container_name_outputs_warning(self, mock_log):
|
||||
"""
|
||||
Test that calling scale on a service that has a custom container name
|
||||
results in warning output.
|
||||
"""
|
||||
service = self.create_service('web', container_name='custom-container')
|
||||
|
||||
self.assertEqual(service.custom_container_name(), 'custom-container')
|
||||
|
||||
service.scale(3)
|
||||
|
||||
captured_output = mock_log.warn.call_args[0][0]
|
||||
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
self.assertIn(
|
||||
"Remove the custom name to scale the service.",
|
||||
captured_output
|
||||
)
|
||||
|
||||
def test_scale_sets_ports(self):
|
||||
service = self.create_service('web', ports=['8000'])
|
||||
@@ -428,20 +742,30 @@ class ServiceTest(DockerClientTestCase):
|
||||
self.assertEqual(list(container.inspect()['HostConfig']['PortBindings'].keys()), ['8000/tcp'])
|
||||
|
||||
def test_network_mode_none(self):
|
||||
service = self.create_service('web', net='none')
|
||||
service = self.create_service('web', net=Net('none'))
|
||||
container = create_and_start_container(service)
|
||||
self.assertEqual(container.get('HostConfig.NetworkMode'), 'none')
|
||||
|
||||
def test_network_mode_bridged(self):
|
||||
service = self.create_service('web', net='bridge')
|
||||
service = self.create_service('web', net=Net('bridge'))
|
||||
container = create_and_start_container(service)
|
||||
self.assertEqual(container.get('HostConfig.NetworkMode'), 'bridge')
|
||||
|
||||
def test_network_mode_host(self):
|
||||
service = self.create_service('web', net='host')
|
||||
service = self.create_service('web', net=Net('host'))
|
||||
container = create_and_start_container(service)
|
||||
self.assertEqual(container.get('HostConfig.NetworkMode'), 'host')
|
||||
|
||||
def test_pid_mode_none_defined(self):
|
||||
service = self.create_service('web', pid=None)
|
||||
container = create_and_start_container(service)
|
||||
self.assertEqual(container.get('HostConfig.PidMode'), '')
|
||||
|
||||
def test_pid_mode_host(self):
|
||||
service = self.create_service('web', pid='host')
|
||||
container = create_and_start_container(service)
|
||||
self.assertEqual(container.get('HostConfig.PidMode'), 'host')
|
||||
|
||||
def test_dns_no_value(self):
|
||||
service = self.create_service('web')
|
||||
container = create_and_start_container(service)
|
||||
@@ -501,21 +825,116 @@ class ServiceTest(DockerClientTestCase):
|
||||
def test_split_env(self):
|
||||
service = self.create_service('web', environment=['NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS='])
|
||||
env = create_and_start_container(service).environment
|
||||
for k,v in {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}.items():
|
||||
for k, v in {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}.items():
|
||||
self.assertEqual(env[k], v)
|
||||
|
||||
def test_env_from_file_combined_with_env(self):
|
||||
service = self.create_service('web', environment=['ONE=1', 'TWO=2', 'THREE=3'], env_file=['tests/fixtures/env/one.env', 'tests/fixtures/env/two.env'])
|
||||
env = create_and_start_container(service).environment
|
||||
for k,v in {'ONE': '1', 'TWO': '2', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'}.items():
|
||||
for k, v in {'ONE': '1', 'TWO': '2', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'}.items():
|
||||
self.assertEqual(env[k], v)
|
||||
|
||||
@mock.patch.dict(os.environ)
|
||||
@patch.dict(os.environ)
|
||||
def test_resolve_env(self):
|
||||
os.environ['FILE_DEF'] = 'E1'
|
||||
os.environ['FILE_DEF_EMPTY'] = 'E2'
|
||||
os.environ['ENV_DEF'] = 'E3'
|
||||
service = self.create_service('web', environment={'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': None, 'NO_DEF': None})
|
||||
env = create_and_start_container(service).environment
|
||||
for k,v in {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''}.items():
|
||||
for k, v in {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''}.items():
|
||||
self.assertEqual(env[k], v)
|
||||
|
||||
def test_labels(self):
|
||||
labels_dict = {
|
||||
'com.example.description': "Accounting webapp",
|
||||
'com.example.department': "Finance",
|
||||
'com.example.label-with-empty-value': "",
|
||||
}
|
||||
|
||||
compose_labels = {
|
||||
LABEL_CONTAINER_NUMBER: '1',
|
||||
LABEL_ONE_OFF: 'False',
|
||||
LABEL_PROJECT: 'composetest',
|
||||
LABEL_SERVICE: 'web',
|
||||
LABEL_VERSION: __version__,
|
||||
}
|
||||
expected = dict(labels_dict, **compose_labels)
|
||||
|
||||
service = self.create_service('web', labels=labels_dict)
|
||||
labels = create_and_start_container(service).labels.items()
|
||||
for pair in expected.items():
|
||||
self.assertIn(pair, labels)
|
||||
|
||||
service.kill()
|
||||
service.remove_stopped()
|
||||
|
||||
labels_list = ["%s=%s" % pair for pair in labels_dict.items()]
|
||||
|
||||
service = self.create_service('web', labels=labels_list)
|
||||
labels = create_and_start_container(service).labels.items()
|
||||
for pair in expected.items():
|
||||
self.assertIn(pair, labels)
|
||||
|
||||
def test_empty_labels(self):
|
||||
labels_list = ['foo', 'bar']
|
||||
|
||||
service = self.create_service('web', labels=labels_list)
|
||||
labels = create_and_start_container(service).labels.items()
|
||||
for name in labels_list:
|
||||
self.assertIn((name, ''), labels)
|
||||
|
||||
def test_custom_container_name(self):
|
||||
service = self.create_service('web', container_name='my-web-container')
|
||||
self.assertEqual(service.custom_container_name(), 'my-web-container')
|
||||
|
||||
container = create_and_start_container(service)
|
||||
self.assertEqual(container.name, 'my-web-container')
|
||||
|
||||
one_off_container = service.create_container(one_off=True)
|
||||
self.assertNotEqual(one_off_container.name, 'my-web-container')
|
||||
|
||||
def test_log_drive_invalid(self):
|
||||
service = self.create_service('web', log_driver='xxx')
|
||||
self.assertRaises(ValueError, lambda: create_and_start_container(service))
|
||||
|
||||
def test_log_drive_empty_default_jsonfile(self):
|
||||
service = self.create_service('web')
|
||||
log_config = create_and_start_container(service).log_config
|
||||
|
||||
self.assertEqual('json-file', log_config['Type'])
|
||||
self.assertFalse(log_config['Config'])
|
||||
|
||||
def test_log_drive_none(self):
|
||||
service = self.create_service('web', log_driver='none')
|
||||
log_config = create_and_start_container(service).log_config
|
||||
|
||||
self.assertEqual('none', log_config['Type'])
|
||||
self.assertFalse(log_config['Config'])
|
||||
|
||||
def test_devices(self):
|
||||
service = self.create_service('web', devices=["/dev/random:/dev/mapped-random"])
|
||||
device_config = create_and_start_container(service).get('HostConfig.Devices')
|
||||
|
||||
device_dict = {
|
||||
'PathOnHost': '/dev/random',
|
||||
'CgroupPermissions': 'rwm',
|
||||
'PathInContainer': '/dev/mapped-random'
|
||||
}
|
||||
|
||||
self.assertEqual(1, len(device_config))
|
||||
self.assertDictEqual(device_dict, device_config[0])
|
||||
|
||||
def test_duplicate_containers(self):
|
||||
service = self.create_service('web')
|
||||
|
||||
options = service._get_container_create_options({}, 1)
|
||||
original = Container.create(service.client, **options)
|
||||
|
||||
self.assertEqual(set(service.containers(stopped=True)), set([original]))
|
||||
self.assertEqual(set(service.duplicate_containers()), set())
|
||||
|
||||
options['name'] = 'temporary_container_name'
|
||||
duplicate = Container.create(service.client, **options)
|
||||
|
||||
self.assertEqual(set(service.containers(stopped=True)), set([original, duplicate]))
|
||||
self.assertEqual(set(service.duplicate_containers()), set([duplicate]))
|
||||
|
||||
314
tests/integration/state_test.py
Normal file
314
tests/integration/state_test.py
Normal file
@@ -0,0 +1,314 @@
|
||||
"""
|
||||
Integration tests which cover state convergence (aka smart recreate) performed
|
||||
by `docker-compose up`.
|
||||
"""
|
||||
from __future__ import unicode_literals
|
||||
import tempfile
|
||||
import shutil
|
||||
import os
|
||||
|
||||
from compose import config
|
||||
from compose.project import Project
|
||||
from compose.const import LABEL_CONFIG_HASH
|
||||
|
||||
from .testcases import DockerClientTestCase
|
||||
|
||||
|
||||
class ProjectTestCase(DockerClientTestCase):
|
||||
def run_up(self, cfg, **kwargs):
|
||||
kwargs.setdefault('timeout', 1)
|
||||
|
||||
project = self.make_project(cfg)
|
||||
project.up(**kwargs)
|
||||
return set(project.containers(stopped=True))
|
||||
|
||||
def make_project(self, cfg):
|
||||
return Project.from_dicts(
|
||||
name='composetest',
|
||||
client=self.client,
|
||||
service_dicts=config.load(config.ConfigDetails(cfg, 'working_dir', None))
|
||||
)
|
||||
|
||||
|
||||
class BasicProjectTest(ProjectTestCase):
|
||||
def setUp(self):
|
||||
super(BasicProjectTest, self).setUp()
|
||||
|
||||
self.cfg = {
|
||||
'db': {'image': 'busybox:latest'},
|
||||
'web': {'image': 'busybox:latest'},
|
||||
}
|
||||
|
||||
def test_no_change(self):
|
||||
old_containers = self.run_up(self.cfg)
|
||||
self.assertEqual(len(old_containers), 2)
|
||||
|
||||
new_containers = self.run_up(self.cfg)
|
||||
self.assertEqual(len(new_containers), 2)
|
||||
|
||||
self.assertEqual(old_containers, new_containers)
|
||||
|
||||
def test_partial_change(self):
|
||||
old_containers = self.run_up(self.cfg)
|
||||
old_db = [c for c in old_containers if c.name_without_project == 'db_1'][0]
|
||||
old_web = [c for c in old_containers if c.name_without_project == 'web_1'][0]
|
||||
|
||||
self.cfg['web']['command'] = '/bin/true'
|
||||
|
||||
new_containers = self.run_up(self.cfg)
|
||||
self.assertEqual(len(new_containers), 2)
|
||||
|
||||
preserved = list(old_containers & new_containers)
|
||||
self.assertEqual(preserved, [old_db])
|
||||
|
||||
removed = list(old_containers - new_containers)
|
||||
self.assertEqual(removed, [old_web])
|
||||
|
||||
created = list(new_containers - old_containers)
|
||||
self.assertEqual(len(created), 1)
|
||||
self.assertEqual(created[0].name_without_project, 'web_1')
|
||||
self.assertEqual(created[0].get('Config.Cmd'), ['/bin/true'])
|
||||
|
||||
def test_all_change(self):
|
||||
old_containers = self.run_up(self.cfg)
|
||||
self.assertEqual(len(old_containers), 2)
|
||||
|
||||
self.cfg['web']['command'] = '/bin/true'
|
||||
self.cfg['db']['command'] = '/bin/true'
|
||||
|
||||
new_containers = self.run_up(self.cfg)
|
||||
self.assertEqual(len(new_containers), 2)
|
||||
|
||||
unchanged = old_containers & new_containers
|
||||
self.assertEqual(len(unchanged), 0)
|
||||
|
||||
new = new_containers - old_containers
|
||||
self.assertEqual(len(new), 2)
|
||||
|
||||
|
||||
class ProjectWithDependenciesTest(ProjectTestCase):
|
||||
def setUp(self):
|
||||
super(ProjectWithDependenciesTest, self).setUp()
|
||||
|
||||
self.cfg = {
|
||||
'db': {
|
||||
'image': 'busybox:latest',
|
||||
'command': 'tail -f /dev/null',
|
||||
},
|
||||
'web': {
|
||||
'image': 'busybox:latest',
|
||||
'command': 'tail -f /dev/null',
|
||||
'links': ['db'],
|
||||
},
|
||||
'nginx': {
|
||||
'image': 'busybox:latest',
|
||||
'command': 'tail -f /dev/null',
|
||||
'links': ['web'],
|
||||
},
|
||||
}
|
||||
|
||||
def test_up(self):
|
||||
containers = self.run_up(self.cfg)
|
||||
self.assertEqual(
|
||||
set(c.name_without_project for c in containers),
|
||||
set(['db_1', 'web_1', 'nginx_1']),
|
||||
)
|
||||
|
||||
def test_change_leaf(self):
|
||||
old_containers = self.run_up(self.cfg)
|
||||
|
||||
self.cfg['nginx']['environment'] = {'NEW_VAR': '1'}
|
||||
new_containers = self.run_up(self.cfg)
|
||||
|
||||
self.assertEqual(
|
||||
set(c.name_without_project for c in new_containers - old_containers),
|
||||
set(['nginx_1']),
|
||||
)
|
||||
|
||||
def test_change_middle(self):
|
||||
old_containers = self.run_up(self.cfg)
|
||||
|
||||
self.cfg['web']['environment'] = {'NEW_VAR': '1'}
|
||||
new_containers = self.run_up(self.cfg)
|
||||
|
||||
self.assertEqual(
|
||||
set(c.name_without_project for c in new_containers - old_containers),
|
||||
set(['web_1', 'nginx_1']),
|
||||
)
|
||||
|
||||
def test_change_root(self):
|
||||
old_containers = self.run_up(self.cfg)
|
||||
|
||||
self.cfg['db']['environment'] = {'NEW_VAR': '1'}
|
||||
new_containers = self.run_up(self.cfg)
|
||||
|
||||
self.assertEqual(
|
||||
set(c.name_without_project for c in new_containers - old_containers),
|
||||
set(['db_1', 'web_1', 'nginx_1']),
|
||||
)
|
||||
|
||||
def test_change_root_no_recreate(self):
|
||||
old_containers = self.run_up(self.cfg)
|
||||
|
||||
self.cfg['db']['environment'] = {'NEW_VAR': '1'}
|
||||
new_containers = self.run_up(self.cfg, allow_recreate=False)
|
||||
|
||||
self.assertEqual(new_containers - old_containers, set())
|
||||
|
||||
def test_service_removed_while_down(self):
|
||||
next_cfg = {
|
||||
'web': {
|
||||
'image': 'busybox:latest',
|
||||
'command': 'tail -f /dev/null',
|
||||
},
|
||||
'nginx': self.cfg['nginx'],
|
||||
}
|
||||
|
||||
containers = self.run_up(self.cfg)
|
||||
self.assertEqual(len(containers), 3)
|
||||
|
||||
project = self.make_project(self.cfg)
|
||||
project.stop(timeout=1)
|
||||
|
||||
containers = self.run_up(next_cfg)
|
||||
self.assertEqual(len(containers), 2)
|
||||
|
||||
|
||||
def converge(service,
|
||||
allow_recreate=True,
|
||||
force_recreate=False,
|
||||
do_build=True):
|
||||
"""
|
||||
If a container for this service doesn't exist, create and start one. If there are
|
||||
any, stop them, create+start new ones, and remove the old containers.
|
||||
"""
|
||||
plan = service.convergence_plan(
|
||||
allow_recreate=allow_recreate,
|
||||
force_recreate=force_recreate,
|
||||
)
|
||||
|
||||
return service.execute_convergence_plan(
|
||||
plan,
|
||||
do_build=do_build,
|
||||
timeout=1,
|
||||
)
|
||||
|
||||
|
||||
class ServiceStateTest(DockerClientTestCase):
|
||||
"""Test cases for Service.convergence_plan."""
|
||||
|
||||
def test_trigger_create(self):
|
||||
web = self.create_service('web')
|
||||
self.assertEqual(('create', []), web.convergence_plan())
|
||||
|
||||
def test_trigger_noop(self):
|
||||
web = self.create_service('web')
|
||||
container = web.create_container()
|
||||
web.start()
|
||||
|
||||
web = self.create_service('web')
|
||||
self.assertEqual(('noop', [container]), web.convergence_plan())
|
||||
|
||||
def test_trigger_start(self):
|
||||
options = dict(command=["top"])
|
||||
|
||||
web = self.create_service('web', **options)
|
||||
web.scale(2)
|
||||
|
||||
containers = web.containers(stopped=True)
|
||||
containers[0].stop()
|
||||
containers[0].inspect()
|
||||
|
||||
self.assertEqual([c.is_running for c in containers], [False, True])
|
||||
|
||||
web = self.create_service('web', **options)
|
||||
self.assertEqual(
|
||||
('start', containers[0:1]),
|
||||
web.convergence_plan(),
|
||||
)
|
||||
|
||||
def test_trigger_recreate_with_config_change(self):
|
||||
web = self.create_service('web', command=["top"])
|
||||
container = web.create_container()
|
||||
|
||||
web = self.create_service('web', command=["top", "-d", "1"])
|
||||
self.assertEqual(('recreate', [container]), web.convergence_plan())
|
||||
|
||||
def test_trigger_recreate_with_nonexistent_image_tag(self):
|
||||
web = self.create_service('web', image="busybox:latest")
|
||||
container = web.create_container()
|
||||
|
||||
web = self.create_service('web', image="nonexistent-image")
|
||||
self.assertEqual(('recreate', [container]), web.convergence_plan())
|
||||
|
||||
def test_trigger_recreate_with_image_change(self):
|
||||
repo = 'composetest_myimage'
|
||||
tag = 'latest'
|
||||
image = '{}:{}'.format(repo, tag)
|
||||
|
||||
image_id = self.client.images(name='busybox')[0]['Id']
|
||||
self.client.tag(image_id, repository=repo, tag=tag)
|
||||
|
||||
try:
|
||||
web = self.create_service('web', image=image)
|
||||
container = web.create_container()
|
||||
|
||||
# update the image
|
||||
c = self.client.create_container(image, ['touch', '/hello.txt'])
|
||||
self.client.commit(c, repository=repo, tag=tag)
|
||||
self.client.remove_container(c)
|
||||
|
||||
web = self.create_service('web', image=image)
|
||||
self.assertEqual(('recreate', [container]), web.convergence_plan())
|
||||
|
||||
finally:
|
||||
self.client.remove_image(image)
|
||||
|
||||
def test_trigger_recreate_with_build(self):
|
||||
context = tempfile.mkdtemp()
|
||||
base_image = "FROM busybox\nLABEL com.docker.compose.test_image=true\n"
|
||||
|
||||
try:
|
||||
dockerfile = os.path.join(context, 'Dockerfile')
|
||||
|
||||
with open(dockerfile, 'w') as f:
|
||||
f.write(base_image)
|
||||
|
||||
web = self.create_service('web', build=context)
|
||||
container = web.create_container()
|
||||
|
||||
with open(dockerfile, 'w') as f:
|
||||
f.write(base_image + 'CMD echo hello world\n')
|
||||
web.build()
|
||||
|
||||
web = self.create_service('web', build=context)
|
||||
self.assertEqual(('recreate', [container]), web.convergence_plan())
|
||||
finally:
|
||||
shutil.rmtree(context)
|
||||
|
||||
|
||||
class ConfigHashTest(DockerClientTestCase):
|
||||
def test_no_config_hash_when_one_off(self):
|
||||
web = self.create_service('web')
|
||||
container = web.create_container(one_off=True)
|
||||
self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
|
||||
|
||||
def test_no_config_hash_when_overriding_options(self):
|
||||
web = self.create_service('web')
|
||||
container = web.create_container(environment={'FOO': '1'})
|
||||
self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
|
||||
|
||||
def test_config_hash_with_custom_labels(self):
|
||||
web = self.create_service('web', labels={'foo': '1'})
|
||||
container = converge(web)[0]
|
||||
self.assertIn(LABEL_CONFIG_HASH, container.labels)
|
||||
self.assertIn('foo', container.labels)
|
||||
|
||||
def test_config_hash_sticks_around(self):
|
||||
web = self.create_service('web', command=["top"])
|
||||
container = converge(web)[0]
|
||||
self.assertIn(LABEL_CONFIG_HASH, container.labels)
|
||||
|
||||
web = self.create_service('web', command=["top", "-d", "1"])
|
||||
container = converge(web)[0]
|
||||
self.assertIn(LABEL_CONFIG_HASH, container.labels)
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user