Mirror of https://github.com/docker/compose.git (synced 2026-02-10 18:49:28 +08:00)
Compare commits
179 Commits
Commits in this comparison (abbreviated SHA1):

e117a7822d, 5489465905, 4afcdbdb3c, 94d82d4acb, d528f9f642, 99d7a474af, d1052ff666, 44a91e6ba8, 3996947024, b7afaba56a,
2ce3685e32, 699bbe9ca2, 4b890bffde, 789e1ba82b, 1a9614c35e, d83bdd5164, e1a3fc2536, 251aa7efb6, 2924b9997a, 2a9aef1332,
361294d20b, 9a825c5c35, 944e15fa65, d04b1724ec, e5916b2fae, 4f7cbc3812, 3c48884dbb, 7ec63afae9, 8c6b516aa0, 50c588176c,
3770aac1af, 256dccc554, d0f65906ed, 95aa61cfe5, 247691ca44, 0fc9cc65d1, eb69225444, cafe68a92d, 723cccdae8, 6b8044e92c,
1e7e8202af, c0fdf7bd39, 034b66fedb, eed274c632, 5b10c4811f, 2bd6e3d0a5, d0b5bcf26a, 262248d8a6, 9eb3697b40, c246897af1,
cfcabce593, e517061010, feb8ad7b4c, 1b5bf6e12a, e953a32a82, f1390b3cb6, 6e485df084, 3a342fb25d, e71e82f8ac, da80eca28c,
1d1e23611b, 74e067c6e6, 85b9619799, ab1fbc96c3, a04143e2a7, 6c4299039a, 655d347ea2, 94a3164248, 18728a64b9, d8b0fa294e,
a6c8319b5d, 5d92f12f8e, c0231bdb70, ac541e208f, 3d8ce448b8, 949df97726, 14cbe40543, 9dd53ecdaa, 6bfe5e049d, b672861ffd,
b081077f2b, 13a296049b, 22c531dea7, dfc74e2a77, 0c12db06ec, edf6b56016, 8b4ed0c1a8, 1b5335f409, 3a2c9c1016, cf3eed2cda,
2ecd366905, d34dc45b78, 8394e84099, adda3a7f79, 52d0f4d9e7, c1a38d787d, 7879dfd3fd, cd1c8b2f09, 7a9228ad75, 98ceb62202,
b99bb64487, 580affa5f3, d600b3498b, f0eaf84cb9, 257a171c0c, 3c5e818b49, c3c8395cef, 38c008e527, a3d8f7d113, 65a642097c,
dff9aa6f0c, ab145b5365, 5878fe3834, 715e29d7ba, 983337401c, 8251dec587, 52f994cf04, 3d4b5cfbfe, 33b057bfaf, 629fe771df,
5e6f175b5f, 6dc1a404d5, d1d4f47764, 3abce4259f, c78b952c3d, fff5e51426, a724aa5717, 10725136d8, 8f1793dd06, fd85be2c9e,
24959209cc, 29c9763feb, ca7151aeb1, 6e932794f7, 9e1dfcfb37, 5166b2c1a8, 80991f1521, b752a86589, c2acceba4e, f8ee52ca2a,
2b245bdf9e, d7e01a23f8, 4e20be9c66, 94e15a9985, 5061875fa9, d9782b2dd1, 530afba5cb, 4a90a7691b, 050f81e37c, aecaf665f1,
23a8938809, 641c773476, 97be5b4cfb, 76b7d0095d, 352ad7a38c, 401ea4e7a8, 710cd38591, 859d4bb98b, a3374ac51d, 168b1909ae,
a96ab41739, 0f5a56b3c2, c1d9e5156f, b0ec54b6f7, 2360bf0eb9, 1a1a61a672, 13c7380113, 3a48172332, 14e778b3de, 9164125177,
2595f89519, a058c40dfb, fc4b3d2771, 59cc9c9b68, d1a52d2b1a, 5f5fbb3ea4, 2d98071e55, 6a3d1d06b5, 6813cb86a2

.gitignore (vendored, 2 changed lines)

@@ -3,5 +3,5 @@
/build
/dist
/docs/_site
/docs/.git-gh-pages
/venv
fig.spec

.travis.yml (25 changed lines)

@@ -3,17 +3,22 @@ python:
- '2.6'
- '2.7'
env:
- DOCKER_VERSION=0.8.0
- DOCKER_VERSION=0.8.1
matrix:
  allow_failures:
    - python: '3.2'
    - python: '3.3'
install: script/travis-install
  global:
    - secure: exbot0LTV/0Wic6ElKCrOZmh2ZrieuGwEqfYKf5rVuwu1sLngYRihh+lBL/hTwc79NSu829pbwiWfsQZrXbk/yvaS7avGR0CLDoipyPxlYa2/rfs/o4OdTZqXv0LcFmmd54j5QBMpWU1S+CYOwNkwas57trrvIpPbzWjMtfYzOU=
install:
  - pip install .
  - pip install -r requirements.txt
  - pip install -r requirements-dev.txt
  - sudo curl -L -o /usr/local/bin/orchard https://github.com/orchardup/go-orchard/releases/download/2.0.5/linux
  - sudo chmod +x /usr/local/bin/orchard
before_script:
  - 'if [ "${TRAVIS_PULL_REQUEST}" = "false" ]; then orchard hosts rm -f $TRAVIS_JOB_ID || true; fi'
  - 'if [ "${TRAVIS_PULL_REQUEST}" = "false" ]; then orchard hosts create $TRAVIS_JOB_ID; fi'
script:
  - pwd
  - env
  - sekexe/run "`pwd`/script/travis $TRAVIS_PYTHON_VERSION"
  - nosetests tests/unit
  - 'if [ "${TRAVIS_PULL_REQUEST}" = "false" ]; then script/travis-integration; fi'
after_script:
  - 'if [ "${TRAVIS_PULL_REQUEST}" = "false" ]; then orchard hosts rm -f $TRAVIS_JOB_ID; fi'
deploy:
  provider: pypi
  user: orchard

CHANGES.md (60 changed lines)

@@ -1,6 +1,66 @@
Change log
==========

0.5.0 (2014-07-11)
------------------

- Fig now starts links when you run `fig run` or `fig up`.

  For example, if you have a `web` service which depends on a `db` service, `fig run web ...` will start the `db` service.

- Environment variables can now be resolved from the environment that Fig is running in. Just specify it as a blank variable in your `fig.yml` and, if set, it'll be resolved:

  ```
  environment:
    RACK_ENV: development
    SESSION_SECRET:
  ```

- `volumes_from` is now supported in `fig.yml`. All of the volumes from the specified services and containers will be mounted:

  ```
  volumes_from:
    - service_name
    - container_name
  ```

- The `net` and `workdir` options are now supported in `fig.yml`.
- The `hostname` option now works in the same way as the Docker CLI, splitting out into a `domainname` option.
- TTY behaviour is far more robust, and resizes are supported correctly.
- Load YAML files safely.

Thanks to @d11wtq, @ryanbrainard, @rail44, @j0hnsmith, @binarin, @Elemecca and @mozz100 for their help with this release!


0.4.2 (2014-06-18)
------------------

- Fix various encoding errors when using `fig run`, `fig up` and `fig build`.

0.4.1 (2014-05-08)
------------------

- Add support for Docker 0.11.0. (Thanks @marksteve!)
- Make project name configurable. (Thanks @jefmathiot!)
- Return correct exit code from `fig run`.

0.4.0 (2014-04-29)
------------------

- Support Docker 0.9 and 0.10
- Display progress bars correctly when pulling images (no more ski slopes)
- `fig up` now stops all services when any container exits
- Added support for the `privileged` config option in fig.yml (thanks @kvz!)
- Shortened and aligned log prefixes in `fig up` output
- Only containers started with `fig run` link back to their own service
- Handle UTF-8 correctly when streaming `fig build/run/up` output (thanks @mauvm and @shanejonas!)
- Error message improvements

0.3.2 (2014-03-05)
------------------

- Added an `--rm` option to `fig run`. (Thanks @marksteve!)
- Added an `expose` option to `fig.yml`.

0.3.1 (2014-03-04)
------------------

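The 0.5.0 entry above describes resolving blank environment variables from the environment Fig itself runs in. Below is a rough sketch of that behaviour; the function name and the choice to omit variables that are unset on the host are assumptions for illustration, not Fig's actual code.

```python
import os

def resolve_environment(declared):
    """Resolve a service's 'environment' mapping the way the changelog
    describes: keys with an explicit value are kept as-is, keys declared
    with no value (None) are filled in from the host environment if set."""
    resolved = {}
    for key, value in declared.items():
        if value is not None:
            resolved[key] = value            # explicit value wins
        elif key in os.environ:
            resolved[key] = os.environ[key]  # blank variable: copy from host
        # blank variables that are also unset on the host are omitted here
    return resolved

# Mirrors the fig.yml snippet above:
#   environment:
#     RACK_ENV: development
#     SESSION_SECRET:
os.environ.setdefault("SESSION_SECRET", "s3cr3t")  # pretend the host sets it
print(resolve_environment({"RACK_ENV": "development", "SESSION_SECRET": None}))
# -> {'RACK_ENV': 'development', 'SESSION_SECRET': 's3cr3t'}
```
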
CONTRIBUTING.md (new file, 75 lines)

@@ -0,0 +1,75 @@
# Contributing to Fig

## Development environment

If you're looking to contribute to [Fig](http://orchardup.github.io/fig/)
but you're new to the project or maybe even to Python, here are the steps
that should get you started.

1. Fork [https://github.com/orchardup/fig](https://github.com/orchardup/fig) to your username (kvz in this example).
1. Clone your forked repository locally: `git clone git@github.com:kvz/fig.git`.
1. Enter the local directory: `cd fig`.
1. Set up a development environment: `python setup.py develop`. That will install the dependencies and set up a symlink from your `fig` executable to the checkout of the repo. So from any of your fig projects, `fig` now refers to your development project. Time to start hacking :)
1. Works for you? Run the test suite via `./script/test` to verify it won't break other use cases.
1. All good? Commit and push to GitHub, and submit a pull request.

## Running the test suite

    $ script/test

## Building binaries

Linux:

    $ script/build-linux

OS X:

    $ script/build-osx

Note that this only works on Mountain Lion, not Mavericks, due to a [bug in PyInstaller](http://www.pyinstaller.org/ticket/807).

## Sign your work

The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below (from [developercertificate.org](http://developercertificate.org/)):

    Developer's Certificate of Origin 1.1

    By making a contribution to this project, I certify that:

    (a) The contribution was created in whole or in part by me and I
        have the right to submit it under the open source license
        indicated in the file; or

    (b) The contribution is based upon previous work that, to the best
        of my knowledge, is covered under an appropriate open source
        license and I have the right under that license to submit that
        work with modifications, whether created in whole or in part
        by me, under the same open source license (unless I am
        permitted to submit under a different license), as indicated
        in the file; or

    (c) The contribution was provided directly to me by some other
        person who certified (a), (b) or (c) and I have not modified
        it.

    (d) I understand and agree that this project and the contribution
        are public and that a record of the contribution (including all
        personal information I submit with it, including my sign-off) is
        maintained indefinitely and may be redistributed consistent with
        this project or the open source license(s) involved.

then you just add a line saying

    Signed-off-by: Random J Developer <random@developer.example.org>

using your real name (sorry, no pseudonyms or anonymous contributions.)

The easiest way to do this is to use the `--signoff` flag when committing. E.g.:

    $ git commit --signoff

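The sign-off described above is just a trailer line in the commit message, so it can be checked mechanically. The snippet below is an illustrative check, not part of Fig's tooling, that looks for a well-formed `Signed-off-by:` line of the kind `git commit --signoff` produces.

```python
import re

SIGNOFF_RE = re.compile(r"^Signed-off-by: .+ <[^<>@\s]+@[^<>@\s]+>$")

def has_signoff(commit_message):
    """Return True if any line of the message is a well-formed
    Signed-off-by trailer, e.g. the one added by `git commit --signoff`."""
    lines = [line.strip() for line in commit_message.strip().splitlines()]
    return any(SIGNOFF_RE.match(line) for line in lines)

msg = """Add --no-recreate flag to fig up

Signed-off-by: Random J Developer <random@developer.example.org>"""
print(has_signoff(msg))        # True
print(has_signoff("Fix typo")) # False
```
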
@@ -5,6 +5,7 @@ RUN pip install -r requirements.txt
ADD requirements-dev.txt /code/
RUN pip install -r requirements-dev.txt
ADD . /code/
RUN python setup.py develop
RUN useradd -d /home/user -m -s /bin/bash user
RUN chown -R user /code/
USER user

215
LICENSE
215
LICENSE
@@ -1,24 +1,191 @@
|
||||
Copyright (c) 2013, Orchard Laboratories Ltd.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
* The names of its contributors may not be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2014 Orchard Laboratories Ltd.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
README.md (30 changed lines)

@@ -1,7 +1,7 @@
Fig
===

[](https://travis-ci.org/orchardup/fig)
[](https://travis-ci.org/orchardup/fig)
[](http://badge.fury.io/py/fig)

Fast, isolated development environments using Docker.

@@ -32,7 +32,7 @@ db:

Then type `fig up`, and Fig will start and run your entire app:

There are commands to:

@@ -47,29 +47,3 @@ Installation and documentation
------------------------------

Full documentation is available on [Fig's website](http://orchardup.github.io/fig/).

Running the test suite
----------------------

    $ script/test

Building OS X binaries
----------------------

    $ script/build-osx

Note that this only works on Mountain Lion, not Mavericks, due to a [bug in PyInstaller](http://www.pyinstaller.org/ticket/807).

Contributing to Fig
-------------------

If you're looking contribute to [Fig](http://orchardup.github.io/fig/)
but you're new to the project or maybe even to Python, here are the steps
that should get you started.

1. Fork [https://github.com/orchardup/fig](https://github.com/orchardup/fig) to your username. kvz in this example.
1. Clone your forked repository locally `git clone git@github.com:kvz/fig.git`.
1. Enter the local directory `cd fig`.
1. Set up a development environment `python setup.py develop`. That will install the dependencies and set up a symlink from your `fig` executable to the checkout of the repo. So from any of your fig projects, `fig` now refers to your development project. Time to start hacking : )
1. Works for you? Run the test suite via `./scripts/test` to verify it won't break other usecases.
1. All good? Commit and push to GitHub, and submit a pull request.

@@ -1,4 +1,4 @@
FROM stackbrew/ubuntu:13.10
FROM ubuntu:13.10
RUN apt-get -qq update && apt-get install -y ruby1.8 bundler python
RUN locale-gen en_US.UTF-8
ADD Gemfile /code/

@@ -1 +1,3 @@
markdown: redcarpet
encoding: utf-8

@@ -44,10 +44,12 @@
</ul>
<ul class="nav">
<li><a href="https://github.com/orchardup/fig">Fig on GitHub</a></li>
<li><a href="https://twitter.com/orchardup">Follow us on Twitter</a></li>
<li><a href="http://webchat.freenode.net/?channels=%23orchardup&uio=d4">#orchardup on Freenode</a></li>
</ul>

<p>Fig is a project from <a href="https://www.orchardup.com">Orchard</a>, a Docker hosting service.</p>
<p><a href="https://twitter.com/orchardup">Follow us on Twitter</a> to keep up to date with Fig and other Docker news.</p>

<div class="badges">
<iframe src="http://ghbtns.com/github-btn.html?user=orchardup&repo=fig&type=watch&count=true" allowtransparency="true" frameborder="0" scrolling="0" width="100" height="20"></iframe>
<a href="https://twitter.com/share" class="twitter-share-button" data-url="http://orchardup.github.io/fig/">Tweet</a>

docs/cli.md (10 changed lines)

@@ -45,7 +45,7 @@ For example:

    $ fig run web python manage.py shell

Note that this will not start any services that the command's service links to. So if, for example, your one-off command talks to your database, you will need to run `fig up -d db` first.
By default, linked services will be started, unless they are already running.

One-off commands are started in new containers with the same config as a normal container for that service, so volumes, links, etc will all be created as expected. The only thing different to a normal container is the command will be overridden with the one specified and no ports will be created in case they collide.

@@ -53,6 +53,10 @@ Links are also created between one-off commands and the other containers for tha

    $ fig run db /bin/sh -c "psql -h \$DB_1_PORT_5432_TCP_ADDR -U docker"

If you do not want linked containers to be started when running the one-off command, specify the `--no-deps` flag:

    $ fig run --no-deps web python manage.py shell

## scale

Set number of containers to run for a service.

@@ -74,8 +78,10 @@ Stop running containers without removing them. They can be started again with `f

Build, (re)create, start and attach to containers for a service.

Linked services will be started, unless they are already running.

By default, `fig up` will aggregate the output of each container, and when it exits, all containers will be stopped. If you run `fig up -d`, it'll start the containers in the background and leave them running.

If there are existing containers for a service, `fig up` will stop and recreate them (preserving mounted volumes with [volumes-from]), so that changes in `fig.yml` are picked up.
By default if there are existing containers for a service, `fig up` will stop and recreate them (preserving mounted volumes with [volumes-from]), so that changes in `fig.yml` are picked up. If you do not want containers to be stopped and recreated, use `fig up --no-recreate`. This will still start any stopped containers, if needed.

[volumes-from]: http://docs.docker.io/en/latest/use/working_with_volumes/

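The updated wording above distinguishes three cases when `fig up` reaches a service: no containers yet, existing containers with recreation allowed, and existing containers with `--no-recreate`. The sketch below restates that documented behaviour as code; the function and its return values are invented for illustration and are not the Fig implementation.

```python
def plan_up(existing_containers, recreate=True):
    """Return the actions `fig up` is documented to take for one service.

    existing_containers: list of (name, is_running) tuples for the service.
    recreate: False when the user passed --no-recreate.
    """
    if not existing_containers:
        return ["create and start a new container"]

    if recreate:
        # Default: stop and recreate so fig.yml changes are picked up,
        # preserving data by mounting volumes from the old container.
        return ["stop %s, recreate it with volumes-from the old one" % name
                for name, _ in existing_containers]

    # --no-recreate: keep existing containers, but start any stopped ones.
    return ["start %s" % name
            for name, running in existing_containers if not running]

print(plan_up([]))
print(plan_up([("figtest_web_1", False)]))
print(plan_up([("figtest_web_1", False), ("figtest_web_2", True)], recreate=False))
```
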
@@ -58,7 +58,7 @@ img {
|
||||
|
||||
.logo {
|
||||
font-family: 'Lilita One', sans-serif;
|
||||
font-size: 80px;
|
||||
font-size: 64px;
|
||||
margin: 20px 0 40px 0;
|
||||
}
|
||||
|
||||
@@ -68,8 +68,8 @@ img {
|
||||
}
|
||||
|
||||
.logo img {
|
||||
width: 80px;
|
||||
vertical-align: -17px;
|
||||
width: 60px;
|
||||
vertical-align: -8px;
|
||||
}
|
||||
|
||||
.mobile-logo {
|
||||
@@ -77,13 +77,18 @@ img {
|
||||
}
|
||||
|
||||
.sidebar {
|
||||
font-size: 16px;
|
||||
font-size: 15px;
|
||||
color: #777;
|
||||
}
|
||||
|
||||
.sidebar a {
|
||||
color: #a41211;
|
||||
}
|
||||
|
||||
.sidebar p {
|
||||
margin: 10px 0;
|
||||
}
|
||||
|
||||
@media (max-width: 767px) {
|
||||
.sidebar {
|
||||
text-align: center;
|
||||
@@ -101,7 +106,8 @@ img {
|
||||
}
|
||||
|
||||
.logo {
|
||||
margin-top: 40px;
|
||||
margin-top: 30px;
|
||||
margin-bottom: 30px;
|
||||
}
|
||||
|
||||
.content h1 {
|
||||
@@ -116,6 +122,7 @@ img {
|
||||
width: 280px;
|
||||
overflow-y: auto;
|
||||
padding-left: 40px;
|
||||
padding-right: 10px;
|
||||
border-right: 1px solid #ccc;
|
||||
}
|
||||
|
||||
@@ -126,12 +133,12 @@ img {
|
||||
}
|
||||
|
||||
.nav {
|
||||
margin: 20px 0;
|
||||
margin: 15px 0;
|
||||
}
|
||||
|
||||
.nav li a {
|
||||
display: block;
|
||||
padding: 8px 0;
|
||||
padding: 5px 0;
|
||||
line-height: 1.2;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ Let's use Fig to set up and run a Django/PostgreSQL app. Before starting, you'll
|
||||
Let's set up the three files that'll get us started. First, our app is going to be running inside a Docker container which contains all of its dependencies. We can define what goes inside that Docker container using a file called `Dockerfile`. It'll contain this to start with:
|
||||
|
||||
FROM orchardup/python:2.7
|
||||
ENV PYTHONUNBUFFERED 1
|
||||
RUN apt-get update -qq && apt-get install -y python-psycopg2
|
||||
RUN mkdir /code
|
||||
WORKDIR /code
|
||||
@@ -18,7 +19,7 @@ Let's set up the three files that'll get us started. First, our app is going to
|
||||
RUN pip install -r requirements.txt
|
||||
ADD . /code/
|
||||
|
||||
That'll install our application inside an image with Python installed alongside all of our Python dependencies. For more information on how to write Dockerfiles, see the [Dockerfile tutorial](https://www.docker.io/learn/dockerfile/) and the [Dockerfile reference](http://docs.docker.io/en/latest/reference/builder/).
|
||||
That'll install our application inside an image with Python installed alongside all of our Python dependencies. For more information on how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/).
|
||||
|
||||
Second, we define our Python dependencies in a file called `requirements.txt`:
|
||||
|
||||
@@ -28,8 +29,6 @@ Simple enough. Finally, this is all tied together with a file called `fig.yml`.
|
||||
|
||||
db:
|
||||
image: orchardup/postgresql
|
||||
ports:
|
||||
- "5432"
|
||||
web:
|
||||
build: .
|
||||
command: python manage.py runserver 0.0.0.0:8000
|
||||
@@ -40,7 +39,7 @@ Simple enough. Finally, this is all tied together with a file called `fig.yml`.
|
||||
links:
|
||||
- db
|
||||
|
||||
See the [`fig.yml` reference]() for more information on how it works.
|
||||
See the [`fig.yml` reference](http://orchardup.github.io/fig/yml.html) for more information on how it works.
|
||||
|
||||
We can now start a Django project using `fig run`:
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ db:
|
||||
|
||||
Then type `fig up`, and Fig will start and run your entire app:
|
||||
|
||||

|
||||

|
||||
|
||||
There are commands to:
|
||||
|
||||
@@ -39,27 +39,13 @@ There are commands to:
|
||||
- tail running services' log output
|
||||
- run a one-off command on a service
|
||||
|
||||
Fig is a project from [Orchard](https://orchardup.com), a Docker hosting service. [Follow us on Twitter](https://twitter.com/orchardup) to keep up to date with Fig and other Docker news.
|
||||
|
||||
|
||||
Quick start
|
||||
-----------
|
||||
|
||||
Let's get a basic Python web app running on Fig. It assumes a little knowledge of Python, but the concepts should be clear if you're not familiar with it.
|
||||
|
||||
First, install Docker. If you're on OS X, you can use [docker-osx](https://github.com/noplay/docker-osx):
|
||||
|
||||
$ curl https://raw.github.com/noplay/docker-osx/0.7.6/docker-osx > /usr/local/bin/docker-osx
|
||||
$ chmod +x /usr/local/bin/docker-osx
|
||||
$ docker-osx shell
|
||||
|
||||
Docker has guides for [Ubuntu](http://docs.docker.io/en/latest/installation/ubuntulinux/) and [other platforms](http://docs.docker.io/en/latest/installation/) in their documentation.
|
||||
|
||||
Next, install Fig:
|
||||
|
||||
$ sudo pip install -U fig
|
||||
|
||||
(This command also upgrades Fig when we release a new version. If you don’t have pip installed, try `brew install python` or `apt-get install python-pip`.)
|
||||
First, [install Docker and Fig](install.html).
|
||||
|
||||
You'll want to make a directory for the project:
|
||||
|
||||
@@ -99,7 +85,7 @@ Next, we want to create a Docker image containing all of our app's dependencies.
|
||||
WORKDIR /code
|
||||
RUN pip install -r requirements.txt
|
||||
|
||||
This tells Docker to install Python, our code and our Python dependencies inside a Docker image. For more information on how to write Dockerfiles, see the [Dockerfile tutorial](https://www.docker.io/learn/dockerfile/) and the [Dockerfile reference](http://docs.docker.io/en/latest/reference/builder/).
|
||||
This tells Docker to install Python, our code and our Python dependencies inside a Docker image. For more information on how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/).
|
||||
|
||||
We then define a set of services using `fig.yml`:
|
||||
|
||||
@@ -127,8 +113,8 @@ Now if we run `fig up`, it'll pull a Redis image, build an image for our own cod
|
||||
Building web...
|
||||
Starting figtest_redis_1...
|
||||
Starting figtest_web_1...
|
||||
figtest_redis_1 | [8] 02 Jan 18:43:35.576 # Server started, Redis version 2.8.3
|
||||
figtest_web_1 | * Running on http://0.0.0.0:5000/
|
||||
redis_1 | [8] 02 Jan 18:43:35.576 # Server started, Redis version 2.8.3
|
||||
web_1 | * Running on http://0.0.0.0:5000/
|
||||
|
||||
Open up [http://localhost:5000](http://localhost:5000) in your browser (or [http://localdocker:5000](http://localdocker:5000) if you're using [docker-osx](https://github.com/noplay/docker-osx)) and you should see it running!
|
||||
|
||||
|
||||
@@ -6,9 +6,9 @@ title: Installing Fig
Installing Fig
==============

First, install Docker (version 0.8 or higher). If you're on OS X, you can use [docker-osx](https://github.com/noplay/docker-osx):
First, install Docker version 1.0.0. If you're on OS X, you can use [docker-osx](https://github.com/noplay/docker-osx):

    $ curl https://raw.github.com/noplay/docker-osx/0.8.0/docker-osx > /usr/local/bin/docker-osx
    $ curl https://raw.githubusercontent.com/noplay/docker-osx/1.0.0/docker-osx > /usr/local/bin/docker-osx
    $ chmod +x /usr/local/bin/docker-osx
    $ docker-osx shell

@@ -16,12 +16,12 @@ Docker has guides for [Ubuntu](http://docs.docker.io/en/latest/installation/ubun

Next, install Fig. On OS X:

    $ curl -L https://github.com/orchardup/fig/releases/download/0.3.0/darwin > /usr/local/bin/fig
    $ curl -L https://github.com/orchardup/fig/releases/download/0.4.2/darwin > /usr/local/bin/fig
    $ chmod +x /usr/local/bin/fig

On 64-bit Linux:

    $ curl -L https://github.com/orchardup/fig/releases/download/0.3.0/linux > /usr/local/bin/fig
    $ curl -L https://github.com/orchardup/fig/releases/download/0.4.2/linux > /usr/local/bin/fig
    $ chmod +x /usr/local/bin/fig

Fig is also available as a Python package if you're on another platform (or if you prefer that sort of thing):

@@ -18,7 +18,7 @@ Let's set up the three files that'll get us started. First, our app is going to
    RUN bundle install
    ADD . /myapp

That'll put our application code inside an image with Ruby, Bundler and all our dependencies. For more information on how to write Dockerfiles, see the [Dockerfile tutorial](https://www.docker.io/learn/dockerfile/) and the [Dockerfile reference](http://docs.docker.io/en/latest/reference/builder/).
That'll put our application code inside an image with Ruby, Bundler and all our dependencies. For more information on how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/).

Next, we have a bootstrap `Gemfile` which just loads Rails. It'll be overwritten in a moment by `rails new`.

@@ -17,7 +17,7 @@ FROM orchardup/php5
ADD . /code
```

This instructs Docker on how to build an image that contains PHP and Wordpress. For more information on how to write Dockerfiles, see the [Dockerfile tutorial](https://www.docker.io/learn/dockerfile/) and the [Dockerfile reference](http://docs.docker.io/en/latest/reference/builder/).
This instructs Docker on how to build an image that contains PHP and Wordpress. For more information on how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/).

Next up, `fig.yml` starts our web service and a separate MySQL instance:

@@ -33,8 +33,6 @@ web:
    - .:/code
db:
  image: orchardup/mysql
  ports:
    - "3306:3306"
  environment:
    MYSQL_DATABASE: wordpress
```

docs/yml.md (16 changed lines)

@@ -44,12 +44,28 @@ ports:
 - "8000:8000"
 - "49100:22"

-- Expose ports without publishing them to the host machine - they'll only be
-- accessible to linked services. Only the internal port can be specified.
expose:
 - "3000"
 - "8000"

-- Map volumes from the host machine (HOST:CONTAINER).
volumes:
 - cache/:/tmp/cache

-- Mount all of the volumes from another service or container
volumes_from:
 - service_name
 - container_name

-- Add environment variables.
-- Environment variables with only a key are resolved to values on the host
-- machine, which can be helpful for secret or host-specific values.
environment:
  RACK_ENV: development
  SESSION_SECRET:
```

-- Networking mode. Use the same values as the docker client --net parameter
net: "host"

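The reference above lists the option names a service entry may carry. As a hedged sketch, here is how such a file could be read with PyYAML's `safe_load` (the safe loader the changelog says Fig switched to) and sanity-checked against those names; the key list and the error handling are illustrative, not Fig's actual validation.

```python
import yaml

# Option names taken from the reference above and from the 0.5.0 changelog.
KNOWN_KEYS = {
    "image", "build", "command", "links", "ports", "expose",
    "volumes", "volumes_from", "environment", "net", "workdir",
    "hostname", "domainname", "privileged",
}

def load_fig_yml(path):
    """Parse fig.yml with the safe YAML loader and do a rough sanity check
    of each service's options against the documented names."""
    with open(path) as f:
        config = yaml.safe_load(f)
    for service_name, options in (config or {}).items():
        unknown = set(options) - KNOWN_KEYS
        if unknown:
            raise ValueError("service %r has unsupported options: %s"
                             % (service_name, ", ".join(sorted(unknown))))
    return config

# Example:
#   services = load_fig_yml("fig.yml")
#   print(services["web"]["links"])
```
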
@@ -1,4 +1,4 @@
from __future__ import unicode_literals
from .service import Service

__version__ = '0.3.1'
__version__ = '0.5.0'

@@ -24,6 +24,7 @@ class Command(DocoptCommand):

    def __init__(self):
        self.yaml_path = os.environ.get('FIG_FILE', None)
        self.explicit_project_name = None

    def dispatch(self, *args, **kwargs):
        try:

@@ -44,6 +45,8 @@ class Command(DocoptCommand):
    def perform_command(self, options, *args, **kwargs):
        if options['--file'] is not None:
            self.yaml_path = os.path.join(self.base_dir, options['--file'])
        if options['--project-name'] is not None:
            self.explicit_project_name = options['--project-name']
        return super(Command, self).perform_command(options, *args, **kwargs)

    @cached_property

@@ -56,7 +59,7 @@ class Command(DocoptCommand):
            yaml_path = self.yaml_path
            if yaml_path is None:
                yaml_path = self.check_yaml_filename()
            config = yaml.load(open(yaml_path))
            config = yaml.safe_load(open(yaml_path))
        except IOError as e:
            if e.errno == errno.ENOENT:
                raise errors.FigFileNotFound(os.path.basename(e.filename))

@@ -70,6 +73,8 @@ class Command(DocoptCommand):
    @cached_property
    def project_name(self):
        project = os.path.basename(os.getcwd())
        if self.explicit_project_name is not None:
            project = self.explicit_project_name
        project = re.sub(r'[^a-zA-Z0-9]', '', project)
        if not project:
            project = 'default'

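The last hunk above shows how the project name is derived: an explicit `--project-name` wins over the working directory's basename, everything but letters and digits is stripped, and `default` is the fallback. Restated as a standalone sketch (a paraphrase of those lines, not a drop-in copy of the module):

```python
import os
import re

def project_name(explicit_name=None, cwd=None):
    """Derive the fig project name: prefer an explicit name, fall back to
    the working directory's basename, strip non-alphanumerics, and use
    'default' if nothing is left."""
    project = os.path.basename(cwd or os.getcwd())
    if explicit_name is not None:
        project = explicit_name
    project = re.sub(r'[^a-zA-Z0-9]', '', project)
    return project or 'default'

print(project_name(cwd="/home/user/my-web.app"))  # 'mywebapp'
print(project_name(explicit_name="Demo_1"))       # 'Demo1'
print(project_name(cwd="/tmp/___"))               # 'default'
```
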
@@ -4,21 +4,36 @@ import sys
|
||||
|
||||
from itertools import cycle
|
||||
|
||||
from .multiplexer import Multiplexer
|
||||
from .multiplexer import Multiplexer, STOP
|
||||
from . import colors
|
||||
from .utils import split_buffer
|
||||
|
||||
|
||||
class LogPrinter(object):
|
||||
def __init__(self, containers, attach_params=None):
|
||||
def __init__(self, containers, attach_params=None, output=sys.stdout):
|
||||
self.containers = containers
|
||||
self.attach_params = attach_params or {}
|
||||
self.prefix_width = self._calculate_prefix_width(containers)
|
||||
self.generators = self._make_log_generators()
|
||||
self.output = output
|
||||
|
||||
def run(self):
|
||||
mux = Multiplexer(self.generators)
|
||||
for line in mux.loop():
|
||||
sys.stdout.write(line)
|
||||
self.output.write(line)
|
||||
|
||||
def _calculate_prefix_width(self, containers):
|
||||
"""
|
||||
Calculate the maximum width of container names so we can make the log
|
||||
prefixes line up like so:
|
||||
|
||||
db_1 | Listening
|
||||
web_1 | Listening
|
||||
"""
|
||||
prefix_width = 0
|
||||
for container in containers:
|
||||
prefix_width = max(prefix_width, len(container.name_without_project))
|
||||
return prefix_width
|
||||
|
||||
def _make_log_generators(self):
|
||||
color_fns = cycle(colors.rainbow())
|
||||
@@ -31,10 +46,24 @@ class LogPrinter(object):
|
||||
return generators
|
||||
|
||||
def _make_log_generator(self, container, color_fn):
|
||||
prefix = color_fn(container.name + " | ")
|
||||
prefix = color_fn(self._generate_prefix(container)).encode('utf-8')
|
||||
# Attach to container before log printer starts running
|
||||
line_generator = split_buffer(self._attach(container), '\n')
|
||||
return (prefix + line.decode('utf-8') for line in line_generator)
|
||||
|
||||
for line in line_generator:
|
||||
yield prefix + line
|
||||
|
||||
exit_code = container.wait()
|
||||
yield color_fn("%s exited with code %s\n" % (container.name, exit_code))
|
||||
yield STOP
|
||||
|
||||
def _generate_prefix(self, container):
|
||||
"""
|
||||
Generate the prefix for a log line without colour
|
||||
"""
|
||||
name = container.name_without_project
|
||||
padding = ' ' * (self.prefix_width - len(name))
|
||||
return ''.join([name, padding, ' | '])
|
||||
|
||||
def _attach(self, container):
|
||||
params = {
|
||||
|
||||
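The docstring in the hunk above explains that log prefixes are padded to the width of the longest container name so the `|` separators line up. A tiny standalone illustration of that padding rule, with made-up container names rather than the `LogPrinter` class itself:

```python
def aligned_prefixes(names):
    """Pad each container name to the width of the longest one, then append
    the ' | ' separator, so log lines from different containers line up."""
    width = max(len(name) for name in names)
    return {name: name.ljust(width) + ' | ' for name in names}

prefixes = aligned_prefixes(["db_1", "web_1", "worker_1"])
for name, line in [("db_1", "Listening"), ("worker_1", "Booted")]:
    print(prefixes[name] + line)
# db_1     | Listening
# worker_1 | Booted
```
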
101
fig/cli/main.py
101
fig/cli/main.py
@@ -6,19 +6,19 @@ import re
|
||||
import signal
|
||||
|
||||
from inspect import getdoc
|
||||
import dockerpty
|
||||
|
||||
from .. import __version__
|
||||
from ..project import NoSuchService, ConfigurationError
|
||||
from ..service import CannotBeScaledError
|
||||
from ..service import BuildError, CannotBeScaledError
|
||||
from .command import Command
|
||||
from .formatter import Formatter
|
||||
from .log_printer import LogPrinter
|
||||
from .utils import yesno
|
||||
|
||||
from ..packages.docker.client import APIError
|
||||
from ..packages.docker.errors import APIError
|
||||
from .errors import UserError
|
||||
from .docopt_command import NoSuchCommand
|
||||
from .socketclient import SocketClient
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@@ -51,6 +51,9 @@ def main():
|
||||
except APIError as e:
|
||||
log.error(e.explanation)
|
||||
sys.exit(1)
|
||||
except BuildError as e:
|
||||
log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# stolen from docopt master
|
||||
@@ -68,9 +71,10 @@ class TopLevelCommand(Command):
|
||||
fig -h|--help
|
||||
|
||||
Options:
|
||||
--verbose Show more output
|
||||
--version Print version and exit
|
||||
-f, --file FILE Specify an alternate fig file (default: fig.yml)
|
||||
--verbose Show more output
|
||||
--version Print version and exit
|
||||
-f, --file FILE Specify an alternate fig file (default: fig.yml)
|
||||
-p, --project-name NAME Specify an alternate project name (default: directory name)
|
||||
|
||||
Commands:
|
||||
build Build or rebuild services
|
||||
@@ -198,20 +202,33 @@ class TopLevelCommand(Command):
|
||||
|
||||
$ fig run web python manage.py shell
|
||||
|
||||
Note that this will not start any services that the command's service
|
||||
links to. So if, for example, your one-off command talks to your
|
||||
database, you will need to run `fig up -d db` first.
|
||||
By default, linked services will be started, unless they are already
|
||||
running. If you do not want to start linked services, use
|
||||
`fig run --no-deps SERVICE COMMAND [ARGS...]`.
|
||||
|
||||
Usage: run [options] SERVICE COMMAND [ARGS...]
|
||||
|
||||
Options:
|
||||
-d Detached mode: Run container in the background, print new
|
||||
container name
|
||||
-T Disable pseudo-tty allocation. By default `fig run`
|
||||
allocates a TTY.
|
||||
-d Detached mode: Run container in the background, print
|
||||
new container name.
|
||||
-T Disable pseudo-tty allocation. By default `fig run`
|
||||
allocates a TTY.
|
||||
--rm Remove container after run. Ignored in detached mode.
|
||||
--no-deps Don't start linked services.
|
||||
"""
|
||||
|
||||
service = self.project.get_service(options['SERVICE'])
|
||||
|
||||
if not options['--no-deps']:
|
||||
deps = service.get_linked_names()
|
||||
|
||||
if len(deps) > 0:
|
||||
self.project.up(
|
||||
service_names=deps,
|
||||
start_links=True,
|
||||
recreate=False,
|
||||
)
|
||||
|
||||
tty = True
|
||||
if options['-d'] or options['-T'] or not sys.stdin.isatty():
|
||||
tty = False
|
||||
@@ -223,12 +240,16 @@ class TopLevelCommand(Command):
|
||||
}
|
||||
container = service.create_container(one_off=True, **container_options)
|
||||
if options['-d']:
|
||||
service.start_container(container, ports=None)
|
||||
service.start_container(container, ports=None, one_off=True)
|
||||
print(container.name)
|
||||
else:
|
||||
with self._attach_to_container(container.id, raw=tty) as c:
|
||||
service.start_container(container, ports=None)
|
||||
c.run()
|
||||
service.start_container(container, ports=None, one_off=True)
|
||||
dockerpty.start(self.client, container.id)
|
||||
exit_code = container.wait()
|
||||
if options['--rm']:
|
||||
log.info("Removing %s..." % container.name)
|
||||
self.client.remove_container(container.id)
|
||||
sys.exit(exit_code)
|
||||
|
||||
def scale(self, options):
|
||||
"""
|
||||
@@ -283,52 +304,46 @@ class TopLevelCommand(Command):
|
||||
|
||||
If there are existing containers for a service, `fig up` will stop
|
||||
and recreate them (preserving mounted volumes with volumes-from),
|
||||
so that changes in `fig.yml` are picked up.
|
||||
so that changes in `fig.yml` are picked up. If you do not want existing
|
||||
containers to be recreated, `fig up --no-recreate` will re-use existing
|
||||
containers.
|
||||
|
||||
Usage: up [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-d Detached mode: Run containers in the background, print new
|
||||
container names
|
||||
-d Detached mode: Run containers in the background,
|
||||
print new container names.
|
||||
--no-deps Don't start linked services.
|
||||
--no-recreate If containers already exist, don't recreate them.
|
||||
"""
|
||||
detached = options['-d']
|
||||
|
||||
(old, new) = self.project.recreate_containers(service_names=options['SERVICE'])
|
||||
start_links = not options['--no-deps']
|
||||
recreate = not options['--no-recreate']
|
||||
service_names = options['SERVICE']
|
||||
|
||||
self.project.up(
|
||||
service_names=service_names,
|
||||
start_links=start_links,
|
||||
recreate=recreate
|
||||
)
|
||||
|
||||
to_attach = [c for s in self.project.get_services(service_names) for c in s.containers()]
|
||||
|
||||
if not detached:
|
||||
to_attach = [c for (s, c) in new]
|
||||
print("Attaching to", list_containers(to_attach))
|
||||
log_printer = LogPrinter(to_attach, attach_params={"logs": True})
|
||||
|
||||
for (service, container) in new:
|
||||
service.start_container(container)
|
||||
|
||||
for (service, container) in old:
|
||||
container.remove()
|
||||
|
||||
if not detached:
|
||||
try:
|
||||
log_printer.run()
|
||||
finally:
|
||||
def handler(signal, frame):
|
||||
self.project.kill(service_names=options['SERVICE'])
|
||||
self.project.kill(service_names=service_names)
|
||||
sys.exit(0)
|
||||
signal.signal(signal.SIGINT, handler)
|
||||
|
||||
print("Gracefully stopping... (press Ctrl+C again to force)")
|
||||
self.project.stop(service_names=options['SERVICE'])
|
||||
|
||||
def _attach_to_container(self, container_id, raw=False):
|
||||
socket_in = self.client.attach_socket(container_id, params={'stdin': 1, 'stream': 1})
|
||||
socket_out = self.client.attach_socket(container_id, params={'stdout': 1, 'logs': 1, 'stream': 1})
|
||||
socket_err = self.client.attach_socket(container_id, params={'stderr': 1, 'logs': 1, 'stream': 1})
|
||||
|
||||
return SocketClient(
|
||||
socket_in=socket_in,
|
||||
socket_out=socket_out,
|
||||
socket_err=socket_err,
|
||||
raw=raw,
|
||||
)
|
||||
self.project.stop(service_names=service_names)
|
||||
|
||||
def list_containers(containers):
|
||||
return ", ".join(c.name for c in containers)
|
||||
|
||||
@@ -7,6 +7,11 @@ except ImportError:
    from queue import Queue, Empty  # Python 3.x


# Yield STOP from an input generator to stop the
# top-level loop without processing any more input.
STOP = object()


class Multiplexer(object):
    def __init__(self, generators):
        self.generators = generators

@@ -17,7 +22,11 @@ class Multiplexer(object):

        while True:
            try:
                yield self.queue.get(timeout=0.1)
                item = self.queue.get(timeout=0.1)
                if item is STOP:
                    break
                else:
                    yield item
            except Empty:
                pass

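The hunk above introduces a `STOP` sentinel so that a finished log generator can shut the whole loop down. Below is a minimal consumer-side sketch of the same queue-plus-sentinel pattern (illustrative; it is not the `Multiplexer` class itself):

```python
from __future__ import print_function
try:
    from Queue import Queue, Empty   # Python 2.x
except ImportError:
    from queue import Queue, Empty   # Python 3.x

STOP = object()  # sentinel: pushing this ends the consumer loop

def consume(queue):
    """Yield items from the queue until the STOP sentinel shows up,
    polling with a short timeout like the loop() in the hunk above."""
    while True:
        try:
            item = queue.get(timeout=0.1)
        except Empty:
            continue
        if item is STOP:
            break
        yield item

q = Queue()
for line in ["web_1 | Listening\n", "db_1  | ready\n",
             "web_1 | exited with code 0\n", STOP]:
    q.put(line)

for line in consume(q):
    print(line, end="")
```
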
@@ -1,126 +0,0 @@
|
||||
from __future__ import print_function
|
||||
# Adapted from https://github.com/benthor/remotty/blob/master/socketclient.py
|
||||
|
||||
import sys
|
||||
import tty
|
||||
import fcntl
|
||||
import os
|
||||
import termios
|
||||
import threading
|
||||
import errno
|
||||
|
||||
import logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SocketClient:
|
||||
def __init__(self,
|
||||
socket_in=None,
|
||||
socket_out=None,
|
||||
socket_err=None,
|
||||
raw=True,
|
||||
):
|
||||
self.socket_in = socket_in
|
||||
self.socket_out = socket_out
|
||||
self.socket_err = socket_err
|
||||
self.raw = raw
|
||||
|
||||
self.stdin_fileno = sys.stdin.fileno()
|
||||
|
||||
def __enter__(self):
|
||||
self.create()
|
||||
return self
|
||||
|
||||
def __exit__(self, type, value, trace):
|
||||
self.destroy()
|
||||
|
||||
def create(self):
|
||||
if os.isatty(sys.stdin.fileno()):
|
||||
self.settings = termios.tcgetattr(sys.stdin.fileno())
|
||||
else:
|
||||
self.settings = None
|
||||
|
||||
if self.socket_in is not None:
|
||||
self.set_blocking(sys.stdin, False)
|
||||
self.set_blocking(sys.stdout, True)
|
||||
self.set_blocking(sys.stderr, True)
|
||||
|
||||
if self.raw:
|
||||
tty.setraw(sys.stdin.fileno())
|
||||
|
||||
def set_blocking(self, file, blocking):
|
||||
fd = file.fileno()
|
||||
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
|
||||
flags = (flags & ~os.O_NONBLOCK) if blocking else (flags | os.O_NONBLOCK)
|
||||
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
|
||||
|
||||
def run(self):
|
||||
if self.socket_in is not None:
|
||||
self.start_background_thread(target=self.send, args=(self.socket_in, sys.stdin))
|
||||
|
||||
recv_threads = []
|
||||
|
||||
if self.socket_out is not None:
|
||||
recv_threads.append(self.start_background_thread(target=self.recv, args=(self.socket_out, sys.stdout)))
|
||||
|
||||
if self.socket_err is not None:
|
||||
recv_threads.append(self.start_background_thread(target=self.recv, args=(self.socket_err, sys.stderr)))
|
||||
|
||||
for t in recv_threads:
|
||||
t.join()
|
||||
|
||||
def start_background_thread(self, **kwargs):
|
||||
thread = threading.Thread(**kwargs)
|
||||
thread.daemon = True
|
||||
thread.start()
|
||||
return thread
|
||||
|
||||
def recv(self, socket, stream):
|
||||
try:
|
||||
while True:
|
||||
chunk = socket.recv(4096)
|
||||
|
||||
if chunk:
|
||||
stream.write(chunk)
|
||||
stream.flush()
|
||||
else:
|
||||
break
|
||||
except Exception as e:
|
||||
log.debug(e)
|
||||
|
||||
def send(self, socket, stream):
|
||||
while True:
|
||||
chunk = stream.read(1)
|
||||
|
||||
if chunk == '':
|
||||
socket.close()
|
||||
break
|
||||
else:
|
||||
try:
|
||||
socket.send(chunk)
|
||||
except Exception as e:
|
||||
if hasattr(e, 'errno') and e.errno == errno.EPIPE:
|
||||
break
|
||||
else:
|
||||
raise e
|
||||
|
||||
def destroy(self):
|
||||
if self.settings is not None:
|
||||
termios.tcsetattr(self.stdin_fileno, termios.TCSADRAIN, self.settings)
|
||||
|
||||
sys.stdout.flush()
|
||||
|
||||
if __name__ == '__main__':
|
||||
import websocket
|
||||
|
||||
if len(sys.argv) != 2:
|
||||
sys.stderr.write("Usage: python socketclient.py WEBSOCKET_URL\n")
|
||||
sys.exit(1)
|
||||
|
||||
url = sys.argv[1]
|
||||
socket = websocket.create_connection(url)
|
||||
|
||||
print("connected\r")
|
||||
|
||||
with SocketClient(socket, interactive=True) as client:
|
||||
client.run()
|
||||
@@ -3,10 +3,8 @@ from __future__ import absolute_import
from __future__ import division
import datetime
import os
import socket
import subprocess
import platform
from .errors import UserError


def cached_property(f):

@@ -17,7 +17,7 @@ class Container(object):
        Construct a container object from the output of GET /containers/json.
        """
        new_dictionary = {
            'ID': dictionary['Id'],
            'Id': dictionary['Id'],
            'Image': dictionary['Image'],
        }
        for name in dictionary.get('Names', []):

@@ -36,7 +36,7 @@ class Container(object):

    @property
    def id(self):
        return self.dictionary['ID']
        return self.dictionary['Id']

    @property
    def image(self):

@@ -70,13 +70,15 @@ class Container(object):
        for private, public in list(self.dictionary['NetworkSettings']['Ports'].items()):
            if public:
                ports.append('%s->%s' % (public[0]['HostPort'], private))
            else:
                ports.append(private)
        return ', '.join(ports)

    @property
    def human_readable_state(self):
        self.inspect_if_not_inspected()
        if self.dictionary['State']['Running']:
            if self.dictionary['State']['Ghost']:
            if self.dictionary['State'].get('Ghost'):
                return 'Ghost'
            else:
                return 'Up'

@@ -12,4 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from .client import Client, APIError # flake8: noqa
from .version import version

__version__ = version
__title__ = 'docker-py'

from .client import Client # flake8: noqa
@@ -20,6 +20,7 @@ import os
from fig.packages import six

from ..utils import utils
from .. import errors

INDEX_URL = 'https://index.docker.io/v1/'
DOCKER_CONFIG_FILENAME = '.dockercfg'
@@ -45,18 +46,19 @@ def expand_registry_url(hostname):

def resolve_repository_name(repo_name):
if '://' in repo_name:
raise ValueError('Repository name cannot contain a '
'scheme ({0})'.format(repo_name))
raise errors.InvalidRepository(
'Repository name cannot contain a scheme ({0})'.format(repo_name))
parts = repo_name.split('/', 1)
if not '.' in parts[0] and not ':' in parts[0] and parts[0] != 'localhost':
if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':
# This is a docker index repo (ex: foo/bar or ubuntu)
return INDEX_URL, repo_name
if len(parts) < 2:
raise ValueError('Invalid repository name ({0})'.format(repo_name))
raise errors.InvalidRepository(
'Invalid repository name ({0})'.format(repo_name))

if 'index.docker.io' in parts[0]:
raise ValueError('Invalid repository name,'
'try "{0}" instead'.format(parts[1]))
raise errors.InvalidRepository(
'Invalid repository name, try "{0}" instead'.format(parts[1]))

return expand_registry_url(parts[0]), parts[1]
@@ -87,6 +89,11 @@ def resolve_authconfig(authconfig, registry=None):
return authconfig.get(swap_protocol(registry), None)


def encode_auth(auth_info):
return base64.b64encode(auth_info.get('username', '') + b':' +
auth_info.get('password', ''))


def decode_auth(auth):
if isinstance(auth, six.string_types):
auth = auth.encode('ascii')
@@ -100,6 +107,12 @@ def encode_header(auth):
return base64.b64encode(auth_json)


def encode_full_header(auth):
""" Returns the given auth block encoded for the X-Registry-Config header.
"""
return encode_header({'configs': auth})


def load_config(root=None):
"""Loads authentication data from a Docker configuration file in the given
root directory."""
@@ -136,7 +149,8 @@ def load_config(root=None):
data.append(line.strip().split(' = ')[1])
if len(data) < 2:
# Not enough data
raise Exception('Invalid or empty configuration file!')
raise errors.InvalidConfigFile(
'Invalid or empty configuration file!')

username, password = decode_auth(data[0])
conf[INDEX_URL] = {
@@ -16,6 +16,7 @@ import json
|
||||
import re
|
||||
import shlex
|
||||
import struct
|
||||
import warnings
|
||||
|
||||
import requests
|
||||
import requests.exceptions
|
||||
@@ -24,48 +25,18 @@ from fig.packages import six
|
||||
from .auth import auth
|
||||
from .unixconn import unixconn
|
||||
from .utils import utils
|
||||
from . import errors
|
||||
|
||||
if not six.PY3:
|
||||
import websocket
|
||||
|
||||
DEFAULT_DOCKER_API_VERSION = '1.12'
|
||||
DEFAULT_TIMEOUT_SECONDS = 60
|
||||
STREAM_HEADER_SIZE_BYTES = 8
|
||||
|
||||
|
||||
class APIError(requests.exceptions.HTTPError):
|
||||
def __init__(self, message, response, explanation=None):
|
||||
super(APIError, self).__init__(message, response=response)
|
||||
|
||||
self.explanation = explanation
|
||||
|
||||
if self.explanation is None and response.content:
|
||||
self.explanation = response.content.strip()
|
||||
|
||||
def __str__(self):
|
||||
message = super(APIError, self).__str__()
|
||||
|
||||
if self.is_client_error():
|
||||
message = '%s Client Error: %s' % (
|
||||
self.response.status_code, self.response.reason)
|
||||
|
||||
elif self.is_server_error():
|
||||
message = '%s Server Error: %s' % (
|
||||
self.response.status_code, self.response.reason)
|
||||
|
||||
if self.explanation:
|
||||
message = '%s ("%s")' % (message, self.explanation)
|
||||
|
||||
return message
|
||||
|
||||
def is_client_error(self):
|
||||
return 400 <= self.response.status_code < 500
|
||||
|
||||
def is_server_error(self):
|
||||
return 500 <= self.response.status_code < 600
|
||||
|
||||
|
||||
class Client(requests.Session):
|
||||
def __init__(self, base_url=None, version="1.6",
|
||||
def __init__(self, base_url=None, version=DEFAULT_DOCKER_API_VERSION,
|
||||
timeout=DEFAULT_TIMEOUT_SECONDS):
|
||||
super(Client, self).__init__()
|
||||
if base_url is None:
|
||||
@@ -108,7 +79,7 @@ class Client(requests.Session):
|
||||
try:
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.HTTPError as e:
|
||||
raise APIError(e, response, explanation=explanation)
|
||||
raise errors.APIError(e, response, explanation=explanation)
|
||||
|
||||
def _result(self, response, json=False, binary=False):
|
||||
assert not (json and binary)
|
||||
@@ -125,7 +96,8 @@ class Client(requests.Session):
|
||||
mem_limit=0, ports=None, environment=None, dns=None,
|
||||
volumes=None, volumes_from=None,
|
||||
network_disabled=False, entrypoint=None,
|
||||
cpu_shares=None, working_dir=None):
|
||||
cpu_shares=None, working_dir=None, domainname=None,
|
||||
memswap_limit=0):
|
||||
if isinstance(command, six.string_types):
|
||||
command = shlex.split(str(command))
|
||||
if isinstance(environment, dict):
|
||||
@@ -133,7 +105,7 @@ class Client(requests.Session):
|
||||
'{0}={1}'.format(k, v) for k, v in environment.items()
|
||||
]
|
||||
|
||||
if ports and isinstance(ports, list):
|
||||
if isinstance(ports, list):
|
||||
exposed_ports = {}
|
||||
for port_definition in ports:
|
||||
port = port_definition
|
||||
@@ -145,12 +117,19 @@ class Client(requests.Session):
|
||||
exposed_ports['{0}/{1}'.format(port, proto)] = {}
|
||||
ports = exposed_ports
|
||||
|
||||
if volumes and isinstance(volumes, list):
|
||||
if isinstance(volumes, list):
|
||||
volumes_dict = {}
|
||||
for vol in volumes:
|
||||
volumes_dict[vol] = {}
|
||||
volumes = volumes_dict
|
||||
|
||||
if volumes_from:
|
||||
if not isinstance(volumes_from, six.string_types):
|
||||
volumes_from = ','.join(volumes_from)
|
||||
else:
|
||||
# Force None, an empty list or dict causes client.start to fail
|
||||
volumes_from = None
|
||||
|
||||
attach_stdin = False
|
||||
attach_stdout = False
|
||||
attach_stderr = False
|
||||
@@ -164,27 +143,37 @@ class Client(requests.Session):
|
||||
attach_stdin = True
|
||||
stdin_once = True
|
||||
|
||||
if utils.compare_version('1.10', self._version) >= 0:
|
||||
message = ('{0!r} parameter has no effect on create_container().'
|
||||
' It has been moved to start()')
|
||||
if dns is not None:
|
||||
raise errors.DockerException(message.format('dns'))
|
||||
if volumes_from is not None:
|
||||
raise errors.DockerException(message.format('volumes_from'))
|
||||
|
||||
return {
|
||||
'Hostname': hostname,
|
||||
'Hostname': hostname,
|
||||
'Domainname': domainname,
|
||||
'ExposedPorts': ports,
|
||||
'User': user,
|
||||
'Tty': tty,
|
||||
'OpenStdin': stdin_open,
|
||||
'StdinOnce': stdin_once,
|
||||
'Memory': mem_limit,
|
||||
'AttachStdin': attach_stdin,
|
||||
'User': user,
|
||||
'Tty': tty,
|
||||
'OpenStdin': stdin_open,
|
||||
'StdinOnce': stdin_once,
|
||||
'Memory': mem_limit,
|
||||
'AttachStdin': attach_stdin,
|
||||
'AttachStdout': attach_stdout,
|
||||
'AttachStderr': attach_stderr,
|
||||
'Env': environment,
|
||||
'Cmd': command,
|
||||
'Dns': dns,
|
||||
'Image': image,
|
||||
'Volumes': volumes,
|
||||
'VolumesFrom': volumes_from,
|
||||
'Env': environment,
|
||||
'Cmd': command,
|
||||
'Dns': dns,
|
||||
'Image': image,
|
||||
'Volumes': volumes,
|
||||
'VolumesFrom': volumes_from,
|
||||
'NetworkDisabled': network_disabled,
|
||||
'Entrypoint': entrypoint,
|
||||
'CpuShares': cpu_shares,
|
||||
'WorkingDir': working_dir
|
||||
'Entrypoint': entrypoint,
|
||||
'CpuShares': cpu_shares,
|
||||
'WorkingDir': working_dir,
|
||||
'MemorySwap': memswap_limit
|
||||
}
|
||||
|
||||
def _post_json(self, url, data, **kwargs):
|
||||
@@ -222,25 +211,26 @@ class Client(requests.Session):
|
||||
def _create_websocket_connection(self, url):
|
||||
return websocket.create_connection(url)
|
||||
|
||||
def _stream_result(self, response):
|
||||
"""Generator for straight-out, non chunked-encoded HTTP responses."""
|
||||
def _get_raw_response_socket(self, response):
|
||||
self._raise_for_status(response)
|
||||
for line in response.iter_lines(chunk_size=1, decode_unicode=True):
|
||||
# filter out keep-alive new lines
|
||||
if line:
|
||||
yield line + '\n'
|
||||
|
||||
def _stream_result_socket(self, response):
|
||||
self._raise_for_status(response)
|
||||
return response.raw._fp.fp._sock
|
||||
if six.PY3:
|
||||
return response.raw._fp.fp.raw._sock
|
||||
else:
|
||||
return response.raw._fp.fp._sock
|
||||
|
||||
def _stream_helper(self, response):
|
||||
"""Generator for data coming from a chunked-encoded HTTP response."""
|
||||
socket_fp = self._stream_result_socket(response)
|
||||
socket_fp = self._get_raw_response_socket(response)
|
||||
socket_fp.setblocking(1)
|
||||
socket = socket_fp.makefile()
|
||||
while True:
|
||||
size = int(socket.readline(), 16)
|
||||
# Because Docker introduced newlines at the end of chunks in v0.9,
|
||||
# and only on some API endpoints, we have to cater for both cases.
|
||||
size_line = socket.readline()
|
||||
if size_line == '\r\n':
|
||||
size_line = socket.readline()
|
||||
|
||||
size = int(size_line, 16)
|
||||
if size <= 0:
|
||||
break
|
||||
data = socket.readline()
|
||||
@@ -260,22 +250,25 @@ class Client(requests.Session):
|
||||
start = walker + STREAM_HEADER_SIZE_BYTES
|
||||
end = start + length
|
||||
walker = end
|
||||
yield str(buf[start:end])
|
||||
yield buf[start:end]
|
||||
|
||||
def _multiplexed_socket_stream_helper(self, response):
|
||||
"""A generator of multiplexed data blocks coming from a response
|
||||
socket."""
|
||||
socket = self._stream_result_socket(response)
|
||||
socket = self._get_raw_response_socket(response)
|
||||
|
||||
def recvall(socket, size):
|
||||
data = ''
|
||||
blocks = []
|
||||
while size > 0:
|
||||
block = socket.recv(size)
|
||||
if not block:
|
||||
return None
|
||||
|
||||
data += block
|
||||
blocks.append(block)
|
||||
size -= len(block)
|
||||
|
||||
sep = bytes() if six.PY3 else str()
|
||||
data = sep.join(blocks)
|
||||
return data
|
||||
|
||||
while True:
|
||||
@@ -304,13 +297,24 @@ class Client(requests.Session):
|
||||
u = self._url("/containers/{0}/attach".format(container))
|
||||
response = self._post(u, params=params, stream=stream)
|
||||
|
||||
# Stream multi-plexing was introduced in API v1.6.
|
||||
# Stream multi-plexing was only introduced in API v1.6. Anything before
|
||||
# that needs old-style streaming.
|
||||
if utils.compare_version('1.6', self._version) < 0:
|
||||
return stream and self._stream_result(response) or \
|
||||
def stream_result():
|
||||
self._raise_for_status(response)
|
||||
for line in response.iter_lines(chunk_size=1,
|
||||
decode_unicode=True):
|
||||
# filter out keep-alive new lines
|
||||
if line:
|
||||
yield line
|
||||
|
||||
return stream_result() if stream else \
|
||||
self._result(response, binary=True)
|
||||
|
||||
sep = bytes() if six.PY3 else str()
|
||||
|
||||
return stream and self._multiplexed_socket_stream_helper(response) or \
|
||||
''.join([x for x in self._multiplexed_buffer_helper(response)])
|
||||
sep.join([x for x in self._multiplexed_buffer_helper(response)])
|
||||
|
||||
def attach_socket(self, container, params=None, ws=False):
|
||||
if params is None:
|
||||
@@ -319,28 +323,39 @@ class Client(requests.Session):
|
||||
'stderr': 1,
|
||||
'stream': 1
|
||||
}
|
||||
|
||||
if ws:
|
||||
return self._attach_websocket(container, params)
|
||||
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
|
||||
u = self._url("/containers/{0}/attach".format(container))
|
||||
return self._stream_result_socket(self.post(
|
||||
return self._get_raw_response_socket(self.post(
|
||||
u, None, params=self._attach_params(params), stream=True))
|
||||
|
||||
def build(self, path=None, tag=None, quiet=False, fileobj=None,
|
||||
nocache=False, rm=False, stream=False, timeout=None):
|
||||
nocache=False, rm=False, stream=False, timeout=None,
|
||||
custom_context=False, encoding=None):
|
||||
remote = context = headers = None
|
||||
if path is None and fileobj is None:
|
||||
raise Exception("Either path or fileobj needs to be provided.")
|
||||
raise TypeError("Either path or fileobj needs to be provided.")
|
||||
|
||||
if fileobj is not None:
|
||||
if custom_context:
|
||||
if not fileobj:
|
||||
raise TypeError("You must specify fileobj with custom_context")
|
||||
context = fileobj
|
||||
elif fileobj is not None:
|
||||
context = utils.mkbuildcontext(fileobj)
|
||||
elif path.startswith(('http://', 'https://', 'git://', 'github.com/')):
|
||||
elif path.startswith(('http://', 'https://',
|
||||
'git://', 'github.com/')):
|
||||
remote = path
|
||||
else:
|
||||
context = utils.tar(path)
|
||||
|
||||
if utils.compare_version('1.8', self._version) >= 0:
|
||||
stream = True
|
||||
|
||||
u = self._url('/build')
|
||||
params = {
|
||||
't': tag,
|
||||
@@ -349,8 +364,24 @@ class Client(requests.Session):
|
||||
'nocache': nocache,
|
||||
'rm': rm
|
||||
}
|
||||
|
||||
if context is not None:
|
||||
headers = {'Content-Type': 'application/tar'}
|
||||
if encoding:
|
||||
headers['Content-Encoding'] = encoding
|
||||
|
||||
if utils.compare_version('1.9', self._version) >= 0:
|
||||
# If we don't have any auth data so far, try reloading the config
|
||||
# file one more time in case anything showed up in there.
|
||||
if not self._auth_configs:
|
||||
self._auth_configs = auth.load_config()
|
||||
|
||||
# Send the full auth configuration (if any exists), since the build
|
||||
# could use any (or all) of the registries.
|
||||
if self._auth_configs:
|
||||
headers['X-Registry-Config'] = auth.encode_full_header(
|
||||
self._auth_configs
|
||||
)
|
||||
|
||||
response = self._post(
|
||||
u,
|
||||
@@ -363,8 +394,9 @@ class Client(requests.Session):
|
||||
|
||||
if context is not None:
|
||||
context.close()
|
||||
|
||||
if stream:
|
||||
return self._stream_result(response)
|
||||
return self._stream_helper(response)
|
||||
else:
|
||||
output = self._result(response)
|
||||
srch = r'Successfully built ([0-9a-f]+)'
|
||||
@@ -387,10 +419,11 @@ class Client(requests.Session):
|
||||
json=True)
|
||||
|
||||
def containers(self, quiet=False, all=False, trunc=True, latest=False,
|
||||
since=None, before=None, limit=-1):
|
||||
since=None, before=None, limit=-1, size=False):
|
||||
params = {
|
||||
'limit': 1 if latest else limit,
|
||||
'all': 1 if all else 0,
|
||||
'size': 1 if size else 0,
|
||||
'trunc_cmd': 1 if trunc else 0,
|
||||
'since': since,
|
||||
'before': before
|
||||
@@ -403,6 +436,8 @@ class Client(requests.Session):
|
||||
return res
|
||||
|
||||
def copy(self, container, resource):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
res = self._post_json(
|
||||
self._url("/containers/{0}/copy".format(container)),
|
||||
data={"Resource": resource},
|
||||
@@ -416,12 +451,13 @@ class Client(requests.Session):
|
||||
mem_limit=0, ports=None, environment=None, dns=None,
|
||||
volumes=None, volumes_from=None,
|
||||
network_disabled=False, name=None, entrypoint=None,
|
||||
cpu_shares=None, working_dir=None):
|
||||
cpu_shares=None, working_dir=None, domainname=None,
|
||||
memswap_limit=0):
|
||||
|
||||
config = self._container_config(
|
||||
image, command, hostname, user, detach, stdin_open, tty, mem_limit,
|
||||
ports, environment, dns, volumes, volumes_from, network_disabled,
|
||||
entrypoint, cpu_shares, working_dir
|
||||
entrypoint, cpu_shares, working_dir, domainname, memswap_limit
|
||||
)
|
||||
return self.create_container_from_config(config, name)
|
||||
|
||||
@@ -440,21 +476,7 @@ class Client(requests.Session):
|
||||
format(container))), True)
|
||||
|
||||
def events(self):
|
||||
u = self._url("/events")
|
||||
|
||||
socket = self._stream_result_socket(self.get(u, stream=True))
|
||||
|
||||
while True:
|
||||
chunk = socket.recv(4096)
|
||||
if chunk:
|
||||
# Messages come in the format of length, data, newline.
|
||||
length, data = chunk.split("\n", 1)
|
||||
length = int(length, 16)
|
||||
if length > len(data):
|
||||
data += socket.recv(length - len(data))
|
||||
yield json.loads(data)
|
||||
else:
|
||||
break
|
||||
return self._stream_helper(self.get(self._url('/events'), stream=True))
|
||||
|
||||
def export(self, container):
|
||||
if isinstance(container, dict):
|
||||
@@ -464,6 +486,12 @@ class Client(requests.Session):
|
||||
self._raise_for_status(res)
|
||||
return res.raw
|
||||
|
||||
def get_image(self, image):
|
||||
res = self._get(self._url("/images/{0}/get".format(image)),
|
||||
stream=True)
|
||||
self._raise_for_status(res)
|
||||
return res.raw
|
||||
|
||||
def history(self, image):
|
||||
res = self._get(self._url("/images/{0}/history".format(image)))
|
||||
self._raise_for_status(res)
|
||||
@@ -471,6 +499,8 @@ class Client(requests.Session):
|
||||
|
||||
def images(self, name=None, quiet=False, all=False, viz=False):
|
||||
if viz:
|
||||
if utils.compare_version('1.7', self._version) >= 0:
|
||||
raise Exception('Viz output is not supported in API >= 1.7!')
|
||||
return self._result(self._get(self._url("images/viz")))
|
||||
params = {
|
||||
'filter': name,
|
||||
@@ -517,6 +547,10 @@ class Client(requests.Session):
|
||||
True)
|
||||
|
||||
def insert(self, image, url, path):
|
||||
if utils.compare_version('1.12', self._version) >= 0:
|
||||
raise errors.DeprecatedMethod(
|
||||
'insert is not available for API version >=1.12'
|
||||
)
|
||||
api_url = self._url("/images/" + image + "/insert")
|
||||
params = {
|
||||
'url': url,
|
||||
@@ -548,6 +582,10 @@ class Client(requests.Session):
|
||||
|
||||
self._raise_for_status(res)
|
||||
|
||||
def load_image(self, data):
|
||||
res = self._post(self._url("/images/load"), data=data)
|
||||
self._raise_for_status(res)
|
||||
|
||||
def login(self, username, password=None, email=None, registry=None,
|
||||
reauth=False):
|
||||
# If we don't have any auth data so far, try reloading the config file
|
||||
@@ -576,7 +614,27 @@ class Client(requests.Session):
|
||||
self._auth_configs[registry] = req_data
|
||||
return self._result(response, json=True)
|
||||
|
||||
def logs(self, container, stdout=True, stderr=True, stream=False):
|
||||
def logs(self, container, stdout=True, stderr=True, stream=False,
|
||||
timestamps=False):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
if utils.compare_version('1.11', self._version) >= 0:
|
||||
params = {'stderr': stderr and 1 or 0,
|
||||
'stdout': stdout and 1 or 0,
|
||||
'timestamps': timestamps and 1 or 0,
|
||||
'follow': stream and 1 or 0}
|
||||
url = self._url("/containers/{0}/logs".format(container))
|
||||
res = self._get(url, params=params, stream=stream)
|
||||
if stream:
|
||||
return self._multiplexed_socket_stream_helper(res)
|
||||
elif six.PY3:
|
||||
return bytes().join(
|
||||
[x for x in self._multiplexed_buffer_helper(res)]
|
||||
)
|
||||
else:
|
||||
return str().join(
|
||||
[x for x in self._multiplexed_buffer_helper(res)]
|
||||
)
|
||||
return self.attach(
|
||||
container,
|
||||
stdout=stdout,
|
||||
@@ -585,6 +643,9 @@ class Client(requests.Session):
|
||||
logs=True
|
||||
)
|
||||
|
||||
def ping(self):
|
||||
return self._result(self._get(self._url('/_ping')))
|
||||
|
||||
def port(self, container, private_port):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
@@ -601,6 +662,8 @@ class Client(requests.Session):
|
||||
return h_ports
|
||||
|
||||
def pull(self, repository, tag=None, stream=False):
|
||||
if not tag:
|
||||
repository, tag = utils.parse_repository_tag(repository)
|
||||
registry, repo_name = auth.resolve_repository_name(repository)
|
||||
if repo_name.count(":") == 1:
|
||||
repository, tag = repository.rsplit(":", 1)
|
||||
@@ -618,7 +681,7 @@ class Client(requests.Session):
|
||||
self._auth_configs = auth.load_config()
|
||||
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
|
||||
|
||||
# Do not fail here if no atuhentication exists for this specific
|
||||
# Do not fail here if no authentication exists for this specific
|
||||
# registry as we can have a readonly pull. Just put the header if
|
||||
# we can.
|
||||
if authcfg:
|
||||
@@ -644,7 +707,7 @@ class Client(requests.Session):
|
||||
self._auth_configs = auth.load_config()
|
||||
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
|
||||
|
||||
# Do not fail here if no atuhentication exists for this specific
|
||||
# Do not fail here if no authentication exists for this specific
|
||||
# registry as we can have a readonly pull. Just put the header if
|
||||
# we can.
|
||||
if authcfg:
|
||||
@@ -652,21 +715,22 @@ class Client(requests.Session):
|
||||
|
||||
response = self._post_json(u, None, headers=headers, stream=stream)
|
||||
else:
|
||||
response = self._post_json(u, authcfg, stream=stream)
|
||||
response = self._post_json(u, None, stream=stream)
|
||||
|
||||
return stream and self._stream_helper(response) \
|
||||
or self._result(response)
|
||||
|
||||
def remove_container(self, container, v=False, link=False):
|
||||
def remove_container(self, container, v=False, link=False, force=False):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
params = {'v': v, 'link': link}
|
||||
params = {'v': v, 'link': link, 'force': force}
|
||||
res = self._delete(self._url("/containers/" + container),
|
||||
params=params)
|
||||
self._raise_for_status(res)
|
||||
|
||||
def remove_image(self, image):
|
||||
res = self._delete(self._url("/images/" + image))
|
||||
def remove_image(self, image, force=False, noprune=False):
|
||||
params = {'force': force, 'noprune': noprune}
|
||||
res = self._delete(self._url("/images/" + image), params=params)
|
||||
self._raise_for_status(res)
|
||||
|
||||
def restart(self, container, timeout=10):
|
||||
@@ -683,7 +747,8 @@ class Client(requests.Session):
|
||||
True)
|
||||
|
||||
def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
|
||||
publish_all_ports=False, links=None, privileged=False):
|
||||
publish_all_ports=False, links=None, privileged=False,
|
||||
dns=None, dns_search=None, volumes_from=None, network_mode=None):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
|
||||
@@ -697,10 +762,7 @@ class Client(requests.Session):
|
||||
'LxcConf': lxc_conf
|
||||
}
|
||||
if binds:
|
||||
bind_pairs = [
|
||||
'{0}:{1}'.format(host, dest) for host, dest in binds.items()
|
||||
]
|
||||
start_config['Binds'] = bind_pairs
|
||||
start_config['Binds'] = utils.convert_volume_binds(binds)
|
||||
|
||||
if port_bindings:
|
||||
start_config['PortBindings'] = utils.convert_port_bindings(
|
||||
@@ -721,10 +783,44 @@ class Client(requests.Session):
|
||||
|
||||
start_config['Privileged'] = privileged
|
||||
|
||||
if utils.compare_version('1.10', self._version) >= 0:
|
||||
if dns is not None:
|
||||
start_config['Dns'] = dns
|
||||
if volumes_from is not None:
|
||||
if isinstance(volumes_from, six.string_types):
|
||||
volumes_from = volumes_from.split(',')
|
||||
start_config['VolumesFrom'] = volumes_from
|
||||
else:
|
||||
warning_message = ('{0!r} parameter is discarded. It is only'
|
||||
' available for API version greater or equal'
|
||||
' than 1.10')
|
||||
|
||||
if dns is not None:
|
||||
warnings.warn(warning_message.format('dns'),
|
||||
DeprecationWarning)
|
||||
if volumes_from is not None:
|
||||
warnings.warn(warning_message.format('volumes_from'),
|
||||
DeprecationWarning)
|
||||
|
||||
if dns_search:
|
||||
start_config['DnsSearch'] = dns_search
|
||||
|
||||
if network_mode:
|
||||
start_config['NetworkMode'] = network_mode
|
||||
|
||||
url = self._url("/containers/{0}/start".format(container))
|
||||
res = self._post_json(url, data=start_config)
|
||||
self._raise_for_status(res)
|
||||
|
||||
def resize(self, container, height, width):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
|
||||
params = {'h': height, 'w': width}
|
||||
url = self._url("/containers/{0}/resize".format(container))
|
||||
res = self._post(url, params=params)
|
||||
self._raise_for_status(res)
|
||||
|
||||
def stop(self, container, timeout=10):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
|
||||
65  fig/packages/docker/errors.py  (new file)
@@ -0,0 +1,65 @@
# Copyright 2014 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import requests


class APIError(requests.exceptions.HTTPError):
    def __init__(self, message, response, explanation=None):
        # requests 1.2 supports response as a keyword argument, but
        # requests 1.1 doesn't
        super(APIError, self).__init__(message)
        self.response = response

        self.explanation = explanation

        if self.explanation is None and response.content:
            self.explanation = response.content.strip()

    def __str__(self):
        message = super(APIError, self).__str__()

        if self.is_client_error():
            message = '%s Client Error: %s' % (
                self.response.status_code, self.response.reason)

        elif self.is_server_error():
            message = '%s Server Error: %s' % (
                self.response.status_code, self.response.reason)

        if self.explanation:
            message = '%s ("%s")' % (message, self.explanation)

        return message

    def is_client_error(self):
        return 400 <= self.response.status_code < 500

    def is_server_error(self):
        return 500 <= self.response.status_code < 600


class DockerException(Exception):
    pass


class InvalidRepository(DockerException):
    pass


class InvalidConfigFile(DockerException):
    pass


class DeprecatedMethod(DockerException):
    pass
@@ -40,7 +40,7 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
self.sock = sock

def _extract_path(self, url):
#remove the base_url entirely..
# remove the base_url entirely..
return url.replace(self.base_url, "")

def request(self, method, url, **kwargs):

@@ -1,3 +1,4 @@
from .utils import (
compare_version, convert_port_bindings, mkbuildcontext, ping, tar
compare_version, convert_port_bindings, convert_volume_binds,
mkbuildcontext, ping, tar, parse_repository_tag
) # flake8: noqa
@@ -15,6 +15,7 @@
import io
import tarfile
import tempfile
from distutils.version import StrictVersion

import requests
from fig.packages import six
@@ -51,15 +52,34 @@ def tar(path):


def compare_version(v1, v2):
return float(v2) - float(v1)
"""Compare docker versions

>>> v1 = '1.9'
>>> v2 = '1.10'
>>> compare_version(v1, v2)
1
>>> compare_version(v2, v1)
-1
>>> compare_version(v2, v2)
0
"""
s1 = StrictVersion(v1)
s2 = StrictVersion(v2)
if s1 == s2:
return 0
elif s1 > s2:
return -1
else:
return 1


def ping(url):
try:
res = requests.get(url)
return res.status >= 400
except Exception:
return False
else:
return res.status_code < 400


def _convert_port_binding(binding):
@@ -72,6 +92,13 @@ def _convert_port_binding(binding):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
elif isinstance(binding, dict):
if 'HostPort' in binding:
result['HostPort'] = binding['HostPort']
if 'HostIp' in binding:
result['HostIp'] = binding['HostIp']
else:
raise ValueError(binding)
else:
result['HostPort'] = binding

@@ -94,3 +121,27 @@ def convert_port_bindings(port_bindings):
else:
result[key] = [_convert_port_binding(v)]
return result


def convert_volume_binds(binds):
result = []
for k, v in binds.items():
if isinstance(v, dict):
result.append('%s:%s:%s' % (
k, v['bind'], 'ro' if v.get('ro', False) else 'rw'
))
else:
result.append('%s:%s:rw' % (k, v))
return result


def parse_repository_tag(repo):
column_index = repo.rfind(':')
if column_index < 0:
return repo, None
tag = repo[column_index+1:]
slash_index = tag.find('/')
if slash_index < 0:
return repo[:column_index], tag

return repo, None
1  fig/packages/docker/version.py  (new file)
@@ -0,0 +1 @@
version = "0.3.2"
83  fig/progress_stream.py  (new file)
@@ -0,0 +1,83 @@
import json
import os
import codecs


class StreamOutputError(Exception):
    pass


def stream_output(output, stream):
    is_terminal = hasattr(stream, 'fileno') and os.isatty(stream.fileno())
    stream = codecs.getwriter('utf-8')(stream)
    all_events = []
    lines = {}
    diff = 0

    for chunk in output:
        event = json.loads(chunk)
        all_events.append(event)

        if 'progress' in event or 'progressDetail' in event:
            image_id = event['id']

            if image_id in lines:
                diff = len(lines) - lines[image_id]
            else:
                lines[image_id] = len(lines)
                stream.write("\n")
                diff = 0

            if is_terminal:
                # move cursor up `diff` rows
                stream.write("%c[%dA" % (27, diff))

        print_output_event(event, stream, is_terminal)

        if 'id' in event and is_terminal:
            # move cursor back down
            stream.write("%c[%dB" % (27, diff))

        stream.flush()

    return all_events


def print_output_event(event, stream, is_terminal):
    if 'errorDetail' in event:
        raise StreamOutputError(event['errorDetail']['message'])

    terminator = ''

    if is_terminal and 'stream' not in event:
        # erase current line
        stream.write("%c[2K\r" % 27)
        terminator = "\r"
        pass
    elif 'progressDetail' in event:
        return

    if 'time' in event:
        stream.write("[%s] " % event['time'])

    if 'id' in event:
        stream.write("%s: " % event['id'])

    if 'from' in event:
        stream.write("(from %s) " % event['from'])

    status = event.get('status', '')

    if 'progress' in event:
        stream.write("%s %s%s" % (status, event['progress'], terminator))
    elif 'progressDetail' in event:
        detail = event['progressDetail']
        if 'current' in detail:
            percentage = float(detail['current']) / float(detail['total']) * 100
            stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
        else:
            stream.write('%s%s' % (status, terminator))
    elif 'stream' in event:
        stream.write("%s%s" % (event['stream'], terminator))
    else:
        stream.write("%s%s\n" % (status, terminator))
119  fig/project.py
@@ -2,6 +2,8 @@ from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
from .service import Service
|
||||
from .container import Container
|
||||
from .packages.docker.errors import APIError
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@@ -18,11 +20,13 @@ def sort_service_dicts(services):
|
||||
if n['name'] in temporary_marked:
|
||||
if n['name'] in get_service_names(n.get('links', [])):
|
||||
raise DependencyError('A service can not link to itself: %s' % n['name'])
|
||||
if n['name'] in n.get('volumes_from', []):
|
||||
raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
|
||||
else:
|
||||
raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
|
||||
if n in unmarked:
|
||||
temporary_marked.add(n['name'])
|
||||
dependents = [m for m in services if n['name'] in get_service_names(m.get('links', []))]
|
||||
dependents = [m for m in services if (n['name'] in get_service_names(m.get('links', []))) or (n['name'] in m.get('volumes_from', []))]
|
||||
for m in dependents:
|
||||
visit(m)
|
||||
temporary_marked.remove(n['name'])
|
||||
@@ -50,17 +54,10 @@ class Project(object):
|
||||
"""
|
||||
project = cls(name, [], client)
|
||||
for service_dict in sort_service_dicts(service_dicts):
|
||||
# Reference links by object
|
||||
links = []
|
||||
if 'links' in service_dict:
|
||||
for link in service_dict.get('links', []):
|
||||
if ':' in link:
|
||||
service_name, link_name = link.split(':', 1)
|
||||
else:
|
||||
service_name, link_name = link, None
|
||||
links.append((project.get_service(service_name), link_name))
|
||||
del service_dict['links']
|
||||
project.services.append(Service(client=client, project=name, links=links, **service_dict))
|
||||
links = project.get_links(service_dict)
|
||||
volumes_from = project.get_volumes_from(service_dict)
|
||||
|
||||
project.services.append(Service(client=client, project=name, links=links, volumes_from=volumes_from, **service_dict))
|
||||
return project
|
||||
|
||||
@classmethod
|
||||
@@ -84,39 +81,66 @@ class Project(object):
|
||||
|
||||
raise NoSuchService(name)
|
||||
|
||||
def get_services(self, service_names=None):
|
||||
def get_services(self, service_names=None, include_links=False):
|
||||
"""
|
||||
Returns a list of this project's services filtered
|
||||
by the provided list of names, or all services if
|
||||
service_names is None or [].
|
||||
by the provided list of names, or all services if service_names is None
|
||||
or [].
|
||||
|
||||
Preserves the original order of self.services.
|
||||
If include_links is specified, returns a list including the links for
|
||||
service_names, in order of dependency.
|
||||
|
||||
Raises NoSuchService if any of the named services
|
||||
do not exist.
|
||||
Preserves the original order of self.services where possible,
|
||||
reordering as needed to resolve links.
|
||||
|
||||
Raises NoSuchService if any of the named services do not exist.
|
||||
"""
|
||||
if service_names is None or len(service_names) == 0:
|
||||
return self.services
|
||||
return self.get_services(
|
||||
service_names=[s.name for s in self.services],
|
||||
include_links=include_links
|
||||
)
|
||||
else:
|
||||
unsorted = [self.get_service(name) for name in service_names]
|
||||
return [s for s in self.services if s in unsorted]
|
||||
services = [s for s in self.services if s in unsorted]
|
||||
|
||||
def recreate_containers(self, service_names=None):
|
||||
"""
|
||||
For each service, create or recreate their containers.
|
||||
Returns a tuple with two lists. The first is a list of
|
||||
(service, old_container) tuples; the second is a list
|
||||
of (service, new_container) tuples.
|
||||
"""
|
||||
old = []
|
||||
new = []
|
||||
if include_links:
|
||||
services = reduce(self._inject_links, services, [])
|
||||
|
||||
for service in self.get_services(service_names):
|
||||
(s_old, s_new) = service.recreate_containers()
|
||||
old += [(service, container) for container in s_old]
|
||||
new += [(service, container) for container in s_new]
|
||||
uniques = []
|
||||
[uniques.append(s) for s in services if s not in uniques]
|
||||
return uniques
|
||||
|
||||
return (old, new)
|
||||
def get_links(self, service_dict):
|
||||
links = []
|
||||
if 'links' in service_dict:
|
||||
for link in service_dict.get('links', []):
|
||||
if ':' in link:
|
||||
service_name, link_name = link.split(':', 1)
|
||||
else:
|
||||
service_name, link_name = link, None
|
||||
try:
|
||||
links.append((self.get_service(service_name), link_name))
|
||||
except NoSuchService:
|
||||
raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name))
|
||||
del service_dict['links']
|
||||
return links
|
||||
|
||||
def get_volumes_from(self, service_dict):
|
||||
volumes_from = []
|
||||
if 'volumes_from' in service_dict:
|
||||
for volume_name in service_dict.get('volumes_from', []):
|
||||
try:
|
||||
service = self.get_service(volume_name)
|
||||
volumes_from.append(service)
|
||||
except NoSuchService:
|
||||
try:
|
||||
container = Container.from_id(client, volume_name)
|
||||
volumes_from.append(Container.from_id(client, volume_name))
|
||||
except APIError:
|
||||
raise ConfigurationError('Service "%s" mounts volumes from "%s", which is not the name of a service or container.' % (service_dict['name'], volume_name))
|
||||
del service_dict['volumes_from']
|
||||
return volumes_from
|
||||
|
||||
def start(self, service_names=None, **options):
|
||||
for service in self.get_services(service_names):
|
||||
@@ -137,6 +161,19 @@ class Project(object):
|
||||
else:
|
||||
log.info('%s uses an image, skipping' % service.name)
|
||||
|
||||
def up(self, service_names=None, start_links=True, recreate=True):
|
||||
running_containers = []
|
||||
|
||||
for service in self.get_services(service_names, include_links=start_links):
|
||||
if recreate:
|
||||
for (_, container) in service.recreate_containers():
|
||||
running_containers.append(container)
|
||||
else:
|
||||
for container in service.start_or_create_containers():
|
||||
running_containers.append(container)
|
||||
|
||||
return running_containers
|
||||
|
||||
def remove_stopped(self, service_names=None, **options):
|
||||
for service in self.get_services(service_names):
|
||||
service.remove_stopped(**options)
|
||||
@@ -148,6 +185,20 @@ class Project(object):
|
||||
l.append(container)
|
||||
return l
|
||||
|
||||
def _inject_links(self, acc, service):
|
||||
linked_names = service.get_linked_names()
|
||||
|
||||
if len(linked_names) > 0:
|
||||
linked_services = self.get_services(
|
||||
service_names=linked_names,
|
||||
include_links=True
|
||||
)
|
||||
else:
|
||||
linked_services = []
|
||||
|
||||
linked_services.append(service)
|
||||
return acc + linked_services
|
||||
|
||||
|
||||
class NoSuchService(Exception):
|
||||
def __init__(self, name):
|
||||
|
||||
221  fig/service.py
@@ -1,25 +1,34 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
from .packages.docker.client import APIError
|
||||
from .packages.docker.errors import APIError
|
||||
import logging
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
from .container import Container
|
||||
from .progress_stream import stream_output, StreamOutputError
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
DOCKER_CONFIG_KEYS = ['image', 'command', 'hostname', 'user', 'detach', 'stdin_open', 'tty', 'mem_limit', 'ports', 'environment', 'dns', 'volumes', 'volumes_from', 'entrypoint']
|
||||
DOCKER_CONFIG_KEYS = ['image', 'command', 'hostname', 'domainname', 'user', 'detach', 'stdin_open', 'tty', 'mem_limit', 'ports', 'environment', 'dns', 'volumes', 'entrypoint', 'privileged', 'volumes_from', 'net', 'working_dir']
|
||||
DOCKER_CONFIG_HINTS = {
|
||||
'link': 'links',
|
||||
'port': 'ports',
|
||||
'volume': 'volumes',
|
||||
'link' : 'links',
|
||||
'port' : 'ports',
|
||||
'privilege' : 'privileged',
|
||||
'priviliged': 'privileged',
|
||||
'privilige' : 'privileged',
|
||||
'volume' : 'volumes',
|
||||
'workdir' : 'working_dir',
|
||||
}
|
||||
|
||||
VALID_NAME_CHARS = '[a-zA-Z0-9]'
|
||||
|
||||
|
||||
class BuildError(Exception):
|
||||
pass
|
||||
def __init__(self, service, reason):
|
||||
self.service = service
|
||||
self.reason = reason
|
||||
|
||||
|
||||
class CannotBeScaledError(Exception):
|
||||
@@ -31,15 +40,15 @@ class ConfigError(ValueError):
|
||||
|
||||
|
||||
class Service(object):
|
||||
def __init__(self, name, client=None, project='default', links=[], **options):
|
||||
if not re.match('^[a-zA-Z0-9]+$', name):
|
||||
raise ConfigError('Invalid name: %s' % name)
|
||||
if not re.match('^[a-zA-Z0-9]+$', project):
|
||||
raise ConfigError('Invalid project: %s' % project)
|
||||
def __init__(self, name, client=None, project='default', links=[], volumes_from=[], **options):
|
||||
if not re.match('^%s+$' % VALID_NAME_CHARS, name):
|
||||
raise ConfigError('Invalid service name "%s" - only %s are allowed' % (name, VALID_NAME_CHARS))
|
||||
if not re.match('^%s+$' % VALID_NAME_CHARS, project):
|
||||
raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
|
||||
if 'image' in options and 'build' in options:
|
||||
raise ConfigError('Service %s has both an image and build path specified. A service can either be built to image or use an existing image, not both.' % name)
|
||||
|
||||
supported_options = DOCKER_CONFIG_KEYS + ['build']
|
||||
supported_options = DOCKER_CONFIG_KEYS + ['build', 'expose']
|
||||
|
||||
for k in options:
|
||||
if k not in supported_options:
|
||||
@@ -52,6 +61,7 @@ class Service(object):
|
||||
self.client = client
|
||||
self.project = project
|
||||
self.links = links or []
|
||||
self.volumes_from = volumes_from or []
|
||||
self.options = options
|
||||
|
||||
def containers(self, stopped=False, one_off=False):
|
||||
@@ -67,9 +77,7 @@ class Service(object):
|
||||
|
||||
def start(self, **options):
|
||||
for c in self.containers(stopped=True):
|
||||
if not c.is_running:
|
||||
log.info("Starting %s..." % c.name)
|
||||
self.start_container(c, **options)
|
||||
self.start_container_if_stopped(c, **options)
|
||||
|
||||
def stop(self, **options):
|
||||
for c in self.containers():
|
||||
@@ -82,6 +90,14 @@ class Service(object):
|
||||
c.kill(**options)
|
||||
|
||||
def scale(self, desired_num):
|
||||
"""
|
||||
Adjusts the number of containers to the specified number and ensures they are running.
|
||||
|
||||
- creates containers until there are at least `desired_num`
|
||||
- stops containers until there are at most `desired_num` running
|
||||
- starts containers until there are at least `desired_num` running
|
||||
- removes all stopped containers
|
||||
"""
|
||||
if not self.can_be_scaled():
|
||||
raise CannotBeScaledError()
|
||||
|
||||
@@ -114,6 +130,8 @@ class Service(object):
|
||||
self.start_container(c)
|
||||
running_containers.append(c)
|
||||
|
||||
self.remove_stopped()
|
||||
|
||||
|
||||
def remove_stopped(self, **options):
|
||||
for c in self.containers(stopped=True):
|
||||
@@ -126,37 +144,37 @@ class Service(object):
|
||||
Create a container for this service. If the image doesn't exist, attempt to pull
|
||||
it.
|
||||
"""
|
||||
container_options = self._get_container_options(override_options, one_off=one_off)
|
||||
container_options = self._get_container_create_options(override_options, one_off=one_off)
|
||||
try:
|
||||
return Container.create(self.client, **container_options)
|
||||
except APIError as e:
|
||||
if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
|
||||
log.info('Pulling image %s...' % container_options['image'])
|
||||
self.client.pull(container_options['image'])
|
||||
output = self.client.pull(container_options['image'], stream=True)
|
||||
stream_output(output, sys.stdout)
|
||||
return Container.create(self.client, **container_options)
|
||||
raise
|
||||
|
||||
def recreate_containers(self, **override_options):
|
||||
"""
|
||||
If a container for this service doesn't exist, create one. If there are
|
||||
any, stop them and create new ones. Does not remove the old containers.
|
||||
If a container for this service doesn't exist, create and start one. If there are
|
||||
any, stop them, create+start new ones, and remove the old containers.
|
||||
"""
|
||||
containers = self.containers(stopped=True)
|
||||
|
||||
if len(containers) == 0:
|
||||
log.info("Creating %s..." % self.next_container_name())
|
||||
return ([], [self.create_container(**override_options)])
|
||||
container = self.create_container(**override_options)
|
||||
self.start_container(container)
|
||||
return [(None, container)]
|
||||
else:
|
||||
old_containers = []
|
||||
new_containers = []
|
||||
tuples = []
|
||||
|
||||
for c in containers:
|
||||
log.info("Recreating %s..." % c.name)
|
||||
(old_container, new_container) = self.recreate_container(c, **override_options)
|
||||
old_containers.append(old_container)
|
||||
new_containers.append(new_container)
|
||||
tuples.append(self.recreate_container(c, **override_options))
|
||||
|
||||
return (old_containers, new_containers)
|
||||
return tuples
|
||||
|
||||
def recreate_container(self, container, **override_options):
|
||||
if container.is_running:
|
||||
@@ -165,21 +183,29 @@ class Service(object):
|
||||
intermediate_container = Container.create(
|
||||
self.client,
|
||||
image=container.image,
|
||||
volumes_from=container.id,
|
||||
entrypoint=['echo'],
|
||||
command=[],
|
||||
)
|
||||
intermediate_container.start()
|
||||
intermediate_container.start(volumes_from=container.id)
|
||||
intermediate_container.wait()
|
||||
container.remove()
|
||||
|
||||
options = dict(override_options)
|
||||
options['volumes_from'] = intermediate_container.id
|
||||
new_container = self.create_container(**options)
|
||||
self.start_container(new_container, intermediate_container=intermediate_container)
|
||||
|
||||
intermediate_container.remove()
|
||||
|
||||
return (intermediate_container, new_container)
|
||||
|
||||
def start_container(self, container=None, **override_options):
|
||||
def start_container_if_stopped(self, container, **options):
|
||||
if container.is_running:
|
||||
return container
|
||||
else:
|
||||
log.info("Starting %s..." % container.name)
|
||||
return self.start_container(container, **options)
|
||||
|
||||
def start_container(self, container=None, intermediate_container=None,**override_options):
|
||||
if container is None:
|
||||
container = self.create_container(**override_options)
|
||||
|
||||
@@ -190,12 +216,7 @@ class Service(object):
|
||||
|
||||
if options.get('ports', None) is not None:
|
||||
for port in options['ports']:
|
||||
port = str(port)
|
||||
if ':' in port:
|
||||
external_port, internal_port = port.split(':', 1)
|
||||
else:
|
||||
external_port, internal_port = (None, port)
|
||||
|
||||
internal_port, external_port = split_port(port)
|
||||
port_bindings[internal_port] = external_port
|
||||
|
||||
volume_bindings = {}
|
||||
@@ -204,15 +225,37 @@ class Service(object):
|
||||
for volume in options['volumes']:
|
||||
if ':' in volume:
|
||||
external_dir, internal_dir = volume.split(':')
|
||||
volume_bindings[os.path.abspath(external_dir)] = internal_dir
|
||||
volume_bindings[os.path.abspath(external_dir)] = {
|
||||
'bind': internal_dir,
|
||||
'ro': False,
|
||||
}
|
||||
|
||||
privileged = options.get('privileged', False)
|
||||
net = options.get('net', 'bridge')
|
||||
|
||||
container.start(
|
||||
links=self._get_links(),
|
||||
links=self._get_links(link_to_self=override_options.get('one_off', False)),
|
||||
port_bindings=port_bindings,
|
||||
binds=volume_bindings,
|
||||
volumes_from=self._get_volumes_from(intermediate_container),
|
||||
privileged=privileged,
|
||||
network_mode=net,
|
||||
)
|
||||
return container
|
||||
|
||||
def start_or_create_containers(self):
|
||||
containers = self.containers(stopped=True)
|
||||
|
||||
if len(containers) == 0:
|
||||
log.info("Creating %s..." % self.next_container_name())
|
||||
new_container = self.create_container()
|
||||
return [self.start_container(new_container)]
|
||||
else:
|
||||
return [self.start_container_if_stopped(c) for c in containers]
|
||||
|
||||
def get_linked_names(self):
|
||||
return [s.name for (s, _) in self.links]
|
||||
|
||||
def next_container_name(self, one_off=False):
|
||||
bits = [self.project, self.name]
|
||||
if one_off:
|
||||
@@ -227,7 +270,7 @@ class Service(object):
|
||||
else:
|
||||
return max(numbers) + 1
|
||||
|
||||
def _get_links(self):
|
||||
def _get_links(self, link_to_self):
|
||||
links = []
|
||||
for service, link_name in self.links:
|
||||
for container in service.containers():
|
||||
@@ -235,20 +278,47 @@ class Service(object):
|
||||
links.append((container.name, link_name))
|
||||
links.append((container.name, container.name))
|
||||
links.append((container.name, container.name_without_project))
|
||||
for container in self.containers():
|
||||
links.append((container.name, container.name))
|
||||
links.append((container.name, container.name_without_project))
|
||||
if link_to_self:
|
||||
for container in self.containers():
|
||||
links.append((container.name, container.name))
|
||||
links.append((container.name, container.name_without_project))
|
||||
return links
|
||||
|
||||
def _get_container_options(self, override_options, one_off=False):
|
||||
def _get_volumes_from(self, intermediate_container=None):
|
||||
volumes_from = []
|
||||
for v in self.volumes_from:
|
||||
if isinstance(v, Service):
|
||||
for container in v.containers(stopped=True):
|
||||
volumes_from.append(container.id)
|
||||
elif isinstance(v, Container):
|
||||
volumes_from.append(v.id)
|
||||
|
||||
if intermediate_container:
|
||||
volumes_from.append(intermediate_container.id)
|
||||
|
||||
return volumes_from
|
||||
|
||||
def _get_container_create_options(self, override_options, one_off=False):
|
||||
container_options = dict((k, self.options[k]) for k in DOCKER_CONFIG_KEYS if k in self.options)
|
||||
container_options.update(override_options)
|
||||
|
||||
container_options['name'] = self.next_container_name(one_off)
|
||||
|
||||
if 'ports' in container_options:
|
||||
# If a qualified hostname was given, split it into an
|
||||
# unqualified hostname and a domainname unless domainname
|
||||
# was also given explicitly. This matches the behavior of
|
||||
# the official Docker CLI in that scenario.
|
||||
if ('hostname' in container_options
|
||||
and 'domainname' not in container_options
|
||||
and '.' in container_options['hostname']):
|
||||
parts = container_options['hostname'].partition('.')
|
||||
container_options['hostname'] = parts[0]
|
||||
container_options['domainname'] = parts[2]
|
||||
|
||||
if 'ports' in container_options or 'expose' in self.options:
|
||||
ports = []
|
||||
for port in container_options['ports']:
|
||||
all_ports = container_options.get('ports', []) + self.options.get('expose', [])
|
||||
for port in all_ports:
|
||||
port = str(port)
|
||||
if ':' in port:
|
||||
port = port.split(':')[-1]
|
||||
@@ -260,11 +330,24 @@ class Service(object):
|
||||
if 'volumes' in container_options:
|
||||
container_options['volumes'] = dict((split_volume(v)[1], {}) for v in container_options['volumes'])
|
||||
|
||||
if 'environment' in container_options:
|
||||
if isinstance(container_options['environment'], list):
|
||||
container_options['environment'] = dict(split_env(e) for e in container_options['environment'])
|
||||
container_options['environment'] = dict(resolve_env(k,v) for k,v in container_options['environment'].iteritems())
|
||||
|
||||
if self.can_be_built():
|
||||
if len(self.client.images(name=self._build_tag_name())) == 0:
|
||||
self.build()
|
||||
container_options['image'] = self._build_tag_name()
|
||||
|
||||
# Priviliged is only required for starting containers, not for creating them
|
||||
if 'privileged' in container_options:
|
||||
del container_options['privileged']
|
||||
|
||||
# net is only required for starting containers, not for creating them
|
||||
if 'net' in container_options:
|
||||
del container_options['net']
|
||||
|
||||
return container_options
|
||||
|
||||
def build(self):
|
||||
@@ -273,20 +356,25 @@ class Service(object):
|
||||
build_output = self.client.build(
|
||||
self.options['build'],
|
||||
tag=self._build_tag_name(),
|
||||
stream=True
|
||||
stream=True,
|
||||
rm=True
|
||||
)
|
||||
|
||||
try:
|
||||
all_events = stream_output(build_output, sys.stdout)
|
||||
except StreamOutputError, e:
|
||||
raise BuildError(self, unicode(e))
|
||||
|
||||
image_id = None
|
||||
|
||||
for line in build_output:
|
||||
if line:
|
||||
match = re.search(r'Successfully built ([0-9a-f]+)', line)
|
||||
for event in all_events:
|
||||
if 'stream' in event:
|
||||
match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
|
||||
if match:
|
||||
image_id = match.group(1)
|
||||
sys.stdout.write(line)
|
||||
|
||||
if image_id is None:
|
||||
raise BuildError()
|
||||
raise BuildError(self)
|
||||
|
||||
return image_id
|
||||
|
||||
@@ -346,3 +434,34 @@ def split_volume(v):
|
||||
return v.split(':', 1)
|
||||
else:
|
||||
return (None, v)
|
||||
|
||||
|
||||
def split_port(port):
|
||||
port = str(port)
|
||||
external_ip = None
|
||||
if ':' in port:
|
||||
external_port, internal_port = port.rsplit(':', 1)
|
||||
if ':' in external_port:
|
||||
external_ip, external_port = external_port.split(':', 1)
|
||||
else:
|
||||
external_port, internal_port = (None, port)
|
||||
if external_ip:
|
||||
if external_port:
|
||||
external_port = (external_ip, external_port)
|
||||
else:
|
||||
external_port = (external_ip,)
|
||||
return internal_port, external_port
|
||||
|
||||
def split_env(env):
|
||||
if '=' in env:
|
||||
return env.split('=', 1)
|
||||
else:
|
||||
return env, None
|
||||
|
||||
def resolve_env(key,val):
|
||||
if val is not None:
|
||||
return key, val
|
||||
elif key in os.environ:
|
||||
return key, os.environ[key]
|
||||
else:
|
||||
return key, ''
@@ -1,3 +1,4 @@
|
||||
mock==1.0.1
|
||||
nose==1.3.0
|
||||
pyinstaller==2.1
|
||||
unittest2
|
||||
|
||||
@@ -3,3 +3,4 @@ PyYAML==3.10
|
||||
requests==2.2.1
|
||||
texttable==0.8.1
|
||||
websocket-client==0.11.0
|
||||
dockerpty==0.2.1
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
#!/bin/sh
|
||||
set -ex
|
||||
mkdir -p `pwd`/dist
|
||||
chmod 777 `pwd`/dist
|
||||
docker build -t fig .
|
||||
docker run -v `pwd`/dist:/code/dist fig pyinstaller -F bin/fig
|
||||
docker run -v `pwd`/dist:/code/dist fig dist/fig --version
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
rm -r venv
|
||||
rm -rf venv
|
||||
virtualenv venv
|
||||
venv/bin/pip install pyinstaller==2.1
|
||||
venv/bin/pip install .
|
||||
venv/bin/pyinstaller -F bin/fig
|
||||
dist/fig --version
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
#!/bin/sh
|
||||
find . -type f -name '*.pyc' -delete
|
||||
|
||||
rm -rf docs/_site build dist
|
||||
|
||||
@@ -21,8 +21,7 @@ git reset --soft origin/gh-pages
|
||||
|
||||
echo ".git-gh-pages" > .gitignore
|
||||
|
||||
git add -u
|
||||
git add .
|
||||
git add -A .
|
||||
|
||||
git commit -m "update" || echo "didn't commit"
|
||||
git push origin master:gh-pages
|
||||
|
||||
@@ -1,2 +1,2 @@
#!/bin/sh
nosetests
PYTHONIOENCODING=ascii nosetests $@

@@ -1,23 +0,0 @@
#!/bin/bash

# Exit on first error
set -ex

# Put Python eggs in a writeable directory
export PYTHON_EGG_CACHE="/tmp/.python-eggs"

# Activate correct virtualenv
TRAVIS_PYTHON_VERSION=$1
source /home/travis/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/activate

env

# Kill background processes on exit
trap 'kill -9 $(jobs -p)' SIGINT SIGTERM EXIT

# Start docker daemon
docker -d -H unix:///var/run/docker.sock 2>> /dev/null >> /dev/null &
sleep 2

# $init is set by sekexe
cd $(dirname $init)/.. && nosetests -v
@@ -1,18 +0,0 @@
#!/bin/bash

set -ex

sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
sudo apt-get update
echo exit 101 | sudo tee /usr/sbin/policy-rc.d
sudo chmod +x /usr/sbin/policy-rc.d
sudo apt-get install -qy slirp lxc lxc-docker-$DOCKER_VERSION
git clone git://github.com/jpetazzo/sekexe
python setup.py install
pip install -r requirements-dev.txt

if [[ $TRAVIS_PYTHON_VERSION == "2.6" ]]; then
pip install unittest2
fi

script/travis-integration (new executable file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/bash
set -ex

# Kill background processes on exit
trap 'kill -9 $(jobs -p)' SIGINT SIGTERM EXIT

export DOCKER_HOST=tcp://localhost:4243
orchard proxy -H $TRAVIS_JOB_ID $DOCKER_HOST &
sleep 2
nosetests -v
setup.py (2 lines changed)
@@ -35,7 +35,7 @@ setup(
url='http://orchardup.github.io/fig/',
author='Orchard Laboratories Ltd.',
author_email='hello@orchardup.com',
license='BSD',
license='Apache License 2.0',
packages=find_packages(),
include_package_data=True,
test_suite='nose.collector',

@@ -1,84 +0,0 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
from .testcases import DockerClientTestCase
|
||||
from mock import patch
|
||||
from fig.packages.six import StringIO
|
||||
from fig.cli.main import TopLevelCommand
|
||||
|
||||
class CLITestCase(DockerClientTestCase):
|
||||
def setUp(self):
|
||||
super(CLITestCase, self).setUp()
|
||||
self.command = TopLevelCommand()
|
||||
self.command.base_dir = 'tests/fixtures/simple-figfile'
|
||||
|
||||
def tearDown(self):
|
||||
self.command.project.kill()
|
||||
self.command.project.remove_stopped()
|
||||
|
||||
def test_yaml_filename_check(self):
|
||||
self.command.base_dir = 'tests/fixtures/longer-filename-figfile'
|
||||
|
||||
project = self.command.project
|
||||
|
||||
self.assertTrue( project.get_service('definedinyamlnotyml'), "Service: definedinyamlnotyml should have been loaded from .yaml file" )
|
||||
|
||||
def test_help(self):
|
||||
self.assertRaises(SystemExit, lambda: self.command.dispatch(['-h'], None))
|
||||
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def test_ps(self, mock_stdout):
|
||||
self.command.project.get_service('simple').create_container()
|
||||
self.command.dispatch(['ps'], None)
|
||||
self.assertIn('fig_simple_1', mock_stdout.getvalue())
|
||||
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def test_ps_default_figfile(self, mock_stdout):
|
||||
self.command.base_dir = 'tests/fixtures/multiple-figfiles'
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
self.command.dispatch(['ps'], None)
|
||||
|
||||
output = mock_stdout.getvalue()
|
||||
self.assertIn('fig_simple_1', output)
|
||||
self.assertIn('fig_another_1', output)
|
||||
self.assertNotIn('fig_yetanother_1', output)
|
||||
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def test_ps_alternate_figfile(self, mock_stdout):
|
||||
self.command.base_dir = 'tests/fixtures/multiple-figfiles'
|
||||
self.command.dispatch(['-f', 'fig2.yml', 'up', '-d'], None)
|
||||
self.command.dispatch(['-f', 'fig2.yml', 'ps'], None)
|
||||
|
||||
output = mock_stdout.getvalue()
|
||||
self.assertNotIn('fig_simple_1', output)
|
||||
self.assertNotIn('fig_another_1', output)
|
||||
self.assertIn('fig_yetanother_1', output)
|
||||
|
||||
def test_rm(self):
|
||||
service = self.command.project.get_service('simple')
|
||||
service.create_container()
|
||||
service.kill()
|
||||
self.assertEqual(len(service.containers(stopped=True)), 1)
|
||||
self.command.dispatch(['rm', '--force'], None)
|
||||
self.assertEqual(len(service.containers(stopped=True)), 0)
|
||||
|
||||
def test_scale(self):
|
||||
project = self.command.project
|
||||
|
||||
self.command.scale({'SERVICE=NUM': ['simple=1']})
|
||||
self.assertEqual(len(project.get_service('simple').containers()), 1)
|
||||
|
||||
self.command.scale({'SERVICE=NUM': ['simple=3', 'another=2']})
|
||||
self.assertEqual(len(project.get_service('simple').containers()), 3)
|
||||
self.assertEqual(len(project.get_service('another').containers()), 2)
|
||||
|
||||
self.command.scale({'SERVICE=NUM': ['simple=1', 'another=1']})
|
||||
self.assertEqual(len(project.get_service('simple').containers()), 1)
|
||||
self.assertEqual(len(project.get_service('another').containers()), 1)
|
||||
|
||||
self.command.scale({'SERVICE=NUM': ['simple=1', 'another=1']})
|
||||
self.assertEqual(len(project.get_service('simple').containers()), 1)
|
||||
self.assertEqual(len(project.get_service('another').containers()), 1)
|
||||
|
||||
self.command.scale({'SERVICE=NUM': ['simple=0', 'another=0']})
|
||||
self.assertEqual(len(project.get_service('simple').containers()), 0)
|
||||
self.assertEqual(len(project.get_service('another').containers()), 0)
|
||||
@@ -1,51 +0,0 @@
|
||||
from __future__ import unicode_literals
|
||||
from .testcases import DockerClientTestCase
|
||||
from fig.container import Container
|
||||
|
||||
class ContainerTest(DockerClientTestCase):
|
||||
def test_from_ps(self):
|
||||
container = Container.from_ps(self.client, {
|
||||
"Id":"abc",
|
||||
"Image":"ubuntu:12.04",
|
||||
"Command":"sleep 300",
|
||||
"Created":1387384730,
|
||||
"Status":"Up 8 seconds",
|
||||
"Ports":None,
|
||||
"SizeRw":0,
|
||||
"SizeRootFs":0,
|
||||
"Names":["/db_1"]
|
||||
}, has_been_inspected=True)
|
||||
self.assertEqual(container.dictionary, {
|
||||
"ID": "abc",
|
||||
"Image":"ubuntu:12.04",
|
||||
"Name": "/db_1",
|
||||
})
|
||||
|
||||
def test_environment(self):
|
||||
container = Container(self.client, {
|
||||
'ID': 'abc',
|
||||
'Config': {
|
||||
'Env': [
|
||||
'FOO=BAR',
|
||||
'BAZ=DOGE',
|
||||
]
|
||||
}
|
||||
}, has_been_inspected=True)
|
||||
self.assertEqual(container.environment, {
|
||||
'FOO': 'BAR',
|
||||
'BAZ': 'DOGE',
|
||||
})
|
||||
|
||||
def test_number(self):
|
||||
container = Container.from_ps(self.client, {
|
||||
"Id":"abc",
|
||||
"Image":"ubuntu:12.04",
|
||||
"Command":"sleep 300",
|
||||
"Created":1387384730,
|
||||
"Status":"Up 8 seconds",
|
||||
"Ports":None,
|
||||
"SizeRw":0,
|
||||
"SizeRootFs":0,
|
||||
"Names":["/db_1"]
|
||||
}, has_been_inspected=True)
|
||||
self.assertEqual(container.number, 1)
|
||||
tests/fixtures/links-figfile/fig.yml (vendored, new file, 11 lines)
@@ -0,0 +1,11 @@
db:
  image: busybox:latest
  command: /bin/sleep 300
web:
  image: busybox:latest
  command: /bin/sleep 300
  links:
    - db:db
console:
  image: busybox:latest
  command: /bin/sleep 300
@@ -1,3 +1,3 @@
definedinyamlnotyml:
  image: ubuntu
  image: busybox:latest
  command: /bin/sleep 300
tests/fixtures/multiple-figfiles/fig.yml (vendored, 4 lines changed)
@@ -1,6 +1,6 @@
simple:
  image: ubuntu
  image: busybox:latest
  command: /bin/sleep 300
another:
  image: ubuntu
  image: busybox:latest
  command: /bin/sleep 300

tests/fixtures/multiple-figfiles/fig2.yml (vendored, 2 lines changed)
@@ -1,3 +1,3 @@
yetanother:
  image: ubuntu
  image: busybox:latest
  command: /bin/sleep 300

tests/fixtures/simple-dockerfile/Dockerfile (vendored, 2 lines changed)
@@ -1,2 +1,2 @@
FROM ubuntu
FROM busybox:latest
CMD echo "success"

tests/fixtures/simple-figfile/fig.yml (vendored, 4 lines changed)
@@ -1,6 +1,6 @@
simple:
  image: ubuntu
  image: busybox:latest
  command: /bin/sleep 300
another:
  image: ubuntu
  image: busybox:latest
  command: /bin/sleep 300

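As context for the fixtures above, the unit tests in this diff build projects directly from such service definitions; a hedged sketch of the equivalent in code (the client is passed as None exactly as the unit tests do, and the 'db:db' link alias is taken from the fixture and assumed to be accepted here):

    from fig.project import Project

    config = {
        'db':      {'image': 'busybox:latest', 'command': '/bin/sleep 300'},
        'web':     {'image': 'busybox:latest', 'command': '/bin/sleep 300', 'links': ['db:db']},
        'console': {'image': 'busybox:latest', 'command': '/bin/sleep 300'},
    }

    # Services are sorted in dependency order, so db comes before web (which links to it).
    project = Project.from_config('figtest', config, None)
    print([s.name for s in project.services])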
tests/integration/__init__.py (new file, 0 lines)
tests/integration/cli_test.py (new file, 175 lines)
@@ -0,0 +1,175 @@
|
||||
from __future__ import absolute_import
|
||||
from .testcases import DockerClientTestCase
|
||||
from mock import patch
|
||||
from fig.cli.main import TopLevelCommand
|
||||
from fig.packages.six import StringIO
|
||||
import sys
|
||||
|
||||
class CLITestCase(DockerClientTestCase):
|
||||
def setUp(self):
|
||||
super(CLITestCase, self).setUp()
|
||||
self.old_sys_exit = sys.exit
|
||||
sys.exit = lambda code=0: None
|
||||
self.command = TopLevelCommand()
|
||||
self.command.base_dir = 'tests/fixtures/simple-figfile'
|
||||
|
||||
def tearDown(self):
|
||||
sys.exit = self.old_sys_exit
|
||||
self.command.project.kill()
|
||||
self.command.project.remove_stopped()
|
||||
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def test_ps(self, mock_stdout):
|
||||
self.command.project.get_service('simple').create_container()
|
||||
self.command.dispatch(['ps'], None)
|
||||
self.assertIn('fig_simple_1', mock_stdout.getvalue())
|
||||
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def test_ps_default_figfile(self, mock_stdout):
|
||||
self.command.base_dir = 'tests/fixtures/multiple-figfiles'
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
self.command.dispatch(['ps'], None)
|
||||
|
||||
output = mock_stdout.getvalue()
|
||||
self.assertIn('fig_simple_1', output)
|
||||
self.assertIn('fig_another_1', output)
|
||||
self.assertNotIn('fig_yetanother_1', output)
|
||||
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def test_ps_alternate_figfile(self, mock_stdout):
|
||||
self.command.base_dir = 'tests/fixtures/multiple-figfiles'
|
||||
self.command.dispatch(['-f', 'fig2.yml', 'up', '-d'], None)
|
||||
self.command.dispatch(['-f', 'fig2.yml', 'ps'], None)
|
||||
|
||||
output = mock_stdout.getvalue()
|
||||
self.assertNotIn('fig_simple_1', output)
|
||||
self.assertNotIn('fig_another_1', output)
|
||||
self.assertIn('fig_yetanother_1', output)
|
||||
|
||||
def test_up(self):
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
service = self.command.project.get_service('simple')
|
||||
another = self.command.project.get_service('another')
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
self.assertEqual(len(another.containers()), 1)
|
||||
|
||||
def test_up_with_links(self):
|
||||
self.command.base_dir = 'tests/fixtures/links-figfile'
|
||||
self.command.dispatch(['up', '-d', 'web'], None)
|
||||
web = self.command.project.get_service('web')
|
||||
db = self.command.project.get_service('db')
|
||||
console = self.command.project.get_service('console')
|
||||
self.assertEqual(len(web.containers()), 1)
|
||||
self.assertEqual(len(db.containers()), 1)
|
||||
self.assertEqual(len(console.containers()), 0)
|
||||
|
||||
def test_up_with_no_deps(self):
|
||||
self.command.base_dir = 'tests/fixtures/links-figfile'
|
||||
self.command.dispatch(['up', '-d', '--no-deps', 'web'], None)
|
||||
web = self.command.project.get_service('web')
|
||||
db = self.command.project.get_service('db')
|
||||
console = self.command.project.get_service('console')
|
||||
self.assertEqual(len(web.containers()), 1)
|
||||
self.assertEqual(len(db.containers()), 0)
|
||||
self.assertEqual(len(console.containers()), 0)
|
||||
|
||||
def test_up_with_recreate(self):
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
service = self.command.project.get_service('simple')
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
|
||||
old_ids = [c.id for c in service.containers()]
|
||||
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
|
||||
new_ids = [c.id for c in service.containers()]
|
||||
|
||||
self.assertNotEqual(old_ids, new_ids)
|
||||
|
||||
def test_up_with_keep_old(self):
|
||||
self.command.dispatch(['up', '-d'], None)
|
||||
service = self.command.project.get_service('simple')
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
|
||||
old_ids = [c.id for c in service.containers()]
|
||||
|
||||
self.command.dispatch(['up', '-d', '--no-recreate'], None)
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
|
||||
new_ids = [c.id for c in service.containers()]
|
||||
|
||||
self.assertEqual(old_ids, new_ids)
|
||||
|
||||
|
||||
@patch('dockerpty.start')
|
||||
def test_run_service_without_links(self, mock_stdout):
|
||||
self.command.base_dir = 'tests/fixtures/links-figfile'
|
||||
self.command.dispatch(['run', 'console', '/bin/true'], None)
|
||||
self.assertEqual(len(self.command.project.containers()), 0)
|
||||
|
||||
@patch('dockerpty.start')
|
||||
def test_run_service_with_links(self, mock_stdout):
|
||||
self.command.base_dir = 'tests/fixtures/links-figfile'
|
||||
self.command.dispatch(['run', 'web', '/bin/true'], None)
|
||||
db = self.command.project.get_service('db')
|
||||
console = self.command.project.get_service('console')
|
||||
self.assertEqual(len(db.containers()), 1)
|
||||
self.assertEqual(len(console.containers()), 0)
|
||||
|
||||
@patch('dockerpty.start')
|
||||
def test_run_with_no_deps(self, mock_stdout):
|
||||
mock_stdout.fileno = lambda: 1
|
||||
|
||||
self.command.base_dir = 'tests/fixtures/links-figfile'
|
||||
self.command.dispatch(['run', '--no-deps', 'web', '/bin/true'], None)
|
||||
db = self.command.project.get_service('db')
|
||||
self.assertEqual(len(db.containers()), 0)
|
||||
|
||||
@patch('dockerpty.start')
|
||||
def test_run_does_not_recreate_linked_containers(self, mock_stdout):
|
||||
mock_stdout.fileno = lambda: 1
|
||||
|
||||
self.command.base_dir = 'tests/fixtures/links-figfile'
|
||||
self.command.dispatch(['up', '-d', 'db'], None)
|
||||
db = self.command.project.get_service('db')
|
||||
self.assertEqual(len(db.containers()), 1)
|
||||
|
||||
old_ids = [c.id for c in db.containers()]
|
||||
|
||||
self.command.dispatch(['run', 'web', '/bin/true'], None)
|
||||
self.assertEqual(len(db.containers()), 1)
|
||||
|
||||
new_ids = [c.id for c in db.containers()]
|
||||
|
||||
self.assertEqual(old_ids, new_ids)
|
||||
|
||||
def test_rm(self):
|
||||
service = self.command.project.get_service('simple')
|
||||
service.create_container()
|
||||
service.kill()
|
||||
self.assertEqual(len(service.containers(stopped=True)), 1)
|
||||
self.command.dispatch(['rm', '--force'], None)
|
||||
self.assertEqual(len(service.containers(stopped=True)), 0)
|
||||
|
||||
def test_scale(self):
|
||||
project = self.command.project
|
||||
|
||||
self.command.scale({'SERVICE=NUM': ['simple=1']})
|
||||
self.assertEqual(len(project.get_service('simple').containers()), 1)
|
||||
|
||||
self.command.scale({'SERVICE=NUM': ['simple=3', 'another=2']})
|
||||
self.assertEqual(len(project.get_service('simple').containers()), 3)
|
||||
self.assertEqual(len(project.get_service('another').containers()), 2)
|
||||
|
||||
self.command.scale({'SERVICE=NUM': ['simple=1', 'another=1']})
|
||||
self.assertEqual(len(project.get_service('simple').containers()), 1)
|
||||
self.assertEqual(len(project.get_service('another').containers()), 1)
|
||||
|
||||
self.command.scale({'SERVICE=NUM': ['simple=1', 'another=1']})
|
||||
self.assertEqual(len(project.get_service('simple').containers()), 1)
|
||||
self.assertEqual(len(project.get_service('another').containers()), 1)
|
||||
|
||||
self.command.scale({'SERVICE=NUM': ['simple=0', 'another=0']})
|
||||
self.assertEqual(len(project.get_service('simple').containers()), 0)
|
||||
self.assertEqual(len(project.get_service('another').containers()), 0)
|
||||
tests/integration/project_test.py (new file, 203 lines)
@@ -0,0 +1,203 @@
|
||||
from __future__ import unicode_literals
|
||||
from fig.project import Project, ConfigurationError
|
||||
from .testcases import DockerClientTestCase
|
||||
|
||||
|
||||
class ProjectTest(DockerClientTestCase):
|
||||
def test_start_stop_kill_remove(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db')
|
||||
project = Project('figtest', [web, db], self.client)
|
||||
|
||||
project.start()
|
||||
|
||||
self.assertEqual(len(web.containers()), 0)
|
||||
self.assertEqual(len(db.containers()), 0)
|
||||
|
||||
web_container_1 = web.create_container()
|
||||
web_container_2 = web.create_container()
|
||||
db_container = db.create_container()
|
||||
|
||||
project.start(service_names=['web'])
|
||||
self.assertEqual(set(c.name for c in project.containers()), set([web_container_1.name, web_container_2.name]))
|
||||
|
||||
project.start()
|
||||
self.assertEqual(set(c.name for c in project.containers()), set([web_container_1.name, web_container_2.name, db_container.name]))
|
||||
|
||||
project.stop(service_names=['web'], timeout=1)
|
||||
self.assertEqual(set(c.name for c in project.containers()), set([db_container.name]))
|
||||
|
||||
project.kill(service_names=['db'])
|
||||
self.assertEqual(len(project.containers()), 0)
|
||||
self.assertEqual(len(project.containers(stopped=True)), 3)
|
||||
|
||||
project.remove_stopped(service_names=['web'])
|
||||
self.assertEqual(len(project.containers(stopped=True)), 1)
|
||||
|
||||
project.remove_stopped()
|
||||
self.assertEqual(len(project.containers(stopped=True)), 0)
|
||||
|
||||
def test_project_up(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db', volumes=['/var/db'])
|
||||
project = Project('figtest', [web, db], self.client)
|
||||
project.start()
|
||||
self.assertEqual(len(project.containers()), 0)
|
||||
|
||||
project.up(['db'])
|
||||
self.assertEqual(len(project.containers()), 1)
|
||||
self.assertEqual(len(db.containers()), 1)
|
||||
self.assertEqual(len(web.containers()), 0)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_project_up_recreates_containers(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db', volumes=['/var/db'])
|
||||
project = Project('figtest', [web, db], self.client)
|
||||
project.start()
|
||||
self.assertEqual(len(project.containers()), 0)
|
||||
|
||||
project.up(['db'])
|
||||
self.assertEqual(len(project.containers()), 1)
|
||||
old_db_id = project.containers()[0].id
|
||||
db_volume_path = project.containers()[0].inspect()['Volumes']['/var/db']
|
||||
|
||||
project.up()
|
||||
self.assertEqual(len(project.containers()), 2)
|
||||
|
||||
db_container = [c for c in project.containers() if 'db' in c.name][0]
|
||||
self.assertNotEqual(c.id, old_db_id)
|
||||
self.assertEqual(c.inspect()['Volumes']['/var/db'], db_volume_path)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_project_up_with_no_recreate_running(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db', volumes=['/var/db'])
|
||||
project = Project('figtest', [web, db], self.client)
|
||||
project.start()
|
||||
self.assertEqual(len(project.containers()), 0)
|
||||
|
||||
project.up(['db'])
|
||||
self.assertEqual(len(project.containers()), 1)
|
||||
old_db_id = project.containers()[0].id
|
||||
db_volume_path = project.containers()[0].inspect()['Volumes']['/var/db']
|
||||
|
||||
project.up(recreate=False)
|
||||
self.assertEqual(len(project.containers()), 2)
|
||||
|
||||
db_container = [c for c in project.containers() if 'db' in c.name][0]
|
||||
self.assertEqual(c.id, old_db_id)
|
||||
self.assertEqual(c.inspect()['Volumes']['/var/db'], db_volume_path)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_project_up_with_no_recreate_stopped(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db', volumes=['/var/db'])
|
||||
project = Project('figtest', [web, db], self.client)
|
||||
project.start()
|
||||
self.assertEqual(len(project.containers()), 0)
|
||||
|
||||
project.up(['db'])
|
||||
project.stop()
|
||||
|
||||
old_containers = project.containers(stopped=True)
|
||||
|
||||
self.assertEqual(len(old_containers), 1)
|
||||
old_db_id = old_containers[0].id
|
||||
db_volume_path = old_containers[0].inspect()['Volumes']['/var/db']
|
||||
|
||||
project.up(recreate=False)
|
||||
|
||||
new_containers = project.containers(stopped=True)
|
||||
self.assertEqual(len(new_containers), 2)
|
||||
|
||||
db_container = [c for c in new_containers if 'db' in c.name][0]
|
||||
self.assertEqual(c.id, old_db_id)
|
||||
self.assertEqual(c.inspect()['Volumes']['/var/db'], db_volume_path)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_project_up_without_all_services(self):
|
||||
console = self.create_service('console')
|
||||
db = self.create_service('db')
|
||||
project = Project('figtest', [console, db], self.client)
|
||||
project.start()
|
||||
self.assertEqual(len(project.containers()), 0)
|
||||
|
||||
project.up()
|
||||
self.assertEqual(len(project.containers()), 2)
|
||||
self.assertEqual(len(db.containers()), 1)
|
||||
self.assertEqual(len(console.containers()), 1)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_project_up_starts_links(self):
|
||||
console = self.create_service('console')
|
||||
db = self.create_service('db', volumes=['/var/db'])
|
||||
web = self.create_service('web', links=[(db, 'db')])
|
||||
|
||||
project = Project('figtest', [web, db, console], self.client)
|
||||
project.start()
|
||||
self.assertEqual(len(project.containers()), 0)
|
||||
|
||||
project.up(['web'])
|
||||
self.assertEqual(len(project.containers()), 2)
|
||||
self.assertEqual(len(web.containers()), 1)
|
||||
self.assertEqual(len(db.containers()), 1)
|
||||
self.assertEqual(len(console.containers()), 0)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_project_up_with_no_deps(self):
|
||||
console = self.create_service('console')
|
||||
db = self.create_service('db', volumes=['/var/db'])
|
||||
web = self.create_service('web', links=[(db, 'db')])
|
||||
|
||||
project = Project('figtest', [web, db, console], self.client)
|
||||
project.start()
|
||||
self.assertEqual(len(project.containers()), 0)
|
||||
|
||||
project.up(['web'], start_links=False)
|
||||
self.assertEqual(len(project.containers()), 1)
|
||||
self.assertEqual(len(web.containers()), 1)
|
||||
self.assertEqual(len(db.containers()), 0)
|
||||
self.assertEqual(len(console.containers()), 0)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
def test_unscale_after_restart(self):
|
||||
web = self.create_service('web')
|
||||
project = Project('figtest', [web], self.client)
|
||||
|
||||
project.start()
|
||||
|
||||
service = project.get_service('web')
|
||||
service.scale(1)
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
service.scale(3)
|
||||
self.assertEqual(len(service.containers()), 3)
|
||||
project.up()
|
||||
service = project.get_service('web')
|
||||
self.assertEqual(len(service.containers()), 3)
|
||||
service.scale(1)
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
project.up()
|
||||
service = project.get_service('web')
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
# does scale=0 ,makes any sense? after recreating at least 1 container is running
|
||||
service.scale(0)
|
||||
project.up()
|
||||
service = project.get_service('web')
|
||||
self.assertEqual(len(service.containers()), 1)
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
@@ -1,34 +1,13 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
from fig import Service
|
||||
from fig.service import CannotBeScaledError, ConfigError
|
||||
from fig.service import CannotBeScaledError
|
||||
from fig.container import Container
|
||||
from fig.packages.docker.errors import APIError
|
||||
from .testcases import DockerClientTestCase
|
||||
|
||||
import os
|
||||
|
||||
class ServiceTest(DockerClientTestCase):
|
||||
def test_name_validations(self):
|
||||
self.assertRaises(ConfigError, lambda: Service(name=''))
|
||||
|
||||
self.assertRaises(ConfigError, lambda: Service(name=' '))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='/'))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='!'))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='\xe2'))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='_'))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='____'))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='foo_bar'))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='__foo_bar__'))
|
||||
|
||||
Service('a')
|
||||
Service('foo')
|
||||
|
||||
def test_project_validation(self):
|
||||
self.assertRaises(ConfigError, lambda: Service(name='foo', project='_'))
|
||||
Service(name='foo', project='bar')
|
||||
|
||||
def test_config_validation(self):
|
||||
self.assertRaises(ConfigError, lambda: Service(name='foo', port=['8000']))
|
||||
Service(name='foo', ports=['8000'])
|
||||
|
||||
def test_containers(self):
|
||||
foo = self.create_service('foo')
|
||||
bar = self.create_service('bar')
|
||||
@@ -113,6 +92,22 @@ class ServiceTest(DockerClientTestCase):
|
||||
service.start_container(container)
|
||||
self.assertIn('/var/db', container.inspect()['Volumes'])
|
||||
|
||||
def test_create_container_with_specified_volume(self):
|
||||
service = self.create_service('db', volumes=['/tmp:/host-tmp'])
|
||||
container = service.create_container()
|
||||
service.start_container(container)
|
||||
self.assertIn('/host-tmp', container.inspect()['Volumes'])
|
||||
|
||||
def test_create_container_with_volumes_from(self):
|
||||
volume_service = self.create_service('data')
|
||||
volume_container_1 = volume_service.create_container()
|
||||
volume_container_2 = Container.create(self.client, image='busybox:latest', command=["/bin/sleep", "300"])
|
||||
host_service = self.create_service('host', volumes_from=[volume_service, volume_container_2])
|
||||
host_container = host_service.create_container()
|
||||
host_service.start_container(host_container)
|
||||
self.assertIn(volume_container_1.id, host_container.inspect()['HostConfig']['VolumesFrom'])
|
||||
self.assertIn(volume_container_2.id, host_container.inspect()['HostConfig']['VolumesFrom'])
|
||||
|
||||
def test_recreate_containers(self):
|
||||
service = self.create_service(
|
||||
'db',
|
||||
@@ -132,23 +127,23 @@ class ServiceTest(DockerClientTestCase):
|
||||
num_containers_before = len(self.client.containers(all=True))
|
||||
|
||||
service.options['environment']['FOO'] = '2'
|
||||
(intermediate, new) = service.recreate_containers()
|
||||
self.assertEqual(len(intermediate), 1)
|
||||
self.assertEqual(len(new), 1)
|
||||
tuples = service.recreate_containers()
|
||||
self.assertEqual(len(tuples), 1)
|
||||
|
||||
new_container = new[0]
|
||||
intermediate_container = intermediate[0]
|
||||
intermediate_container = tuples[0][0]
|
||||
new_container = tuples[0][1]
|
||||
self.assertEqual(intermediate_container.dictionary['Config']['Entrypoint'], ['echo'])
|
||||
|
||||
self.assertEqual(new_container.dictionary['Config']['Entrypoint'], ['ps'])
|
||||
self.assertEqual(new_container.dictionary['Config']['Cmd'], ['ax'])
|
||||
self.assertIn('FOO=2', new_container.dictionary['Config']['Env'])
|
||||
self.assertEqual(new_container.name, 'figtest_db_1')
|
||||
service.start_container(new_container)
|
||||
self.assertEqual(new_container.inspect()['Volumes']['/var/db'], volume_path)
|
||||
self.assertIn(intermediate_container.id, new_container.dictionary['HostConfig']['VolumesFrom'])
|
||||
|
||||
self.assertEqual(len(self.client.containers(all=True)), num_containers_before + 1)
|
||||
self.assertEqual(len(self.client.containers(all=True)), num_containers_before)
|
||||
self.assertNotEqual(old_container.id, new_container.id)
|
||||
self.assertRaises(APIError, lambda: self.client.inspect_container(intermediate_container.id))
|
||||
|
||||
def test_start_container_passes_through_options(self):
|
||||
db = self.create_service('db')
|
||||
@@ -175,12 +170,17 @@ class ServiceTest(DockerClientTestCase):
|
||||
web.start_container()
|
||||
self.assertIn('custom_link_name', web.containers()[0].links())
|
||||
|
||||
def test_start_container_creates_links_to_its_own_service(self):
|
||||
db1 = self.create_service('db')
|
||||
db2 = self.create_service('db')
|
||||
db1.start_container()
|
||||
db2.start_container()
|
||||
self.assertIn('db_1', db2.containers()[0].links())
|
||||
def test_start_normal_container_does_not_create_links_to_its_own_service(self):
|
||||
db = self.create_service('db')
|
||||
c1 = db.start_container()
|
||||
c2 = db.start_container()
|
||||
self.assertNotIn(c1.name, c2.links())
|
||||
|
||||
def test_start_one_off_container_creates_links_to_its_own_service(self):
|
||||
db = self.create_service('db')
|
||||
c1 = db.start_container()
|
||||
c2 = db.start_container(one_off=True)
|
||||
self.assertIn(c1.name, c2.links())
|
||||
|
||||
def test_start_container_builds_images(self):
|
||||
service = Service(
|
||||
@@ -209,25 +209,61 @@ class ServiceTest(DockerClientTestCase):
|
||||
def test_start_container_creates_ports(self):
|
||||
service = self.create_service('web', ports=[8000])
|
||||
container = service.start_container().inspect()
|
||||
self.assertEqual(list(container['HostConfig']['PortBindings'].keys()), ['8000/tcp'])
|
||||
self.assertNotEqual(container['HostConfig']['PortBindings']['8000/tcp'][0]['HostPort'], '8000')
|
||||
self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/tcp'])
|
||||
self.assertNotEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
|
||||
|
||||
def test_start_container_stays_unpriviliged(self):
|
||||
service = self.create_service('web')
|
||||
container = service.start_container().inspect()
|
||||
self.assertEqual(container['HostConfig']['Privileged'], False)
|
||||
|
||||
def test_start_container_becomes_priviliged(self):
|
||||
service = self.create_service('web', privileged = True)
|
||||
container = service.start_container().inspect()
|
||||
self.assertEqual(container['HostConfig']['Privileged'], True)
|
||||
|
||||
def test_expose_does_not_publish_ports(self):
|
||||
service = self.create_service('web', expose=[8000])
|
||||
container = service.start_container().inspect()
|
||||
self.assertEqual(container['NetworkSettings']['Ports'], {'8000/tcp': None})
|
||||
|
||||
def test_start_container_creates_port_with_explicit_protocol(self):
|
||||
service = self.create_service('web', ports=['8000/udp'])
|
||||
container = service.start_container().inspect()
|
||||
self.assertEqual(list(container['HostConfig']['PortBindings'].keys()), ['8000/udp'])
|
||||
self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/udp'])
|
||||
|
||||
def test_start_container_creates_fixed_external_ports(self):
|
||||
service = self.create_service('web', ports=['8000:8000'])
|
||||
container = service.start_container().inspect()
|
||||
self.assertIn('8000/tcp', container['HostConfig']['PortBindings'])
|
||||
self.assertEqual(container['HostConfig']['PortBindings']['8000/tcp'][0]['HostPort'], '8000')
|
||||
self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
|
||||
self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
|
||||
|
||||
def test_start_container_creates_fixed_external_ports_when_it_is_different_to_internal_port(self):
|
||||
service = self.create_service('web', ports=['8001:8000'])
|
||||
container = service.start_container().inspect()
|
||||
self.assertIn('8000/tcp', container['HostConfig']['PortBindings'])
|
||||
self.assertEqual(container['HostConfig']['PortBindings']['8000/tcp'][0]['HostPort'], '8001')
|
||||
self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
|
||||
self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8001')
|
||||
|
||||
def test_port_with_explicit_interface(self):
|
||||
service = self.create_service('web', ports=[
|
||||
'127.0.0.1:8001:8000',
|
||||
'0.0.0.0:9001:9000',
|
||||
])
|
||||
container = service.start_container().inspect()
|
||||
self.assertEqual(container['NetworkSettings']['Ports'], {
|
||||
'8000/tcp': [
|
||||
{
|
||||
'HostIp': '127.0.0.1',
|
||||
'HostPort': '8001',
|
||||
},
|
||||
],
|
||||
'9000/tcp': [
|
||||
{
|
||||
'HostIp': '0.0.0.0',
|
||||
'HostPort': '9001',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
def test_scale(self):
|
||||
service = self.create_service('web')
|
||||
@@ -252,4 +288,42 @@ class ServiceTest(DockerClientTestCase):
|
||||
for container in containers:
|
||||
self.assertEqual(list(container.inspect()['HostConfig']['PortBindings'].keys()), ['8000/tcp'])
|
||||
|
||||
def test_network_mode_none(self):
|
||||
service = self.create_service('web', net='none')
|
||||
container = service.start_container().inspect()
|
||||
self.assertEqual(container['HostConfig']['NetworkMode'], 'none')
|
||||
|
||||
def test_network_mode_bridged(self):
|
||||
service = self.create_service('web', net='bridge')
|
||||
container = service.start_container().inspect()
|
||||
self.assertEqual(container['HostConfig']['NetworkMode'], 'bridge')
|
||||
|
||||
def test_network_mode_host(self):
|
||||
service = self.create_service('web', net='host')
|
||||
container = service.start_container().inspect()
|
||||
self.assertEqual(container['HostConfig']['NetworkMode'], 'host')
|
||||
|
||||
def test_working_dir_param(self):
|
||||
service = self.create_service('container', working_dir='/working/dir/sample')
|
||||
container = service.create_container().inspect()
|
||||
self.assertEqual(container['Config']['WorkingDir'], '/working/dir/sample')
|
||||
|
||||
def test_split_env(self):
|
||||
service = self.create_service('web', environment=['NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS='])
|
||||
env = service.start_container().environment
|
||||
for k,v in {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}.iteritems():
|
||||
self.assertEqual(env[k], v)
|
||||
|
||||
def test_resolve_env(self):
|
||||
service = self.create_service('web', environment={'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': None, 'NO_DEF': None})
|
||||
os.environ['FILE_DEF'] = 'E1'
|
||||
os.environ['FILE_DEF_EMPTY'] = 'E2'
|
||||
os.environ['ENV_DEF'] = 'E3'
|
||||
try:
|
||||
env = service.start_container().environment
|
||||
for k,v in {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''}.iteritems():
|
||||
self.assertEqual(env[k], v)
|
||||
finally:
|
||||
del os.environ['FILE_DEF']
|
||||
del os.environ['FILE_DEF_EMPTY']
|
||||
del os.environ['ENV_DEF']
|
||||
@@ -3,14 +3,14 @@ from __future__ import absolute_import
from fig.packages.docker import Client
from fig.service import Service
from fig.cli.utils import docker_url
from . import unittest
from .. import unittest


class DockerClientTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = Client(docker_url())
cls.client.pull('ubuntu', tag='latest')
cls.client.pull('busybox', tag='latest')

def setUp(self):
for c in self.client.containers(all=True):
@@ -18,7 +18,7 @@ class DockerClientTestCase(unittest.TestCase):
self.client.kill(c['Id'])
self.client.remove_container(c['Id'])
for i in self.client.images():
if isinstance(i['Tag'], basestring) and 'figtest' in i['Tag']:
if isinstance(i.get('Tag'), basestring) and 'figtest' in i['Tag']:
self.client.remove_image(i)

def create_service(self, name, **kwargs):
@@ -28,7 +28,7 @@ class DockerClientTestCase(unittest.TestCase):
project='figtest',
name=name,
client=self.client,
image="ubuntu",
image="busybox:latest",
**kwargs
)

@@ -1,120 +0,0 @@
|
||||
from __future__ import unicode_literals
|
||||
from fig.project import Project, ConfigurationError
|
||||
from .testcases import DockerClientTestCase
|
||||
|
||||
|
||||
class ProjectTest(DockerClientTestCase):
|
||||
def test_from_dict(self):
|
||||
project = Project.from_dicts('figtest', [
|
||||
{
|
||||
'name': 'web',
|
||||
'image': 'ubuntu'
|
||||
},
|
||||
{
|
||||
'name': 'db',
|
||||
'image': 'ubuntu'
|
||||
}
|
||||
], self.client)
|
||||
self.assertEqual(len(project.services), 2)
|
||||
self.assertEqual(project.get_service('web').name, 'web')
|
||||
self.assertEqual(project.get_service('web').options['image'], 'ubuntu')
|
||||
self.assertEqual(project.get_service('db').name, 'db')
|
||||
self.assertEqual(project.get_service('db').options['image'], 'ubuntu')
|
||||
|
||||
def test_from_dict_sorts_in_dependency_order(self):
|
||||
project = Project.from_dicts('figtest', [
|
||||
{
|
||||
'name': 'web',
|
||||
'image': 'ubuntu',
|
||||
'links': ['db'],
|
||||
},
|
||||
{
|
||||
'name': 'db',
|
||||
'image': 'ubuntu'
|
||||
}
|
||||
], self.client)
|
||||
|
||||
self.assertEqual(project.services[0].name, 'db')
|
||||
self.assertEqual(project.services[1].name, 'web')
|
||||
|
||||
def test_from_config(self):
|
||||
project = Project.from_config('figtest', {
|
||||
'web': {
|
||||
'image': 'ubuntu',
|
||||
},
|
||||
'db': {
|
||||
'image': 'ubuntu',
|
||||
},
|
||||
}, self.client)
|
||||
self.assertEqual(len(project.services), 2)
|
||||
self.assertEqual(project.get_service('web').name, 'web')
|
||||
self.assertEqual(project.get_service('web').options['image'], 'ubuntu')
|
||||
self.assertEqual(project.get_service('db').name, 'db')
|
||||
self.assertEqual(project.get_service('db').options['image'], 'ubuntu')
|
||||
|
||||
def test_from_config_throws_error_when_not_dict(self):
|
||||
with self.assertRaises(ConfigurationError):
|
||||
project = Project.from_config('figtest', {
|
||||
'web': 'ubuntu',
|
||||
}, self.client)
|
||||
|
||||
def test_get_service(self):
|
||||
web = self.create_service('web')
|
||||
project = Project('test', [web], self.client)
|
||||
self.assertEqual(project.get_service('web'), web)
|
||||
|
||||
def test_recreate_containers(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db')
|
||||
project = Project('test', [web, db], self.client)
|
||||
|
||||
old_web_container = web.create_container()
|
||||
self.assertEqual(len(web.containers(stopped=True)), 1)
|
||||
self.assertEqual(len(db.containers(stopped=True)), 0)
|
||||
|
||||
(old, new) = project.recreate_containers()
|
||||
self.assertEqual(len(old), 1)
|
||||
self.assertEqual(old[0][0], web)
|
||||
self.assertEqual(len(new), 2)
|
||||
self.assertEqual(new[0][0], web)
|
||||
self.assertEqual(new[1][0], db)
|
||||
|
||||
self.assertEqual(len(web.containers(stopped=True)), 1)
|
||||
self.assertEqual(len(db.containers(stopped=True)), 1)
|
||||
|
||||
# remove intermediate containers
|
||||
for (service, container) in old:
|
||||
container.remove()
|
||||
|
||||
def test_start_stop_kill_remove(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db')
|
||||
project = Project('figtest', [web, db], self.client)
|
||||
|
||||
project.start()
|
||||
|
||||
self.assertEqual(len(web.containers()), 0)
|
||||
self.assertEqual(len(db.containers()), 0)
|
||||
|
||||
web_container_1 = web.create_container()
|
||||
web_container_2 = web.create_container()
|
||||
db_container = db.create_container()
|
||||
|
||||
project.start(service_names=['web'])
|
||||
self.assertEqual(set(c.name for c in project.containers()), set([web_container_1.name, web_container_2.name]))
|
||||
|
||||
project.start()
|
||||
self.assertEqual(set(c.name for c in project.containers()), set([web_container_1.name, web_container_2.name, db_container.name]))
|
||||
|
||||
project.stop(service_names=['web'], timeout=1)
|
||||
self.assertEqual(set(c.name for c in project.containers()), set([db_container.name]))
|
||||
|
||||
project.kill(service_names=['db'])
|
||||
self.assertEqual(len(project.containers()), 0)
|
||||
self.assertEqual(len(project.containers(stopped=True)), 3)
|
||||
|
||||
project.remove_stopped(service_names=['web'])
|
||||
self.assertEqual(len(project.containers(stopped=True)), 1)
|
||||
|
||||
project.remove_stopped()
|
||||
self.assertEqual(len(project.containers(stopped=True)), 0)
|
||||
@@ -1,37 +0,0 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
from fig.cli.utils import split_buffer
|
||||
from . import unittest
|
||||
|
||||
class SplitBufferTest(unittest.TestCase):
|
||||
def test_single_line_chunks(self):
|
||||
def reader():
|
||||
yield "abc\n"
|
||||
yield "def\n"
|
||||
yield "ghi\n"
|
||||
|
||||
self.assertEqual(list(split_buffer(reader(), '\n')), ["abc\n", "def\n", "ghi\n"])
|
||||
|
||||
def test_no_end_separator(self):
|
||||
def reader():
|
||||
yield "abc\n"
|
||||
yield "def\n"
|
||||
yield "ghi"
|
||||
|
||||
self.assertEqual(list(split_buffer(reader(), '\n')), ["abc\n", "def\n", "ghi"])
|
||||
|
||||
def test_multiple_line_chunk(self):
|
||||
def reader():
|
||||
yield "abc\ndef\nghi"
|
||||
|
||||
self.assertEqual(list(split_buffer(reader(), '\n')), ["abc\n", "def\n", "ghi"])
|
||||
|
||||
def test_chunked_line(self):
|
||||
def reader():
|
||||
yield "a"
|
||||
yield "b"
|
||||
yield "c"
|
||||
yield "\n"
|
||||
yield "d"
|
||||
|
||||
self.assertEqual(list(split_buffer(reader(), '\n')), ["abc\n", "d"])
|
||||
tests/unit/__init__.py (new file, 0 lines)
tests/unit/cli_test.py (new file, 16 lines)
@@ -0,0 +1,16 @@
from __future__ import unicode_literals
from __future__ import absolute_import
from .. import unittest
from fig.cli.main import TopLevelCommand
from fig.packages.six import StringIO

class CLITestCase(unittest.TestCase):
def test_yaml_filename_check(self):
command = TopLevelCommand()
command.base_dir = 'tests/fixtures/longer-filename-figfile'
self.assertTrue(command.project.get_service('definedinyamlnotyml'))

def test_help(self):
command = TopLevelCommand()
with self.assertRaises(SystemExit):
command.dispatch(['-h'], None)
tests/unit/container_test.py (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
from __future__ import unicode_literals
|
||||
from .. import unittest
|
||||
from fig.container import Container
|
||||
|
||||
class ContainerTest(unittest.TestCase):
|
||||
def test_from_ps(self):
|
||||
container = Container.from_ps(None, {
|
||||
"Id":"abc",
|
||||
"Image":"busybox:latest",
|
||||
"Command":"sleep 300",
|
||||
"Created":1387384730,
|
||||
"Status":"Up 8 seconds",
|
||||
"Ports":None,
|
||||
"SizeRw":0,
|
||||
"SizeRootFs":0,
|
||||
"Names":["/figtest_db_1"]
|
||||
}, has_been_inspected=True)
|
||||
self.assertEqual(container.dictionary, {
|
||||
"Id": "abc",
|
||||
"Image":"busybox:latest",
|
||||
"Name": "/figtest_db_1",
|
||||
})
|
||||
|
||||
def test_environment(self):
|
||||
container = Container(None, {
|
||||
'Id': 'abc',
|
||||
'Config': {
|
||||
'Env': [
|
||||
'FOO=BAR',
|
||||
'BAZ=DOGE',
|
||||
]
|
||||
}
|
||||
}, has_been_inspected=True)
|
||||
self.assertEqual(container.environment, {
|
||||
'FOO': 'BAR',
|
||||
'BAZ': 'DOGE',
|
||||
})
|
||||
|
||||
def test_number(self):
|
||||
container = Container.from_ps(None, {
|
||||
"Id":"abc",
|
||||
"Image":"busybox:latest",
|
||||
"Command":"sleep 300",
|
||||
"Created":1387384730,
|
||||
"Status":"Up 8 seconds",
|
||||
"Ports":None,
|
||||
"SizeRw":0,
|
||||
"SizeRootFs":0,
|
||||
"Names":["/figtest_db_1"]
|
||||
}, has_been_inspected=True)
|
||||
self.assertEqual(container.number, 1)
|
||||
|
||||
def test_name(self):
|
||||
container = Container.from_ps(None, {
|
||||
"Id":"abc",
|
||||
"Image":"busybox:latest",
|
||||
"Command":"sleep 300",
|
||||
"Names":["/figtest_db_1"]
|
||||
}, has_been_inspected=True)
|
||||
self.assertEqual(container.name, "figtest_db_1")
|
||||
|
||||
def test_name_without_project(self):
|
||||
container = Container.from_ps(None, {
|
||||
"Id":"abc",
|
||||
"Image":"busybox:latest",
|
||||
"Command":"sleep 300",
|
||||
"Names":["/figtest_db_1"]
|
||||
}, has_been_inspected=True)
|
||||
self.assertEqual(container.name_without_project, "db_1")
|
||||
tests/unit/log_printer_test.py (new file, 57 lines)
@@ -0,0 +1,57 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
|
||||
from fig.cli.log_printer import LogPrinter
|
||||
from .. import unittest
|
||||
|
||||
|
||||
class LogPrinterTest(unittest.TestCase):
|
||||
def test_single_container(self):
|
||||
def reader(*args, **kwargs):
|
||||
yield "hello\nworld"
|
||||
|
||||
container = MockContainer(reader)
|
||||
output = run_log_printer([container])
|
||||
|
||||
self.assertIn('hello', output)
|
||||
self.assertIn('world', output)
|
||||
|
||||
def test_unicode(self):
|
||||
glyph = u'\u2022'.encode('utf-8')
|
||||
|
||||
def reader(*args, **kwargs):
|
||||
yield glyph + b'\n'
|
||||
|
||||
container = MockContainer(reader)
|
||||
output = run_log_printer([container])
|
||||
|
||||
self.assertIn(glyph, output)
|
||||
|
||||
|
||||
def run_log_printer(containers):
|
||||
r, w = os.pipe()
|
||||
reader, writer = os.fdopen(r, 'r'), os.fdopen(w, 'w')
|
||||
printer = LogPrinter(containers, output=writer)
|
||||
printer.run()
|
||||
writer.close()
|
||||
return reader.read()
|
||||
|
||||
|
||||
class MockContainer(object):
|
||||
def __init__(self, reader):
|
||||
self._reader = reader
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return 'myapp_web_1'
|
||||
|
||||
@property
|
||||
def name_without_project(self):
|
||||
return 'web_1'
|
||||
|
||||
def attach(self, *args, **kwargs):
|
||||
return self._reader()
|
||||
|
||||
def wait(self, *args, **kwargs):
|
||||
return 0
|
||||
tests/unit/project_test.py (new file, 141 lines)
@@ -0,0 +1,141 @@
|
||||
from __future__ import unicode_literals
|
||||
from .. import unittest
|
||||
from fig.service import Service
|
||||
from fig.project import Project, ConfigurationError
|
||||
|
||||
class ProjectTest(unittest.TestCase):
|
||||
def test_from_dict(self):
|
||||
project = Project.from_dicts('figtest', [
|
||||
{
|
||||
'name': 'web',
|
||||
'image': 'busybox:latest'
|
||||
},
|
||||
{
|
||||
'name': 'db',
|
||||
'image': 'busybox:latest'
|
||||
},
|
||||
], None)
|
||||
self.assertEqual(len(project.services), 2)
|
||||
self.assertEqual(project.get_service('web').name, 'web')
|
||||
self.assertEqual(project.get_service('web').options['image'], 'busybox:latest')
|
||||
self.assertEqual(project.get_service('db').name, 'db')
|
||||
self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')
|
||||
|
||||
def test_from_dict_sorts_in_dependency_order(self):
|
||||
project = Project.from_dicts('figtest', [
|
||||
{
|
||||
'name': 'web',
|
||||
'image': 'busybox:latest',
|
||||
'links': ['db'],
|
||||
},
|
||||
{
|
||||
'name': 'db',
|
||||
'image': 'busybox:latest',
|
||||
'volumes_from': ['volume']
|
||||
},
|
||||
{
|
||||
'name': 'volume',
|
||||
'image': 'busybox:latest',
|
||||
'volumes': ['/tmp'],
|
||||
}
|
||||
], None)
|
||||
|
||||
self.assertEqual(project.services[0].name, 'volume')
|
||||
self.assertEqual(project.services[1].name, 'db')
|
||||
self.assertEqual(project.services[2].name, 'web')
|
||||
|
||||
def test_from_config(self):
|
||||
project = Project.from_config('figtest', {
|
||||
'web': {
|
||||
'image': 'busybox:latest',
|
||||
},
|
||||
'db': {
|
||||
'image': 'busybox:latest',
|
||||
},
|
||||
}, None)
|
||||
self.assertEqual(len(project.services), 2)
|
||||
self.assertEqual(project.get_service('web').name, 'web')
|
||||
self.assertEqual(project.get_service('web').options['image'], 'busybox:latest')
|
||||
self.assertEqual(project.get_service('db').name, 'db')
|
||||
self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')
|
||||
|
||||
def test_from_config_throws_error_when_not_dict(self):
|
||||
with self.assertRaises(ConfigurationError):
|
||||
project = Project.from_config('figtest', {
|
||||
'web': 'busybox:latest',
|
||||
}, None)
|
||||
|
||||
def test_get_service(self):
|
||||
web = Service(
|
||||
project='figtest',
|
||||
name='web',
|
||||
client=None,
|
||||
image="busybox:latest",
|
||||
)
|
||||
project = Project('test', [web], None)
|
||||
self.assertEqual(project.get_service('web'), web)
|
||||
|
||||
def test_get_services_returns_all_services_without_args(self):
|
||||
web = Service(
|
||||
project='figtest',
|
||||
name='web',
|
||||
)
|
||||
console = Service(
|
||||
project='figtest',
|
||||
name='console',
|
||||
)
|
||||
project = Project('test', [web, console], None)
|
||||
self.assertEqual(project.get_services(), [web, console])
|
||||
|
||||
def test_get_services_returns_listed_services_with_args(self):
|
||||
web = Service(
|
||||
project='figtest',
|
||||
name='web',
|
||||
)
|
||||
console = Service(
|
||||
project='figtest',
|
||||
name='console',
|
||||
)
|
||||
project = Project('test', [web, console], None)
|
||||
self.assertEqual(project.get_services(['console']), [console])
|
||||
|
||||
def test_get_services_with_include_links(self):
|
||||
db = Service(
|
||||
project='figtest',
|
||||
name='db',
|
||||
)
|
||||
web = Service(
|
||||
project='figtest',
|
||||
name='web',
|
||||
links=[(db, 'database')]
|
||||
)
|
||||
cache = Service(
|
||||
project='figtest',
|
||||
name='cache'
|
||||
)
|
||||
console = Service(
|
||||
project='figtest',
|
||||
name='console',
|
||||
links=[(web, 'web')]
|
||||
)
|
||||
project = Project('test', [web, db, cache, console], None)
|
||||
self.assertEqual(
|
||||
project.get_services(['console'], include_links=True),
|
||||
[db, web, console]
|
||||
)
|
||||
|
||||
def test_get_services_removes_duplicates_following_links(self):
|
||||
db = Service(
|
||||
project='figtest',
|
||||
name='db',
|
||||
)
|
||||
web = Service(
|
||||
project='figtest',
|
||||
name='web',
|
||||
links=[(db, 'database')]
|
||||
)
|
||||
project = Project('test', [web, db], None)
|
||||
self.assertEqual(
|
||||
project.get_services(['web', 'db'], include_links=True),
|
||||
[db, web]
|
||||
)
|
||||
tests/unit/service_test.py (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
from .. import unittest
|
||||
from fig import Service
|
||||
from fig.service import ConfigError, split_port
|
||||
|
||||
class ServiceTest(unittest.TestCase):
|
||||
def test_name_validations(self):
|
||||
self.assertRaises(ConfigError, lambda: Service(name=''))
|
||||
|
||||
self.assertRaises(ConfigError, lambda: Service(name=' '))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='/'))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='!'))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='\xe2'))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='_'))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='____'))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='foo_bar'))
|
||||
self.assertRaises(ConfigError, lambda: Service(name='__foo_bar__'))
|
||||
|
||||
Service('a')
|
||||
Service('foo')
|
||||
|
||||
def test_project_validation(self):
|
||||
self.assertRaises(ConfigError, lambda: Service(name='foo', project='_'))
|
||||
Service(name='foo', project='bar')
|
||||
|
||||
def test_config_validation(self):
|
||||
self.assertRaises(ConfigError, lambda: Service(name='foo', port=['8000']))
|
||||
Service(name='foo', ports=['8000'])
|
||||
|
||||
def test_split_port(self):
|
||||
internal_port, external_port = split_port("127.0.0.1:1000:2000")
|
||||
self.assertEqual(internal_port, "2000")
|
||||
self.assertEqual(external_port, ("127.0.0.1", "1000"))
|
||||
|
||||
internal_port, external_port = split_port("127.0.0.1::2000")
|
||||
self.assertEqual(internal_port, "2000")
|
||||
self.assertEqual(external_port, ("127.0.0.1",))
|
||||
|
||||
internal_port, external_port = split_port("1000:2000")
|
||||
self.assertEqual(internal_port, "2000")
|
||||
self.assertEqual(external_port, "1000")
|
||||
|
||||
def test_split_domainname_none(self):
|
||||
service = Service('foo',
|
||||
hostname = 'name',
|
||||
)
|
||||
service.next_container_name = lambda x: 'foo'
|
||||
opts = service._get_container_create_options({})
|
||||
self.assertEqual(opts['hostname'], 'name', 'hostname')
|
||||
self.assertFalse('domainname' in opts, 'domainname')
|
||||
|
||||
def test_split_domainname_fqdn(self):
|
||||
service = Service('foo',
|
||||
hostname = 'name.domain.tld',
|
||||
)
|
||||
service.next_container_name = lambda x: 'foo'
|
||||
opts = service._get_container_create_options({})
|
||||
self.assertEqual(opts['hostname'], 'name', 'hostname')
|
||||
self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
|
||||
|
||||
def test_split_domainname_both(self):
|
||||
service = Service('foo',
|
||||
hostname = 'name',
|
||||
domainname = 'domain.tld',
|
||||
)
|
||||
service.next_container_name = lambda x: 'foo'
|
||||
opts = service._get_container_create_options({})
|
||||
self.assertEqual(opts['hostname'], 'name', 'hostname')
|
||||
self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
|
||||
|
||||
def test_split_domainname_weird(self):
|
||||
service = Service('foo',
|
||||
hostname = 'name.sub',
|
||||
domainname = 'domain.tld',
|
||||
)
|
||||
service.next_container_name = lambda x: 'foo'
|
||||
opts = service._get_container_create_options({})
|
||||
self.assertEqual(opts['hostname'], 'name.sub', 'hostname')
|
||||
self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
|
||||
@@ -1,5 +1,5 @@
from fig.project import sort_service_dicts, DependencyError
from . import unittest
from .. import unittest


class SortServiceTest(unittest.TestCase):
tests/unit/split_buffer_test.py (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
from fig.cli.utils import split_buffer
|
||||
from .. import unittest
|
||||
|
||||
class SplitBufferTest(unittest.TestCase):
|
||||
def test_single_line_chunks(self):
|
||||
def reader():
|
||||
yield b'abc\n'
|
||||
yield b'def\n'
|
||||
yield b'ghi\n'
|
||||
|
||||
self.assert_produces(reader, [b'abc\n', b'def\n', b'ghi\n'])
|
||||
|
||||
def test_no_end_separator(self):
|
||||
def reader():
|
||||
yield b'abc\n'
|
||||
yield b'def\n'
|
||||
yield b'ghi'
|
||||
|
||||
self.assert_produces(reader, [b'abc\n', b'def\n', b'ghi'])
|
||||
|
||||
def test_multiple_line_chunk(self):
|
||||
def reader():
|
||||
yield b'abc\ndef\nghi'
|
||||
|
||||
self.assert_produces(reader, [b'abc\n', b'def\n', b'ghi'])
|
||||
|
||||
def test_chunked_line(self):
|
||||
def reader():
|
||||
yield b'a'
|
||||
yield b'b'
|
||||
yield b'c'
|
||||
yield b'\n'
|
||||
yield b'd'
|
||||
|
||||
self.assert_produces(reader, [b'abc\n', b'd'])
|
||||
|
||||
def test_preserves_unicode_sequences_within_lines(self):
|
||||
string = u"a\u2022c\n".encode('utf-8')
|
||||
|
||||
def reader():
|
||||
yield string
|
||||
|
||||
self.assert_produces(reader, [string])
|
||||
|
||||
def assert_produces(self, reader, expectations):
|
||||
split = split_buffer(reader(), b'\n')
|
||||
|
||||
for (actual, expected) in zip(split, expectations):
|
||||
self.assertEqual(type(actual), type(expected))
|
||||
self.assertEqual(actual, expected)