Mirror of https://github.com/docker/compose.git
synced 2026-02-11 11:09:23 +08:00
Compare commits
48 Commits
SHA1: 6dc1a404d5, d1d4f47764, 3abce4259f, c78b952c3d, fff5e51426, a724aa5717, 10725136d8, 8f1793dd06,
fd85be2c9e, 24959209cc, 29c9763feb, ca7151aeb1, 6e932794f7, 9e1dfcfb37, 5166b2c1a8, 80991f1521,
b752a86589, c2acceba4e, f8ee52ca2a, 2b245bdf9e, d7e01a23f8, 4e20be9c66, 94e15a9985, 5061875fa9,
d9782b2dd1, 530afba5cb, 4a90a7691b, 050f81e37c, aecaf665f1, 23a8938809, 641c773476, 97be5b4cfb,
76b7d0095d, 352ad7a38c, 401ea4e7a8, 710cd38591, 859d4bb98b, a3374ac51d, 168b1909ae, a96ab41739,
0f5a56b3c2, c1d9e5156f, b0ec54b6f7, 2360bf0eb9, 1a1a61a672, 13c7380113, 3a48172332, 14e778b3de
.travis.yml (25 lines changed)
@@ -3,17 +3,22 @@ python:
- '2.6'
- '2.7'
env:
- DOCKER_VERSION=0.8.0
- DOCKER_VERSION=0.8.1
matrix:
allow_failures:
- python: '3.2'
- python: '3.3'
install: script/travis-install
global:
- secure: exbot0LTV/0Wic6ElKCrOZmh2ZrieuGwEqfYKf5rVuwu1sLngYRihh+lBL/hTwc79NSu829pbwiWfsQZrXbk/yvaS7avGR0CLDoipyPxlYa2/rfs/o4OdTZqXv0LcFmmd54j5QBMpWU1S+CYOwNkwas57trrvIpPbzWjMtfYzOU=
install:
- pip install .
- pip install -r requirements.txt
- pip install -r requirements-dev.txt
- sudo curl -L -o /usr/local/bin/orchard https://github.com/orchardup/go-orchard/releases/download/2.0.5/linux
- sudo chmod +x /usr/local/bin/orchard
before_script:
- '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && orchard hosts rm -f $TRAVIS_JOB_ID'
- '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && orchard hosts create $TRAVIS_JOB_ID || false'
script:
- pwd
- env
- sekexe/run "`pwd`/script/travis $TRAVIS_PYTHON_VERSION"
- nosetests tests/unit
- '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && script/travis-integration || false'
after_script:
- '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && orchard hosts rm -f $TRAVIS_JOB_ID || false'
deploy:
provider: pypi
user: orchard
CHANGES.md (12 lines changed)
@@ -1,6 +1,18 @@
Change log
==========

0.4.0 (2014-04-29)
------------------

- Support Docker 0.9 and 0.10
- Display progress bars correctly when pulling images (no more ski slopes)
- `fig up` now stops all services when any container exits
- Added support for the `privileged` config option in fig.yml (thanks @kvz!)
- Shortened and aligned log prefixes in `fig up` output
- Only containers started with `fig run` link back to their own service
- Handle UTF-8 correctly when streaming `fig build/run/up` output (thanks @mauvm and @shanejonas!)
- Error message improvements

0.3.2 (2014-03-05)
------------------
CONTRIBUTING.md (new file, 30 lines)
@@ -0,0 +1,30 @@
# Contributing to Fig

If you're looking contribute to [Fig](http://orchardup.github.io/fig/)
but you're new to the project or maybe even to Python, here are the steps
that should get you started.

1. Fork [https://github.com/orchardup/fig](https://github.com/orchardup/fig) to your username. kvz in this example.
1. Clone your forked repository locally `git clone git@github.com:kvz/fig.git`.
1. Enter the local directory `cd fig`.
1. Set up a development environment `python setup.py develop`. That will install the dependencies and set up a symlink from your `fig` executable to the checkout of the repo. So from any of your fig projects, `fig` now refers to your development project. Time to start hacking : )
1. Works for you? Run the test suite via `./scripts/test` to verify it won't break other usecases.
1. All good? Commit and push to GitHub, and submit a pull request.

## Running the test suite

    $ script/test

## Building binaries

Linux:

    $ script/build-linux

OS X:

    $ script/build-osx

Note that this only works on Mountain Lion, not Mavericks, due to a [bug in PyInstaller](http://www.pyinstaller.org/ticket/807).
README.md (28 lines changed)
@@ -1,7 +1,7 @@
Fig
===

[](https://travis-ci.org/orchardup/fig)
[](https://travis-ci.org/orchardup/fig)
[](http://badge.fury.io/py/fig)

Fast, isolated development environments using Docker.
@@ -47,29 +47,3 @@ Installation and documentation
------------------------------

Full documentation is available on [Fig's website](http://orchardup.github.io/fig/).

Running the test suite
----------------------

    $ script/test

Building OS X binaries
---------------------

    $ script/build-osx

Note that this only works on Mountain Lion, not Mavericks, due to a [bug in PyInstaller](http://www.pyinstaller.org/ticket/807).

Contributing to Fig
-------------------

If you're looking contribute to [Fig](http://orchardup.github.io/fig/)
but you're new to the project or maybe even to Python, here are the steps
that should get you started.

1. Fork [https://github.com/orchardup/fig](https://github.com/orchardup/fig) to your username. kvz in this example.
1. Clone your forked repository locally `git clone git@github.com:kvz/fig.git`.
1. Enter the local directory `cd fig`.
1. Set up a development environment `python setup.py develop`. That will install the dependencies and set up a symlink from your `fig` executable to the checkout of the repo. So from any of your fig projects, `fig` now refers to your development project. Time to start hacking : )
1. Works for you? Run the test suite via `./scripts/test` to verify it won't break other usecases.
1. All good? Commit and push to GitHub, and submit a pull request.
@@ -1 +1,3 @@
markdown: redcarpet
encoding: utf-8
@@ -38,7 +38,7 @@ Simple enough. Finally, this is all tied together with a file called `fig.yml`.
    links:
     - db

See the [`fig.yml` reference]() for more information on how it works.
See the [`fig.yml` reference](http://orchardup.github.io/fig/yml.html) for more information on how it works.

We can now start a Django project using `fig run`:
@@ -47,19 +47,7 @@ Quick start

Let's get a basic Python web app running on Fig. It assumes a little knowledge of Python, but the concepts should be clear if you're not familiar with it.

First, install Docker. If you're on OS X, you can use [docker-osx](https://github.com/noplay/docker-osx):

    $ curl https://raw.github.com/noplay/docker-osx/0.7.6/docker-osx > /usr/local/bin/docker-osx
    $ chmod +x /usr/local/bin/docker-osx
    $ docker-osx shell

Docker has guides for [Ubuntu](http://docs.docker.io/en/latest/installation/ubuntulinux/) and [other platforms](http://docs.docker.io/en/latest/installation/) in their documentation.

Next, install Fig:

    $ sudo pip install -U fig

(This command also upgrades Fig when we release a new version. If you don’t have pip installed, try `brew install python` or `apt-get install python-pip`.)
First, [install Docker and Fig](install.html).

You'll want to make a directory for the project:
@@ -6,9 +6,9 @@ title: Installing Fig
Installing Fig
==============

First, install Docker (version 0.8 or higher). If you're on OS X, you can use [docker-osx](https://github.com/noplay/docker-osx):
First, install Docker version 0.10.0. If you're on OS X, you can use [docker-osx](https://github.com/noplay/docker-osx):

    $ curl https://raw.github.com/noplay/docker-osx/0.8.0/docker-osx > /usr/local/bin/docker-osx
    $ curl https://raw.github.com/noplay/docker-osx/0.10.0/docker-osx > /usr/local/bin/docker-osx
    $ chmod +x /usr/local/bin/docker-osx
    $ docker-osx shell

@@ -16,12 +16,12 @@ Docker has guides for [Ubuntu](http://docs.docker.io/en/latest/installation/ubun

Next, install Fig. On OS X:

    $ curl -L https://github.com/orchardup/fig/releases/download/0.3.1/darwin > /usr/local/bin/fig
    $ curl -L https://github.com/orchardup/fig/releases/download/0.3.2/darwin > /usr/local/bin/fig
    $ chmod +x /usr/local/bin/fig

On 64-bit Linux:

    $ curl -L https://github.com/orchardup/fig/releases/download/0.3.1/linux > /usr/local/bin/fig
    $ curl -L https://github.com/orchardup/fig/releases/download/0.3.2/linux > /usr/local/bin/fig
    $ chmod +x /usr/local/bin/fig

Fig is also available as a Python package if you're on another platform (or if you prefer that sort of thing):
@@ -1,4 +1,4 @@
from __future__ import unicode_literals
from .service import Service

__version__ = '0.3.2'
__version__ = '0.4.0'
@@ -4,7 +4,7 @@ import sys
|
||||
|
||||
from itertools import cycle
|
||||
|
||||
from .multiplexer import Multiplexer
|
||||
from .multiplexer import Multiplexer, STOP
|
||||
from . import colors
|
||||
from .utils import split_buffer
|
||||
|
||||
@@ -13,12 +13,26 @@ class LogPrinter(object):
|
||||
def __init__(self, containers, attach_params=None):
|
||||
self.containers = containers
|
||||
self.attach_params = attach_params or {}
|
||||
self.prefix_width = self._calculate_prefix_width(containers)
|
||||
self.generators = self._make_log_generators()
|
||||
|
||||
def run(self):
|
||||
mux = Multiplexer(self.generators)
|
||||
for line in mux.loop():
|
||||
sys.stdout.write(line)
|
||||
sys.stdout.write(line.encode(sys.__stdout__.encoding or 'utf-8'))
|
||||
|
||||
def _calculate_prefix_width(self, containers):
|
||||
"""
|
||||
Calculate the maximum width of container names so we can make the log
|
||||
prefixes line up like so:
|
||||
|
||||
db_1 | Listening
|
||||
web_1 | Listening
|
||||
"""
|
||||
prefix_width = 0
|
||||
for container in containers:
|
||||
prefix_width = max(prefix_width, len(container.name_without_project))
|
||||
return prefix_width
|
||||
|
||||
def _make_log_generators(self):
|
||||
color_fns = cycle(colors.rainbow())
|
||||
@@ -31,10 +45,24 @@ class LogPrinter(object):
|
||||
return generators
|
||||
|
||||
def _make_log_generator(self, container, color_fn):
|
||||
prefix = color_fn(container.name + " | ")
|
||||
prefix = color_fn(self._generate_prefix(container))
|
||||
# Attach to container before log printer starts running
|
||||
line_generator = split_buffer(self._attach(container), '\n')
|
||||
return (prefix + line.decode('utf-8') for line in line_generator)
|
||||
|
||||
for line in line_generator:
|
||||
yield prefix + line.decode('utf-8')
|
||||
|
||||
exit_code = container.wait()
|
||||
yield color_fn("%s exited with code %s\n" % (container.name, exit_code))
|
||||
yield STOP
|
||||
|
||||
def _generate_prefix(self, container):
|
||||
"""
|
||||
Generate the prefix for a log line without colour
|
||||
"""
|
||||
name = container.name_without_project
|
||||
padding = ' ' * (self.prefix_width - len(name))
|
||||
return ''.join([name, padding, ' | '])
|
||||
|
||||
def _attach(self, container):
|
||||
params = {
|
||||
|
||||
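Aside: the LogPrinter change above (writing `line.encode(sys.__stdout__.encoding or 'utf-8')`) is the UTF-8 fix listed in the 0.4.0 changelog. A minimal sketch of the underlying issue, assuming Python 2 (what fig targeted at the time); the helper name and sample string are ours, not fig's:

    # -*- coding: utf-8 -*-
    # Minimal illustration (not fig code): when stdout is piped, Python 2 sets
    # sys.stdout.encoding to None, so writing unicode containing non-ASCII
    # characters raises UnicodeEncodeError unless it is encoded explicitly.
    import sys

    def write_log_line(line, stream=sys.stdout):
        # Same fallback as the hunk above: the real stdout's encoding, else UTF-8.
        encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8'
        stream.write(line.encode(encoding))

    write_log_line(u'web_1 | caf\xe9 ready\n')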
@@ -9,13 +9,13 @@ from inspect import getdoc
|
||||
|
||||
from .. import __version__
|
||||
from ..project import NoSuchService, ConfigurationError
|
||||
from ..service import CannotBeScaledError
|
||||
from ..service import BuildError, CannotBeScaledError
|
||||
from .command import Command
|
||||
from .formatter import Formatter
|
||||
from .log_printer import LogPrinter
|
||||
from .utils import yesno
|
||||
|
||||
from ..packages.docker.client import APIError
|
||||
from ..packages.docker.errors import APIError
|
||||
from .errors import UserError
|
||||
from .docopt_command import NoSuchCommand
|
||||
from .socketclient import SocketClient
|
||||
@@ -51,6 +51,9 @@ def main():
|
||||
except APIError as e:
|
||||
log.error(e.explanation)
|
||||
sys.exit(1)
|
||||
except BuildError as e:
|
||||
log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# stolen from docopt master
|
||||
@@ -224,11 +227,11 @@ class TopLevelCommand(Command):
|
||||
}
|
||||
container = service.create_container(one_off=True, **container_options)
|
||||
if options['-d']:
|
||||
service.start_container(container, ports=None)
|
||||
service.start_container(container, ports=None, one_off=True)
|
||||
print(container.name)
|
||||
else:
|
||||
with self._attach_to_container(container.id, raw=tty) as c:
|
||||
service.start_container(container, ports=None)
|
||||
service.start_container(container, ports=None, one_off=True)
|
||||
c.run()
|
||||
if options['--rm']:
|
||||
container.wait()
|
||||
@@ -298,20 +301,12 @@ class TopLevelCommand(Command):
|
||||
"""
|
||||
detached = options['-d']
|
||||
|
||||
(old, new) = self.project.recreate_containers(service_names=options['SERVICE'])
|
||||
to_attach = self.project.up(service_names=options['SERVICE'])
|
||||
|
||||
if not detached:
|
||||
to_attach = [c for (s, c) in new]
|
||||
print("Attaching to", list_containers(to_attach))
|
||||
log_printer = LogPrinter(to_attach, attach_params={"logs": True})
|
||||
|
||||
for (service, container) in new:
|
||||
service.start_container(container)
|
||||
|
||||
for (service, container) in old:
|
||||
container.remove()
|
||||
|
||||
if not detached:
|
||||
try:
|
||||
log_printer.run()
|
||||
finally:
|
||||
|
||||
@@ -7,6 +7,11 @@ except ImportError:
    from queue import Queue, Empty  # Python 3.x


# Yield STOP from an input generator to stop the
# top-level loop without processing any more input.
STOP = object()


class Multiplexer(object):
    def __init__(self, generators):
        self.generators = generators
@@ -17,7 +22,11 @@ class Multiplexer(object):

        while True:
            try:
                yield self.queue.get(timeout=0.1)
                item = self.queue.get(timeout=0.1)
                if item is STOP:
                    break
                else:
                    yield item
            except Empty:
                pass
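The STOP sentinel added above is what lets the log loop shut down as soon as any container exits (see the 0.4.0 changelog entry). A self-contained sketch of the pattern, not fig's actual Multiplexer; the queue here is pre-filled rather than fed by threads:

    try:
        from Queue import Queue, Empty   # Python 2.x
    except ImportError:
        from queue import Queue, Empty   # Python 3.x

    STOP = object()  # unique sentinel; checked by identity so it cannot collide with real data

    def drain(queue):
        """Yield queued items until a producer enqueues STOP."""
        while True:
            try:
                item = queue.get(timeout=0.1)
            except Empty:
                continue
            if item is STOP:
                break
            yield item

    q = Queue()
    for item in ['web_1 | Listening\n', 'db_1 | Ready\n', STOP]:
        q.put(item)

    print(list(drain(q)))  # both log lines; STOP ends the loop instead of being yielded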
@@ -81,7 +81,7 @@ class SocketClient:
            chunk = socket.recv(4096)

            if chunk:
                stream.write(chunk)
                stream.write(chunk.encode(stream.encoding or 'utf-8'))
                stream.flush()
            else:
                break
@@ -3,10 +3,8 @@ from __future__ import absolute_import
from __future__ import division
import datetime
import os
import socket
import subprocess
import platform
from .errors import UserError


def cached_property(f):
@@ -12,4 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from .client import Client, APIError # flake8: noqa
__title__ = 'docker-py'
__version__ = '0.3.0'

from .client import Client # flake8: noqa
@@ -20,6 +20,7 @@ import os
|
||||
from fig.packages import six
|
||||
|
||||
from ..utils import utils
|
||||
from .. import errors
|
||||
|
||||
INDEX_URL = 'https://index.docker.io/v1/'
|
||||
DOCKER_CONFIG_FILENAME = '.dockercfg'
|
||||
@@ -45,18 +46,19 @@ def expand_registry_url(hostname):
|
||||
|
||||
def resolve_repository_name(repo_name):
|
||||
if '://' in repo_name:
|
||||
raise ValueError('Repository name cannot contain a '
|
||||
'scheme ({0})'.format(repo_name))
|
||||
raise errors.InvalidRepository(
|
||||
'Repository name cannot contain a scheme ({0})'.format(repo_name))
|
||||
parts = repo_name.split('/', 1)
|
||||
if not '.' in parts[0] and not ':' in parts[0] and parts[0] != 'localhost':
|
||||
if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':
|
||||
# This is a docker index repo (ex: foo/bar or ubuntu)
|
||||
return INDEX_URL, repo_name
|
||||
if len(parts) < 2:
|
||||
raise ValueError('Invalid repository name ({0})'.format(repo_name))
|
||||
raise errors.InvalidRepository(
|
||||
'Invalid repository name ({0})'.format(repo_name))
|
||||
|
||||
if 'index.docker.io' in parts[0]:
|
||||
raise ValueError('Invalid repository name,'
|
||||
'try "{0}" instead'.format(parts[1]))
|
||||
raise errors.InvalidRepository(
|
||||
'Invalid repository name, try "{0}" instead'.format(parts[1]))
|
||||
|
||||
return expand_registry_url(parts[0]), parts[1]
|
||||
|
||||
@@ -87,6 +89,11 @@ def resolve_authconfig(authconfig, registry=None):
|
||||
return authconfig.get(swap_protocol(registry), None)
|
||||
|
||||
|
||||
def encode_auth(auth_info):
|
||||
return base64.b64encode(auth_info.get('username', '') + b':' +
|
||||
auth_info.get('password', ''))
|
||||
|
||||
|
||||
def decode_auth(auth):
|
||||
if isinstance(auth, six.string_types):
|
||||
auth = auth.encode('ascii')
|
||||
@@ -100,6 +107,12 @@ def encode_header(auth):
|
||||
return base64.b64encode(auth_json)
|
||||
|
||||
|
||||
def encode_full_header(auth):
|
||||
""" Returns the given auth block encoded for the X-Registry-Config header.
|
||||
"""
|
||||
return encode_header({'configs': auth})
|
||||
|
||||
|
||||
def load_config(root=None):
|
||||
"""Loads authentication data from a Docker configuration file in the given
|
||||
root directory."""
|
||||
@@ -136,7 +149,8 @@ def load_config(root=None):
|
||||
data.append(line.strip().split(' = ')[1])
|
||||
if len(data) < 2:
|
||||
# Not enough data
|
||||
raise Exception('Invalid or empty configuration file!')
|
||||
raise errors.InvalidConfigFile(
|
||||
'Invalid or empty configuration file!')
|
||||
|
||||
username, password = decode_auth(data[0])
|
||||
conf[INDEX_URL] = {
|
||||
|
||||
@@ -24,48 +24,18 @@ from fig.packages import six
|
||||
from .auth import auth
|
||||
from .unixconn import unixconn
|
||||
from .utils import utils
|
||||
from . import errors
|
||||
|
||||
if not six.PY3:
|
||||
import websocket
|
||||
|
||||
DEFAULT_DOCKER_API_VERSION = '1.9'
|
||||
DEFAULT_TIMEOUT_SECONDS = 60
|
||||
STREAM_HEADER_SIZE_BYTES = 8
|
||||
|
||||
|
||||
class APIError(requests.exceptions.HTTPError):
|
||||
def __init__(self, message, response, explanation=None):
|
||||
super(APIError, self).__init__(message, response=response)
|
||||
|
||||
self.explanation = explanation
|
||||
|
||||
if self.explanation is None and response.content:
|
||||
self.explanation = response.content.strip()
|
||||
|
||||
def __str__(self):
|
||||
message = super(APIError, self).__str__()
|
||||
|
||||
if self.is_client_error():
|
||||
message = '%s Client Error: %s' % (
|
||||
self.response.status_code, self.response.reason)
|
||||
|
||||
elif self.is_server_error():
|
||||
message = '%s Server Error: %s' % (
|
||||
self.response.status_code, self.response.reason)
|
||||
|
||||
if self.explanation:
|
||||
message = '%s ("%s")' % (message, self.explanation)
|
||||
|
||||
return message
|
||||
|
||||
def is_client_error(self):
|
||||
return 400 <= self.response.status_code < 500
|
||||
|
||||
def is_server_error(self):
|
||||
return 500 <= self.response.status_code < 600
|
||||
|
||||
|
||||
class Client(requests.Session):
|
||||
def __init__(self, base_url=None, version="1.6",
|
||||
def __init__(self, base_url=None, version=DEFAULT_DOCKER_API_VERSION,
|
||||
timeout=DEFAULT_TIMEOUT_SECONDS):
|
||||
super(Client, self).__init__()
|
||||
if base_url is None:
|
||||
@@ -108,7 +78,7 @@ class Client(requests.Session):
|
||||
try:
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.HTTPError as e:
|
||||
raise APIError(e, response, explanation=explanation)
|
||||
raise errors.APIError(e, response, explanation=explanation)
|
||||
|
||||
def _result(self, response, json=False, binary=False):
|
||||
assert not (json and binary)
|
||||
@@ -125,7 +95,7 @@ class Client(requests.Session):
|
||||
mem_limit=0, ports=None, environment=None, dns=None,
|
||||
volumes=None, volumes_from=None,
|
||||
network_disabled=False, entrypoint=None,
|
||||
cpu_shares=None, working_dir=None):
|
||||
cpu_shares=None, working_dir=None, domainname=None):
|
||||
if isinstance(command, six.string_types):
|
||||
command = shlex.split(str(command))
|
||||
if isinstance(environment, dict):
|
||||
@@ -133,7 +103,7 @@ class Client(requests.Session):
|
||||
'{0}={1}'.format(k, v) for k, v in environment.items()
|
||||
]
|
||||
|
||||
if ports and isinstance(ports, list):
|
||||
if isinstance(ports, list):
|
||||
exposed_ports = {}
|
||||
for port_definition in ports:
|
||||
port = port_definition
|
||||
@@ -145,12 +115,15 @@ class Client(requests.Session):
|
||||
exposed_ports['{0}/{1}'.format(port, proto)] = {}
|
||||
ports = exposed_ports
|
||||
|
||||
if volumes and isinstance(volumes, list):
|
||||
if isinstance(volumes, list):
|
||||
volumes_dict = {}
|
||||
for vol in volumes:
|
||||
volumes_dict[vol] = {}
|
||||
volumes = volumes_dict
|
||||
|
||||
if volumes_from and not isinstance(volumes_from, six.string_types):
|
||||
volumes_from = ','.join(volumes_from)
|
||||
|
||||
attach_stdin = False
|
||||
attach_stdout = False
|
||||
attach_stderr = False
|
||||
@@ -165,26 +138,27 @@ class Client(requests.Session):
|
||||
stdin_once = True
|
||||
|
||||
return {
|
||||
'Hostname': hostname,
|
||||
'Hostname': hostname,
|
||||
'Domainname': domainname,
|
||||
'ExposedPorts': ports,
|
||||
'User': user,
|
||||
'Tty': tty,
|
||||
'OpenStdin': stdin_open,
|
||||
'StdinOnce': stdin_once,
|
||||
'Memory': mem_limit,
|
||||
'AttachStdin': attach_stdin,
|
||||
'User': user,
|
||||
'Tty': tty,
|
||||
'OpenStdin': stdin_open,
|
||||
'StdinOnce': stdin_once,
|
||||
'Memory': mem_limit,
|
||||
'AttachStdin': attach_stdin,
|
||||
'AttachStdout': attach_stdout,
|
||||
'AttachStderr': attach_stderr,
|
||||
'Env': environment,
|
||||
'Cmd': command,
|
||||
'Dns': dns,
|
||||
'Image': image,
|
||||
'Volumes': volumes,
|
||||
'VolumesFrom': volumes_from,
|
||||
'Env': environment,
|
||||
'Cmd': command,
|
||||
'Dns': dns,
|
||||
'Image': image,
|
||||
'Volumes': volumes,
|
||||
'VolumesFrom': volumes_from,
|
||||
'NetworkDisabled': network_disabled,
|
||||
'Entrypoint': entrypoint,
|
||||
'CpuShares': cpu_shares,
|
||||
'WorkingDir': working_dir
|
||||
'Entrypoint': entrypoint,
|
||||
'CpuShares': cpu_shares,
|
||||
'WorkingDir': working_dir
|
||||
}
|
||||
|
||||
def _post_json(self, url, data, **kwargs):
|
||||
@@ -222,25 +196,26 @@ class Client(requests.Session):
|
||||
def _create_websocket_connection(self, url):
|
||||
return websocket.create_connection(url)
|
||||
|
||||
def _stream_result(self, response):
|
||||
"""Generator for straight-out, non chunked-encoded HTTP responses."""
|
||||
def _get_raw_response_socket(self, response):
|
||||
self._raise_for_status(response)
|
||||
for line in response.iter_lines(chunk_size=1, decode_unicode=True):
|
||||
# filter out keep-alive new lines
|
||||
if line:
|
||||
yield line + '\n'
|
||||
|
||||
def _stream_result_socket(self, response):
|
||||
self._raise_for_status(response)
|
||||
return response.raw._fp.fp._sock
|
||||
if six.PY3:
|
||||
return response.raw._fp.fp.raw._sock
|
||||
else:
|
||||
return response.raw._fp.fp._sock
|
||||
|
||||
def _stream_helper(self, response):
|
||||
"""Generator for data coming from a chunked-encoded HTTP response."""
|
||||
socket_fp = self._stream_result_socket(response)
|
||||
socket_fp = self._get_raw_response_socket(response)
|
||||
socket_fp.setblocking(1)
|
||||
socket = socket_fp.makefile()
|
||||
while True:
|
||||
size = int(socket.readline(), 16)
|
||||
# Because Docker introduced newlines at the end of chunks in v0.9,
|
||||
# and only on some API endpoints, we have to cater for both cases.
|
||||
size_line = socket.readline()
|
||||
if size_line == '\r\n':
|
||||
size_line = socket.readline()
|
||||
|
||||
size = int(size_line, 16)
|
||||
if size <= 0:
|
||||
break
|
||||
data = socket.readline()
|
||||
@@ -265,17 +240,20 @@ class Client(requests.Session):
|
||||
def _multiplexed_socket_stream_helper(self, response):
|
||||
"""A generator of multiplexed data blocks coming from a response
|
||||
socket."""
|
||||
socket = self._stream_result_socket(response)
|
||||
socket = self._get_raw_response_socket(response)
|
||||
|
||||
def recvall(socket, size):
|
||||
data = ''
|
||||
blocks = []
|
||||
while size > 0:
|
||||
block = socket.recv(size)
|
||||
if not block:
|
||||
return None
|
||||
|
||||
data += block
|
||||
blocks.append(block)
|
||||
size -= len(block)
|
||||
|
||||
sep = bytes() if six.PY3 else str()
|
||||
data = sep.join(blocks)
|
||||
return data
|
||||
|
||||
while True:
|
||||
@@ -304,9 +282,18 @@ class Client(requests.Session):
|
||||
u = self._url("/containers/{0}/attach".format(container))
|
||||
response = self._post(u, params=params, stream=stream)
|
||||
|
||||
# Stream multi-plexing was introduced in API v1.6.
|
||||
# Stream multi-plexing was only introduced in API v1.6. Anything before
|
||||
# that needs old-style streaming.
|
||||
if utils.compare_version('1.6', self._version) < 0:
|
||||
return stream and self._stream_result(response) or \
|
||||
def stream_result():
|
||||
self._raise_for_status(response)
|
||||
for line in response.iter_lines(chunk_size=1,
|
||||
decode_unicode=True):
|
||||
# filter out keep-alive new lines
|
||||
if line:
|
||||
yield line
|
||||
|
||||
return stream_result() if stream else \
|
||||
self._result(response, binary=True)
|
||||
|
||||
return stream and self._multiplexed_socket_stream_helper(response) or \
|
||||
@@ -319,20 +306,22 @@ class Client(requests.Session):
|
||||
'stderr': 1,
|
||||
'stream': 1
|
||||
}
|
||||
|
||||
if ws:
|
||||
return self._attach_websocket(container, params)
|
||||
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
|
||||
u = self._url("/containers/{0}/attach".format(container))
|
||||
return self._stream_result_socket(self.post(
|
||||
return self._get_raw_response_socket(self.post(
|
||||
u, None, params=self._attach_params(params), stream=True))
|
||||
|
||||
def build(self, path=None, tag=None, quiet=False, fileobj=None,
|
||||
nocache=False, rm=False, stream=False, timeout=None):
|
||||
remote = context = headers = None
|
||||
if path is None and fileobj is None:
|
||||
raise Exception("Either path or fileobj needs to be provided.")
|
||||
raise TypeError("Either path or fileobj needs to be provided.")
|
||||
|
||||
if fileobj is not None:
|
||||
context = utils.mkbuildcontext(fileobj)
|
||||
@@ -341,6 +330,9 @@ class Client(requests.Session):
|
||||
else:
|
||||
context = utils.tar(path)
|
||||
|
||||
if utils.compare_version('1.8', self._version) >= 0:
|
||||
stream = True
|
||||
|
||||
u = self._url('/build')
|
||||
params = {
|
||||
't': tag,
|
||||
@@ -352,6 +344,19 @@ class Client(requests.Session):
|
||||
if context is not None:
|
||||
headers = {'Content-Type': 'application/tar'}
|
||||
|
||||
if utils.compare_version('1.9', self._version) >= 0:
|
||||
# If we don't have any auth data so far, try reloading the config
|
||||
# file one more time in case anything showed up in there.
|
||||
if not self._auth_configs:
|
||||
self._auth_configs = auth.load_config()
|
||||
|
||||
# Send the full auth configuration (if any exists), since the build
|
||||
# could use any (or all) of the registries.
|
||||
if self._auth_configs:
|
||||
headers['X-Registry-Config'] = auth.encode_full_header(
|
||||
self._auth_configs
|
||||
)
|
||||
|
||||
response = self._post(
|
||||
u,
|
||||
data=context,
|
||||
@@ -363,8 +368,9 @@ class Client(requests.Session):
|
||||
|
||||
if context is not None:
|
||||
context.close()
|
||||
|
||||
if stream:
|
||||
return self._stream_result(response)
|
||||
return self._stream_helper(response)
|
||||
else:
|
||||
output = self._result(response)
|
||||
srch = r'Successfully built ([0-9a-f]+)'
|
||||
@@ -403,6 +409,8 @@ class Client(requests.Session):
|
||||
return res
|
||||
|
||||
def copy(self, container, resource):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
res = self._post_json(
|
||||
self._url("/containers/{0}/copy".format(container)),
|
||||
data={"Resource": resource},
|
||||
@@ -416,12 +424,12 @@ class Client(requests.Session):
|
||||
mem_limit=0, ports=None, environment=None, dns=None,
|
||||
volumes=None, volumes_from=None,
|
||||
network_disabled=False, name=None, entrypoint=None,
|
||||
cpu_shares=None, working_dir=None):
|
||||
cpu_shares=None, working_dir=None, domainname=None):
|
||||
|
||||
config = self._container_config(
|
||||
image, command, hostname, user, detach, stdin_open, tty, mem_limit,
|
||||
ports, environment, dns, volumes, volumes_from, network_disabled,
|
||||
entrypoint, cpu_shares, working_dir
|
||||
entrypoint, cpu_shares, working_dir, domainname
|
||||
)
|
||||
return self.create_container_from_config(config, name)
|
||||
|
||||
@@ -440,21 +448,7 @@ class Client(requests.Session):
|
||||
format(container))), True)
|
||||
|
||||
def events(self):
|
||||
u = self._url("/events")
|
||||
|
||||
socket = self._stream_result_socket(self.get(u, stream=True))
|
||||
|
||||
while True:
|
||||
chunk = socket.recv(4096)
|
||||
if chunk:
|
||||
# Messages come in the format of length, data, newline.
|
||||
length, data = chunk.split("\n", 1)
|
||||
length = int(length, 16)
|
||||
if length > len(data):
|
||||
data += socket.recv(length - len(data))
|
||||
yield json.loads(data)
|
||||
else:
|
||||
break
|
||||
return self._stream_helper(self.get(self._url('/events'), stream=True))
|
||||
|
||||
def export(self, container):
|
||||
if isinstance(container, dict):
|
||||
@@ -471,6 +465,8 @@ class Client(requests.Session):
|
||||
|
||||
def images(self, name=None, quiet=False, all=False, viz=False):
|
||||
if viz:
|
||||
if utils.compare_version('1.7', self._version) >= 0:
|
||||
raise Exception('Viz output is not supported in API >= 1.7!')
|
||||
return self._result(self._get(self._url("images/viz")))
|
||||
params = {
|
||||
'filter': name,
|
||||
@@ -618,7 +614,7 @@ class Client(requests.Session):
|
||||
self._auth_configs = auth.load_config()
|
||||
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
|
||||
|
||||
# Do not fail here if no atuhentication exists for this specific
|
||||
# Do not fail here if no authentication exists for this specific
|
||||
# registry as we can have a readonly pull. Just put the header if
|
||||
# we can.
|
||||
if authcfg:
|
||||
@@ -644,7 +640,7 @@ class Client(requests.Session):
|
||||
self._auth_configs = auth.load_config()
|
||||
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
|
||||
|
||||
# Do not fail here if no atuhentication exists for this specific
|
||||
# Do not fail here if no authentication exists for this specific
|
||||
# registry as we can have a readonly pull. Just put the header if
|
||||
# we can.
|
||||
if authcfg:
|
||||
@@ -652,7 +648,7 @@ class Client(requests.Session):
|
||||
|
||||
response = self._post_json(u, None, headers=headers, stream=stream)
|
||||
else:
|
||||
response = self._post_json(u, authcfg, stream=stream)
|
||||
response = self._post_json(u, None, stream=stream)
|
||||
|
||||
return stream and self._stream_helper(response) \
|
||||
or self._result(response)
|
||||
@@ -682,8 +678,8 @@ class Client(requests.Session):
|
||||
params={'term': term}),
|
||||
True)
|
||||
|
||||
def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
|
||||
publish_all_ports=False, links=None, privileged=False):
|
||||
def start(self, container, binds=None, volumes_from=None, port_bindings=None,
|
||||
lxc_conf=None, publish_all_ports=False, links=None, privileged=False):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
|
||||
@@ -698,10 +694,19 @@ class Client(requests.Session):
|
||||
}
|
||||
if binds:
|
||||
bind_pairs = [
|
||||
'{0}:{1}'.format(host, dest) for host, dest in binds.items()
|
||||
'%s:%s:%s' % (
|
||||
h, d['bind'],
|
||||
'ro' if 'ro' in d and d['ro'] else 'rw'
|
||||
) for h, d in binds.items()
|
||||
]
|
||||
|
||||
start_config['Binds'] = bind_pairs
|
||||
|
||||
if volumes_from and not isinstance(volumes_from, six.string_types):
|
||||
volumes_from = ','.join(volumes_from)
|
||||
|
||||
start_config['VolumesFrom'] = volumes_from
|
||||
|
||||
if port_bindings:
|
||||
start_config['PortBindings'] = utils.convert_port_bindings(
|
||||
port_bindings
|
||||
|
||||
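For readers following the `start()` signature change above: `binds` values are now dicts with `bind` and `ro` keys rather than bare container paths. A small illustration with hypothetical host paths, derived only from the list comprehension in the hunk:

    # Hypothetical example of the bind format accepted by the updated start().
    binds = {
        '/home/user/site': {'bind': '/code', 'ro': False},
        '/etc/ssl/certs': {'bind': '/etc/ssl/certs', 'ro': True},
    }

    # Serialised to Docker's "host:container:mode" strings, as in the hunk:
    bind_pairs = [
        '%s:%s:%s' % (h, d['bind'], 'ro' if 'ro' in d and d['ro'] else 'rw')
        for h, d in binds.items()
    ]
    print(sorted(bind_pairs))
    # ['/etc/ssl/certs:/etc/ssl/certs:ro', '/home/user/site:/code:rw']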
fig/packages/docker/errors.py (new file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
# Copyright 2014 dotCloud inc.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import requests
|
||||
|
||||
|
||||
class APIError(requests.exceptions.HTTPError):
|
||||
def __init__(self, message, response, explanation=None):
|
||||
# requests 1.2 supports response as a keyword argument, but
|
||||
# requests 1.1 doesn't
|
||||
super(APIError, self).__init__(message)
|
||||
self.response = response
|
||||
|
||||
self.explanation = explanation
|
||||
|
||||
if self.explanation is None and response.content:
|
||||
self.explanation = response.content.strip()
|
||||
|
||||
def __str__(self):
|
||||
message = super(APIError, self).__str__()
|
||||
|
||||
if self.is_client_error():
|
||||
message = '%s Client Error: %s' % (
|
||||
self.response.status_code, self.response.reason)
|
||||
|
||||
elif self.is_server_error():
|
||||
message = '%s Server Error: %s' % (
|
||||
self.response.status_code, self.response.reason)
|
||||
|
||||
if self.explanation:
|
||||
message = '%s ("%s")' % (message, self.explanation)
|
||||
|
||||
return message
|
||||
|
||||
def is_client_error(self):
|
||||
return 400 <= self.response.status_code < 500
|
||||
|
||||
def is_server_error(self):
|
||||
return 500 <= self.response.status_code < 600
|
||||
|
||||
|
||||
class DockerException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidRepository(DockerException):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidConfigFile(DockerException):
|
||||
pass
|
||||
@@ -40,7 +40,7 @@ class UnixHTTPConnection(httplib.HTTPConnection, object):
        self.sock = sock

    def _extract_path(self, url):
        #remove the base_url entirely..
        # remove the base_url entirely..
        return url.replace(self.base_url, "")

    def request(self, method, url, **kwargs):
@@ -1,3 +1,3 @@
from .utils import (
    compare_version, convert_port_bindings, mkbuildcontext, ping, tar
    compare_version, convert_port_bindings, mkbuildcontext, ping, tar, parse_repository_tag
) # flake8: noqa
@@ -15,6 +15,7 @@
|
||||
import io
|
||||
import tarfile
|
||||
import tempfile
|
||||
from distutils.version import StrictVersion
|
||||
|
||||
import requests
|
||||
from fig.packages import six
|
||||
@@ -51,15 +52,34 @@ def tar(path):
|
||||
|
||||
|
||||
def compare_version(v1, v2):
|
||||
return float(v2) - float(v1)
|
||||
"""Compare docker versions
|
||||
|
||||
>>> v1 = '1.9'
|
||||
>>> v2 = '1.10'
|
||||
>>> compare_version(v1, v2)
|
||||
1
|
||||
>>> compare_version(v2, v1)
|
||||
-1
|
||||
>>> compare_version(v2, v2)
|
||||
0
|
||||
"""
|
||||
s1 = StrictVersion(v1)
|
||||
s2 = StrictVersion(v2)
|
||||
if s1 == s2:
|
||||
return 0
|
||||
elif s1 > s2:
|
||||
return -1
|
||||
else:
|
||||
return 1
|
||||
|
||||
|
||||
def ping(url):
|
||||
try:
|
||||
res = requests.get(url)
|
||||
return res.status >= 400
|
||||
except Exception:
|
||||
return False
|
||||
else:
|
||||
return res.status_code < 400
|
||||
|
||||
|
||||
def _convert_port_binding(binding):
|
||||
@@ -94,3 +114,15 @@ def convert_port_bindings(port_bindings):
|
||||
else:
|
||||
result[key] = [_convert_port_binding(v)]
|
||||
return result
|
||||
|
||||
|
||||
def parse_repository_tag(repo):
|
||||
column_index = repo.rfind(':')
|
||||
if column_index < 0:
|
||||
return repo, ""
|
||||
tag = repo[column_index+1:]
|
||||
slash_index = tag.find('/')
|
||||
if slash_index < 0:
|
||||
return repo[:column_index], tag
|
||||
|
||||
return repo, ""
|
||||
|
||||
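A quick sanity check of the `parse_repository_tag` helper added above. The function body is copied from the hunk; the expected results are inferred from that code, not taken from docker-py documentation:

    def parse_repository_tag(repo):
        column_index = repo.rfind(':')
        if column_index < 0:
            return repo, ""
        tag = repo[column_index+1:]
        slash_index = tag.find('/')
        if slash_index < 0:
            return repo[:column_index], tag
        return repo, ""

    assert parse_repository_tag("ubuntu") == ("ubuntu", "")
    assert parse_repository_tag("ubuntu:12.04") == ("ubuntu", "12.04")
    # A colon that belongs to a registry port, not a tag, is left untouched:
    assert parse_repository_tag("localhost:5000/foo") == ("localhost:5000/foo", "")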
@@ -58,7 +58,11 @@ class Project(object):
|
||||
service_name, link_name = link.split(':', 1)
|
||||
else:
|
||||
service_name, link_name = link, None
|
||||
links.append((project.get_service(service_name), link_name))
|
||||
try:
|
||||
links.append((project.get_service(service_name), link_name))
|
||||
except NoSuchService:
|
||||
raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name))
|
||||
|
||||
del service_dict['links']
|
||||
project.services.append(Service(client=client, project=name, links=links, **service_dict))
|
||||
return project
|
||||
@@ -101,23 +105,6 @@ class Project(object):
|
||||
unsorted = [self.get_service(name) for name in service_names]
|
||||
return [s for s in self.services if s in unsorted]
|
||||
|
||||
def recreate_containers(self, service_names=None):
|
||||
"""
|
||||
For each service, create or recreate their containers.
|
||||
Returns a tuple with two lists. The first is a list of
|
||||
(service, old_container) tuples; the second is a list
|
||||
of (service, new_container) tuples.
|
||||
"""
|
||||
old = []
|
||||
new = []
|
||||
|
||||
for service in self.get_services(service_names):
|
||||
(s_old, s_new) = service.recreate_containers()
|
||||
old += [(service, container) for container in s_old]
|
||||
new += [(service, container) for container in s_new]
|
||||
|
||||
return (old, new)
|
||||
|
||||
def start(self, service_names=None, **options):
|
||||
for service in self.get_services(service_names):
|
||||
service.start(**options)
|
||||
@@ -137,6 +124,15 @@ class Project(object):
|
||||
else:
|
||||
log.info('%s uses an image, skipping' % service.name)
|
||||
|
||||
def up(self, service_names=None):
|
||||
new_containers = []
|
||||
|
||||
for service in self.get_services(service_names):
|
||||
for (_, new) in service.recreate_containers():
|
||||
new_containers.append(new)
|
||||
|
||||
return new_containers
|
||||
|
||||
def remove_stopped(self, service_names=None, **options):
|
||||
for service in self.get_services(service_names):
|
||||
service.remove_stopped(**options)
|
||||
|
||||
fig/service.py (175 lines changed)
@@ -1,25 +1,31 @@
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
from .packages.docker.client import APIError
|
||||
from .packages.docker.errors import APIError
|
||||
import logging
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
from .container import Container
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
DOCKER_CONFIG_KEYS = ['image', 'command', 'hostname', 'user', 'detach', 'stdin_open', 'tty', 'mem_limit', 'ports', 'environment', 'dns', 'volumes', 'volumes_from', 'entrypoint']
|
||||
DOCKER_CONFIG_KEYS = ['image', 'command', 'hostname', 'user', 'detach', 'stdin_open', 'tty', 'mem_limit', 'ports', 'environment', 'dns', 'volumes', 'volumes_from', 'entrypoint', 'privileged']
|
||||
DOCKER_CONFIG_HINTS = {
|
||||
'link': 'links',
|
||||
'port': 'ports',
|
||||
'volume': 'volumes',
|
||||
'link' : 'links',
|
||||
'port' : 'ports',
|
||||
'privilege' : 'privileged',
|
||||
'priviliged': 'privileged',
|
||||
'privilige' : 'privileged',
|
||||
'volume' : 'volumes',
|
||||
}
|
||||
|
||||
|
||||
class BuildError(Exception):
|
||||
pass
|
||||
def __init__(self, service, reason):
|
||||
self.service = service
|
||||
self.reason = reason
|
||||
|
||||
|
||||
class CannotBeScaledError(Exception):
|
||||
@@ -82,6 +88,14 @@ class Service(object):
|
||||
c.kill(**options)
|
||||
|
||||
def scale(self, desired_num):
|
||||
"""
|
||||
Adjusts the number of containers to the specified number and ensures they are running.
|
||||
|
||||
- creates containers until there are at least `desired_num`
|
||||
- stops containers until there are at most `desired_num` running
|
||||
- starts containers until there are at least `desired_num` running
|
||||
- removes all stopped containers
|
||||
"""
|
||||
if not self.can_be_scaled():
|
||||
raise CannotBeScaledError()
|
||||
|
||||
@@ -114,6 +128,8 @@ class Service(object):
|
||||
self.start_container(c)
|
||||
running_containers.append(c)
|
||||
|
||||
self.remove_stopped()
|
||||
|
||||
|
||||
def remove_stopped(self, **options):
|
||||
for c in self.containers(stopped=True):
|
||||
@@ -126,37 +142,37 @@ class Service(object):
|
||||
Create a container for this service. If the image doesn't exist, attempt to pull
|
||||
it.
|
||||
"""
|
||||
container_options = self._get_container_options(override_options, one_off=one_off)
|
||||
container_options = self._get_container_create_options(override_options, one_off=one_off)
|
||||
try:
|
||||
return Container.create(self.client, **container_options)
|
||||
except APIError as e:
|
||||
if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
|
||||
log.info('Pulling image %s...' % container_options['image'])
|
||||
self.client.pull(container_options['image'])
|
||||
output = self.client.pull(container_options['image'], stream=True)
|
||||
stream_output(output, sys.stdout)
|
||||
return Container.create(self.client, **container_options)
|
||||
raise
|
||||
|
||||
def recreate_containers(self, **override_options):
|
||||
"""
|
||||
If a container for this service doesn't exist, create one. If there are
|
||||
any, stop them and create new ones. Does not remove the old containers.
|
||||
If a container for this service doesn't exist, create and start one. If there are
|
||||
any, stop them, create+start new ones, and remove the old containers.
|
||||
"""
|
||||
containers = self.containers(stopped=True)
|
||||
|
||||
if len(containers) == 0:
|
||||
log.info("Creating %s..." % self.next_container_name())
|
||||
return ([], [self.create_container(**override_options)])
|
||||
container = self.create_container(**override_options)
|
||||
self.start_container(container)
|
||||
return [(None, container)]
|
||||
else:
|
||||
old_containers = []
|
||||
new_containers = []
|
||||
tuples = []
|
||||
|
||||
for c in containers:
|
||||
log.info("Recreating %s..." % c.name)
|
||||
(old_container, new_container) = self.recreate_container(c, **override_options)
|
||||
old_containers.append(old_container)
|
||||
new_containers.append(new_container)
|
||||
tuples.append(self.recreate_container(c, **override_options))
|
||||
|
||||
return (old_containers, new_containers)
|
||||
return tuples
|
||||
|
||||
def recreate_container(self, container, **override_options):
|
||||
if container.is_running:
|
||||
@@ -169,17 +185,20 @@ class Service(object):
|
||||
entrypoint=['echo'],
|
||||
command=[],
|
||||
)
|
||||
intermediate_container.start()
|
||||
intermediate_container.start(volumes_from=container.id)
|
||||
intermediate_container.wait()
|
||||
container.remove()
|
||||
|
||||
options = dict(override_options)
|
||||
options['volumes_from'] = intermediate_container.id
|
||||
new_container = self.create_container(**options)
|
||||
self.start_container(new_container, volumes_from=intermediate_container.id)
|
||||
|
||||
intermediate_container.remove()
|
||||
|
||||
return (intermediate_container, new_container)
|
||||
|
||||
def start_container(self, container=None, **override_options):
|
||||
def start_container(self, container=None, volumes_from=None, **override_options):
|
||||
if container is None:
|
||||
container = self.create_container(**override_options)
|
||||
|
||||
@@ -204,12 +223,19 @@ class Service(object):
|
||||
for volume in options['volumes']:
|
||||
if ':' in volume:
|
||||
external_dir, internal_dir = volume.split(':')
|
||||
volume_bindings[os.path.abspath(external_dir)] = internal_dir
|
||||
volume_bindings[os.path.abspath(external_dir)] = {
|
||||
'bind': internal_dir,
|
||||
'ro': False,
|
||||
}
|
||||
|
||||
privileged = options.get('privileged', False)
|
||||
|
||||
container.start(
|
||||
links=self._get_links(),
|
||||
links=self._get_links(link_to_self=override_options.get('one_off', False)),
|
||||
port_bindings=port_bindings,
|
||||
binds=volume_bindings,
|
||||
volumes_from=volumes_from,
|
||||
privileged=privileged,
|
||||
)
|
||||
return container
|
||||
|
||||
@@ -227,7 +253,7 @@ class Service(object):
|
||||
else:
|
||||
return max(numbers) + 1
|
||||
|
||||
def _get_links(self):
|
||||
def _get_links(self, link_to_self):
|
||||
links = []
|
||||
for service, link_name in self.links:
|
||||
for container in service.containers():
|
||||
@@ -235,12 +261,13 @@ class Service(object):
|
||||
links.append((container.name, link_name))
|
||||
links.append((container.name, container.name))
|
||||
links.append((container.name, container.name_without_project))
|
||||
for container in self.containers():
|
||||
links.append((container.name, container.name))
|
||||
links.append((container.name, container.name_without_project))
|
||||
if link_to_self:
|
||||
for container in self.containers():
|
||||
links.append((container.name, container.name))
|
||||
links.append((container.name, container.name_without_project))
|
||||
return links
|
||||
|
||||
def _get_container_options(self, override_options, one_off=False):
|
||||
def _get_container_create_options(self, override_options, one_off=False):
|
||||
container_options = dict((k, self.options[k]) for k in DOCKER_CONFIG_KEYS if k in self.options)
|
||||
container_options.update(override_options)
|
||||
|
||||
@@ -266,6 +293,10 @@ class Service(object):
|
||||
self.build()
|
||||
container_options['image'] = self._build_tag_name()
|
||||
|
||||
# Priviliged is only required for starting containers, not for creating them
|
||||
if 'privileged' in container_options:
|
||||
del container_options['privileged']
|
||||
|
||||
return container_options
|
||||
|
||||
def build(self):
|
||||
@@ -277,17 +308,21 @@ class Service(object):
|
||||
stream=True
|
||||
)
|
||||
|
||||
try:
|
||||
all_events = stream_output(build_output, sys.stdout)
|
||||
except StreamOutputError, e:
|
||||
raise BuildError(self, unicode(e))
|
||||
|
||||
image_id = None
|
||||
|
||||
for line in build_output:
|
||||
if line:
|
||||
match = re.search(r'Successfully built ([0-9a-f]+)', line)
|
||||
for event in all_events:
|
||||
if 'stream' in event:
|
||||
match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
|
||||
if match:
|
||||
image_id = match.group(1)
|
||||
sys.stdout.write(line)
|
||||
|
||||
if image_id is None:
|
||||
raise BuildError()
|
||||
raise BuildError(self)
|
||||
|
||||
return image_id
|
||||
|
||||
@@ -307,6 +342,84 @@ class Service(object):
|
||||
return True
|
||||
|
||||
|
||||
class StreamOutputError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def stream_output(output, stream):
|
||||
is_terminal = hasattr(stream, 'fileno') and os.isatty(stream.fileno())
|
||||
all_events = []
|
||||
lines = {}
|
||||
diff = 0
|
||||
|
||||
for chunk in output:
|
||||
event = json.loads(chunk)
|
||||
all_events.append(event)
|
||||
|
||||
if 'progress' in event or 'progressDetail' in event:
|
||||
image_id = event['id']
|
||||
|
||||
if image_id in lines:
|
||||
diff = len(lines) - lines[image_id]
|
||||
else:
|
||||
lines[image_id] = len(lines)
|
||||
stream.write("\n")
|
||||
diff = 0
|
||||
|
||||
if is_terminal:
|
||||
# move cursor up `diff` rows
|
||||
stream.write("%c[%dA" % (27, diff))
|
||||
|
||||
print_output_event(event, stream, is_terminal)
|
||||
|
||||
if 'id' in event and is_terminal:
|
||||
# move cursor back down
|
||||
stream.write("%c[%dB" % (27, diff))
|
||||
|
||||
stream.flush()
|
||||
|
||||
return all_events
|
||||
|
||||
def print_output_event(event, stream, is_terminal):
|
||||
if 'errorDetail' in event:
|
||||
raise StreamOutputError(event['errorDetail']['message'])
|
||||
|
||||
terminator = ''
|
||||
|
||||
if is_terminal and 'stream' not in event:
|
||||
# erase current line
|
||||
stream.write("%c[2K\r" % 27)
|
||||
terminator = "\r"
|
||||
pass
|
||||
elif 'progressDetail' in event:
|
||||
return
|
||||
|
||||
if 'time' in event:
|
||||
stream.write("[%s] " % event['time'])
|
||||
|
||||
if 'id' in event:
|
||||
stream.write("%s: " % event['id'])
|
||||
|
||||
if 'from' in event:
|
||||
stream.write("(from %s) " % event['from'])
|
||||
|
||||
status = event.get('status', '')
|
||||
|
||||
if 'progress' in event:
|
||||
stream.write("%s %s%s" % (status, event['progress'], terminator))
|
||||
elif 'progressDetail' in event:
|
||||
detail = event['progressDetail']
|
||||
if 'current' in detail:
|
||||
percentage = float(detail['current']) / float(detail['total']) * 100
|
||||
stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
|
||||
else:
|
||||
stream.write('%s%s' % (status, terminator))
|
||||
elif 'stream' in event:
|
||||
stream.write("%s%s" % (event['stream'], terminator))
|
||||
else:
|
||||
stream.write("%s%s\n" % (status, terminator))
|
||||
|
||||
|
||||
NAME_RE = re.compile(r'^([^_]+)_([^_]+)_(run_)?(\d+)$')
|
||||
|
||||
|
||||
|
||||
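The new `stream_output` above redraws each image layer's progress bar in place using ANSI escapes (`ESC[nA` moves the cursor up n rows, `ESC[nB` moves it back down). A tiny standalone sketch of that trick, not fig's code; the layer names and percentages are made up:

    import sys
    import time

    ESC = '%c' % 27  # same escape character stream_output builds with %c

    for pct in range(0, 101, 20):
        sys.stdout.write('layer_a: %3d%%\nlayer_b: %3d%%\n' % (pct, pct // 2))
        sys.stdout.flush()
        time.sleep(0.1)
        if pct < 100:
            sys.stdout.write('%s[2A' % ESC)  # cursor up 2 rows to overwrite both lines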
@@ -1,3 +1,4 @@
mock==1.0.1
nose==1.3.0
pyinstaller==2.1
unittest2
@@ -1,3 +1,5 @@
#!/bin/sh
mkdir -p `pwd`/dist
chmod 777 `pwd`/dist
docker build -t fig .
docker run -v `pwd`/dist:/code/dist fig pyinstaller -F bin/fig
@@ -1,6 +1,6 @@
#!/bin/bash
set -ex
rm -r venv
rm -rf venv
virtualenv venv
venv/bin/pip install pyinstaller==2.1
venv/bin/pip install .
@@ -1,23 +0,0 @@
#!/bin/bash

# Exit on first error
set -ex

# Put Python eggs in a writeable directory
export PYTHON_EGG_CACHE="/tmp/.python-eggs"

# Activate correct virtualenv
TRAVIS_PYTHON_VERSION=$1
source /home/travis/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/activate

env

# Kill background processes on exit
trap 'kill -9 $(jobs -p)' SIGINT SIGTERM EXIT

# Start docker daemon
docker -d -H unix:///var/run/docker.sock 2>> /dev/null >> /dev/null &
sleep 2

# $init is set by sekexe
cd $(dirname $init)/.. && nosetests -v
@@ -1,18 +0,0 @@
#!/bin/bash

set -ex

sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
sudo apt-get update
echo exit 101 | sudo tee /usr/sbin/policy-rc.d
sudo chmod +x /usr/sbin/policy-rc.d
sudo apt-get install -qy slirp lxc lxc-docker-$DOCKER_VERSION
git clone git://github.com/jpetazzo/sekexe
python setup.py install
pip install -r requirements-dev.txt

if [[ $TRAVIS_PYTHON_VERSION == "2.6" ]]; then
pip install unittest2
fi
script/travis-integration (new executable file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/bash
set -ex

# Kill background processes on exit
trap 'kill -9 $(jobs -p)' SIGINT SIGTERM EXIT

export DOCKER_HOST=tcp://localhost:4243
orchard proxy -H $TRAVIS_JOB_ID $DOCKER_HOST &
sleep 2
nosetests -v
tests/integration/__init__.py (new file, 0 lines)
@@ -2,8 +2,8 @@ from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
from .testcases import DockerClientTestCase
|
||||
from mock import patch
|
||||
from fig.packages.six import StringIO
|
||||
from fig.cli.main import TopLevelCommand
|
||||
from fig.packages.six import StringIO
|
||||
|
||||
class CLITestCase(DockerClientTestCase):
|
||||
def setUp(self):
|
||||
@@ -15,16 +15,6 @@ class CLITestCase(DockerClientTestCase):
|
||||
self.command.project.kill()
|
||||
self.command.project.remove_stopped()
|
||||
|
||||
def test_yaml_filename_check(self):
|
||||
self.command.base_dir = 'tests/fixtures/longer-filename-figfile'
|
||||
|
||||
project = self.command.project
|
||||
|
||||
self.assertTrue( project.get_service('definedinyamlnotyml'), "Service: definedinyamlnotyml should have been loaded from .yaml file" )
|
||||
|
||||
def test_help(self):
|
||||
self.assertRaises(SystemExit, lambda: self.command.dispatch(['-h'], None))
|
||||
|
||||
@patch('sys.stdout', new_callable=StringIO)
|
||||
def test_ps(self, mock_stdout):
|
||||
self.command.project.get_service('simple').create_container()
|
||||
tests/integration/project_test.py (new file, 87 lines)
@@ -0,0 +1,87 @@
|
||||
from __future__ import unicode_literals
|
||||
from fig.project import Project, ConfigurationError
|
||||
from .testcases import DockerClientTestCase
|
||||
|
||||
|
||||
class ProjectTest(DockerClientTestCase):
|
||||
def test_start_stop_kill_remove(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db')
|
||||
project = Project('figtest', [web, db], self.client)
|
||||
|
||||
project.start()
|
||||
|
||||
self.assertEqual(len(web.containers()), 0)
|
||||
self.assertEqual(len(db.containers()), 0)
|
||||
|
||||
web_container_1 = web.create_container()
|
||||
web_container_2 = web.create_container()
|
||||
db_container = db.create_container()
|
||||
|
||||
project.start(service_names=['web'])
|
||||
self.assertEqual(set(c.name for c in project.containers()), set([web_container_1.name, web_container_2.name]))
|
||||
|
||||
project.start()
|
||||
self.assertEqual(set(c.name for c in project.containers()), set([web_container_1.name, web_container_2.name, db_container.name]))
|
||||
|
||||
project.stop(service_names=['web'], timeout=1)
|
||||
self.assertEqual(set(c.name for c in project.containers()), set([db_container.name]))
|
||||
|
||||
project.kill(service_names=['db'])
|
||||
self.assertEqual(len(project.containers()), 0)
|
||||
self.assertEqual(len(project.containers(stopped=True)), 3)
|
||||
|
||||
project.remove_stopped(service_names=['web'])
|
||||
self.assertEqual(len(project.containers(stopped=True)), 1)
|
||||
|
||||
project.remove_stopped()
|
||||
self.assertEqual(len(project.containers(stopped=True)), 0)
|
||||
|
||||
def test_project_up(self):
|
||||
web = self.create_service('web')
|
||||
db = self.create_service('db', volumes=['/var/db'])
|
||||
project = Project('figtest', [web, db], self.client)
|
||||
project.start()
|
||||
self.assertEqual(len(project.containers()), 0)
|
||||
|
||||
project.up(['db'])
|
||||
self.assertEqual(len(project.containers()), 1)
|
||||
old_db_id = project.containers()[0].id
|
||||
db_volume_path = project.containers()[0].inspect()['Volumes']['/var/db']
|
||||
|
||||
project.up()
|
||||
self.assertEqual(len(project.containers()), 2)
|
||||
|
||||
db_container = [c for c in project.containers() if 'db' in c.name][0]
|
||||
self.assertNotEqual(c.id, old_db_id)
|
||||
self.assertEqual(c.inspect()['Volumes']['/var/db'], db_volume_path)
|
||||
|
||||
project.kill()
|
||||
project.remove_stopped()
|
||||
|
||||
    def test_unscale_after_restart(self):
        web = self.create_service('web')
        project = Project('figtest', [web], self.client)

        project.start()

        service = project.get_service('web')
        service.scale(1)
        self.assertEqual(len(service.containers()), 1)
        service.scale(3)
        self.assertEqual(len(service.containers()), 3)
        project.up()
        service = project.get_service('web')
        self.assertEqual(len(service.containers()), 3)
        service.scale(1)
        self.assertEqual(len(service.containers()), 1)
        project.up()
        service = project.get_service('web')
        self.assertEqual(len(service.containers()), 1)
        # does scale=0 make any sense? after recreating, at least 1 container is running
        service.scale(0)
        project.up()
        service = project.get_service('web')
        self.assertEqual(len(service.containers()), 1)
        project.kill()
        project.remove_stopped()
@@ -1,34 +1,11 @@
from __future__ import unicode_literals
from __future__ import absolute_import
from fig import Service
from fig.service import CannotBeScaledError, ConfigError
from fig.service import CannotBeScaledError
from fig.packages.docker.errors import APIError
from .testcases import DockerClientTestCase


class ServiceTest(DockerClientTestCase):
    def test_name_validations(self):
        self.assertRaises(ConfigError, lambda: Service(name=''))

        self.assertRaises(ConfigError, lambda: Service(name=' '))
        self.assertRaises(ConfigError, lambda: Service(name='/'))
        self.assertRaises(ConfigError, lambda: Service(name='!'))
        self.assertRaises(ConfigError, lambda: Service(name='\xe2'))
        self.assertRaises(ConfigError, lambda: Service(name='_'))
        self.assertRaises(ConfigError, lambda: Service(name='____'))
        self.assertRaises(ConfigError, lambda: Service(name='foo_bar'))
        self.assertRaises(ConfigError, lambda: Service(name='__foo_bar__'))

        Service('a')
        Service('foo')

    def test_project_validation(self):
        self.assertRaises(ConfigError, lambda: Service(name='foo', project='_'))
        Service(name='foo', project='bar')

    def test_config_validation(self):
        self.assertRaises(ConfigError, lambda: Service(name='foo', port=['8000']))
        Service(name='foo', ports=['8000'])

    def test_containers(self):
        foo = self.create_service('foo')
        bar = self.create_service('bar')
@@ -113,6 +90,12 @@ class ServiceTest(DockerClientTestCase):
        service.start_container(container)
        self.assertIn('/var/db', container.inspect()['Volumes'])

    def test_create_container_with_specified_volume(self):
        service = self.create_service('db', volumes=['/tmp:/host-tmp'])
        container = service.create_container()
        service.start_container(container)
        self.assertIn('/host-tmp', container.inspect()['Volumes'])

    def test_recreate_containers(self):
        service = self.create_service(
            'db',
@@ -132,23 +115,22 @@ class ServiceTest(DockerClientTestCase):
        num_containers_before = len(self.client.containers(all=True))

        service.options['environment']['FOO'] = '2'
        (intermediate, new) = service.recreate_containers()
        self.assertEqual(len(intermediate), 1)
        self.assertEqual(len(new), 1)
        tuples = service.recreate_containers()
        self.assertEqual(len(tuples), 1)

        new_container = new[0]
        intermediate_container = intermediate[0]
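        # each element of `tuples` is an (intermediate_container, new_container) pair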
        intermediate_container = tuples[0][0]
        new_container = tuples[0][1]
        self.assertEqual(intermediate_container.dictionary['Config']['Entrypoint'], ['echo'])

        self.assertEqual(new_container.dictionary['Config']['Entrypoint'], ['ps'])
        self.assertEqual(new_container.dictionary['Config']['Cmd'], ['ax'])
        self.assertIn('FOO=2', new_container.dictionary['Config']['Env'])
        self.assertEqual(new_container.name, 'figtest_db_1')
        service.start_container(new_container)
        self.assertEqual(new_container.inspect()['Volumes']['/var/db'], volume_path)

        self.assertEqual(len(self.client.containers(all=True)), num_containers_before + 1)
        self.assertEqual(len(self.client.containers(all=True)), num_containers_before)
        self.assertNotEqual(old_container.id, new_container.id)
        self.assertRaises(APIError, lambda: self.client.inspect_container(intermediate_container.id))

    def test_start_container_passes_through_options(self):
        db = self.create_service('db')
@@ -175,12 +157,17 @@
        web.start_container()
        self.assertIn('custom_link_name', web.containers()[0].links())

    def test_start_container_creates_links_to_its_own_service(self):
        db1 = self.create_service('db')
        db2 = self.create_service('db')
        db1.start_container()
        db2.start_container()
        self.assertIn('db_1', db2.containers()[0].links())
    def test_start_normal_container_does_not_create_links_to_its_own_service(self):
        db = self.create_service('db')
        c1 = db.start_container()
        c2 = db.start_container()
        self.assertNotIn(c1.name, c2.links())

    def test_start_one_off_container_creates_links_to_its_own_service(self):
        db = self.create_service('db')
        c1 = db.start_container()
        c2 = db.start_container(one_off=True)
        self.assertIn(c1.name, c2.links())

    def test_start_container_builds_images(self):
        service = Service(
@@ -212,6 +199,16 @@
        self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/tcp'])
        self.assertNotEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')

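    # The two tests below check that the `privileged` option defaults to off and,
    # when set, shows up in the container's HostConfig.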
    def test_start_container_stays_unpriviliged(self):
        service = self.create_service('web')
        container = service.start_container().inspect()
        self.assertEqual(container['HostConfig']['Privileged'], False)

    def test_start_container_becomes_priviliged(self):
        service = self.create_service('web', privileged=True)
        container = service.start_container().inspect()
        self.assertEqual(container['HostConfig']['Privileged'], True)

    def test_expose_does_not_publish_ports(self):
        service = self.create_service('web', expose=[8000])
        container = service.start_container().inspect()
@@ -256,5 +253,3 @@ class ServiceTest(DockerClientTestCase):
        self.assertEqual(len(containers), 2)
        for container in containers:
            self.assertEqual(list(container.inspect()['HostConfig']['PortBindings'].keys()), ['8000/tcp'])

@@ -3,7 +3,7 @@ from __future__ import absolute_import
from fig.packages.docker import Client
from fig.service import Service
from fig.cli.utils import docker_url
from . import unittest
from .. import unittest


class DockerClientTestCase(unittest.TestCase):
@@ -18,7 +18,7 @@ class DockerClientTestCase(unittest.TestCase):
                self.client.kill(c['Id'])
                self.client.remove_container(c['Id'])
        for i in self.client.images():
            if isinstance(i['Tag'], basestring) and 'figtest' in i['Tag']:
            if isinstance(i.get('Tag'), basestring) and 'figtest' in i['Tag']:
                self.client.remove_image(i)

    def create_service(self, name, **kwargs):
@@ -1,120 +0,0 @@
from __future__ import unicode_literals
from fig.project import Project, ConfigurationError
from .testcases import DockerClientTestCase


class ProjectTest(DockerClientTestCase):
    def test_from_dict(self):
        project = Project.from_dicts('figtest', [
            {
                'name': 'web',
                'image': 'ubuntu'
            },
            {
                'name': 'db',
                'image': 'ubuntu'
            }
        ], self.client)
        self.assertEqual(len(project.services), 2)
        self.assertEqual(project.get_service('web').name, 'web')
        self.assertEqual(project.get_service('web').options['image'], 'ubuntu')
        self.assertEqual(project.get_service('db').name, 'db')
        self.assertEqual(project.get_service('db').options['image'], 'ubuntu')

    def test_from_dict_sorts_in_dependency_order(self):
        project = Project.from_dicts('figtest', [
            {
                'name': 'web',
                'image': 'ubuntu',
                'links': ['db'],
            },
            {
                'name': 'db',
                'image': 'ubuntu'
            }
        ], self.client)

        self.assertEqual(project.services[0].name, 'db')
        self.assertEqual(project.services[1].name, 'web')

    def test_from_config(self):
        project = Project.from_config('figtest', {
            'web': {
                'image': 'ubuntu',
            },
            'db': {
                'image': 'ubuntu',
            },
        }, self.client)
        self.assertEqual(len(project.services), 2)
        self.assertEqual(project.get_service('web').name, 'web')
        self.assertEqual(project.get_service('web').options['image'], 'ubuntu')
        self.assertEqual(project.get_service('db').name, 'db')
        self.assertEqual(project.get_service('db').options['image'], 'ubuntu')

    def test_from_config_throws_error_when_not_dict(self):
        with self.assertRaises(ConfigurationError):
            project = Project.from_config('figtest', {
                'web': 'ubuntu',
            }, self.client)

    def test_get_service(self):
        web = self.create_service('web')
        project = Project('test', [web], self.client)
        self.assertEqual(project.get_service('web'), web)

    def test_recreate_containers(self):
        web = self.create_service('web')
        db = self.create_service('db')
        project = Project('test', [web, db], self.client)

        old_web_container = web.create_container()
        self.assertEqual(len(web.containers(stopped=True)), 1)
        self.assertEqual(len(db.containers(stopped=True)), 0)

        (old, new) = project.recreate_containers()
        self.assertEqual(len(old), 1)
        self.assertEqual(old[0][0], web)
        self.assertEqual(len(new), 2)
        self.assertEqual(new[0][0], web)
        self.assertEqual(new[1][0], db)

        self.assertEqual(len(web.containers(stopped=True)), 1)
        self.assertEqual(len(db.containers(stopped=True)), 1)

        # remove intermediate containers
        for (service, container) in old:
            container.remove()

    def test_start_stop_kill_remove(self):
        web = self.create_service('web')
        db = self.create_service('db')
        project = Project('figtest', [web, db], self.client)

        project.start()

        self.assertEqual(len(web.containers()), 0)
        self.assertEqual(len(db.containers()), 0)

        web_container_1 = web.create_container()
        web_container_2 = web.create_container()
        db_container = db.create_container()

        project.start(service_names=['web'])
        self.assertEqual(set(c.name for c in project.containers()), set([web_container_1.name, web_container_2.name]))

        project.start()
        self.assertEqual(set(c.name for c in project.containers()), set([web_container_1.name, web_container_2.name, db_container.name]))

        project.stop(service_names=['web'], timeout=1)
        self.assertEqual(set(c.name for c in project.containers()), set([db_container.name]))

        project.kill(service_names=['db'])
        self.assertEqual(len(project.containers()), 0)
        self.assertEqual(len(project.containers(stopped=True)), 3)

        project.remove_stopped(service_names=['web'])
        self.assertEqual(len(project.containers(stopped=True)), 1)

        project.remove_stopped()
        self.assertEqual(len(project.containers(stopped=True)), 0)
0  tests/unit/__init__.py  Normal file
16  tests/unit/cli_test.py  Normal file
@@ -0,0 +1,16 @@
from __future__ import unicode_literals
from __future__ import absolute_import
from .. import unittest
from fig.cli.main import TopLevelCommand
from fig.packages.six import StringIO

class CLITestCase(unittest.TestCase):
    def test_yaml_filename_check(self):
        command = TopLevelCommand()
        command.base_dir = 'tests/fixtures/longer-filename-figfile'
        self.assertTrue(command.project.get_service('definedinyamlnotyml'))

    def test_help(self):
        command = TopLevelCommand()
        with self.assertRaises(SystemExit):
            command.dispatch(['-h'], None)
@@ -1,10 +1,10 @@
from __future__ import unicode_literals
from .testcases import DockerClientTestCase
from .. import unittest
from fig.container import Container

class ContainerTest(DockerClientTestCase):
class ContainerTest(unittest.TestCase):
    def test_from_ps(self):
        container = Container.from_ps(self.client, {
        container = Container.from_ps(None, {
            "Id":"abc",
            "Image":"ubuntu:12.04",
            "Command":"sleep 300",
@@ -13,16 +13,16 @@ class ContainerTest(DockerClientTestCase):
            "Ports":None,
            "SizeRw":0,
            "SizeRootFs":0,
            "Names":["/db_1"]
            "Names":["/figtest_db_1"]
        }, has_been_inspected=True)
        self.assertEqual(container.dictionary, {
            "ID": "abc",
            "Image":"ubuntu:12.04",
            "Name": "/db_1",
            "Name": "/figtest_db_1",
        })

    def test_environment(self):
        container = Container(self.client, {
        container = Container(None, {
            'ID': 'abc',
            'Config': {
                'Env': [
@@ -37,7 +37,7 @@ class ContainerTest(DockerClientTestCase):
        })

    def test_number(self):
        container = Container.from_ps(self.client, {
        container = Container.from_ps(None, {
            "Id":"abc",
            "Image":"ubuntu:12.04",
            "Command":"sleep 300",
@@ -46,6 +46,24 @@ class ContainerTest(DockerClientTestCase):
            "Ports":None,
            "SizeRw":0,
            "SizeRootFs":0,
            "Names":["/db_1"]
            "Names":["/figtest_db_1"]
        }, has_been_inspected=True)
        self.assertEqual(container.number, 1)

    def test_name(self):
        container = Container.from_ps(None, {
            "Id":"abc",
            "Image":"ubuntu:12.04",
            "Command":"sleep 300",
            "Names":["/figtest_db_1"]
        }, has_been_inspected=True)
        self.assertEqual(container.name, "figtest_db_1")

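    # name_without_project should strip the "<project>_" prefix, leaving "db_1"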
    def test_name_without_project(self):
        container = Container.from_ps(None, {
            "Id":"abc",
            "Image":"ubuntu:12.04",
            "Command":"sleep 300",
            "Names":["/figtest_db_1"]
        }, has_been_inspected=True)
        self.assertEqual(container.name_without_project, "db_1")
69  tests/unit/project_test.py  Normal file
@@ -0,0 +1,69 @@
from __future__ import unicode_literals
from .. import unittest
from fig.service import Service
from fig.project import Project, ConfigurationError

class ProjectTest(unittest.TestCase):
    def test_from_dict(self):
        project = Project.from_dicts('figtest', [
            {
                'name': 'web',
                'image': 'ubuntu'
            },
            {
                'name': 'db',
                'image': 'ubuntu'
            }
        ], None)
        self.assertEqual(len(project.services), 2)
        self.assertEqual(project.get_service('web').name, 'web')
        self.assertEqual(project.get_service('web').options['image'], 'ubuntu')
        self.assertEqual(project.get_service('db').name, 'db')
        self.assertEqual(project.get_service('db').options['image'], 'ubuntu')

    def test_from_dict_sorts_in_dependency_order(self):
        project = Project.from_dicts('figtest', [
            {
                'name': 'web',
                'image': 'ubuntu',
                'links': ['db'],
            },
            {
                'name': 'db',
                'image': 'ubuntu'
            }
        ], None)

        self.assertEqual(project.services[0].name, 'db')
        self.assertEqual(project.services[1].name, 'web')

    def test_from_config(self):
        project = Project.from_config('figtest', {
            'web': {
                'image': 'ubuntu',
            },
            'db': {
                'image': 'ubuntu',
            },
        }, None)
        self.assertEqual(len(project.services), 2)
        self.assertEqual(project.get_service('web').name, 'web')
        self.assertEqual(project.get_service('web').options['image'], 'ubuntu')
        self.assertEqual(project.get_service('db').name, 'db')
        self.assertEqual(project.get_service('db').options['image'], 'ubuntu')

    def test_from_config_throws_error_when_not_dict(self):
        with self.assertRaises(ConfigurationError):
            project = Project.from_config('figtest', {
                'web': 'ubuntu',
            }, None)

    def test_get_service(self):
        web = Service(
            project='figtest',
            name='web',
            client=None,
            image="ubuntu",
        )
        project = Project('test', [web], None)
        self.assertEqual(project.get_service('web'), web)
29  tests/unit/service_test.py  Normal file
@@ -0,0 +1,29 @@
from __future__ import unicode_literals
from __future__ import absolute_import
from .. import unittest
from fig import Service
from fig.service import ConfigError

class ServiceTest(unittest.TestCase):
    def test_name_validations(self):
        self.assertRaises(ConfigError, lambda: Service(name=''))

        self.assertRaises(ConfigError, lambda: Service(name=' '))
        self.assertRaises(ConfigError, lambda: Service(name='/'))
        self.assertRaises(ConfigError, lambda: Service(name='!'))
        self.assertRaises(ConfigError, lambda: Service(name='\xe2'))
        self.assertRaises(ConfigError, lambda: Service(name='_'))
        self.assertRaises(ConfigError, lambda: Service(name='____'))
        self.assertRaises(ConfigError, lambda: Service(name='foo_bar'))
        self.assertRaises(ConfigError, lambda: Service(name='__foo_bar__'))

        Service('a')
        Service('foo')

    def test_project_validation(self):
        self.assertRaises(ConfigError, lambda: Service(name='foo', project='_'))
        Service(name='foo', project='bar')

    def test_config_validation(self):
        self.assertRaises(ConfigError, lambda: Service(name='foo', port=['8000']))
        Service(name='foo', ports=['8000'])
@@ -1,5 +1,5 @@
from fig.project import sort_service_dicts, DependencyError
from . import unittest
from .. import unittest


class SortServiceTest(unittest.TestCase):
@@ -1,7 +1,7 @@
from __future__ import unicode_literals
from __future__ import absolute_import
from fig.cli.utils import split_buffer
from . import unittest
from .. import unittest

class SplitBufferTest(unittest.TestCase):
    def test_single_line_chunks(self):