Mirror of https://github.com/docker/compose.git (synced 2026-02-11 02:59:25 +08:00)
Compare commits
85 Commits
| SHA1 |
|---|
| 573fae089f |
| 83cd16e373 |
| 74fb400fef |
| a07b00606b |
| 65f23583ae |
| c8a58f2547 |
| 56c6efdfce |
| 8ed86ed551 |
| d2f4c81d62 |
| b20190da98 |
| 5d9a5a0c84 |
| bb7613f37b |
| 9bd54d7be2 |
| 5c8fac5993 |
| 3e2fd6a2a1 |
| f36dd414a0 |
| af1b0ed088 |
| feafea2c6d |
| c4f5ed839f |
| b4c905dc83 |
| 804e2cdcb1 |
| 0bb5e48f53 |
| 21528f08d4 |
| b1e7f548f4 |
| cdcea98290 |
| c06456da37 |
| 7b31fdf6f6 |
| e38b403b14 |
| ee0c4bf690 |
| 8c583d1bb2 |
| ea4753c49a |
| 3956d85a8c |
| 8a0071d9c1 |
| 5db6c9f51b |
| f5f9357736 |
| bdc6b47e1f |
| 3669236aa1 |
| 207e83ac2f |
| 3c5e334d9d |
| a8e275a432 |
| 887a30e327 |
| 7a1fb3a8d2 |
| d4000e07a9 |
| a3d024e11d |
| b92e998929 |
| f448a841c5 |
| c9c844c279 |
| 342f187318 |
| d063f0e00c |
| 0614e2c590 |
| 431b3dc2b2 |
| 544cd884ee |
| c6efb45585 |
| 38008a87e8 |
| 059d240824 |
| 7a4b69edc0 |
| 8cab05feb4 |
| 892677a9d3 |
| 00a1835fae |
| 7888027425 |
| 0760ea1b00 |
| 31f0907732 |
| 9bec059cc7 |
| f600fa8bf3 |
| c6e91db32f |
| b101118d1e |
| 30ea4508c3 |
| 3c91315426 |
| bf8875d930 |
| 93b9b6fd9f |
| f96a1a0b35 |
| 8de07ccf65 |
| ff9fa5661d |
| 17b9cc430c |
| d8a2a0f003 |
| 490742b892 |
| 3fa80cd974 |
| 9ede185d4b |
| aaf90639a0 |
| 5ba7040df2 |
| 3d411ed0bb |
| dd1f8934ad |
| febcbcddb9 |
| 88c74d67f6 |
| 0fb915e57e |
16 .travis.yml
@@ -4,8 +4,16 @@ python:
- "2.7"
- "3.2"
- "3.3"
install:
- python setup.py install
- pip install nose==1.3.0
script: nosetests

matrix:
allow_failures:
- python: "3.2"
- python: "3.3"

install: script/travis-install

script:
- pwd
- env
- sekexe/run "`pwd`/script/travis $TRAVIS_PYTHON_VERSION"

14 CHANGES.md
@@ -1,6 +1,20 @@
Change log
==========

0.1.0 (2014-01-16)
------------------

- Containers are recreated on each `fig up`, ensuring config is up-to-date with `fig.yml` (#2)
- Add `fig scale` command (#9)
- Use DOCKER_HOST environment variable to find Docker daemon (#19)
- Truncate long commands in `fig ps` (#18)
- Fill out CLI help banners for commands (#15, #16)
- Show a friendlier error when `fig.yml` is missing (#4)

- Fix bug with `fig build` logging (#3)
- Fix bug where builds would time out if a step took a long time without generating output (#6)
- Fix bug where streaming container output over the Unix socket raised an error (#7)

0.0.2 (2014-01-02)
------------------

9 Dockerfile Normal file
@@ -0,0 +1,9 @@
FROM stackbrew/ubuntu:12.04
RUN apt-get update -qq
RUN apt-get install -y python python-pip
ADD requirements.txt /code/
WORKDIR /code/
RUN pip install -r requirements.txt
ADD requirements-dev.txt /code/
RUN pip install -r requirements-dev.txt
ADD . /code/

@@ -1,3 +1,10 @@
include Dockerfile
include LICENSE
include *.md
include requirements.txt
include requirements-dev.txt
tox.ini
include *.md
recursive-include tests *
global-exclude *.pyc
global-exclude *.pyo
global-exclude *.un~

89 README.md
@@ -1,5 +1,7 @@
Fig
====
===

[](https://travis-ci.org/orchardup/fig)

Punctual, lightweight development environments using Docker.

@@ -18,13 +20,7 @@ db:

Then type `fig up`, and Fig will start and run your entire app:

    $ fig up
    Pulling image orchardup/postgresql...
    Building web...
    Starting example_db_1...
    Starting example_web_1...
    example_db_1 | 2014-01-02 14:47:18 UTC LOG: database system is ready to accept connections
    example_web_1 | * Running on http://0.0.0.0:5000/

There are commands to:

@@ -43,9 +39,9 @@ Let's get a basic Python web app running on Fig. It assumes a little knowledge o

First, install Docker. If you're on OS X, you can use [docker-osx](https://github.com/noplay/docker-osx):

    $ curl https://raw.github.com/noplay/docker-osx/master/docker > /usr/local/bin/docker
    $ chmod +x /usr/local/bin/docker
    $ docker version
    $ curl https://raw.github.com/noplay/docker-osx/master/docker-osx > /usr/local/bin/docker-osx
    $ chmod +x /usr/local/bin/docker-osx
    $ docker-osx shell

Docker has guides for [Ubuntu](http://docs.docker.io/en/latest/installation/ubuntulinux/) and [other platforms](http://docs.docker.io/en/latest/installation/) in their documentation.

@@ -147,7 +143,7 @@ If you want to run your services in the background, you can pass the `-d` flag t

See `fig --help` for other commands that are available.

You'll probably want to stop your services when you've finished with them:
If you started Fig with `fig up -d`, you'll probably want to stop your services once you've finished with them:

    $ fig stop

@@ -194,6 +190,74 @@ environment:
  RACK_ENV: development
```

### Commands

Most commands are run against one or more services. If the service is omitted, it will apply to all services.

Run `fig [COMMAND] --help` for full usage.

#### build

Build or rebuild services.

Services are built once and then tagged as `project_service`, e.g. `figtest_db`. If you change a service's `Dockerfile` or the contents of its build directory, you can run `fig build` to rebuild it.

#### help

Get help on a command.

#### kill

Force stop service containers.

#### logs

View output from services.

#### ps

List containers.

#### rm

Remove stopped service containers.

#### run

Run a one-off command on a service.

For example:

    $ fig run web python manage.py shell

Note that this will not start any services that the command's service links to. So if, for example, your one-off command talks to your database, you will need to run `fig up -d db` first.

#### scale

Set number of containers to run for a service.

Numbers are specified in the form `service=num` as arguments.
For example:

    $ fig scale web=2 worker=3

#### start

Start existing containers for a service.

#### stop

Stop running containers without removing them. They can be started again with `fig start`.

#### up

Build, (re)create, start and attach to containers for a service.

By default, `fig up` will aggregate the output of each container, and when it exits, all containers will be stopped. If you run `fig up -d`, it'll start the containers in the background and leave them running.

If there are existing containers for a service, `fig up` will stop and recreate them (preserving mounted volumes with [volumes-from]), so that changes in `fig.yml` are picked up.

### Environment variables

Fig uses [Docker links] to expose services' containers to one another. Each linked container injects a set of environment variables, each of which begins with the uppercase name of the container.

@@ -218,3 +282,4 @@ Fully qualified container name, e.g. `MYAPP_DB_1_NAME=/myapp_web_1/myapp_db_1`

[Docker links]: http://docs.docker.io/en/latest/use/port_redirection/#linking-a-container
[volumes-from]: http://docs.docker.io/en/latest/use/working_with_volumes/

@@ -1,3 +1,4 @@
from __future__ import unicode_literals
from .service import Service

__version__ = '0.0.2'
__version__ = '0.1.0'

@@ -1,3 +1,4 @@
from __future__ import unicode_literals
NAMES = [
    'grey',
    'red',

@@ -1,4 +1,8 @@
from docker import Client
from __future__ import unicode_literals
from __future__ import absolute_import
from ..packages.docker import Client
from requests.exceptions import ConnectionError
import errno
import logging
import os
import re
@@ -8,17 +12,40 @@ from ..project import Project
from .docopt_command import DocoptCommand
from .formatter import Formatter
from .utils import cached_property, docker_url
from .errors import UserError

log = logging.getLogger(__name__)

class Command(DocoptCommand):
    base_dir = '.'

    def dispatch(self, *args, **kwargs):
        try:
            super(Command, self).dispatch(*args, **kwargs)
        except ConnectionError:
            raise UserError("""
Couldn't connect to Docker daemon at %s - is it running?

If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable.
""" % self.client.base_url)

    @cached_property
    def client(self):
        return Client(docker_url())

    @cached_property
    def project(self):
        config = yaml.load(open('fig.yml'))
        try:
            yaml_path = os.path.join(self.base_dir, 'fig.yml')
            config = yaml.load(open(yaml_path))
        except IOError as e:
            if e.errno == errno.ENOENT:
                log.error("Can't find %s. Are you in the right directory?", os.path.basename(e.filename))
            else:
                log.error(e)

            exit(1)

        return Project.from_config(self.project_name, config, self.client)

    @cached_property

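The `client` and `project` properties above depend on `cached_property` from `fig/cli/utils.py`, which this comparison does not show. A minimal sketch of the conventional memoizing descriptor it presumably implements (an assumption, not fig's actual code):

```python
class cached_property(object):
    """Memoize a method's result per instance (hypothetical sketch)."""

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, owner=None):
        if obj is None:
            return self
        # Cache under the method's name so plain attribute lookup wins next time.
        value = obj.__dict__[self.func.__name__] = self.func(obj)
        return value
```

With such a descriptor, `Command().project` parses `fig.yml` once and reuses the resulting `Project` on later accesses.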
@@ -1,3 +1,5 @@
from __future__ import unicode_literals
from __future__ import absolute_import
import sys

from inspect import getdoc

@@ -1,6 +1,10 @@
from __future__ import absolute_import
from textwrap import dedent


class UserError(Exception):
    def __init__(self, msg):
        self.msg = dedent(msg).strip()

    def __unicode__(self):
        return self.msg

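A small usage sketch of the `UserError` class above (the message text is made up) showing how `dedent(...).strip()` normalises indented triple-quoted messages before they are logged:

```python
from textwrap import dedent


class UserError(Exception):
    def __init__(self, msg):
        self.msg = dedent(msg).strip()


err = UserError("""
    Couldn't connect to Docker daemon - is it running?
""")
print(err.msg)  # -> Couldn't connect to Docker daemon - is it running?
```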
@@ -1,5 +1,7 @@
import texttable
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import texttable


class Formatter(object):

@@ -1,3 +1,5 @@
from __future__ import unicode_literals
from __future__ import absolute_import
import sys

from itertools import cycle
@@ -29,34 +31,29 @@ class LogPrinter(object):

    def _make_log_generator(self, container, color_fn):
        prefix = color_fn(container.name + " | ")
        websocket = self._attach(container)
        return (prefix + line for line in split_buffer(read_websocket(websocket), '\n'))
        for line in split_buffer(self._attach(container), '\n'):
            yield prefix + line

    def _attach(self, container):
        params = {
            'stdin': False,
            'stdout': True,
            'stderr': True,
            'logs': False,
            'stream': True,
        }
        params.update(self.attach_params)
        params = dict((name, 1 if value else 0) for (name, value) in params.items())
        return container.attach_socket(params=params, ws=True)

def read_websocket(websocket):
    while True:
        data = websocket.recv()
        if data:
            yield data
        else:
            break
        params = dict((name, 1 if value else 0) for (name, value) in list(params.items()))
        return container.attach(**params)

def split_buffer(reader, separator):
    """
    Given a generator which yields strings and a separator string,
    joins all input, splits on the separator and yields each chunk.
    Requires that each input string is decodable as UTF-8.
    """
    buffered = ''

    for data in reader:
        lines = (buffered + data).split(separator)
        lines = (buffered + data.decode('utf-8')).split(separator)
        for line in lines[:-1]:
            yield line + separator
        if len(lines) > 1:

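The tail of `split_buffer` is cut off in the hunk above; the following standalone sketch (a plausible completion, with made-up byte chunks) shows how the decoded chunks are reassembled into newline-terminated lines now that `container.attach()` yields bytes:

```python
def split_buffer(reader, separator):
    """Join chunks from `reader`, split on `separator`, yield each piece."""
    buffered = ''
    for data in reader:
        # attach() now yields bytes, hence the explicit UTF-8 decode.
        lines = (buffered + data.decode('utf-8')).split(separator)
        for line in lines[:-1]:
            yield line + separator
        buffered = lines[-1]  # assumed handling of the trailing partial line
    if buffered:
        yield buffered


chunks = [b'db_1  | ready\nweb_1 | list', b'ening on :5000\n']
print(list(split_buffer(iter(chunks), '\n')))
# ['db_1  | ready\n', 'web_1 | listening on :5000\n']
```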
121 fig/cli/main.py
@@ -1,3 +1,5 @@
|
||||
from __future__ import print_function
|
||||
from __future__ import unicode_literals
|
||||
import logging
|
||||
import sys
|
||||
import re
|
||||
@@ -8,12 +10,13 @@ from inspect import getdoc
|
||||
|
||||
from .. import __version__
|
||||
from ..project import NoSuchService
|
||||
from ..service import CannotBeScaledError
|
||||
from .command import Command
|
||||
from .formatter import Formatter
|
||||
from .log_printer import LogPrinter
|
||||
from .utils import yesno
|
||||
|
||||
from docker.client import APIError
|
||||
from ..packages.docker.client import APIError
|
||||
from .errors import UserError
|
||||
from .docopt_command import NoSuchCommand
|
||||
from .socketclient import SocketClient
|
||||
@@ -38,18 +41,18 @@ def main():
|
||||
except KeyboardInterrupt:
|
||||
log.error("\nAborting.")
|
||||
exit(1)
|
||||
except UserError, e:
|
||||
except UserError as e:
|
||||
log.error(e.msg)
|
||||
exit(1)
|
||||
except NoSuchService, e:
|
||||
except NoSuchService as e:
|
||||
log.error(e.msg)
|
||||
exit(1)
|
||||
except NoSuchCommand, e:
|
||||
except NoSuchCommand as e:
|
||||
log.error("No such command: %s", e.command)
|
||||
log.error("")
|
||||
log.error("\n".join(parse_doc_section("commands:", getdoc(e.supercommand))))
|
||||
exit(1)
|
||||
except APIError, e:
|
||||
except APIError as e:
|
||||
log.error(e.explanation)
|
||||
exit(1)
|
||||
|
||||
@@ -74,11 +77,13 @@ class TopLevelCommand(Command):
|
||||
|
||||
Commands:
|
||||
build Build or rebuild services
|
||||
help Get help on a command
|
||||
kill Kill containers
|
||||
logs View output from containers
|
||||
ps List containers
|
||||
rm Remove stopped containers
|
||||
run Run a one-off command
|
||||
scale Set number of containers for a service
|
||||
start Start services
|
||||
stop Stop services
|
||||
up Create and start containers
|
||||
@@ -93,13 +98,28 @@ class TopLevelCommand(Command):
|
||||
"""
|
||||
Build or rebuild services.
|
||||
|
||||
Services are built once and then tagged as `project_service`,
|
||||
e.g. `figtest_db`. If you change a service's `Dockerfile` or the
|
||||
contents of its build directory, you can run `fig build` to rebuild it.
|
||||
|
||||
Usage: build [SERVICE...]
|
||||
"""
|
||||
self.project.build(service_names=options['SERVICE'])
|
||||
|
||||
def help(self, options):
|
||||
"""
|
||||
Get help on a command.
|
||||
|
||||
Usage: help COMMAND
|
||||
"""
|
||||
command = options['COMMAND']
|
||||
if not hasattr(self, command):
|
||||
raise NoSuchCommand(command, self)
|
||||
raise SystemExit(getdoc(getattr(self, command)))
|
||||
|
||||
def kill(self, options):
|
||||
"""
|
||||
Kill containers.
|
||||
Force stop service containers.
|
||||
|
||||
Usage: kill [SERVICE...]
|
||||
"""
|
||||
@@ -111,8 +131,8 @@ class TopLevelCommand(Command):
|
||||
|
||||
Usage: logs [SERVICE...]
|
||||
"""
|
||||
containers = self.project.containers(service_names=options['SERVICE'], stopped=False)
|
||||
print "Attaching to", list_containers(containers)
|
||||
containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
|
||||
print("Attaching to", list_containers(containers))
|
||||
LogPrinter(containers, attach_params={'logs': True}).run()
|
||||
|
||||
def ps(self, options):
|
||||
@@ -128,7 +148,7 @@ class TopLevelCommand(Command):
|
||||
|
||||
if options['-q']:
|
||||
for container in containers:
|
||||
print container.id
|
||||
print(container.id)
|
||||
else:
|
||||
headers = [
|
||||
'Name',
|
||||
@@ -138,17 +158,20 @@ class TopLevelCommand(Command):
|
||||
]
|
||||
rows = []
|
||||
for container in containers:
|
||||
command = container.human_readable_command
|
||||
if len(command) > 30:
|
||||
command = '%s ...' % command[:26]
|
||||
rows.append([
|
||||
container.name,
|
||||
container.human_readable_command,
|
||||
command,
|
||||
container.human_readable_state,
|
||||
container.human_readable_ports,
|
||||
])
|
||||
print Formatter().table(headers, rows)
|
||||
print(Formatter().table(headers, rows))
|
||||
|
||||
def rm(self, options):
|
||||
"""
|
||||
Remove stopped containers
|
||||
Remove stopped service containers.
|
||||
|
||||
Usage: rm [SERVICE...]
|
||||
"""
|
||||
@@ -156,15 +179,23 @@ class TopLevelCommand(Command):
|
||||
stopped_containers = [c for c in all_containers if not c.is_running]
|
||||
|
||||
if len(stopped_containers) > 0:
|
||||
print "Going to remove", list_containers(stopped_containers)
|
||||
print("Going to remove", list_containers(stopped_containers))
|
||||
if yesno("Are you sure? [yN] ", default=False):
|
||||
self.project.remove_stopped(service_names=options['SERVICE'])
|
||||
else:
|
||||
print "No stopped containers"
|
||||
print("No stopped containers")
|
||||
|
||||
def run(self, options):
|
||||
"""
|
||||
Run a one-off command.
|
||||
Run a one-off command on a service.
|
||||
|
||||
For example:
|
||||
|
||||
$ fig run web python manage.py shell
|
||||
|
||||
Note that this will not start any services that the command's service
|
||||
links to. So if, for example, your one-off command talks to your
|
||||
database, you will need to run `fig up -d db` first.
|
||||
|
||||
Usage: run [options] SERVICE COMMAND [ARGS...]
|
||||
|
||||
@@ -180,7 +211,7 @@ class TopLevelCommand(Command):
|
||||
container = service.create_container(one_off=True, **container_options)
|
||||
if options['-d']:
|
||||
service.start_container(container, ports=None)
|
||||
print container.name
|
||||
print(container.name)
|
||||
else:
|
||||
with self._attach_to_container(
|
||||
container.id,
|
||||
@@ -191,6 +222,31 @@ class TopLevelCommand(Command):
|
||||
service.start_container(container, ports=None)
|
||||
c.run()
|
||||
|
||||
def scale(self, options):
|
||||
"""
|
||||
Set number of containers to run for a service.
|
||||
|
||||
Numbers are specified in the form `service=num` as arguments.
|
||||
For example:
|
||||
|
||||
$ fig scale web=2 worker=3
|
||||
|
||||
Usage: scale [SERVICE=NUM...]
|
||||
"""
|
||||
for s in options['SERVICE=NUM']:
|
||||
if '=' not in s:
|
||||
raise UserError('Arguments to scale should be in the form service=num')
|
||||
service_name, num = s.split('=', 1)
|
||||
try:
|
||||
num = int(num)
|
||||
except ValueError:
|
||||
raise UserError('Number of containers for service "%s" is not a number' % service)
|
||||
try:
|
||||
self.project.get_service(service_name).scale(num)
|
||||
except CannotBeScaledError:
|
||||
raise UserError('Service "%s" cannot be scaled because it specifies a port on the host. If multiple containers for this service were created, the port would clash.\n\nRemove the ":" from the port definition in fig.yml so Docker can choose a random port for each container.' % service_name)
|
||||
|
||||
|
||||
def start(self, options):
|
||||
"""
|
||||
Start existing containers.
|
||||
@@ -201,7 +257,9 @@ class TopLevelCommand(Command):
|
||||
|
||||
def stop(self, options):
|
||||
"""
|
||||
Stop running containers.
|
||||
Stop running containers without removing them.
|
||||
|
||||
They can be started again with `fig start`.
|
||||
|
||||
Usage: stop [SERVICE...]
|
||||
"""
|
||||
@@ -209,23 +267,36 @@ class TopLevelCommand(Command):
|
||||
|
||||
def up(self, options):
|
||||
"""
|
||||
Create and start containers.
|
||||
Build, (re)create, start and attach to containers for a service.
|
||||
|
||||
By default, `fig up` will aggregate the output of each container, and
|
||||
when it exits, all containers will be stopped. If you run `fig up -d`,
|
||||
it'll start the containers in the background and leave them running.
|
||||
|
||||
If there are existing containers for a service, `fig up` will stop
|
||||
and recreate them (preserving mounted volumes with volumes-from),
|
||||
so that changes in `fig.yml` are picked up.
|
||||
|
||||
Usage: up [options] [SERVICE...]
|
||||
|
||||
Options:
|
||||
-d Detached mode: Run containers in the background, print new container names
|
||||
-d Detached mode: Run containers in the background, print new
|
||||
container names
|
||||
"""
|
||||
detached = options['-d']
|
||||
|
||||
self.project.create_containers(service_names=options['SERVICE'])
|
||||
containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
|
||||
(old, new) = self.project.recreate_containers(service_names=options['SERVICE'])
|
||||
|
||||
if not detached:
|
||||
print "Attaching to", list_containers(containers)
|
||||
log_printer = LogPrinter(containers)
|
||||
to_attach = [c for (s, c) in new]
|
||||
print("Attaching to", list_containers(to_attach))
|
||||
log_printer = LogPrinter(to_attach)
|
||||
|
||||
self.project.start(service_names=options['SERVICE'])
|
||||
for (service, container) in new:
|
||||
service.start_container(container)
|
||||
|
||||
for (service, container) in old:
|
||||
container.remove()
|
||||
|
||||
if not detached:
|
||||
try:
|
||||
@@ -236,7 +307,7 @@ class TopLevelCommand(Command):
|
||||
sys.exit(0)
|
||||
signal.signal(signal.SIGINT, handler)
|
||||
|
||||
print "Gracefully stopping... (press Ctrl+C again to force)"
|
||||
print("Gracefully stopping... (press Ctrl+C again to force)")
|
||||
self.project.stop(service_names=options['SERVICE'])
|
||||
|
||||
def _attach_to_container(self, container_id, interactive, logs=False, stream=True, raw=False):
|
||||
|
||||
@@ -1,3 +1,4 @@
from __future__ import absolute_import
from threading import Thread

try:

@@ -1,3 +1,4 @@
from __future__ import print_function
# Adapted from https://github.com/benthor/remotty/blob/master/socketclient.py

from select import select
@@ -85,7 +86,7 @@ class SocketClient:
stream.flush()
else:
break
except Exception, e:
except Exception as e:
log.debug(e)

def send_ws(self, socket, stream):
@@ -101,7 +102,7 @@ class SocketClient:
else:
try:
socket.send(chunk)
except Exception, e:
except Exception as e:
if hasattr(e, 'errno') and e.errno == errno.EPIPE:
break
else:
@@ -123,7 +124,7 @@ if __name__ == '__main__':
url = sys.argv[1]
socket = websocket.create_connection(url)

print "connected\r"
print("connected\r")

with SocketClient(socket, interactive=True) as client:
client.run()

@@ -1,3 +1,6 @@
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import datetime
import os
import socket
@@ -69,7 +72,7 @@ def prettydate(d):
    return '{0} hours ago'.format(s/3600)


def mkdir(path, permissions=0700):
def mkdir(path, permissions=0o700):
    if not os.path.exists(path):
        os.mkdir(path)

@@ -79,33 +82,4 @@ def mkdir(path, permissions=0700):


def docker_url():
    if os.environ.get('DOCKER_URL'):
        return os.environ['DOCKER_URL']

    socket_path = '/var/run/docker.sock'
    tcp_hosts = [
        ('localdocker', 4243),
        ('127.0.0.1', 4243),
    ]
    tcp_host = '127.0.0.1'
    tcp_port = 4243

    if os.path.exists(socket_path):
        return 'unix://%s' % socket_path

    for host, port in tcp_hosts:
        try:
            s = socket.create_connection((host, port), timeout=1)
            s.close()
            return 'http://%s:%s' % (host, port)
        except:
            pass

    raise UserError("""
Couldn't find Docker daemon - tried:

unix://%s
%s

If it's running elsewhere, specify a url with DOCKER_URL.
""" % (socket_path, '\n'.join('tcp://%s:%s' % h for h in tcp_hosts)))
    return os.environ.get('DOCKER_HOST')

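The rewritten `docker_url()` above no longer probes the Unix socket or TCP ports itself; it simply returns whatever `DOCKER_HOST` is set to (`None` when unset, in which case the vendored `Client` falls back to `unix://var/run/docker.sock`). A short illustrative sketch, with an example value:

```python
import os


def docker_url():
    # Mirrors the new implementation: defer entirely to the environment.
    return os.environ.get('DOCKER_HOST')


os.environ['DOCKER_HOST'] = 'tcp://127.0.0.1:4243'  # example value only
assert docker_url() == 'tcp://127.0.0.1:4243'
```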
0 fig/compat/__init__.py Normal file
23 fig/compat/functools.py Normal file
@@ -0,0 +1,23 @@

# Taken from python2.7/3.3 functools
def cmp_to_key(mycmp):
    """Convert a cmp= function into a key= function"""
    class K(object):
        __slots__ = ['obj']
        def __init__(self, obj):
            self.obj = obj
        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0
        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0
        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0
        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0
        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0
        def __ne__(self, other):
            return mycmp(self.obj, other.obj) != 0
        __hash__ = None
    return K

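As a quick illustration of the vendored `cmp_to_key` shim above, an old-style comparator can be fed to Python 3's key-only `sorted` (the container names are made up):

```python
from fig.compat.functools import cmp_to_key


def compare_names(a, b):
    # Old-style comparator: negative, zero or positive, like Python 2's cmp().
    return (a > b) - (a < b)


containers = ['figtest_web_2', 'figtest_db_1', 'figtest_web_1']
print(sorted(containers, key=cmp_to_key(compare_names)))
# ['figtest_db_1', 'figtest_web_1', 'figtest_web_2']
```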
@@ -1,6 +1,5 @@
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
from __future__ import unicode_literals
|
||||
from __future__ import absolute_import
|
||||
|
||||
class Container(object):
|
||||
"""
|
||||
@@ -47,13 +46,20 @@ class Container(object):
|
||||
def name(self):
|
||||
return self.dictionary['Name'][1:]
|
||||
|
||||
@property
|
||||
def number(self):
|
||||
try:
|
||||
return int(self.name.split('_')[-1])
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
@property
|
||||
def human_readable_ports(self):
|
||||
self.inspect_if_not_inspected()
|
||||
if not self.dictionary['NetworkSettings']['Ports']:
|
||||
return ''
|
||||
ports = []
|
||||
for private, public in self.dictionary['NetworkSettings']['Ports'].items():
|
||||
for private, public in list(self.dictionary['NetworkSettings']['Ports'].items()):
|
||||
if public:
|
||||
ports.append('%s->%s' % (public[0]['HostPort'], private))
|
||||
return ', '.join(ports)
|
||||
@@ -89,19 +95,15 @@ class Container(object):
|
||||
return self.dictionary['State']['Running']
|
||||
|
||||
def start(self, **options):
|
||||
log.info("Starting %s..." % self.name)
|
||||
return self.client.start(self.id, **options)
|
||||
|
||||
def stop(self, **options):
|
||||
log.info("Stopping %s..." % self.name)
|
||||
return self.client.stop(self.id, **options)
|
||||
|
||||
def kill(self):
|
||||
log.info("Killing %s..." % self.name)
|
||||
return self.client.kill(self.id)
|
||||
|
||||
def remove(self):
|
||||
log.info("Removing %s..." % self.name)
|
||||
return self.client.remove_container(self.id)
|
||||
|
||||
def inspect_if_not_inspected(self):
|
||||
@@ -127,6 +129,9 @@ class Container(object):
|
||||
links.append(bits[2])
|
||||
return links
|
||||
|
||||
def attach(self, *args, **kwargs):
|
||||
return self.client.attach(self.id, *args, **kwargs)
|
||||
|
||||
def attach_socket(self, **kwargs):
|
||||
return self.client.attach_socket(self.id, **kwargs)
|
||||
|
||||
|
||||
0 fig/packages/__init__.py Normal file
15 fig/packages/docker/__init__.py Normal file
@@ -0,0 +1,15 @@
# Copyright 2013 dotCloud inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .client import Client, APIError # flake8: noqa

7 fig/packages/docker/auth/__init__.py Normal file
@@ -0,0 +1,7 @@
from .auth import (
    INDEX_URL,
    encode_header,
    load_config,
    resolve_authconfig,
    resolve_repository_name
) # flake8: noqa

153 fig/packages/docker/auth/auth.py Normal file
@@ -0,0 +1,153 @@
|
||||
# Copyright 2013 dotCloud inc.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import base64
|
||||
import fileinput
|
||||
import json
|
||||
import os
|
||||
|
||||
import six
|
||||
|
||||
from ..utils import utils
|
||||
|
||||
INDEX_URL = 'https://index.docker.io/v1/'
|
||||
DOCKER_CONFIG_FILENAME = '.dockercfg'
|
||||
|
||||
|
||||
def swap_protocol(url):
|
||||
if url.startswith('http://'):
|
||||
return url.replace('http://', 'https://', 1)
|
||||
if url.startswith('https://'):
|
||||
return url.replace('https://', 'http://', 1)
|
||||
return url
|
||||
|
||||
|
||||
def expand_registry_url(hostname):
|
||||
if hostname.startswith('http:') or hostname.startswith('https:'):
|
||||
if '/' not in hostname[9:]:
|
||||
hostname = hostname + '/v1/'
|
||||
return hostname
|
||||
if utils.ping('https://' + hostname + '/v1/_ping'):
|
||||
return 'https://' + hostname + '/v1/'
|
||||
return 'http://' + hostname + '/v1/'
|
||||
|
||||
|
||||
def resolve_repository_name(repo_name):
|
||||
if '://' in repo_name:
|
||||
raise ValueError('Repository name cannot contain a '
|
||||
'scheme ({0})'.format(repo_name))
|
||||
parts = repo_name.split('/', 1)
|
||||
if not '.' in parts[0] and not ':' in parts[0] and parts[0] != 'localhost':
|
||||
# This is a docker index repo (ex: foo/bar or ubuntu)
|
||||
return INDEX_URL, repo_name
|
||||
if len(parts) < 2:
|
||||
raise ValueError('Invalid repository name ({0})'.format(repo_name))
|
||||
|
||||
if 'index.docker.io' in parts[0]:
|
||||
raise ValueError('Invalid repository name,'
|
||||
'try "{0}" instead'.format(parts[1]))
|
||||
|
||||
return expand_registry_url(parts[0]), parts[1]
|
||||
|
||||
|
||||
def resolve_authconfig(authconfig, registry=None):
|
||||
"""Return the authentication data from the given auth configuration for a
|
||||
specific registry. We'll do our best to infer the correct URL for the
|
||||
registry, trying both http and https schemes. Returns an empty dictionnary
|
||||
if no data exists."""
|
||||
# Default to the public index server
|
||||
registry = registry or INDEX_URL
|
||||
|
||||
# Ff its not the index server there are three cases:
|
||||
#
|
||||
# 1. this is a full config url -> it should be used as is
|
||||
# 2. it could be a full url, but with the wrong protocol
|
||||
# 3. it can be the hostname optionally with a port
|
||||
#
|
||||
# as there is only one auth entry which is fully qualified we need to start
|
||||
# parsing and matching
|
||||
if '/' not in registry:
|
||||
registry = registry + '/v1/'
|
||||
if not registry.startswith('http:') and not registry.startswith('https:'):
|
||||
registry = 'https://' + registry
|
||||
|
||||
if registry in authconfig:
|
||||
return authconfig[registry]
|
||||
return authconfig.get(swap_protocol(registry), None)
|
||||
|
||||
|
||||
def decode_auth(auth):
|
||||
if isinstance(auth, six.string_types):
|
||||
auth = auth.encode('ascii')
|
||||
s = base64.b64decode(auth)
|
||||
login, pwd = s.split(b':')
|
||||
return login.decode('ascii'), pwd.decode('ascii')
|
||||
|
||||
|
||||
def encode_header(auth):
|
||||
auth_json = json.dumps(auth).encode('ascii')
|
||||
return base64.b64encode(auth_json)
|
||||
|
||||
|
||||
def load_config(root=None):
|
||||
"""Loads authentication data from a Docker configuration file in the given
|
||||
root directory."""
|
||||
conf = {}
|
||||
data = None
|
||||
|
||||
config_file = os.path.join(root or os.environ.get('HOME', '.'),
|
||||
DOCKER_CONFIG_FILENAME)
|
||||
|
||||
# First try as JSON
|
||||
try:
|
||||
with open(config_file) as f:
|
||||
conf = {}
|
||||
for registry, entry in six.iteritems(json.load(f)):
|
||||
username, password = decode_auth(entry['auth'])
|
||||
conf[registry] = {
|
||||
'username': username,
|
||||
'password': password,
|
||||
'email': entry['email'],
|
||||
'serveraddress': registry,
|
||||
}
|
||||
return conf
|
||||
except:
|
||||
pass
|
||||
|
||||
# If that fails, we assume the configuration file contains a single
|
||||
# authentication token for the public registry in the following format:
|
||||
#
|
||||
# auth = AUTH_TOKEN
|
||||
# email = email@domain.com
|
||||
try:
|
||||
data = []
|
||||
for line in fileinput.input(config_file):
|
||||
data.append(line.strip().split(' = ')[1])
|
||||
if len(data) < 2:
|
||||
# Not enough data
|
||||
raise Exception('Invalid or empty configuration file!')
|
||||
|
||||
username, password = decode_auth(data[0])
|
||||
conf[INDEX_URL] = {
|
||||
'username': username,
|
||||
'password': password,
|
||||
'email': data[1],
|
||||
'serveraddress': INDEX_URL,
|
||||
}
|
||||
return conf
|
||||
except:
|
||||
pass
|
||||
|
||||
# If all fails, return an empty config
|
||||
return {}
|
||||
746 fig/packages/docker/client.py Normal file
@@ -0,0 +1,746 @@
|
||||
# Copyright 2013 dotCloud inc.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import re
|
||||
import shlex
|
||||
import struct
|
||||
|
||||
import requests
|
||||
import requests.exceptions
|
||||
import six
|
||||
|
||||
from .auth import auth
|
||||
from .unixconn import unixconn
|
||||
from .utils import utils
|
||||
|
||||
if not six.PY3:
|
||||
import websocket
|
||||
|
||||
DEFAULT_TIMEOUT_SECONDS = 60
|
||||
STREAM_HEADER_SIZE_BYTES = 8
|
||||
|
||||
|
||||
class APIError(requests.exceptions.HTTPError):
|
||||
def __init__(self, message, response, explanation=None):
|
||||
super(APIError, self).__init__(message, response=response)
|
||||
|
||||
self.explanation = explanation
|
||||
|
||||
if self.explanation is None and response.content:
|
||||
self.explanation = response.content.strip()
|
||||
|
||||
def __str__(self):
|
||||
message = super(APIError, self).__str__()
|
||||
|
||||
if self.is_client_error():
|
||||
message = '%s Client Error: %s' % (
|
||||
self.response.status_code, self.response.reason)
|
||||
|
||||
elif self.is_server_error():
|
||||
message = '%s Server Error: %s' % (
|
||||
self.response.status_code, self.response.reason)
|
||||
|
||||
if self.explanation:
|
||||
message = '%s ("%s")' % (message, self.explanation)
|
||||
|
||||
return message
|
||||
|
||||
def is_client_error(self):
|
||||
return 400 <= self.response.status_code < 500
|
||||
|
||||
def is_server_error(self):
|
||||
return 500 <= self.response.status_code < 600
|
||||
|
||||
|
||||
class Client(requests.Session):
|
||||
def __init__(self, base_url=None, version="1.6",
|
||||
timeout=DEFAULT_TIMEOUT_SECONDS):
|
||||
super(Client, self).__init__()
|
||||
if base_url is None:
|
||||
base_url = "unix://var/run/docker.sock"
|
||||
if base_url.startswith('unix:///'):
|
||||
base_url = base_url.replace('unix:/', 'unix:')
|
||||
if base_url.startswith('tcp:'):
|
||||
base_url = base_url.replace('tcp:', 'http:')
|
||||
if base_url.endswith('/'):
|
||||
base_url = base_url[:-1]
|
||||
self.base_url = base_url
|
||||
self._version = version
|
||||
self._timeout = timeout
|
||||
self._auth_configs = auth.load_config()
|
||||
|
||||
self.mount('unix://', unixconn.UnixAdapter(base_url, timeout))
|
||||
|
||||
def _set_request_timeout(self, kwargs):
|
||||
"""Prepare the kwargs for an HTTP request by inserting the timeout
|
||||
parameter, if not already present."""
|
||||
kwargs.setdefault('timeout', self._timeout)
|
||||
return kwargs
|
||||
|
||||
def _post(self, url, **kwargs):
|
||||
return self.post(url, **self._set_request_timeout(kwargs))
|
||||
|
||||
def _get(self, url, **kwargs):
|
||||
return self.get(url, **self._set_request_timeout(kwargs))
|
||||
|
||||
def _delete(self, url, **kwargs):
|
||||
return self.delete(url, **self._set_request_timeout(kwargs))
|
||||
|
||||
def _url(self, path):
|
||||
return '{0}/v{1}{2}'.format(self.base_url, self._version, path)
|
||||
|
||||
def _raise_for_status(self, response, explanation=None):
|
||||
"""Raises stored :class:`APIError`, if one occurred."""
|
||||
try:
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.HTTPError as e:
|
||||
raise APIError(e, response, explanation=explanation)
|
||||
|
||||
def _result(self, response, json=False, binary=False):
|
||||
assert not (json and binary)
|
||||
self._raise_for_status(response)
|
||||
|
||||
if json:
|
||||
return response.json()
|
||||
if binary:
|
||||
return response.content
|
||||
return response.text
|
||||
|
||||
def _container_config(self, image, command, hostname=None, user=None,
|
||||
detach=False, stdin_open=False, tty=False,
|
||||
mem_limit=0, ports=None, environment=None, dns=None,
|
||||
volumes=None, volumes_from=None,
|
||||
network_disabled=False):
|
||||
if isinstance(command, six.string_types):
|
||||
command = shlex.split(str(command))
|
||||
if isinstance(environment, dict):
|
||||
environment = [
|
||||
'{0}={1}'.format(k, v) for k, v in environment.items()
|
||||
]
|
||||
|
||||
if ports and isinstance(ports, list):
|
||||
exposed_ports = {}
|
||||
for port_definition in ports:
|
||||
port = port_definition
|
||||
proto = None
|
||||
if isinstance(port_definition, tuple):
|
||||
if len(port_definition) == 2:
|
||||
proto = port_definition[1]
|
||||
port = port_definition[0]
|
||||
exposed_ports['{0}{1}'.format(
|
||||
port,
|
||||
'/' + proto if proto else ''
|
||||
)] = {}
|
||||
ports = exposed_ports
|
||||
|
||||
if volumes and isinstance(volumes, list):
|
||||
volumes_dict = {}
|
||||
for vol in volumes:
|
||||
volumes_dict[vol] = {}
|
||||
volumes = volumes_dict
|
||||
|
||||
attach_stdin = False
|
||||
attach_stdout = False
|
||||
attach_stderr = False
|
||||
|
||||
if not detach:
|
||||
attach_stdout = True
|
||||
attach_stderr = True
|
||||
|
||||
if stdin_open:
|
||||
attach_stdin = True
|
||||
|
||||
return {
|
||||
'Hostname': hostname,
|
||||
'ExposedPorts': ports,
|
||||
'User': user,
|
||||
'Tty': tty,
|
||||
'OpenStdin': stdin_open,
|
||||
'Memory': mem_limit,
|
||||
'AttachStdin': attach_stdin,
|
||||
'AttachStdout': attach_stdout,
|
||||
'AttachStderr': attach_stderr,
|
||||
'Env': environment,
|
||||
'Cmd': command,
|
||||
'Dns': dns,
|
||||
'Image': image,
|
||||
'Volumes': volumes,
|
||||
'VolumesFrom': volumes_from,
|
||||
'NetworkDisabled': network_disabled
|
||||
}
|
||||
|
||||
def _post_json(self, url, data, **kwargs):
|
||||
# Go <1.1 can't unserialize null to a string
|
||||
# so we do this disgusting thing here.
|
||||
data2 = {}
|
||||
if data is not None:
|
||||
for k, v in six.iteritems(data):
|
||||
if v is not None:
|
||||
data2[k] = v
|
||||
|
||||
if 'headers' not in kwargs:
|
||||
kwargs['headers'] = {}
|
||||
kwargs['headers']['Content-Type'] = 'application/json'
|
||||
return self._post(url, data=json.dumps(data2), **kwargs)
|
||||
|
||||
def _attach_params(self, override=None):
|
||||
return override or {
|
||||
'stdout': 1,
|
||||
'stderr': 1,
|
||||
'stream': 1
|
||||
}
|
||||
|
||||
def _attach_websocket(self, container, params=None):
|
||||
if six.PY3:
|
||||
raise NotImplementedError("This method is not currently supported "
|
||||
"under python 3")
|
||||
url = self._url("/containers/{0}/attach/ws".format(container))
|
||||
req = requests.Request("POST", url, params=self._attach_params(params))
|
||||
full_url = req.prepare().url
|
||||
full_url = full_url.replace("http://", "ws://", 1)
|
||||
full_url = full_url.replace("https://", "wss://", 1)
|
||||
return self._create_websocket_connection(full_url)
|
||||
|
||||
def _create_websocket_connection(self, url):
|
||||
return websocket.create_connection(url)
|
||||
|
||||
def _stream_result(self, response):
|
||||
"""Generator for straight-out, non chunked-encoded HTTP responses."""
|
||||
self._raise_for_status(response)
|
||||
for line in response.iter_lines(chunk_size=1):
|
||||
# filter out keep-alive new lines
|
||||
if line:
|
||||
yield line + '\n'
|
||||
|
||||
def _stream_result_socket(self, response):
|
||||
self._raise_for_status(response)
|
||||
return response.raw._fp.fp._sock
|
||||
|
||||
def _stream_helper(self, response):
|
||||
"""Generator for data coming from a chunked-encoded HTTP response."""
|
||||
socket_fp = self._stream_result_socket(response)
|
||||
socket_fp.setblocking(1)
|
||||
socket = socket_fp.makefile()
|
||||
while True:
|
||||
size = int(socket.readline(), 16)
|
||||
if size <= 0:
|
||||
break
|
||||
data = socket.readline()
|
||||
if not data:
|
||||
break
|
||||
yield data
|
||||
|
||||
def _multiplexed_buffer_helper(self, response):
|
||||
"""A generator of multiplexed data blocks read from a buffered
|
||||
response."""
|
||||
buf = self._result(response, binary=True)
|
||||
walker = 0
|
||||
while True:
|
||||
if len(buf[walker:]) < 8:
|
||||
break
|
||||
_, length = struct.unpack_from('>BxxxL', buf[walker:])
|
||||
start = walker + STREAM_HEADER_SIZE_BYTES
|
||||
end = start + length
|
||||
walker = end
|
||||
yield str(buf[start:end])
|
||||
|
||||
def _multiplexed_socket_stream_helper(self, response):
|
||||
"""A generator of multiplexed data blocks coming from a response
|
||||
socket."""
|
||||
socket = self._stream_result_socket(response)
|
||||
|
||||
def recvall(socket, size):
|
||||
data = ''
|
||||
while size > 0:
|
||||
block = socket.recv(size)
|
||||
if not block:
|
||||
return None
|
||||
|
||||
data += block
|
||||
size -= len(block)
|
||||
return data
|
||||
|
||||
while True:
|
||||
socket.settimeout(None)
|
||||
header = recvall(socket, STREAM_HEADER_SIZE_BYTES)
|
||||
if not header:
|
||||
break
|
||||
_, length = struct.unpack('>BxxxL', header)
|
||||
if not length:
|
||||
break
|
||||
data = recvall(socket, length)
|
||||
if not data:
|
||||
break
|
||||
yield data
|
||||
|
||||
def attach(self, container, stdout=True, stderr=True,
|
||||
stream=False, logs=False):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
params = {
|
||||
'logs': logs and 1 or 0,
|
||||
'stdout': stdout and 1 or 0,
|
||||
'stderr': stderr and 1 or 0,
|
||||
'stream': stream and 1 or 0,
|
||||
}
|
||||
u = self._url("/containers/{0}/attach".format(container))
|
||||
response = self._post(u, params=params, stream=stream)
|
||||
|
||||
# Stream multi-plexing was introduced in API v1.6.
|
||||
if utils.compare_version('1.6', self._version) < 0:
|
||||
return stream and self._stream_result(response) or \
|
||||
self._result(response, binary=True)
|
||||
|
||||
return stream and self._multiplexed_socket_stream_helper(response) or \
|
||||
''.join([x for x in self._multiplexed_buffer_helper(response)])
|
||||
|
||||
def attach_socket(self, container, params=None, ws=False):
|
||||
if params is None:
|
||||
params = {
|
||||
'stdout': 1,
|
||||
'stderr': 1,
|
||||
'stream': 1
|
||||
}
|
||||
if ws:
|
||||
return self._attach_websocket(container, params)
|
||||
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
u = self._url("/containers/{0}/attach".format(container))
|
||||
return self._stream_result_socket(self.post(
|
||||
u, None, params=self._attach_params(params), stream=True))
|
||||
|
||||
def build(self, path=None, tag=None, quiet=False, fileobj=None,
|
||||
nocache=False, rm=False, stream=False, timeout=None):
|
||||
remote = context = headers = None
|
||||
if path is None and fileobj is None:
|
||||
raise Exception("Either path or fileobj needs to be provided.")
|
||||
|
||||
if fileobj is not None:
|
||||
context = utils.mkbuildcontext(fileobj)
|
||||
elif path.startswith(('http://', 'https://', 'git://', 'github.com/')):
|
||||
remote = path
|
||||
else:
|
||||
context = utils.tar(path)
|
||||
|
||||
u = self._url('/build')
|
||||
params = {
|
||||
't': tag,
|
||||
'remote': remote,
|
||||
'q': quiet,
|
||||
'nocache': nocache,
|
||||
'rm': rm
|
||||
}
|
||||
if context is not None:
|
||||
headers = {'Content-Type': 'application/tar'}
|
||||
|
||||
response = self._post(
|
||||
u,
|
||||
data=context,
|
||||
params=params,
|
||||
headers=headers,
|
||||
stream=stream,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
if context is not None:
|
||||
context.close()
|
||||
if stream:
|
||||
return self._stream_result(response)
|
||||
else:
|
||||
output = self._result(response)
|
||||
srch = r'Successfully built ([0-9a-f]+)'
|
||||
match = re.search(srch, output)
|
||||
if not match:
|
||||
return None, output
|
||||
return match.group(1), output
|
||||
|
||||
def commit(self, container, repository=None, tag=None, message=None,
|
||||
author=None, conf=None):
|
||||
params = {
|
||||
'container': container,
|
||||
'repo': repository,
|
||||
'tag': tag,
|
||||
'comment': message,
|
||||
'author': author
|
||||
}
|
||||
u = self._url("/commit")
|
||||
return self._result(self._post_json(u, data=conf, params=params),
|
||||
json=True)
|
||||
|
||||
def containers(self, quiet=False, all=False, trunc=True, latest=False,
|
||||
since=None, before=None, limit=-1):
|
||||
params = {
|
||||
'limit': 1 if latest else limit,
|
||||
'all': 1 if all else 0,
|
||||
'trunc_cmd': 1 if trunc else 0,
|
||||
'since': since,
|
||||
'before': before
|
||||
}
|
||||
u = self._url("/containers/json")
|
||||
res = self._result(self._get(u, params=params), True)
|
||||
|
||||
if quiet:
|
||||
return [{'Id': x['Id']} for x in res]
|
||||
return res
|
||||
|
||||
def copy(self, container, resource):
|
||||
res = self._post_json(
|
||||
self._url("/containers/{0}/copy".format(container)),
|
||||
data={"Resource": resource},
|
||||
stream=True
|
||||
)
|
||||
self._raise_for_status(res)
|
||||
return res.raw
|
||||
|
||||
def create_container(self, image, command=None, hostname=None, user=None,
|
||||
detach=False, stdin_open=False, tty=False,
|
||||
mem_limit=0, ports=None, environment=None, dns=None,
|
||||
volumes=None, volumes_from=None,
|
||||
network_disabled=False, name=None):
|
||||
|
||||
config = self._container_config(
|
||||
image, command, hostname, user, detach, stdin_open, tty, mem_limit,
|
||||
ports, environment, dns, volumes, volumes_from, network_disabled
|
||||
)
|
||||
return self.create_container_from_config(config, name)
|
||||
|
||||
def create_container_from_config(self, config, name=None):
|
||||
u = self._url("/containers/create")
|
||||
params = {
|
||||
'name': name
|
||||
}
|
||||
res = self._post_json(u, data=config, params=params)
|
||||
return self._result(res, True)
|
||||
|
||||
def diff(self, container):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
return self._result(self._get(self._url("/containers/{0}/changes".
|
||||
format(container))), True)
|
||||
|
||||
def events(self):
|
||||
u = self._url("/events")
|
||||
|
||||
socket = self._stream_result_socket(self.get(u, stream=True))
|
||||
|
||||
while True:
|
||||
chunk = socket.recv(4096)
|
||||
if chunk:
|
||||
# Messages come in the format of length, data, newline.
|
||||
length, data = chunk.split("\n", 1)
|
||||
length = int(length, 16)
|
||||
if length > len(data):
|
||||
data += socket.recv(length - len(data))
|
||||
yield json.loads(data)
|
||||
else:
|
||||
break
|
||||
|
||||
def export(self, container):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
res = self._get(self._url("/containers/{0}/export".format(container)),
|
||||
stream=True)
|
||||
self._raise_for_status(res)
|
||||
return res.raw
|
||||
|
||||
def history(self, image):
|
||||
res = self._get(self._url("/images/{0}/history".format(image)))
|
||||
self._raise_for_status(res)
|
||||
return self._result(res)
|
||||
|
||||
def images(self, name=None, quiet=False, all=False, viz=False):
|
||||
if viz:
|
||||
return self._result(self._get(self._url("images/viz")))
|
||||
params = {
|
||||
'filter': name,
|
||||
'only_ids': 1 if quiet else 0,
|
||||
'all': 1 if all else 0,
|
||||
}
|
||||
res = self._result(self._get(self._url("/images/json"), params=params),
|
||||
True)
|
||||
if quiet:
|
||||
return [x['Id'] for x in res]
|
||||
return res
|
||||
|
||||
def import_image(self, src, data=None, repository=None, tag=None):
|
||||
u = self._url("/images/create")
|
||||
params = {
|
||||
'repo': repository,
|
||||
'tag': tag
|
||||
}
|
||||
try:
|
||||
# XXX: this is ways not optimal but the only way
|
||||
# for now to import tarballs through the API
|
||||
fic = open(src)
|
||||
data = fic.read()
|
||||
fic.close()
|
||||
src = "-"
|
||||
except IOError:
|
||||
# file does not exists or not a file (URL)
|
||||
data = None
|
||||
if isinstance(src, six.string_types):
|
||||
params['fromSrc'] = src
|
||||
return self._result(self._post(u, data=data, params=params))
|
||||
|
||||
return self._result(self._post(u, data=src, params=params))
|
||||
|
||||
def info(self):
|
||||
return self._result(self._get(self._url("/info")),
|
||||
True)
|
||||
|
||||
def insert(self, image, url, path):
|
||||
api_url = self._url("/images/" + image + "/insert")
|
||||
params = {
|
||||
'url': url,
|
||||
'path': path
|
||||
}
|
||||
return self._result(self._post(api_url, params=params))
|
||||
|
||||
def inspect_container(self, container):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
return self._result(
|
||||
self._get(self._url("/containers/{0}/json".format(container))),
|
||||
True)
|
||||
|
||||
def inspect_image(self, image_id):
|
||||
return self._result(
|
||||
self._get(self._url("/images/{0}/json".format(image_id))),
|
||||
True
|
||||
)
|
||||
|
||||
def kill(self, container, signal=None):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
url = self._url("/containers/{0}/kill".format(container))
|
||||
params = {}
|
||||
if signal is not None:
|
||||
params['signal'] = signal
|
||||
res = self._post(url, params=params)
|
||||
|
||||
self._raise_for_status(res)
|
||||
|
||||
def login(self, username, password=None, email=None, registry=None,
|
||||
reauth=False):
|
||||
# If we don't have any auth data so far, try reloading the config file
|
||||
# one more time in case anything showed up in there.
|
||||
if not self._auth_configs:
|
||||
self._auth_configs = auth.load_config()
|
||||
|
||||
registry = registry or auth.INDEX_URL
|
||||
|
||||
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
|
||||
# If we found an existing auth config for this registry and username
|
||||
# combination, we can return it immediately unless reauth is requested.
|
||||
if authcfg and authcfg.get('username', None) == username \
|
||||
and not reauth:
|
||||
return authcfg
|
||||
|
||||
req_data = {
|
||||
'username': username,
|
||||
'password': password,
|
||||
'email': email,
|
||||
'serveraddress': registry,
|
||||
}
|
||||
|
||||
response = self._post_json(self._url('/auth'), data=req_data)
|
||||
if response.status_code == 200:
|
||||
self._auth_configs[registry] = req_data
|
||||
return self._result(response, json=True)
|
||||
|
||||
def logs(self, container, stdout=True, stderr=True, stream=False):
|
||||
return self.attach(
|
||||
container,
|
||||
stdout=stdout,
|
||||
stderr=stderr,
|
||||
stream=stream,
|
||||
logs=True
|
||||
)
|
||||
|
||||
def port(self, container, private_port):
|
||||
if isinstance(container, dict):
|
||||
container = container.get('Id')
|
||||
res = self._get(self._url("/containers/{0}/json".format(container)))
|
||||
self._raise_for_status(res)
|
||||
json_ = res.json()
|
||||
s_port = str(private_port)
|
||||
f_port = None
|
||||
if s_port in json_['NetworkSettings']['PortMapping']['Udp']:
|
||||
f_port = json_['NetworkSettings']['PortMapping']['Udp'][s_port]
|
||||
elif s_port in json_['NetworkSettings']['PortMapping']['Tcp']:
|
||||
f_port = json_['NetworkSettings']['PortMapping']['Tcp'][s_port]
|
||||
|
||||
return f_port
|
||||
|
||||
def pull(self, repository, tag=None, stream=False):
|
||||
registry, repo_name = auth.resolve_repository_name(repository)
|
||||
if repo_name.count(":") == 1:
|
||||
repository, tag = repository.rsplit(":", 1)
|
||||
|
||||
params = {
|
||||
'tag': tag,
|
||||
'fromImage': repository
|
||||
}
|
||||
headers = {}
|
||||
|
||||
if utils.compare_version('1.5', self._version) >= 0:
|
||||
# If we don't have any auth data so far, try reloading the config
|
||||
# file one more time in case anything showed up in there.
|
||||
if not self._auth_configs:
|
||||
self._auth_configs = auth.load_config()
|
||||
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
|
||||
|
||||
# Do not fail here if no atuhentication exists for this specific
|
||||
# registry as we can have a readonly pull. Just put the header if
|
||||
# we can.
|
||||
if authcfg:
|
||||
headers['X-Registry-Auth'] = auth.encode_header(authcfg)
|
||||
|
||||
response = self._post(self._url('/images/create'), params=params,
|
||||
headers=headers, stream=stream, timeout=None)
|
||||
|
||||
if stream:
|
||||
return self._stream_helper(response)
|
||||
else:
|
||||
return self._result(response)
|
||||
|
||||
def push(self, repository, stream=False):
|
||||
registry, repo_name = auth.resolve_repository_name(repository)
|
||||
u = self._url("/images/{0}/push".format(repository))
|
||||
headers = {}
|
||||
|
||||
if utils.compare_version('1.5', self._version) >= 0:
|
||||
# If we don't have any auth data so far, try reloading the config
|
||||
# file one more time in case anything showed up in there.
|
||||
if not self._auth_configs:
|
||||
self._auth_configs = auth.load_config()
|
||||
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
|
||||
|
||||
# Do not fail here if no atuhentication exists for this specific
|
||||
# registry as we can have a readonly pull. Just put the header if
|
||||
# we can.
|
||||
if authcfg:
|
||||
headers['X-Registry-Auth'] = auth.encode_header(authcfg)
|
||||
|
||||
response = self._post_json(u, None, headers=headers, stream=stream)
|
||||
else:
|
||||
response = self._post_json(u, authcfg, stream=stream)
|
||||
|
||||
return stream and self._stream_helper(response) \
|
||||
or self._result(response)
|
||||
|
||||
    def remove_container(self, container, v=False, link=False):
        if isinstance(container, dict):
            container = container.get('Id')
        params = {'v': v, 'link': link}
        res = self._delete(self._url("/containers/" + container),
                           params=params)
        self._raise_for_status(res)

    def remove_image(self, image):
        res = self._delete(self._url("/images/" + image))
        self._raise_for_status(res)

    def restart(self, container, timeout=10):
        if isinstance(container, dict):
            container = container.get('Id')
        params = {'t': timeout}
        url = self._url("/containers/{0}/restart".format(container))
        res = self._post(url, params=params)
        self._raise_for_status(res)

    def search(self, term):
        return self._result(self._get(self._url("/images/search"),
                                      params={'term': term}),
                            True)

    def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
              publish_all_ports=False, links=None, privileged=False):
        if isinstance(container, dict):
            container = container.get('Id')

        if isinstance(lxc_conf, dict):
            formatted = []
            for k, v in six.iteritems(lxc_conf):
                formatted.append({'Key': k, 'Value': str(v)})
            lxc_conf = formatted

        start_config = {
            'LxcConf': lxc_conf
        }
        if binds:
            bind_pairs = [
                '{0}:{1}'.format(host, dest) for host, dest in binds.items()
            ]
            start_config['Binds'] = bind_pairs

        if port_bindings:
            start_config['PortBindings'] = utils.convert_port_bindings(
                port_bindings
            )

        start_config['PublishAllPorts'] = publish_all_ports

        if links:
            formatted_links = [
                '{0}:{1}'.format(k, v) for k, v in sorted(six.iteritems(links))
            ]

            start_config['Links'] = formatted_links

        start_config['Privileged'] = privileged

        url = self._url("/containers/{0}/start".format(container))
        res = self._post_json(url, data=start_config)
        self._raise_for_status(res)

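Example (not part of the diff): an illustrative call showing the argument shapes start() converts into a host config. The container id, paths and ports are invented for the example.

    client.start(
        'my_container_id',                             # or a dict with an 'Id' key
        binds={'/srv/app': '/code'},                   # host path -> container path
        port_bindings={8000: 8000, '53/udp': ('127.0.0.1', 53)},
        links={'figtest_db_1': 'db'},                  # container name -> alias
        publish_all_ports=False,
        privileged=False,
    )
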
    def stop(self, container, timeout=10):
        if isinstance(container, dict):
            container = container.get('Id')
        params = {'t': timeout}
        url = self._url("/containers/{0}/stop".format(container))
        res = self._post(url, params=params,
                         timeout=max(timeout, self._timeout))
        self._raise_for_status(res)

    def tag(self, image, repository, tag=None, force=False):
        params = {
            'tag': tag,
            'repo': repository,
            'force': 1 if force else 0
        }
        url = self._url("/images/{0}/tag".format(image))
        res = self._post(url, params=params)
        self._raise_for_status(res)
        return res.status_code == 201

    def top(self, container):
        u = self._url("/containers/{0}/top".format(container))
        return self._result(self._get(u), True)

    def version(self):
        return self._result(self._get(self._url("/version")), True)

    def wait(self, container):
        if isinstance(container, dict):
            container = container.get('Id')
        url = self._url("/containers/{0}/wait".format(container))
        res = self._post(url, timeout=None)
        self._raise_for_status(res)
        json_ = res.json()
        if 'StatusCode' in json_:
            return json_['StatusCode']
        return -1

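Example (not part of the diff): wait() blocks until the container exits and returns its StatusCode, or -1 if the daemon's reply carries none. The container id below is an assumption.

    exit_code = client.wait('my_container_id')
    if exit_code != 0:
        print('container exited with status %s' % exit_code)
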
1
fig/packages/docker/unixconn/__init__.py
Normal file
@@ -0,0 +1 @@
from .unixconn import UnixAdapter  # flake8: noqa
71
fig/packages/docker/unixconn/unixconn.py
Normal file
@@ -0,0 +1,71 @@
# Copyright 2013 dotCloud inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six

if six.PY3:
    import http.client as httplib
else:
    import httplib
import requests.adapters
import socket

try:
    import requests.packages.urllib3.connectionpool as connectionpool
except ImportError:
    import urllib3.connectionpool as connectionpool


class UnixHTTPConnection(httplib.HTTPConnection, object):
    def __init__(self, base_url, unix_socket, timeout=60):
        httplib.HTTPConnection.__init__(self, 'localhost', timeout=timeout)
        self.base_url = base_url
        self.unix_socket = unix_socket
        self.timeout = timeout

    def connect(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.timeout)
        sock.connect(self.base_url.replace("unix:/", ""))
        self.sock = sock

    def _extract_path(self, url):
        # remove the base_url entirely..
        return url.replace(self.base_url, "")

    def request(self, method, url, **kwargs):
        url = self._extract_path(self.unix_socket)
        super(UnixHTTPConnection, self).request(method, url, **kwargs)


class UnixHTTPConnectionPool(connectionpool.HTTPConnectionPool):
    def __init__(self, base_url, socket_path, timeout=60):
        connectionpool.HTTPConnectionPool.__init__(self, 'localhost',
                                                   timeout=timeout)
        self.base_url = base_url
        self.socket_path = socket_path
        self.timeout = timeout

    def _new_conn(self):
        return UnixHTTPConnection(self.base_url, self.socket_path,
                                  self.timeout)


class UnixAdapter(requests.adapters.HTTPAdapter):
    def __init__(self, base_url, timeout=60):
        self.base_url = base_url
        self.timeout = timeout
        super(UnixAdapter, self).__init__()

    def get_connection(self, socket_path, proxies=None):
        return UnixHTTPConnectionPool(self.base_url, socket_path, self.timeout)
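Example (not part of the diff): a rough sketch of how UnixAdapter is meant to be used, mounted on a requests session so requests against the Docker socket go over AF_UNIX instead of TCP. The base_url and endpoint are assumptions; the vendored Client is presumed to do something similar internally.

    import requests
    from fig.packages.docker.unixconn import UnixAdapter

    base_url = 'unix://var/run/docker.sock'   # assumed socket URL
    session = requests.Session()
    # URLs starting with base_url are routed through the adapter, which opens
    # an AF_UNIX connection to the socket path embedded in base_url.
    session.mount(base_url, UnixAdapter(base_url))
    print(session.get(base_url + '/version').text)
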
3
fig/packages/docker/utils/__init__.py
Normal file
@@ -0,0 +1,3 @@
from .utils import (
    compare_version, convert_port_bindings, mkbuildcontext, ping, tar
)  # flake8: noqa
96
fig/packages/docker/utils/utils.py
Normal file
@@ -0,0 +1,96 @@
# Copyright 2013 dotCloud inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import tarfile
import tempfile

import requests
import six


def mkbuildcontext(dockerfile):
    f = tempfile.NamedTemporaryFile()
    t = tarfile.open(mode='w', fileobj=f)
    if isinstance(dockerfile, io.StringIO):
        dfinfo = tarfile.TarInfo('Dockerfile')
        if six.PY3:
            raise TypeError('Please use io.BytesIO to create in-memory '
                            'Dockerfiles with Python 3')
        else:
            dfinfo.size = len(dockerfile.getvalue())
    elif isinstance(dockerfile, io.BytesIO):
        dfinfo = tarfile.TarInfo('Dockerfile')
        dfinfo.size = len(dockerfile.getvalue())
    else:
        dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
    t.addfile(dfinfo, dockerfile)
    t.close()
    f.seek(0)
    return f


def tar(path):
    f = tempfile.NamedTemporaryFile()
    t = tarfile.open(mode='w', fileobj=f)
    t.add(path, arcname='.')
    t.close()
    f.seek(0)
    return f


def compare_version(v1, v2):
    return float(v2) - float(v1)


def ping(url):
    try:
        res = requests.get(url)
        return res.status >= 400
    except Exception:
        return False


def _convert_port_binding(binding):
    result = {'HostIp': '', 'HostPort': ''}
    if isinstance(binding, tuple):
        if len(binding) == 2:
            result['HostPort'] = binding[1]
            result['HostIp'] = binding[0]
        elif isinstance(binding[0], six.string_types):
            result['HostIp'] = binding[0]
        else:
            result['HostPort'] = binding[0]
    else:
        result['HostPort'] = binding

    if result['HostPort'] is None:
        result['HostPort'] = ''
    else:
        result['HostPort'] = str(result['HostPort'])

    return result


def convert_port_bindings(port_bindings):
    result = {}
    for k, v in six.iteritems(port_bindings):
        key = str(k)
        if '/' not in key:
            key = key + '/tcp'
        if isinstance(v, list):
            result[key] = [_convert_port_binding(binding) for binding in v]
        else:
            result[key] = [_convert_port_binding(v)]
    return result
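Example (not part of the diff): what convert_port_bindings() produces for a couple of invented inputs.

    bindings = convert_port_bindings({8000: 8000, '53/udp': ('127.0.0.1', 53)})
    # bindings['8000/tcp'] -> [{'HostIp': '', 'HostPort': '8000'}]
    # bindings['53/udp']   -> [{'HostIp': '127.0.0.1', 'HostPort': '53'}]
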
fig/project.py
@@ -1,4 +1,10 @@
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
from .service import Service
from .compat.functools import cmp_to_key

log = logging.getLogger(__name__)

def sort_service_dicts(services):
    # Sort in dependency order
@@ -10,7 +16,7 @@ def sort_service_dicts(services):
        elif y_deps_x and not x_deps_y:
            return -1
        return 0
    return sorted(services, cmp=cmp)
    return sorted(services, key=cmp_to_key(cmp))

class Project(object):
    """
@@ -40,7 +46,7 @@ class Project(object):
    @classmethod
    def from_config(cls, name, config, client):
        dicts = []
        for service_name, service in config.items():
        for service_name, service in list(config.items()):
            service['name'] = service_name
            dicts.append(service)
        return cls.from_dicts(name, dicts, client)
@@ -73,13 +79,22 @@ class Project(object):
        unsorted = [self.get_service(name) for name in service_names]
        return [s for s in self.services if s in unsorted]

    def create_containers(self, service_names=None):
    def recreate_containers(self, service_names=None):
        """
        For each service, creates a container if there are none.
        For each service, create or recreate their containers.
        Returns a tuple with two lists. The first is a list of
        (service, old_container) tuples; the second is a list
        of (service, new_container) tuples.
        """
        old = []
        new = []

        for service in self.get_services(service_names):
            if len(service.containers(stopped=True)) == 0:
                service.create_container()
            (s_old, s_new) = service.recreate_containers()
            old += [(service, container) for container in s_old]
            new += [(service, container) for container in s_new]

        return (old, new)

    def start(self, service_names=None, **options):
        for service in self.get_services(service_names):
@@ -98,7 +113,7 @@ class Project(object):
            if service.can_be_built():
                service.build(**options)
            else:
                log.info('%s uses an image, skipping')
                log.info('%s uses an image, skipping' % service.name)

    def remove_stopped(self, service_names=None, **options):
        for service in self.get_services(service_names):
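Example (not part of the diff): a sketch of how the new Project.recreate_containers() return value might be consumed. The project name, services and client are assumptions standing in for real setup code.

    project = Project('figtest', [web_service, db_service], client)
    old, new = project.recreate_containers()
    # 'old' pairs each service with its stopped old/intermediate container,
    # 'new' pairs it with the freshly created replacement; the old ones are
    # not removed automatically.
    for service, container in old:
        container.remove()
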
130
fig/service.py
@@ -1,4 +1,6 @@
from docker.client import APIError
from __future__ import unicode_literals
from __future__ import absolute_import
from .packages.docker.client import APIError
import logging
import re
import os
@@ -12,6 +14,10 @@ class BuildError(Exception):
    pass


class CannotBeScaledError(Exception):
    pass


class Service(object):
    def __init__(self, name, client=None, project='default', links=[], **options):
        if not re.match('^[a-zA-Z0-9]+$', name):
@@ -41,19 +47,57 @@ class Service(object):
    def start(self, **options):
        for c in self.containers(stopped=True):
            if not c.is_running:
                log.info("Starting %s..." % c.name)
                self.start_container(c, **options)

    def stop(self, **options):
        for c in self.containers():
            log.info("Stopping %s..." % c.name)
            c.stop(**options)

    def kill(self, **options):
        for c in self.containers():
            log.info("Killing %s..." % c.name)
            c.kill(**options)

    def scale(self, desired_num):
        if not self.can_be_scaled():
            raise CannotBeScaledError()

        # Create enough containers
        containers = self.containers(stopped=True)
        while len(containers) < desired_num:
            containers.append(self.create_container())

        running_containers = []
        stopped_containers = []
        for c in containers:
            if c.is_running:
                running_containers.append(c)
            else:
                stopped_containers.append(c)
        running_containers.sort(key=lambda c: c.number)
        stopped_containers.sort(key=lambda c: c.number)

        # Stop containers
        while len(running_containers) > desired_num:
            c = running_containers.pop()
            log.info("Stopping %s..." % c.name)
            c.stop(timeout=1)
            stopped_containers.append(c)

        # Start containers
        while len(running_containers) < desired_num:
            c = stopped_containers.pop(0)
            log.info("Starting %s..." % c.name)
            c.start()
            running_containers.append(c)


    def remove_stopped(self, **options):
        for c in self.containers(stopped=True):
            if not c.is_running:
                log.info("Removing %s..." % c.name)
                c.remove(**options)

    def create_container(self, one_off=False, **override_options):
@@ -64,13 +108,55 @@ class Service(object):
        container_options = self._get_container_options(override_options, one_off=one_off)
        try:
            return Container.create(self.client, **container_options)
        except APIError, e:
            if e.response.status_code == 404 and e.explanation and 'No such image' in e.explanation:
        except APIError as e:
            if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
                log.info('Pulling image %s...' % container_options['image'])
                self.client.pull(container_options['image'])
                return Container.create(self.client, **container_options)
            raise

    def recreate_containers(self, **override_options):
        """
        If a container for this service doesn't exist, create one. If there are
        any, stop them and create new ones. Does not remove the old containers.
        """
        containers = self.containers(stopped=True)

        if len(containers) == 0:
            log.info("Creating %s..." % self.next_container_name())
            return ([], [self.create_container(**override_options)])
        else:
            old_containers = []
            new_containers = []

            for c in containers:
                log.info("Recreating %s..." % c.name)
                (old_container, new_container) = self.recreate_container(c, **override_options)
                old_containers.append(old_container)
                new_containers.append(new_container)

            return (old_containers, new_containers)

    def recreate_container(self, container, **override_options):
        if container.is_running:
            container.stop(timeout=1)

        intermediate_container = Container.create(
            self.client,
            image='ubuntu',
            command='echo',
            volumes_from=container.id,
        )
        intermediate_container.start()
        intermediate_container.wait()
        container.remove()

        options = dict(override_options)
        options['volumes_from'] = intermediate_container.id
        new_container = self.create_container(**options)

        return (intermediate_container, new_container)

    def start_container(self, container=None, **override_options):
        if container is None:
            container = self.create_container(**override_options)
@@ -82,9 +168,9 @@ class Service(object):

        if options.get('ports', None) is not None:
            for port in options['ports']:
                port = unicode(port)
                port = str(port)
                if ':' in port:
                    internal_port, external_port = port.split(':', 1)
                    external_port, internal_port = port.split(':', 1)
                    port_bindings[int(internal_port)] = int(external_port)
                else:
                    port_bindings[int(port)] = None
@@ -93,8 +179,9 @@ class Service(object):

        if options.get('volumes', None) is not None:
            for volume in options['volumes']:
                external_dir, internal_dir = volume.split(':')
                volume_bindings[os.path.abspath(external_dir)] = internal_dir
                if ':' in volume:
                    external_dir, internal_dir = volume.split(':')
                    volume_bindings[os.path.abspath(external_dir)] = internal_dir

        container.start(
            links=self._get_links(),
@@ -107,7 +194,7 @@ class Service(object):
        bits = [self.project, self.name]
        if one_off:
            bits.append('run')
        return '_'.join(bits + [unicode(self.next_container_number(one_off=one_off))])
        return '_'.join(bits + [str(self.next_container_number(one_off=one_off))])

    def next_container_number(self, one_off=False):
        numbers = [parse_name(c.name)[2] for c in self.containers(stopped=True, one_off=one_off)]
@@ -132,10 +219,16 @@ class Service(object):
        container_options['name'] = self.next_container_name(one_off)

        if 'ports' in container_options:
            container_options['ports'] = [unicode(p).split(':')[0] for p in container_options['ports']]
            ports = []
            for port in container_options['ports']:
                port = str(port)
                if ':' in port:
                    port = port.split(':')[-1]
                ports.append(port)
            container_options['ports'] = ports

        if 'volumes' in container_options:
            container_options['volumes'] = dict((v.split(':')[1], {}) for v in container_options['volumes'])
            container_options['volumes'] = dict((split_volume(v)[1], {}) for v in container_options['volumes'])

        if self.can_be_built():
            if len(self.client.images(name=self._build_tag_name())) == 0:
@@ -176,6 +269,12 @@ class Service(object):
        """
        return '%s_%s' % (self.project, self.name)

    def can_be_scaled(self):
        for port in self.options.get('ports', []):
            if ':' in str(port):
                return False
        return True


NAME_RE = re.compile(r'^([^_]+)_([^_]+)_(run_)?(\d+)$')

@@ -206,3 +305,14 @@ def get_container_name(container):
    for name in container['Names']:
        if len(name.split('/')) == 2:
            return name[1:]


def split_volume(v):
    """
    If v is of the format EXTERNAL:INTERNAL, returns (EXTERNAL, INTERNAL).
    If v is of the format INTERNAL, returns (None, INTERNAL).
    """
    if ':' in v:
        return v.split(':', 1)
    else:
        return (None, v)
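Example (not part of the diff): the two volume formats split_volume() accepts, with invented paths. Note the first form comes back as a two-element list (from str.split), the second as a tuple.

    split_volume('/host/logs:/var/log/app')   # -> ['/host/logs', '/var/log/app']
    split_volume('/var/db')                   # -> (None, '/var/db')
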
requirements-dev.txt
@@ -1 +1,2 @@
nose
nose==1.3.0
unittest2==0.5.1
requirements.txt
@@ -1,4 +1,7 @@
docker-py==0.2.3
requests==1.2.3
websocket-client==0.11.0
docopt==0.6.1
PyYAML==3.10
texttable==0.8.1
# docker requires six==1.3.0
six==1.3.0
20
script/travis
Executable file
@@ -0,0 +1,20 @@
#!/bin/bash

# Exit on first error
set -ex

export PYTHON_EGG_CACHE="/tmp/.python-eggs"

TRAVIS_PYTHON_VERSION=$1
source /home/travis/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/activate
env

# Kill background processes on exit
trap 'kill $(jobs -p)' SIGINT SIGTERM EXIT

# Start docker daemon
docker -d -H unix:///var/run/docker.sock 2>> /dev/null >> /dev/null &
sleep 2

# $init is set by sekexe
cd $(dirname $init)/.. && nosetests
18
script/travis-install
Executable file
@@ -0,0 +1,18 @@
#!/bin/bash

set -ex

sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
sudo apt-get update
echo exit 101 | sudo tee /usr/sbin/policy-rc.d
sudo chmod +x /usr/sbin/policy-rc.d
sudo apt-get install -qy slirp lxc lxc-docker-0.7.5
git clone git://github.com/jpetazzo/sekexe
python setup.py install
pip install -r requirements-dev.txt

if [[ $TRAVIS_PYTHON_VERSION == "2.6" ]]; then
    pip install unittest2
fi
19
setup.py
@@ -1,16 +1,17 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from setuptools import setup
from __future__ import unicode_literals
from __future__ import absolute_import
from setuptools import setup, find_packages
import re
import os
import codecs


# Borrowed from
# https://github.com/jezdez/django_compressor/blob/develop/setup.py
def read(*parts):
    return codecs.open(os.path.join(os.path.dirname(__file__), *parts)).read()
    path = os.path.join(os.path.dirname(__file__), *parts)
    with codecs.open(path, encoding='utf-8') as fobj:
        return fobj.read()


def find_version(*file_paths):
@@ -24,6 +25,9 @@ def find_version(*file_paths):
with open('requirements.txt') as f:
    install_requires = f.read().splitlines()

with open('requirements-dev.txt') as f:
    tests_require = f.read().splitlines()

setup(
    name='fig',
    version=find_version("fig", "__init__.py"),
@@ -31,10 +35,11 @@ setup(
    url='https://github.com/orchardup/fig',
    author='Orchard Laboratories Ltd.',
    author_email='hello@orchardup.com',
    packages=['fig', 'fig.cli'],
    package_data={},
    packages=find_packages(),
    include_package_data=True,
    test_suite='nose.collector',
    install_requires=install_requires,
    tests_require=tests_require,
    entry_points="""
    [console_scripts]
    fig=fig.cli.main:main
tests/__init__.py
@@ -0,0 +1,7 @@
import sys

if sys.version_info >= (2,7):
    import unittest
else:
    import unittest2 as unittest
38
tests/cli_test.py
Normal file
@@ -0,0 +1,38 @@
from __future__ import unicode_literals
from __future__ import absolute_import
from . import unittest
from fig.cli.main import TopLevelCommand

class CLITestCase(unittest.TestCase):
    def setUp(self):
        self.command = TopLevelCommand()
        self.command.base_dir = 'tests/fixtures/simple-figfile'

    def test_help(self):
        self.assertRaises(SystemExit, lambda: self.command.dispatch(['-h'], None))

    def test_ps(self):
        self.command.dispatch(['ps'], None)

    def test_scale(self):
        project = self.command.project

        self.command.scale({'SERVICE=NUM': ['simple=1']})
        self.assertEqual(len(project.get_service('simple').containers()), 1)

        self.command.scale({'SERVICE=NUM': ['simple=3', 'another=2']})
        self.assertEqual(len(project.get_service('simple').containers()), 3)
        self.assertEqual(len(project.get_service('another').containers()), 2)

        self.command.scale({'SERVICE=NUM': ['simple=1', 'another=1']})
        self.assertEqual(len(project.get_service('simple').containers()), 1)
        self.assertEqual(len(project.get_service('another').containers()), 1)

        self.command.scale({'SERVICE=NUM': ['simple=1', 'another=1']})
        self.assertEqual(len(project.get_service('simple').containers()), 1)
        self.assertEqual(len(project.get_service('another').containers()), 1)

        self.command.scale({'SERVICE=NUM': ['simple=0', 'another=0']})
        self.assertEqual(len(project.get_service('simple').containers()), 0)
        self.assertEqual(len(project.get_service('another').containers()), 0)
tests/container_test.py
@@ -1,3 +1,4 @@
from __future__ import unicode_literals
from .testcases import DockerClientTestCase
from fig.container import Container

@@ -34,3 +35,17 @@ class ContainerTest(DockerClientTestCase):
            'FOO': 'BAR',
            'BAZ': 'DOGE',
        })

    def test_number(self):
        container = Container.from_ps(self.client, {
            "Id": "abc",
            "Image": "ubuntu:12.04",
            "Command": "sleep 300",
            "Created": 1387384730,
            "Status": "Up 8 seconds",
            "Ports": None,
            "SizeRw": 0,
            "SizeRootFs": 0,
            "Names": ["/db_1"]
        }, has_been_inspected=True)
        self.assertEqual(container.number, 1)
6
tests/fixtures/simple-figfile/fig.yml
Normal file
@@ -0,0 +1,6 @@
simple:
  image: ubuntu
  command: /bin/sleep 300
another:
  image: ubuntu
  command: /bin/sleep 300
tests/project_test.py
@@ -1,3 +1,4 @@
from __future__ import unicode_literals
from fig.project import Project
from .testcases import DockerClientTestCase

@@ -41,16 +42,22 @@ class ProjectTest(DockerClientTestCase):
        project = Project('test', [web], self.client)
        self.assertEqual(project.get_service('web'), web)

    def test_create_containers(self):
    def test_recreate_containers(self):
        web = self.create_service('web')
        db = self.create_service('db')
        project = Project('test', [web, db], self.client)

        project.create_containers(service_names=['web'])
        old_web_container = web.create_container()
        self.assertEqual(len(web.containers(stopped=True)), 1)
        self.assertEqual(len(db.containers(stopped=True)), 0)

        project.create_containers()
        (old, new) = project.recreate_containers()
        self.assertEqual(len(old), 1)
        self.assertEqual(old[0][0], web)
        self.assertEqual(len(new), 2)
        self.assertEqual(new[0][0], web)
        self.assertEqual(new[1][0], db)

        self.assertEqual(len(web.containers(stopped=True)), 1)
        self.assertEqual(len(db.containers(stopped=True)), 1)
tests/service_test.py
@@ -1,4 +1,7 @@
from __future__ import unicode_literals
from __future__ import absolute_import
from fig import Service
from fig.service import CannotBeScaledError
from .testcases import DockerClientTestCase


@@ -100,6 +103,36 @@ class ServiceTest(DockerClientTestCase):
        container = db.create_container(one_off=True)
        self.assertEqual(container.name, 'figtest_db_run_1')

    def test_create_container_with_unspecified_volume(self):
        service = self.create_service('db', volumes=['/var/db'])
        container = service.create_container()
        service.start_container(container)
        self.assertIn('/var/db', container.inspect()['Volumes'])

    def test_recreate_containers(self):
        service = self.create_service('db', environment={'FOO': '1'}, volumes=['/var/db'])
        old_container = service.create_container()
        self.assertEqual(old_container.dictionary['Config']['Env'], ['FOO=1'])
        self.assertEqual(old_container.name, 'figtest_db_1')
        service.start_container(old_container)
        volume_path = old_container.inspect()['Volumes']['/var/db']

        num_containers_before = len(self.client.containers(all=True))

        service.options['environment']['FOO'] = '2'
        (old, new) = service.recreate_containers()
        self.assertEqual(len(old), 1)
        self.assertEqual(len(new), 1)

        new_container = new[0]
        self.assertEqual(new_container.dictionary['Config']['Env'], ['FOO=2'])
        self.assertEqual(new_container.name, 'figtest_db_1')
        service.start_container(new_container)
        self.assertEqual(new_container.inspect()['Volumes']['/var/db'], volume_path)

        self.assertEqual(len(self.client.containers(all=True)), num_containers_before + 1)
        self.assertNotEqual(old_container.id, new_container.id)

    def test_start_container_passes_through_options(self):
        db = self.create_service('db')
        db.start_container(environment={'FOO': 'BAR'})

@@ -155,4 +188,26 @@ class ServiceTest(DockerClientTestCase):
        self.assertIn('8000/tcp', container['HostConfig']['PortBindings'])
        self.assertEqual(container['HostConfig']['PortBindings']['8000/tcp'][0]['HostPort'], '8000')

    def test_start_container_creates_fixed_external_ports_when_it_is_different_to_internal_port(self):
        service = self.create_service('web', ports=['8001:8000'])
        container = service.start_container().inspect()
        self.assertIn('8000/tcp', container['HostConfig']['PortBindings'])
        self.assertEqual(container['HostConfig']['PortBindings']['8000/tcp'][0]['HostPort'], '8001')

    def test_scale(self):
        service = self.create_service('web')
        service.scale(1)
        self.assertEqual(len(service.containers()), 1)
        service.scale(3)
        self.assertEqual(len(service.containers()), 3)
        service.scale(1)
        self.assertEqual(len(service.containers()), 1)
        service.scale(0)
        self.assertEqual(len(service.containers()), 0)

    def test_scale_on_service_that_cannot_be_scaled(self):
        service = self.create_service('web', ports=['8000:8000'])
        self.assertRaises(CannotBeScaledError, lambda: service.scale(1))
tests/testcases.py
@@ -1,10 +1,12 @@
from docker import Client
from __future__ import unicode_literals
from __future__ import absolute_import
from fig.packages.docker import Client
from fig.service import Service
from fig.cli.utils import docker_url
from unittest import TestCase
from . import unittest


class DockerClientTestCase(TestCase):
class DockerClientTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.client = Client(docker_url())
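Example (not part of the diff): a rough sketch of what a new test built on DockerClientTestCase could look like. create_service() is a helper the existing tests call and is assumed to live elsewhere in testcases.py; the service name is invented.

    class ExampleTest(DockerClientTestCase):
        def test_create_and_start(self):
            service = self.create_service('web')   # assumed helper
            container = service.create_container()
            service.start_container(container)
            self.assertEqual(len(service.containers()), 1)
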