Mirror of https://github.com/docker/compose.git (synced 2026-02-16 05:22:33 +08:00)

Compare commits (22 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 405241920e | |
| | 959dd849cf | |
| | a24843e1e4 | |
| | df05472bcc | |
| | ce59a4c223 | |
| | 1ff05ac060 | |
| | 1192a4e817 | |
| | 4092ae5d5a | |
| | 0de595f951 | |
| | 60514c1adb | |
| | c960b028b9 | |
| | 8c81a9da7a | |
| | 5340a6d760 | |
| | a85d2bc64c | |
| | 50a4afaf17 | |
| | ddec1f61a6 | |
| | fa720787d6 | |
| | a75b6249f8 | |
| | 86dad9247d | |
| | c365ac0c11 | |
| | d811500fa0 | |
| | 204655be13 | |
CHANGELOG.md (26 changes)

@@ -1,6 +1,32 @@
 Change log
 ==========
 
+1.27.4 (2020-09-24)
+-------------------
+
+### Bugs
+
+- Remove path checks for bind mounts
+
+- Fix port rendering to output long form syntax for non-v1
+
+- Add protocol to the docker socket address
+
+1.27.3 (2020-09-16)
+-------------------
+
+### Bugs
+
+- Merge `max_replicas_per_node` on `docker-compose config`
+
+- Fix `depends_on` serialization on `docker-compose config`
+
+- Fix scaling when some containers are not running on `docker-compose up`
+
+- Enable relative paths for `driver_opts.device` for `local` driver
+
+- Allow strings for `cpus` fields
+
 1.27.2 (2020-09-10)
 -------------------
@@ -1 +1 @@
-__version__ = '1.27.2'
+__version__ = '1.27.4'
@@ -20,6 +20,7 @@ from ..utils import json_hash
 from ..utils import parse_bytes
 from ..utils import parse_nanoseconds_int
 from ..utils import splitdrive
+from ..version import ComposeVersion
 from .environment import env_vars_from_file
 from .environment import Environment
 from .environment import split_env
@@ -184,6 +185,13 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
     def from_filename(cls, filename):
         return cls(filename, load_yaml(filename))
 
+    @cached_property
+    def config_version(self):
+        version = self.config.get('version', None)
+        if isinstance(version, dict):
+            return V1
+        return ComposeVersion(version) if version else self.version
+
     @cached_property
     def version(self):
         version = self.config.get('version', None)
@@ -222,15 +230,13 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
                 'Version "{}" in "{}" is invalid.'
                 .format(version, self.filename))
 
-        if version.startswith("1"):
-            version = V1
-
-        if version == V1:
+        if version.startswith("1"):
             raise ConfigurationError(
                 'Version in "{}" is invalid. {}'
                 .format(self.filename, VERSION_EXPLANATION)
             )
-        return version
+
+        return VERSION
 
     def get_service(self, name):
         return self.get_service_dicts()[name]
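Taken together, the two hunks above split what used to be a single notion of version: `config_version` keeps whatever the file declares (falling back to V1 for version-less files), while `version` is normalized to the latest supported schema. A minimal sketch of that behavior, using a simplified stand-in class and assumed constants rather than compose's own `ConfigFile`:

```python
V1 = "1"
VERSION = "3.8"          # assumed stand-in for the "latest supported" version


class FakeConfigFile:
    """Simplified stand-in for compose's ConfigFile, illustrating the split."""

    def __init__(self, config):
        self.config = config

    @property
    def config_version(self):
        declared = self.config.get("version")
        if isinstance(declared, dict):      # v1 files may have a service literally named "version"
            return V1
        return declared if declared else self.version

    @property
    def version(self):
        declared = self.config.get("version")
        if not declared or isinstance(declared, dict):
            return V1                       # no usable version key: treat as a v1 file
        if str(declared).startswith("1"):
            raise ValueError("explicit version 1 is rejected, as in the hunk above")
        return VERSION                      # any other declared version runs as the latest schema


cfg = FakeConfigFile({"version": "2.4", "services": {}})
assert cfg.config_version == "2.4"   # what the file declares
assert cfg.version == VERSION        # what the loader actually uses
```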
@@ -253,8 +259,10 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
         return {} if self.version == V1 else self.config.get('configs', {})
 
 
-class Config(namedtuple('_Config', 'version services volumes networks secrets configs')):
+class Config(namedtuple('_Config', 'config_version version services volumes networks secrets configs')):
     """
+    :param config_version: configuration file version
+    :type config_version: int
     :param version: configuration version
     :type version: int
     :param services: List of service description dictionaries
@@ -401,9 +409,8 @@ def load(config_details, interpolate=True):
     for service_dict in service_dicts:
         match_named_volumes(service_dict, volumes)
 
-    version = main_file.version
-
-    return Config(version, service_dicts, volumes, networks, secrets, configs)
+    return Config(main_file.config_version, main_file.version,
+                  service_dicts, volumes, networks, secrets, configs)
 
 
 def load_mapping(config_files, get_func, entity_type, working_dir=None):
@@ -423,20 +430,36 @@ def load_mapping(config_files, get_func, entity_type, working_dir=None):
         elif not config.get('name'):
             config['name'] = name
 
-        if 'driver_opts' in config:
-            config['driver_opts'] = build_string_dict(
-                config['driver_opts']
-            )
-
         if 'labels' in config:
             config['labels'] = parse_labels(config['labels'])
 
         if 'file' in config:
             config['file'] = expand_path(working_dir, config['file'])
 
+        if 'driver_opts' in config:
+            config['driver_opts'] = build_string_dict(
+                config['driver_opts']
+            )
+            device = format_device_option(entity_type, config)
+            if device:
+                config['driver_opts']['device'] = device
     return mapping
 
 
+def format_device_option(entity_type, config):
+    if entity_type != 'Volume':
+        return
+    # default driver is 'local'
+    driver = config.get('driver', 'local')
+    if driver != 'local':
+        return
+    o = config['driver_opts'].get('o')
+    device = config['driver_opts'].get('device')
+    if o and o == 'bind' and device:
+        fullpath = os.path.abspath(os.path.expanduser(device))
+        return fullpath
+
+
 def validate_external(entity_type, name, config, version):
     for k in config.keys():
         if entity_type == 'Network' and k == 'driver':
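The new `format_device_option` helper implements the 1.27.3 entry "Enable relative paths for `driver_opts.device` for `local` driver": for a `local` volume bound with `o: bind`, a relative or `~`-prefixed device path is expanded to an absolute one before it reaches the engine. A standalone sketch of the same expansion (an illustrative re-implementation, not compose's code):

```python
import os


def expand_local_bind_device(volume_config):
    """Illustrative re-implementation of the path expansion, not compose's helper."""
    opts = volume_config.get("driver_opts", {})
    if volume_config.get("driver", "local") != "local":
        return opts.get("device")                       # only the default 'local' driver is touched
    if opts.get("o") == "bind" and opts.get("device"):
        # './data' or '~/data' become absolute, mirroring format_device_option above
        return os.path.abspath(os.path.expanduser(opts["device"]))
    return opts.get("device")


volume = {"driver_opts": {"type": "none", "o": "bind", "device": "./data"}}
print(expand_local_bind_device(volume))   # absolute path under the current working directory
```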
@@ -1114,6 +1137,7 @@ def merge_deploy(base, override):
         md['resources'] = dict(resources_md)
     if md.needs_merge('placement'):
         placement_md = MergeDict(md.base.get('placement') or {}, md.override.get('placement') or {})
+        placement_md.merge_scalar('max_replicas_per_node')
         placement_md.merge_field('constraints', merge_unique_items_lists, default=[])
         placement_md.merge_field('preferences', merge_unique_objects_lists, default=[])
         md['placement'] = dict(placement_md)
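The added `merge_scalar('max_replicas_per_node')` call is what makes the setting survive `docker-compose config` when several files are merged. A rough sketch of the intended merge semantics, using a plain function instead of compose's `MergeDict`:

```python
def merge_placement(base, override):
    """Simplified stand-in for compose's MergeDict handling of deploy.placement."""
    merged = dict(base)
    if "max_replicas_per_node" in override:              # scalar: override wins when set
        merged["max_replicas_per_node"] = override["max_replicas_per_node"]
    merged["constraints"] = sorted(                      # lists: merged as unique items
        set(base.get("constraints", [])) | set(override.get("constraints", []))
    )
    return merged


base = {"max_replicas_per_node": 1, "constraints": ["node.role == manager"]}
override = {"constraints": ["engine.labels.aws == true"]}
merged = merge_placement(base, override)
assert merged["max_replicas_per_node"] == 1              # no longer dropped during the merge
assert merged["constraints"] == ["engine.labels.aws == true", "node.role == manager"]
```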
@@ -153,7 +153,7 @@
         "cpu_period": {"type": ["number", "string"]},
         "cpu_rt_period": {"type": ["number", "string"]},
         "cpu_rt_runtime": {"type": ["number", "string"]},
-        "cpus": {"type": "number", "minimum": 0},
+        "cpus": {"type": ["number", "string"]},
         "cpuset": {"type": "string"},
         "credential_spec": {
           "type": "object",
@@ -503,7 +503,7 @@
         "limits": {
           "type": "object",
           "properties": {
-            "cpus": {"type": "number", "minimum": 0},
+            "cpus": {"type": ["number", "string"]},
             "memory": {"type": "string"}
           },
           "additionalProperties": false,
@@ -512,7 +512,7 @@
         "reservations": {
           "type": "object",
          "properties": {
-            "cpus": {"type": "number", "minimum": 0},
+            "cpus": {"type": ["number", "string"]},
            "memory": {"type": "string"},
            "generic_resources": {"$ref": "#/definitions/generic_resources"}
          },
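The schema hunks above relax `cpus` from a non-negative number to number-or-string, so quoted values such as `cpus: "0.5"` validate again. A small check of that difference with the `jsonschema` package, applied to isolated schema fragments rather than the full compose schema:

```python
# Hedged sketch: the loosened schema means a quoted "cpus" value now validates.
from jsonschema import ValidationError, validate

old_fragment = {"type": "object", "properties": {"cpus": {"type": "number", "minimum": 0}}}
new_fragment = {"type": "object", "properties": {"cpus": {"type": ["number", "string"]}}}

service = {"cpus": "0.5"}          # string form, as written in many compose files

try:
    validate(instance=service, schema=old_fragment)
except ValidationError:
    print("rejected by the old schema")

validate(instance=service, schema=new_fragment)   # passes with 1.27.3+
```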
@@ -44,7 +44,7 @@ yaml.SafeDumper.add_representer(types.ServicePort, serialize_dict_type)
 
 
 def denormalize_config(config, image_digests=None):
-    result = {'version': str(config.version)}
+    result = {'version': str(config.config_version)}
     denormalized_services = [
         denormalize_service_dict(
             service_dict,
@@ -121,11 +121,6 @@ def denormalize_service_dict(service_dict, version, image_digest=None):
     if version == V1 and 'network_mode' not in service_dict:
         service_dict['network_mode'] = 'bridge'
 
-    if 'depends_on' in service_dict:
-        service_dict['depends_on'] = sorted([
-            svc for svc in service_dict['depends_on'].keys()
-        ])
-
     if 'healthcheck' in service_dict:
         if 'interval' in service_dict['healthcheck']:
             service_dict['healthcheck']['interval'] = serialize_ns_time_value(
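Dropping this block is the `depends_on` serialization fix: `docker-compose config` no longer flattens the mapping form to a sorted list, so any `condition` entries survive the round trip. The sketch below reproduces only the removed (old) behavior for contrast:

```python
def old_denormalize_depends_on(service_dict):
    """Old behavior only, shown for contrast; 1.27.3+ leaves depends_on untouched."""
    out = dict(service_dict)
    if "depends_on" in out:
        out["depends_on"] = sorted(out["depends_on"].keys())   # conditions are lost here
    return out


service = {
    "image": "busybox",
    "depends_on": {"db": {"condition": "service_started"}},
}

assert old_denormalize_depends_on(service)["depends_on"] == ["db"]   # condition dropped
# The new code keeps service["depends_on"] as-is, matching the updated unit test below.
```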
@@ -411,7 +411,7 @@ class Service:
             stopped = [c for c in containers if not c.is_running]
 
             if stopped:
-                return ConvergencePlan('start', stopped)
+                return ConvergencePlan('start', containers)
 
             return ConvergencePlan('noop', containers)
 
@@ -514,8 +514,9 @@ class Service:
                 self._downscale(containers[scale:], timeout)
                 containers = containers[:scale]
             if start:
+                stopped = [c for c in containers if not c.is_running]
                 _, errors = parallel_execute(
-                    containers,
+                    stopped,
                     lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
                    lambda c: c.name,
                    "Starting",
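These two hunks together address "Fix scaling when some containers are not running on `docker-compose up`": the convergence plan now carries every existing container, and only the stopped subset is handed to the parallel start step, so stopped containers count toward the desired scale instead of triggering extra creates. A toy model of that accounting (not compose's Service class):

```python
from dataclasses import dataclass


@dataclass
class FakeContainer:
    name: str
    is_running: bool


def plan_and_start(containers, scale):
    # keep the full list when deciding what exists vs. what must be created
    existing = containers[:scale]
    to_create = scale - len(existing)
    to_start = [c for c in existing if not c.is_running]   # mirrors the added `stopped = [...]`
    return to_create, [c.name for c in to_start]


containers = [FakeContainer("web_1", False), FakeContainer("web_2", True)]
assert plan_and_start(containers, scale=2) == (0, ["web_1"])   # restart the stopped one, create none
assert plan_and_start(containers, scale=3) == (1, ["web_1"])   # one new container plus the restart
```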
@@ -15,16 +15,16 @@
 
 set -e
 
-VERSION="1.27.2"
+VERSION="1.27.4"
 IMAGE="docker/compose:$VERSION"
 
 
 # Setup options for connecting to docker host
 if [ -z "$DOCKER_HOST" ]; then
-    DOCKER_HOST="/var/run/docker.sock"
+    DOCKER_HOST='unix:///var/run/docker.sock'
 fi
-if [ -S "$DOCKER_HOST" ]; then
-    DOCKER_ADDR="-v $DOCKER_HOST:$DOCKER_HOST -e DOCKER_HOST"
+if [ -S "${DOCKER_HOST#unix://}" ]; then
+    DOCKER_ADDR="-v ${DOCKER_HOST#unix://}:${DOCKER_HOST#unix://} -e DOCKER_HOST"
 else
     DOCKER_ADDR="-e DOCKER_HOST -e DOCKER_TLS_VERIFY -e DOCKER_CERT_PATH"
 fi
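The run-script hunk corresponds to "Add protocol to the docker socket address": `DOCKER_HOST` now defaults to a full `unix://` URL, and the `unix://` scheme is stripped before the socket is tested and bind-mounted. A rough Python rendering of that shell logic, for illustration only:

```python
import os

# Mirror of the run-script logic: default to a scheme-qualified socket address,
# strip the unix:// prefix when checking for and mounting the local socket.
docker_host = os.environ.get("DOCKER_HOST") or "unix:///var/run/docker.sock"
socket_path = docker_host[len("unix://"):] if docker_host.startswith("unix://") else docker_host

if os.path.exists(socket_path):         # run.sh uses `[ -S ... ]`, a stricter socket test
    docker_addr = ["-v", f"{socket_path}:{socket_path}", "-e", "DOCKER_HOST"]
else:                                   # remote daemon: pass connection env vars through
    docker_addr = ["-e", "DOCKER_HOST", "-e", "DOCKER_TLS_VERIFY", "-e", "DOCKER_CERT_PATH"]

print(" ".join(docker_addr))
```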
@@ -359,7 +359,7 @@ services:
                 'web': {
                     'command': 'true',
                     'image': 'alpine:latest',
-                    'ports': ['5643/tcp', '9999/tcp']
+                    'ports': [{'target': 5643}, {'target': 9999}]
                 }
             }
         }
@@ -374,7 +374,7 @@ services:
                 'web': {
                     'command': 'false',
                     'image': 'alpine:latest',
-                    'ports': ['5644/tcp', '9998/tcp']
+                    'ports': [{'target': 5644}, {'target': 9998}]
                 }
             }
         }
@@ -389,7 +389,7 @@ services:
                 'web': {
                     'command': 'echo uwu',
                     'image': 'alpine:3.10.1',
-                    'ports': ['3341/tcp', '4449/tcp']
+                    'ports': [{'target': 3341}, {'target': 4449}]
                 }
             }
         }
@@ -37,6 +37,7 @@ from tests.integration.testcases import no_cluster
 
 def build_config(**kwargs):
     return config.Config(
+        config_version=kwargs.get('version', VERSION),
         version=kwargs.get('version', VERSION),
         services=kwargs.get('services'),
         volumes=kwargs.get('volumes'),
@@ -1347,6 +1348,36 @@ class ProjectTest(DockerClientTestCase):
         project.up()
         assert len(project.containers()) == 3
 
+    def test_project_up_scale_with_stopped_containers(self):
+        config_data = build_config(
+            services=[{
+                'name': 'web',
+                'image': BUSYBOX_IMAGE_WITH_TAG,
+                'command': 'top',
+                'scale': 2
+            }]
+        )
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+
+        project.up()
+        containers = project.containers()
+        assert len(containers) == 2
+
+        self.client.stop(containers[0].id)
+        project.up(scale_override={'web': 2})
+        containers = project.containers()
+        assert len(containers) == 2
+
+        self.client.stop(containers[0].id)
+        project.up(scale_override={'web': 3})
+        assert len(project.containers()) == 3
+
+        self.client.stop(containers[0].id)
+        project.up(scale_override={'web': 1})
+        assert len(project.containers()) == 1
+
     def test_initialize_volumes(self):
         vol_name = '{:x}'.format(random.getrandbits(32))
         full_vol_name = 'composetest_{}'.format(vol_name)
@@ -375,7 +375,7 @@ class ServiceStateTest(DockerClientTestCase):
 
         assert [c.is_running for c in containers] == [False, True]
 
-        assert ('start', containers[0:1]) == web.convergence_plan()
+        assert ('start', containers) == web.convergence_plan()
 
     def test_trigger_recreate_with_config_change(self):
         web = self.create_service('web', command=["top"])
@@ -168,12 +168,14 @@ class ConfigTest(unittest.TestCase):
                 }
             })
         )
+        assert cfg.config_version == VERSION
         assert cfg.version == VERSION
 
        for version in ['2', '2.0', '2.1', '2.2', '2.3',
                        '3', '3.0', '3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8']:
             cfg = config.load(build_config_details({'version': version}))
-            assert cfg.version == version
+            assert cfg.config_version == version
+            assert cfg.version == VERSION
 
     def test_v1_file_version(self):
         cfg = config.load(build_config_details({'web': {'image': 'busybox'}}))
@@ -2543,6 +2545,7 @@ web:
                 'labels': ['com.docker.compose.a=1', 'com.docker.compose.b=2'],
                 'mode': 'replicated',
                 'placement': {
+                    'max_replicas_per_node': 1,
                     'constraints': [
                         'node.role == manager', 'engine.labels.aws == true'
                     ],
@@ -2599,6 +2602,7 @@ web:
                     'com.docker.compose.c': '3'
                 },
                 'placement': {
+                    'max_replicas_per_node': 1,
                     'constraints': [
                         'engine.labels.aws == true', 'engine.labels.dev == true',
                         'node.role == manager', 'node.role == worker'
@@ -5267,7 +5271,7 @@ def get_config_filename_for_files(filenames, subdir=None):
 
 
 class SerializeTest(unittest.TestCase):
-    def test_denormalize_depends_on_v3(self):
+    def test_denormalize_depends(self):
         service_dict = {
             'image': 'busybox',
             'command': 'true',
@@ -5277,27 +5281,7 @@ class SerializeTest(unittest.TestCase):
             }
         }
 
-        assert denormalize_service_dict(service_dict, VERSION) == {
-            'image': 'busybox',
-            'command': 'true',
-            'depends_on': ['service2', 'service3']
-        }
-
-    def test_denormalize_depends_on_v2_1(self):
-        service_dict = {
-            'image': 'busybox',
-            'command': 'true',
-            'depends_on': {
-                'service2': {'condition': 'service_started'},
-                'service3': {'condition': 'service_started'},
-            }
-        }
-
-        assert denormalize_service_dict(service_dict, VERSION) == {
-            'image': 'busybox',
-            'command': 'true',
-            'depends_on': ['service2', 'service3']
-        }
+        assert denormalize_service_dict(service_dict, VERSION) == service_dict
 
     def test_serialize_time(self):
         data = {
@@ -5387,7 +5371,7 @@ class SerializeTest(unittest.TestCase):
         assert serialized_config['secrets']['two'] == {'external': True, 'name': 'two'}
 
     def test_serialize_ports(self):
-        config_dict = config.Config(version=VERSION, services=[
+        config_dict = config.Config(config_version=VERSION, version=VERSION, services=[
             {
                 'ports': [types.ServicePort('80', '8080', None, None, None)],
                 'image': 'alpine',
@@ -5398,8 +5382,20 @@ class SerializeTest(unittest.TestCase):
         serialized_config = yaml.safe_load(serialize_config(config_dict))
         assert [{'published': 8080, 'target': 80}] == serialized_config['services']['web']['ports']
 
+    def test_serialize_ports_v1(self):
+        config_dict = config.Config(config_version=V1, version=V1, services=[
+            {
+                'ports': [types.ServicePort('80', '8080', None, None, None)],
+                'image': 'alpine',
+                'name': 'web'
+            }
+        ], volumes={}, networks={}, secrets={}, configs={})
+
+        serialized_config = yaml.safe_load(serialize_config(config_dict))
+        assert ['8080:80/tcp'] == serialized_config['services']['web']['ports']
+
     def test_serialize_ports_with_ext_ip(self):
-        config_dict = config.Config(version=VERSION, services=[
+        config_dict = config.Config(config_version=VERSION, version=VERSION, services=[
             {
                 'ports': [types.ServicePort('80', '8080', None, None, '127.0.0.1')],
                 'image': 'alpine',
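The renamed and added tests pin down "Fix port rendering to output long form syntax for non-v1": non-v1 configs now serialize ports in long form, while v1 keeps the short string. A standalone sketch of the two output shapes (a stand-in, not compose's serializer):

```python
def render_port(target, published=None, protocol="tcp", v1=False):
    """Illustrative stand-in for the two serialization shapes asserted above."""
    if v1:
        return f"{published}:{target}/{protocol}" if published else f"{target}/{protocol}"
    long_form = {"target": target}
    if published:
        long_form["published"] = published
    return long_form


assert render_port(80, 8080) == {"target": 80, "published": 8080}      # non-v1, long form
assert render_port(80, 8080, v1=True) == "8080:80/tcp"                 # v1, short form
```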
@@ -28,6 +28,7 @@ from compose.service import Service
 
 def build_config(**kwargs):
     return Config(
+        config_version=kwargs.get('config_version', VERSION),
         version=kwargs.get('version', VERSION),
         services=kwargs.get('services'),
         volumes=kwargs.get('volumes'),