Mirror of https://github.com/docker/compose.git (synced 2026-02-12 11:39:23 +08:00)

Compare commits: 1.26.x ... 1.24.1-pat (1 commit)

Commit: 3fbb9fe51e
.circleci/config.yml (new file, 63 lines)
@@ -0,0 +1,63 @@
version: 2
jobs:
  test:
    macos:
      xcode: "9.4.1"
    steps:
      - checkout
      - run:
          name: setup script
          command: ./script/setup/osx
      - run:
          name: install tox
          command: sudo pip install --upgrade tox==2.1.1 virtualenv==16.2.0
      - run:
          name: unit tests
          command: tox -e py27,py36,py37 -- tests/unit

  build-osx-binary:
    macos:
      xcode: "9.4.1"
    steps:
      - checkout
      - run:
          name: upgrade python tools
          command: sudo pip install --upgrade pip virtualenv==16.2.0
      - run:
          name: setup script
          command: DEPLOYMENT_TARGET=10.11 ./script/setup/osx
      - run:
          name: build script
          command: ./script/build/osx
      - store_artifacts:
          path: dist/docker-compose-Darwin-x86_64
          destination: docker-compose-Darwin-x86_64
      - deploy:
          name: Deploy binary to bintray
          command: |
            OS_NAME=Darwin PKG_NAME=osx ./script/circle/bintray-deploy.sh

  build-linux-binary:
    machine:
      enabled: true
    steps:
      - checkout
      - run:
          name: build Linux binary
          command: ./script/build/linux
      - store_artifacts:
          path: dist/docker-compose-Linux-x86_64
          destination: docker-compose-Linux-x86_64
      - deploy:
          name: Deploy binary to bintray
          command: |
            OS_NAME=Linux PKG_NAME=linux ./script/circle/bintray-deploy.sh


workflows:
  version: 2
  all:
    jobs:
      - test
      - build-linux-binary
      - build-osx-binary
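As an aside for local verification (not part of the diff): the `test` job above can be reproduced outside CI with the same pinned tools. A minimal sketch, assuming py27/py36/py37 interpreters are available on the machine:

```sh
# Mirror the CircleCI "test" job locally: pinned tox/virtualenv, then the unit suite.
sudo pip install --upgrade tox==2.1.1 virtualenv==16.2.0
tox -e py27,py36,py37 -- tests/unit
```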
.gitignore
@@ -11,4 +11,3 @@ docs/_site
.tox
**/__pycache__
*.pyc
Jenkinsfile
.github/CODEOWNERS (vendored, 6 lines)
@@ -1,6 +0,0 @@
# GitHub code owners
# See https://help.github.com/articles/about-codeowners/
#
# KEEP THIS FILE SORTED. Order is important. Last match takes precedence.

* @ndeloof @rumpl @ulyssessouza
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 3 lines changed)
@@ -1,9 +1,6 @@
---
name: Bug report
about: Report a bug encountered while using docker-compose
title: ''
labels: kind/bug
assignees: ''

---
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 3 lines changed)
@@ -1,9 +1,6 @@
---
name: Feature request
about: Suggest an idea to improve Compose
title: ''
labels: kind/feature
assignees: ''

---
@@ -1,9 +1,6 @@
---
name: Question about using Compose
about: This is not the appropriate channel
title: ''
labels: kind/question
assignees: ''

---
.github/stale.yml (vendored, 59 lines)
@@ -1,59 +0,0 @@
# Configuration for probot-stale - https://github.com/probot/stale

# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 180

# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 7

# Only issues or pull requests with all of these labels are checked if stale. Defaults to `[]` (disabled)
onlyLabels: []

# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
  - kind/feature

# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false

# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false

# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: true

# Label to use when marking as stale
staleLabel: stale

# Comment to post when marking as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you
  for your contributions.

# Comment to post when removing the stale label.
unmarkComment: >
  This issue has been automatically marked as not stale anymore due to the recent activity.

# Comment to post when closing a stale Issue or Pull Request.
closeComment: >
  This issue has been automatically closed because it has had no recent activity during the stale period.

# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30

# Limit to only `issues` or `pulls`
only: issues

# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
#   daysUntilStale: 30
#   markComment: >
#     This pull request has been automatically marked as stale because it has not had
#     recent activity. It will be closed if no further activity occurs. Thank you
#     for your contributions.

# issues:
#   exemptLabels:
#     - confirmed
CHANGELOG.md (249 lines changed)
@@ -1,254 +1,7 @@
Change log
==========

1.26.2 (2020-07-02)
-------------------

### Bugs

- Enforce `docker-py` 4.2.2 as minimum version when installing with pip

1.26.1 (2020-06-30)
-------------------

### Features

- Bump `docker-py` from 4.2.1 to 4.2.2

### Bugs

- Enforce `docker-py` 4.2.1 as minimum version when installing with pip
- Fix context load for non-docker endpoints

1.26.0 (2020-06-03)
-------------------

### Features

- Add `docker context` support
- Add missing test dependency `ddt` to `setup.py`
- Add `--attach-dependencies` to command `up` for attaching to dependencies
- Allow compatibility option with `COMPOSE_COMPATIBILITY` environment variable
- Bump `Pytest` to 5.3.4 and add refactor compatibility with new version
- Bump `OpenSSL` from 1.1.1f to 1.1.1g
- Bump `docker-py` from 4.2.0 to 4.2.1

### Bugs

- Properly escape values coming from env_files
- Sync compose-schemas with upstream (docker/cli)
- Remove `None` entries on exec command
- Add `python-dotenv` to delegate `.env` file processing
- Don't adjust output on terminal width when piped into another command
- Show an error message when `version` attribute is malformed
- Fix HTTPS connection when DOCKER_HOST is remote

1.25.5 (2020-02-04)
-------------------

### Features

- Bump OpenSSL from 1.1.1d to 1.1.1f
- Add 3.8 compose version

1.25.4 (2020-01-23)
-------------------

### Bugfixes

- Fix CI script to enforce the minimal MacOS version to 10.11
- Fix docker-compose exec for keys with no value

1.25.3 (2020-01-23)
-------------------

### Bugfixes

- Fix CI script to enforce the compilation with Python3
- Fix binary's sha256 in the release page

1.25.2 (2020-01-20)
-------------------

### Features

- Allow compatibility option with `COMPOSE_COMPATIBILITY` environment variable
- Bump PyInstaller from 3.5 to 3.6
- Bump pysocks from 1.6.7 to 1.7.1
- Bump websocket-client from 0.32.0 to 0.57.0
- Bump urllib3 from 1.24.2 to 1.25.7
- Bump jsonschema from 3.0.1 to 3.2.0
- Bump PyYAML from 4.2b1 to 5.3
- Bump certifi from 2017.4.17 to 2019.11.28
- Bump coverage from 4.5.4 to 5.0.3
- Bump paramiko from 2.6.0 to 2.7.1
- Bump cached-property from 1.3.0 to 1.5.1
- Bump minor Linux and MacOSX dependencies

### Bugfixes

- Validate version format on formats 2+
- Assume infinite terminal width when not running in a terminal

1.25.1 (2020-01-06)
-------------------

### Features

- Bump `pytest-cov` 2.8.1
- Bump `flake8` 3.7.9
- Bump `coverage` 4.5.4

### Bugfixes

- Decode APIError explanation to unicode before usage on start and create of a container
- Report when images cannot be pulled and must be built
- Discard label `com.docker.compose.filepaths` having None as value. Typically, when coming from stdin
- Added OSX binary as a directory to solve slow start-up time caused by MacOS Catalina binary scan
- Passed in HOME env-var in container mode (running with `script/run/run.sh`)
- Reverted behavior of "only pull images that we can't build" and replaced it with a warning naming the image that can't be pulled and must be built

1.25.0 (2019-11-18)
-------------------

### Features

- Set no-colors to true if CLICOLOR env variable is set to 0
- Add working dir, config files and env file in service labels
- Add dependencies for ARM build
- Add BuildKit support, use `DOCKER_BUILDKIT=1` and `COMPOSE_DOCKER_CLI_BUILD=1`
- Bump paramiko to 2.6.0
- Add working dir, config files and env file in service labels
- Add tag `docker-compose:latest`
- Add `docker-compose:<version>-alpine` image/tag
- Add `docker-compose:<version>-debian` image/tag
- Bumped `docker-py` 4.1.0
- Supports `requests` up to 2.22.0 version
- Drops empty tag on `build:cache_from`
- `Dockerfile` now generates `libmusl` binaries for alpine
- Only pull images that can't be built
- Attribute `scale` can now accept `0` as a value
- Added `--quiet` build flag
- Added `--no-interpolate` to `docker-compose config`
- Bump OpenSSL for macOS build (`1.1.0j` to `1.1.1c`)
- Added `--no-rm` to `build` command
- Added support for `credential_spec`
- Resolve digests without pulling image
- Upgrade `pyyaml` to `4.2b1`
- Lowered severity to `warning` if `down` tries to remove nonexisting image
- Use improved API fields for project events when possible
- Update `setup.py` for modern `pypi/setuptools` and remove `pandoc` dependencies
- Removed `Dockerfile.armhf` which is no longer needed

### Bugfixes

- Make container service color deterministic, remove red from chosen colors
- Fix non-ASCII chars error (Python 2 only)
- Format image size as decimal to align with Docker CLI
- Use Python Posix support to get tty size
- Fix same-file 'extends' optimization
- Use python POSIX support to get tty size
- Format image size as decimal to align with Docker CLI
- Fixed stdin_open
- Fixed `--remove-orphans` when used with `up --no-start`
- Fixed `docker-compose ps --all`
- Fixed `depends_on` dependency recreation behavior
- Fixed bash completion for `build --memory`
- Fixed misleading warning concerning env vars when performing an `exec` command
- Fixed failure check in parallel_execute_watch
- Fixed race condition after pulling image
- Fixed error on duplicate mount points
- Fixed merge on networks section
- Always connect Compose container to `stdin`
- Fixed the presentation of failed services on 'docker-compose start' when containers are not available

1.24.1 (2019-06-24)
-------------------

### Bugfixes

- Fixed acceptance tests

1.24.0 (2019-03-28)
1.24.0 (2019-03-22)
-------------------

### Features
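Several changelog entries above are driven purely by environment variables. A minimal shell sketch of the switches they name; the `web` service is illustrative, not taken from this diff:

```sh
# 1.25.0: BuildKit builds are opt-in via these two variables.
DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker-compose build web

# 1.25.2 / 1.26.0: compatibility mode without passing --compatibility.
COMPOSE_COMPATIBILITY=true docker-compose up -d

# 1.25.0: no-colors is forced when CLICOLOR is set to 0.
CLICOLOR=0 docker-compose logs
```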
Dockerfile (90 lines changed)
@@ -1,74 +1,36 @@
ARG DOCKER_VERSION=19.03.8
ARG PYTHON_VERSION=3.7.7
ARG BUILD_ALPINE_VERSION=3.11
ARG BUILD_DEBIAN_VERSION=slim-stretch
ARG RUNTIME_ALPINE_VERSION=3.11.5
ARG RUNTIME_DEBIAN_VERSION=stretch-20200414-slim
FROM docker:18.06.1 as docker
FROM python:3.6

ARG BUILD_PLATFORM=alpine
RUN set -ex; \
    apt-get update -qq; \
    apt-get install -y \
        locales \
        python-dev \
        git

FROM docker:${DOCKER_VERSION} AS docker-cli
COPY --from=docker /usr/local/bin/docker /usr/local/bin/docker

FROM python:${PYTHON_VERSION}-alpine${BUILD_ALPINE_VERSION} AS build-alpine
RUN apk add --no-cache \
    bash \
    build-base \
    ca-certificates \
    curl \
    gcc \
    git \
    libc-dev \
    libffi-dev \
    libgcc \
    make \
    musl-dev \
    openssl \
    openssl-dev \
    python2 \
    python2-dev \
    zlib-dev
ENV BUILD_BOOTLOADER=1
# Python3 requires a valid locale
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
ENV LANG en_US.UTF-8

FROM python:${PYTHON_VERSION}-${BUILD_DEBIAN_VERSION} AS build-debian
RUN apt-get update && apt-get install --no-install-recommends -y \
    curl \
    gcc \
    git \
    libc-dev \
    libffi-dev \
    libgcc-6-dev \
    libssl-dev \
    make \
    openssl \
    python2.7-dev \
    zlib1g-dev

FROM build-${BUILD_PLATFORM} AS build
COPY docker-compose-entrypoint.sh /usr/local/bin/
ENTRYPOINT ["sh", "/usr/local/bin/docker-compose-entrypoint.sh"]
COPY --from=docker-cli /usr/local/bin/docker /usr/local/bin/docker
RUN useradd -d /home/user -m -s /bin/bash user
WORKDIR /code/

# FIXME(chris-crone): virtualenv 16.3.0 breaks build, force 16.2.0 until fixed
RUN pip install virtualenv==16.2.0
RUN pip install tox==2.9.1
RUN pip install tox==2.1.1

COPY requirements.txt .
COPY requirements-dev.txt .
COPY .pre-commit-config.yaml .
COPY tox.ini .
COPY setup.py .
COPY README.md .
COPY compose compose/
ADD requirements.txt /code/
ADD requirements-dev.txt /code/
ADD .pre-commit-config.yaml /code/
ADD setup.py /code/
ADD tox.ini /code/
ADD compose /code/compose/
ADD README.md /code/
RUN tox --notest
COPY . .
ARG GIT_COMMIT=unknown
ENV DOCKER_COMPOSE_GITSHA=$GIT_COMMIT
RUN script/build/linux-entrypoint

FROM alpine:${RUNTIME_ALPINE_VERSION} AS runtime-alpine
FROM debian:${RUNTIME_DEBIAN_VERSION} AS runtime-debian
FROM runtime-${BUILD_PLATFORM} AS runtime
COPY docker-compose-entrypoint.sh /usr/local/bin/
ENTRYPOINT ["sh", "/usr/local/bin/docker-compose-entrypoint.sh"]
COPY --from=docker-cli /usr/local/bin/docker /usr/local/bin/docker
COPY --from=build /usr/local/bin/docker-compose /usr/local/bin/docker-compose
ADD . /code/
RUN chown -R user /code/

ENTRYPOINT ["/code/.tox/py36/bin/docker-compose"]
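The ARG/FROM pairs above define a multi-stage build selected by `BUILD_PLATFORM`. A sketch of invoking it the way the Jenkinsfile in this diff does; the tag name is illustrative:

```sh
# Build only the "build" stage on the alpine toolchain, stamping the git sha in.
docker build -t compose-build:alpine \
    --target build \
    --build-arg BUILD_PLATFORM=alpine \
    --build-arg GIT_COMMIT="$(git rev-parse --short HEAD)" \
    .
```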
Dockerfile.armhf (new file, 39 lines)
@@ -0,0 +1,39 @@
FROM python:3.6

RUN set -ex; \
    apt-get update -qq; \
    apt-get install -y \
        locales \
        curl \
        python-dev \
        git

RUN curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/armhf/docker-17.12.0-ce.tgz" && \
    SHA256=f8de6378dad825b9fd5c3c2f949e791d22f918623c27a72c84fd6975a0e5d0a2; \
    echo "${SHA256} dockerbins.tgz" | sha256sum -c - && \
    tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
    mv docker /usr/local/bin/docker && \
    chmod +x /usr/local/bin/docker && \
    rm dockerbins.tgz

# Python3 requires a valid locale
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
ENV LANG en_US.UTF-8

RUN useradd -d /home/user -m -s /bin/bash user
WORKDIR /code/

RUN pip install tox==2.1.1

ADD requirements.txt /code/
ADD requirements-dev.txt /code/
ADD .pre-commit-config.yaml /code/
ADD setup.py /code/
ADD tox.ini /code/
ADD compose /code/compose/
RUN tox --notest

ADD . /code/
RUN chown -R user /code/

ENTRYPOINT ["/code/.tox/py36/bin/docker-compose"]
Dockerfile.run (new file, 19 lines)
@@ -0,0 +1,19 @@
FROM docker:18.06.1 as docker
FROM alpine:3.8

ENV GLIBC 2.28-r0

RUN apk update && apk add --no-cache openssl ca-certificates curl libgcc && \
    curl -fsSL -o /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub && \
    curl -fsSL -o glibc-$GLIBC.apk https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIBC/glibc-$GLIBC.apk && \
    apk add --no-cache glibc-$GLIBC.apk && \
    ln -s /lib/libz.so.1 /usr/glibc-compat/lib/ && \
    ln -s /lib/libc.musl-x86_64.so.1 /usr/glibc-compat/lib && \
    ln -s /usr/lib/libgcc_s.so.1 /usr/glibc-compat/lib && \
    rm /etc/apk/keys/sgerrand.rsa.pub glibc-$GLIBC.apk && \
    apk del curl

COPY --from=docker /usr/local/bin/docker /usr/local/bin/docker
COPY dist/docker-compose-Linux-x86_64 /usr/local/bin/docker-compose

ENTRYPOINT ["docker-compose"]
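Dockerfile.run only wraps a pre-built Linux binary (the `COPY dist/...` line above), so the artifact must exist before the image build. A sketch; the tag is illustrative:

```sh
# Produce dist/docker-compose-Linux-x86_64 first, then package it.
./script/build/linux
docker build -f Dockerfile.run -t docker-compose:run .
```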
Dockerfile.s390x (new file, 15 lines)
@@ -0,0 +1,15 @@
FROM s390x/alpine:3.6

ARG COMPOSE_VERSION=1.16.1

RUN apk add --update --no-cache \
    python \
    py-pip \
    && pip install --no-cache-dir docker-compose==$COMPOSE_VERSION \
    && rm -rf /var/cache/apk/*

WORKDIR /data
VOLUME /data

ENTRYPOINT ["docker-compose"]
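Unlike the other Dockerfiles, the s390x image installs a released docker-compose from PyPI, so the version is just a build argument. A sketch overriding the 1.16.1 default; the tag is illustrative:

```sh
# Pin a different released version at build time.
docker build -f Dockerfile.s390x --build-arg COMPOSE_VERSION=1.24.0 -t docker-compose:s390x .
```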
Jenkinsfile (vendored, 176 lines changed)
@@ -1,112 +1,84 @@
#!groovy

def dockerVersions = ['19.03.8']
def baseImages = ['alpine', 'debian']
def pythonVersions = ['py37']
def image

pipeline {
    agent none

    options {
        skipDefaultCheckout(true)
        buildDiscarder(logRotator(daysToKeepStr: '30'))
        timeout(time: 2, unit: 'HOURS')
        timestamps()
    }

    stages {
        stage('Build test images') {
            // TODO use declarative 1.5.0 `matrix` once available on CI
            parallel {
                stage('alpine') {
                    agent {
                        label 'ubuntu && amd64 && !zfs'
                    }
                    steps {
                        buildImage('alpine')
                    }
                }
                stage('debian') {
                    agent {
                        label 'ubuntu && amd64 && !zfs'
                    }
                    steps {
                        buildImage('debian')
                    }
                }
            }
        }
        stage('Test') {
            steps {
                // TODO use declarative 1.5.0 `matrix` once available on CI
                script {
                    def testMatrix = [:]
                    baseImages.each { baseImage ->
                        dockerVersions.each { dockerVersion ->
                            pythonVersions.each { pythonVersion ->
                                testMatrix["${baseImage}_${dockerVersion}_${pythonVersion}"] = runTests(dockerVersion, pythonVersion, baseImage)
                            }
                        }
                    }

                    parallel testMatrix
                }
            }
        }
def buildImage = { ->
    wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
        stage("build image") {
            checkout(scm)
            def imageName = "dockerbuildbot/compose:${gitCommit()}"
            image = docker.image(imageName)
            try {
                image.pull()
            } catch (Exception exc) {
                image = docker.build(imageName, ".")
                image.push()
            }
        }
    }
}


def buildImage(baseImage) {
    def scmvar = checkout(scm)
    def imageName = "dockerbuildbot/compose:${baseImage}-${scmvar.GIT_COMMIT}"
    image = docker.image(imageName)

    withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') {
        try {
            image.pull()
        } catch (Exception exc) {
            ansiColor('xterm') {
                sh """docker build -t ${imageName} \\
                    --target build \\
                    --build-arg BUILD_PLATFORM="${baseImage}" \\
                    --build-arg GIT_COMMIT="${scmvar.GIT_COMMIT}" \\
                    .\\
                """
                sh "docker push ${imageName}"
            }
            echo "${imageName}"
            return imageName
        }
    }
}
def get_versions = { int number ->
    def docker_versions
    wrappedNode(label: "ubuntu && !zfs") {
        def result = sh(script: """docker run --rm \\
                --entrypoint=/code/.tox/py27/bin/python \\
                ${image.id} \\
                /code/script/test/versions.py -n ${number} docker/docker-ce recent
            """, returnStdout: true
        )
        docker_versions = result.split()
    }
    return docker_versions
}

def runTests(dockerVersion, pythonVersion, baseImage) {
    return {
        stage("python=${pythonVersion} docker=${dockerVersion} ${baseImage}") {
            node("ubuntu && amd64 && !zfs") {
                def scmvar = checkout(scm)
                def imageName = "dockerbuildbot/compose:${baseImage}-${scmvar.GIT_COMMIT}"
                def storageDriver = sh(script: "docker info -f \'{{.Driver}}\'", returnStdout: true).trim()
                echo "Using local system's storage driver: ${storageDriver}"
                withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') {
                    sh """docker run \\
                        -t \\
                        --rm \\
                        --privileged \\
                        --volume="\$(pwd)/.git:/code/.git" \\
                        --volume="/var/run/docker.sock:/var/run/docker.sock" \\
                        -e "TAG=${imageName}" \\
                        -e "STORAGE_DRIVER=${storageDriver}" \\
                        -e "DOCKER_VERSIONS=${dockerVersion}" \\
                        -e "BUILD_NUMBER=${env.BUILD_NUMBER}" \\
                        -e "PY_TEST_VERSIONS=${pythonVersion}" \\
                        --entrypoint="script/test/ci" \\
                        ${imageName} \\
                        --verbose
                    """
                }
            }
        }
def runTests = { Map settings ->
    def dockerVersions = settings.get("dockerVersions", null)
    def pythonVersions = settings.get("pythonVersions", null)

    if (!pythonVersions) {
        throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py27,py36')`")
    }
    if (!dockerVersions) {
        throw new Exception("Need Docker versions to test. e.g.: `runTests(dockerVersions: 'all')`")
    }

    { ->
        wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
            stage("test python=${pythonVersions} / docker=${dockerVersions}") {
                checkout(scm)
                def storageDriver = sh(script: 'docker info | awk -F \': \' \'$1 == "Storage Driver" { print $2; exit }\'', returnStdout: true).trim()
                echo "Using local system's storage driver: ${storageDriver}"
                sh """docker run \\
                    -t \\
                    --rm \\
                    --privileged \\
                    --volume="\$(pwd)/.git:/code/.git" \\
                    --volume="/var/run/docker.sock:/var/run/docker.sock" \\
                    -e "TAG=${image.id}" \\
                    -e "STORAGE_DRIVER=${storageDriver}" \\
                    -e "DOCKER_VERSIONS=${dockerVersions}" \\
                    -e "BUILD_NUMBER=\$BUILD_TAG" \\
                    -e "PY_TEST_VERSIONS=${pythonVersions}" \\
                    --entrypoint="script/test/ci" \\
                    ${image.id} \\
                    --verbose
                """
            }
        }
    }
}

buildImage()

def testMatrix = [failFast: true]
def docker_versions = get_versions(2)

for (int i = 0; i < docker_versions.length; i++) {
    def dockerVersion = docker_versions[i]
    testMatrix["${dockerVersion}_py27"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py27"])
    testMatrix["${dockerVersion}_py36"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py36"])
    testMatrix["${dockerVersion}_py37"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py37"])
}

parallel(testMatrix)
MAINTAINERS (29 lines changed)
@@ -11,9 +11,9 @@
[Org]
[Org."Core maintainers"]
people = [
    "ndeloof",
    "rumpl",
    "ulyssessouza",
    "mefyl",
    "mnottale",
    "shin-",
]
[Org.Alumni]
people = [
@@ -34,10 +34,6 @@
# including multi-file support, variable interpolation, secrets
# emulation and many more
    "dnephin",

    "shin-",
    "mefyl",
    "mnottale",
]

[people]
@@ -78,22 +74,7 @@
Email = "mazz@houseofmnowster.com"
GitHub = "mnowster"

[people.ndeloof]
Name = "Nicolas De Loof"
Email = "nicolas.deloof@gmail.com"
GitHub = "ndeloof"

[people.rumpl]
Name = "Djordje Lukic"
Email = "djordje.lukic@docker.com"
GitHub = "rumpl"

[people.shin-]
[People.shin-]
Name = "Joffrey F"
Email = "f.joffrey@gmail.com"
Email = "joffrey@docker.com"
GitHub = "shin-"

[people.ulyssessouza]
Name = "Ulysses Domiciano Souza"
Email = "ulysses.souza@docker.com"
GitHub = "ulyssessouza"
README.md
@@ -2,17 +2,15 @@ Docker Compose
==============


## :exclamation: The docker-compose project announces that, as Python 2 has reached its EOL, versions 1.26.x will be the last to support it. For more information, please refer to this [issue](https://github.com/docker/compose/issues/6890).

Compose is a tool for defining and running multi-container Docker applications.
With Compose, you use a Compose file to configure your application's services.
Then, using a single command, you create and start all the services
from your configuration. To learn more about all the features of Compose
see [the list of features](https://github.com/docker/docker.github.io/blob/master/compose/index.md#features).
see [the list of features](https://github.com/docker/docker.github.io/blob/master/compose/overview.md#features).

Compose is great for development, testing, and staging environments, as well as
CI workflows. You can learn more about each case in
[Common Use Cases](https://github.com/docker/docker.github.io/blob/master/compose/index.md#common-use-cases).
[Common Use Cases](https://github.com/docker/docker.github.io/blob/master/compose/overview.md#common-use-cases).

Using Compose is basically a three-step process (see the sketch after this section).

@@ -56,7 +54,7 @@ Installation and documentation
Contributing
------------

[](https://ci-next.docker.com/public/job/compose/job/master/)
[](https://jenkins.dockerproject.org/job/docker/job/compose/job/master/)

Want to help build Compose? Check out our [contributing documentation](https://github.com/docker/compose/blob/master/CONTRIBUTING.md).
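The three-step example elided from the README hunk follows this shape. A minimal sketch; the nginx service is illustrative, not taken from the README:

```sh
# Steps 1-2: describe the app's services in a Compose file.
cat > docker-compose.yml <<'EOF'
version: '3'
services:
  web:
    image: nginx:alpine
    ports:
      - "8080:80"
EOF

# Step 3: create and start everything with a single command.
docker-compose up -d
```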
@@ -1,304 +0,0 @@
#!groovy

def dockerVersions = ['19.03.8', '18.09.9']
def baseImages = ['alpine', 'debian']
def pythonVersions = ['py37']

pipeline {
    agent none

    options {
        skipDefaultCheckout(true)
        buildDiscarder(logRotator(daysToKeepStr: '30'))
        timeout(time: 2, unit: 'HOURS')
        timestamps()
    }

    stages {
        stage('Build test images') {
            // TODO use declarative 1.5.0 `matrix` once available on CI
            parallel {
                stage('alpine') {
                    agent {
                        label 'linux && docker && ubuntu-2004'
                    }
                    steps {
                        buildImage('alpine')
                    }
                }
                stage('debian') {
                    agent {
                        label 'linux && docker && ubuntu-2004'
                    }
                    steps {
                        buildImage('debian')
                    }
                }
            }
        }
        stage('Test') {
            steps {
                // TODO use declarative 1.5.0 `matrix` once available on CI
                script {
                    def testMatrix = [:]
                    baseImages.each { baseImage ->
                        dockerVersions.each { dockerVersion ->
                            pythonVersions.each { pythonVersion ->
                                testMatrix["${baseImage}_${dockerVersion}_${pythonVersion}"] = runTests(dockerVersion, pythonVersion, baseImage)
                            }
                        }
                    }

                    parallel testMatrix
                }
            }
        }
        stage('Generate Changelog') {
            agent {
                label 'linux && docker && ubuntu-2004'
            }
            steps {
                checkout scm
                withCredentials([string(credentialsId: 'github-compose-release-test-token', variable: 'GITHUB_TOKEN')]) {
                    sh "./script/release/generate_changelog.sh"
                }
                archiveArtifacts artifacts: 'CHANGELOG.md'
                stash( name: "changelog", includes: 'CHANGELOG.md' )
            }
        }
        stage('Package') {
            parallel {
                stage('macosx binary') {
                    agent {
                        label 'mac-python'
                    }
                    environment {
                        DEPLOYMENT_TARGET="10.11"
                    }
                    steps {
                        checkout scm
                        sh './script/setup/osx'
                        sh 'tox -e py37 -- tests/unit'
                        sh './script/build/osx'
                        dir ('dist') {
                            checksum('docker-compose-Darwin-x86_64')
                            checksum('docker-compose-Darwin-x86_64.tgz')
                        }
                        archiveArtifacts artifacts: 'dist/*', fingerprint: true
                        dir("dist") {
                            stash name: "bin-darwin"
                        }
                    }
                }
                stage('linux binary') {
                    agent {
                        label 'linux && docker && ubuntu-2004'
                    }
                    steps {
                        checkout scm
                        sh ' ./script/build/linux'
                        dir ('dist') {
                            checksum('docker-compose-Linux-x86_64')
                        }
                        archiveArtifacts artifacts: 'dist/*', fingerprint: true
                        dir("dist") {
                            stash name: "bin-linux"
                        }
                    }
                }
                stage('windows binary') {
                    agent {
                        label 'windows-python'
                    }
                    environment {
                        PATH = "$PATH;C:\\Python37;C:\\Python37\\Scripts"
                    }
                    steps {
                        checkout scm
                        bat 'tox.exe -e py37 -- tests/unit'
                        powershell '.\\script\\build\\windows.ps1'
                        dir ('dist') {
                            checksum('docker-compose-Windows-x86_64.exe')
                        }
                        archiveArtifacts artifacts: 'dist/*', fingerprint: true
                        dir("dist") {
                            stash name: "bin-win"
                        }
                    }
                }
                stage('alpine image') {
                    agent {
                        label 'linux && docker && ubuntu-2004'
                    }
                    steps {
                        buildRuntimeImage('alpine')
                    }
                }
                stage('debian image') {
                    agent {
                        label 'linux && docker && ubuntu-2004'
                    }
                    steps {
                        buildRuntimeImage('debian')
                    }
                }
            }
        }
        stage('Release') {
            when {
                buildingTag()
            }
            parallel {
                stage('Pushing images') {
                    agent {
                        label 'linux && docker && ubuntu-2004'
                    }
                    steps {
                        pushRuntimeImage('alpine')
                        pushRuntimeImage('debian')
                    }
                }
                stage('Creating Github Release') {
                    agent {
                        label 'linux && docker && ubuntu-2004'
                    }
                    environment {
                        GITHUB_TOKEN = credentials('github-release-token')
                    }
                    steps {
                        checkout scm
                        sh 'mkdir -p dist'
                        dir("dist") {
                            unstash "bin-darwin"
                            unstash "bin-linux"
                            unstash "bin-win"
                            unstash "changelog"
                            sh("""
                                curl -SfL https://github.com/github/hub/releases/download/v2.13.0/hub-linux-amd64-2.13.0.tgz | tar xzv --wildcards 'hub-*/bin/hub' --strip=2
                                ./hub release create --draft --prerelease=${env.TAG_NAME !=~ /v[0-9\.]+/} \\
                                    -a docker-compose-Darwin-x86_64 \\
                                    -a docker-compose-Darwin-x86_64.sha256 \\
                                    -a docker-compose-Darwin-x86_64.tgz \\
                                    -a docker-compose-Darwin-x86_64.tgz.sha256 \\
                                    -a docker-compose-Linux-x86_64 \\
                                    -a docker-compose-Linux-x86_64.sha256 \\
                                    -a docker-compose-Windows-x86_64.exe \\
                                    -a docker-compose-Windows-x86_64.exe.sha256 \\
                                    -a ../script/run/run.sh \\
                                    -F CHANGELOG.md \${TAG_NAME}
                            """)
                        }
                    }
                }
                stage('Publishing Python packages') {
                    agent {
                        label 'linux && docker && ubuntu-2004'
                    }
                    environment {
                        PYPIRC = credentials('pypirc-docker-dsg-cibot')
                    }
                    steps {
                        checkout scm
                        sh """
                            rm -rf build/ dist/
                            pip3 install wheel
                            python3 setup.py sdist bdist_wheel
                            pip3 install twine
                            ~/.local/bin/twine upload --config-file ${PYPIRC} ./dist/docker-compose-*.tar.gz ./dist/docker_compose-*-py2.py3-none-any.whl
                        """
                    }
                }
            }
        }
    }
}


def buildImage(baseImage) {
    def scmvar = checkout(scm)
    def imageName = "dockerbuildbot/compose:${baseImage}-${scmvar.GIT_COMMIT}"
    image = docker.image(imageName)

    withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') {
        try {
            image.pull()
        } catch (Exception exc) {
            ansiColor('xterm') {
                sh """docker build -t ${imageName} \\
                    --target build \\
                    --build-arg BUILD_PLATFORM="${baseImage}" \\
                    --build-arg GIT_COMMIT="${scmvar.GIT_COMMIT}" \\
                    .\\
                """
                sh "docker push ${imageName}"
            }
            echo "${imageName}"
            return imageName
        }
    }
}

def runTests(dockerVersion, pythonVersion, baseImage) {
    return {
        stage("python=${pythonVersion} docker=${dockerVersion} ${baseImage}") {
            node("linux") {
                def scmvar = checkout(scm)
                def imageName = "dockerbuildbot/compose:${baseImage}-${scmvar.GIT_COMMIT}"
                def storageDriver = sh(script: "docker info -f \'{{.Driver}}\'", returnStdout: true).trim()
                echo "Using local system's storage driver: ${storageDriver}"
                withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') {
                    sh """docker run \\
                        -t \\
                        --rm \\
                        --privileged \\
                        --volume="\$(pwd)/.git:/code/.git" \\
                        --volume="/var/run/docker.sock:/var/run/docker.sock" \\
                        -e "TAG=${imageName}" \\
                        -e "STORAGE_DRIVER=${storageDriver}" \\
                        -e "DOCKER_VERSIONS=${dockerVersion}" \\
                        -e "BUILD_NUMBER=${env.BUILD_NUMBER}" \\
                        -e "PY_TEST_VERSIONS=${pythonVersion}" \\
                        --entrypoint="script/test/ci" \\
                        ${imageName} \\
                        --verbose
                    """
                }
            }
        }
    }
}

def buildRuntimeImage(baseImage) {
    scmvar = checkout scm
    def imageName = "docker/compose:${baseImage}-${env.BRANCH_NAME}"
    ansiColor('xterm') {
        sh """docker build -t ${imageName} \\
            --build-arg BUILD_PLATFORM="${baseImage}" \\
            --build-arg GIT_COMMIT="${scmvar.GIT_COMMIT.take(7)}" \\
            .
        """
    }
    sh "mkdir -p dist"
    sh "docker save ${imageName} -o dist/docker-compose-${baseImage}.tar"
    stash name: "compose-${baseImage}", includes: "dist/docker-compose-${baseImage}.tar"
}

def pushRuntimeImage(baseImage) {
    unstash "compose-${baseImage}"
    sh "docker load -i dist/docker-compose-${baseImage}.tar"
    withDockerRegistry(credentialsId: 'dockerhub-dockerdsgcibot') {
        sh "docker push docker/compose:${baseImage}-${env.TAG_NAME}"
        if (baseImage == "alpine" && env.TAG_NAME != null) {
            sh "docker tag docker/compose:alpine-${env.TAG_NAME} docker/compose:${env.TAG_NAME}"
            sh "docker push docker/compose:${env.TAG_NAME}"
        }
    }
}

def checksum(filepath) {
    if (isUnix()) {
        sh "openssl sha256 -r -out ${filepath}.sha256 ${filepath}"
    } else {
        powershell "(Get-FileHash -Path ${filepath} -Algorithm SHA256 | % hash).ToLower() + ' *${filepath}' | Out-File -encoding ascii ${filepath}.sha256"
    }
}
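The `checksum` helper above writes `<hash> *<file>` lines (`openssl sha256 -r`), which is the format `sha256sum -c` verifies. A sketch for checking a downloaded release artifact against its published `.sha256` companion:

```sh
# Run next to the downloaded binary and its .sha256 file.
sha256sum -c docker-compose-Linux-x86_64.sha256
```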
appveyor.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
version: '{branch}-{build}'

install:
  - "SET PATH=C:\\Python36-x64;C:\\Python36-x64\\Scripts;%PATH%"
  - "python --version"
  - "pip install tox==2.9.1 virtualenv==15.1.0"

# Build the binary after tests
build: false

test_script:
  - "tox -e py27,py36,py37 -- tests/unit"
  - ps: ".\\script\\build\\windows.ps1"

artifacts:
  - path: .\dist\docker-compose-Windows-x86_64.exe
    name: "Compose Windows binary"

deploy:
  - provider: Environment
    name: master-builds
    on:
      branch: master
compose/__init__.py
@@ -1,4 +1,4 @@
from __future__ import absolute_import
from __future__ import unicode_literals

__version__ = '1.26.2'
__version__ = '1.24.0'
compose/bundle.py (new file, 258 lines)
@@ -0,0 +1,258 @@
from __future__ import absolute_import
from __future__ import unicode_literals

import json
import logging

import six
from docker.utils import split_command
from docker.utils.ports import split_port

from .cli.errors import UserError
from .config.serialize import denormalize_config
from .network import get_network_defs_for_service
from .service import format_environment
from .service import NoSuchImageError
from .service import parse_repository_tag


log = logging.getLogger(__name__)


SERVICE_KEYS = {
    'working_dir': 'WorkingDir',
    'user': 'User',
    'labels': 'Labels',
}

IGNORED_KEYS = {'build'}

SUPPORTED_KEYS = {
    'image',
    'ports',
    'expose',
    'networks',
    'command',
    'environment',
    'entrypoint',
} | set(SERVICE_KEYS)

VERSION = '0.1'


class NeedsPush(Exception):
    def __init__(self, image_name):
        self.image_name = image_name


class NeedsPull(Exception):
    def __init__(self, image_name, service_name):
        self.image_name = image_name
        self.service_name = service_name


class MissingDigests(Exception):
    def __init__(self, needs_push, needs_pull):
        self.needs_push = needs_push
        self.needs_pull = needs_pull


def serialize_bundle(config, image_digests):
    return json.dumps(to_bundle(config, image_digests), indent=2, sort_keys=True)


def get_image_digests(project, allow_push=False):
    digests = {}
    needs_push = set()
    needs_pull = set()

    for service in project.services:
        try:
            digests[service.name] = get_image_digest(
                service,
                allow_push=allow_push,
            )
        except NeedsPush as e:
            needs_push.add(e.image_name)
        except NeedsPull as e:
            needs_pull.add(e.service_name)

    if needs_push or needs_pull:
        raise MissingDigests(needs_push, needs_pull)

    return digests


def get_image_digest(service, allow_push=False):
    if 'image' not in service.options:
        raise UserError(
            "Service '{s.name}' doesn't define an image tag. An image name is "
            "required to generate a proper image digest for the bundle. Specify "
            "an image repo and tag with the 'image' option.".format(s=service))

    _, _, separator = parse_repository_tag(service.options['image'])
    # Compose file already uses a digest, no lookup required
    if separator == '@':
        return service.options['image']

    try:
        image = service.image()
    except NoSuchImageError:
        action = 'build' if 'build' in service.options else 'pull'
        raise UserError(
            "Image not found for service '{service}'. "
            "You might need to run `docker-compose {action} {service}`."
            .format(service=service.name, action=action))

    if image['RepoDigests']:
        # TODO: pick a digest based on the image tag if there are multiple
        # digests
        return image['RepoDigests'][0]

    if 'build' not in service.options:
        raise NeedsPull(service.image_name, service.name)

    if not allow_push:
        raise NeedsPush(service.image_name)

    return push_image(service)


def push_image(service):
    try:
        digest = service.push()
    except Exception:
        log.error(
            "Failed to push image for service '{s.name}'. Please use an "
            "image tag that can be pushed to a Docker "
            "registry.".format(s=service))
        raise

    if not digest:
        raise ValueError("Failed to get digest for %s" % service.name)

    repo, _, _ = parse_repository_tag(service.options['image'])
    identifier = '{repo}@{digest}'.format(repo=repo, digest=digest)

    # only do this if RepoDigests isn't already populated
    image = service.image()
    if not image['RepoDigests']:
        # Pull by digest so that image['RepoDigests'] is populated for next time
        # and we don't have to pull/push again
        service.client.pull(identifier)
        log.info("Stored digest for {}".format(service.image_name))

    return identifier


def to_bundle(config, image_digests):
    if config.networks:
        log.warn("Unsupported top level key 'networks' - ignoring")

    if config.volumes:
        log.warn("Unsupported top level key 'volumes' - ignoring")

    config = denormalize_config(config)

    return {
        'Version': VERSION,
        'Services': {
            name: convert_service_to_bundle(
                name,
                service_dict,
                image_digests[name],
            )
            for name, service_dict in config['services'].items()
        },
    }


def convert_service_to_bundle(name, service_dict, image_digest):
    container_config = {'Image': image_digest}

    for key, value in service_dict.items():
        if key in IGNORED_KEYS:
            continue

        if key not in SUPPORTED_KEYS:
            log.warn("Unsupported key '{}' in services.{} - ignoring".format(key, name))
            continue

        if key == 'environment':
            container_config['Env'] = format_environment({
                envkey: envvalue for envkey, envvalue in value.items()
                if envvalue
            })
            continue

        if key in SERVICE_KEYS:
            container_config[SERVICE_KEYS[key]] = value
            continue

    set_command_and_args(
        container_config,
        service_dict.get('entrypoint', []),
        service_dict.get('command', []))
    container_config['Networks'] = make_service_networks(name, service_dict)

    ports = make_port_specs(service_dict)
    if ports:
        container_config['Ports'] = ports

    return container_config


# See https://github.com/docker/swarmkit/blob/agent/exec/container/container.go#L95
def set_command_and_args(config, entrypoint, command):
    if isinstance(entrypoint, six.string_types):
        entrypoint = split_command(entrypoint)
    if isinstance(command, six.string_types):
        command = split_command(command)

    if entrypoint:
        config['Command'] = entrypoint + command
        return

    if command:
        config['Args'] = command


def make_service_networks(name, service_dict):
    networks = []

    for network_name, network_def in get_network_defs_for_service(service_dict).items():
        for key in network_def.keys():
            log.warn(
                "Unsupported key '{}' in services.{}.networks.{} - ignoring"
                .format(key, name, network_name))

        networks.append(network_name)

    return networks


def make_port_specs(service_dict):
    ports = []

    internal_ports = [
        internal_port
        for port_def in service_dict.get('ports', [])
        for internal_port in split_port(port_def)[0]
    ]

    internal_ports += service_dict.get('expose', [])

    for internal_port in internal_ports:
        spec = make_port_spec(internal_port)
        if spec not in ports:
            ports.append(spec)

    return ports


def make_port_spec(value):
    components = six.text_type(value).partition('/')
    return {
        'Protocol': components[2] or 'tcp',
        'Port': int(components[0]),
    }
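compose/bundle.py backs the `docker-compose bundle` command on the 1.24 side of this diff, serializing each service to a digest-pinned entry in a Distributed Application Bundle. A usage sketch; the output name is illustrative and the flags are as documented for that release line:

```sh
# Resolve every service image to a digest (pushing locally built images) and
# write the bundle JSON; unresolved digests surface as the MissingDigests error above.
docker-compose bundle --push-images -o myapp.dab
```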
compose/cli/colors.py
@@ -41,9 +41,9 @@ for (name, code) in get_pairs():


def rainbow():
    cs = ['cyan', 'yellow', 'green', 'magenta', 'blue',
    cs = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue',
          'intense_cyan', 'intense_yellow', 'intense_green',
          'intense_magenta', 'intense_blue']
          'intense_magenta', 'intense_red', 'intense_blue']

    for c in cs:
        yield globals()[c]
compose/cli/command.py
@@ -8,72 +8,38 @@ import re
import six

from . import errors
from . import verbose_proxy
from .. import config
from .. import parallel
from ..config.environment import Environment
from ..const import API_VERSIONS
from ..const import LABEL_CONFIG_FILES
from ..const import LABEL_ENVIRONMENT_FILE
from ..const import LABEL_WORKING_DIR
from ..project import Project
from .docker_client import get_client
from .docker_client import load_context
from .docker_client import make_context
from .errors import UserError
from .docker_client import docker_client
from .docker_client import get_tls_version
from .docker_client import tls_config_from_options
from .utils import get_version_info

log = logging.getLogger(__name__)

SILENT_COMMANDS = {
    'events',
    'exec',
    'kill',
    'logs',
    'pause',
    'ps',
    'restart',
    'rm',
    'start',
    'stop',
    'top',
    'unpause',
}


def project_from_options(project_dir, options, additional_options=None):
    additional_options = additional_options or {}
def project_from_options(project_dir, options):
    override_dir = options.get('--project-directory')
    environment_file = options.get('--env-file')
    environment = Environment.from_env_file(override_dir or project_dir, environment_file)
    environment.silent = options.get('COMMAND', None) in SILENT_COMMANDS
    environment = Environment.from_env_file(override_dir or project_dir)
    set_parallel_limit(environment)

    # get the context for the run
    context = None
    context_name = options.get('--context', None)
    if context_name:
        context = load_context(context_name)
        if not context:
            raise UserError("Context '{}' not found".format(context_name))

    host = options.get('--host', None)
    host = options.get('--host')
    if host is not None:
        if context:
            raise UserError(
                "-H, --host and -c, --context are mutually exclusive. Only one should be set.")
        host = host.lstrip('=')
        context = make_context(host, options, environment)

    return get_project(
        project_dir,
        get_config_path_from_options(project_dir, options, environment),
        project_name=options.get('--project-name'),
        verbose=options.get('--verbose'),
        context=context,
        host=host,
        tls_config=tls_config_from_options(options, environment),
        environment=environment,
        override_dir=override_dir,
        compatibility=compatibility_from_options(project_dir, options, environment),
        interpolate=(not additional_options.get('--no-interpolate')),
        environment_file=environment_file
        compatibility=options.get('--compatibility'),
    )


@@ -93,18 +59,15 @@ def set_parallel_limit(environment):
    parallel.GlobalLimit.set_global_limit(parallel_limit)


def get_config_from_options(base_dir, options, additional_options=None):
    additional_options = additional_options or {}
def get_config_from_options(base_dir, options):
    override_dir = options.get('--project-directory')
    environment_file = options.get('--env-file')
    environment = Environment.from_env_file(override_dir or base_dir, environment_file)
    environment = Environment.from_env_file(override_dir or base_dir)
    config_path = get_config_path_from_options(
        base_dir, options, environment
    )
    return config.load(
        config.find(base_dir, config_path, environment, override_dir),
        compatibility_from_options(config_path, options, environment),
        not additional_options.get('--no-interpolate')
        options.get('--compatibility')
    )


@@ -123,61 +86,49 @@ def get_config_path_from_options(base_dir, options, environment):
    return None


def get_client(environment, verbose=False, version=None, tls_config=None, host=None,
               tls_version=None):

    client = docker_client(
        version=version, tls_config=tls_config, host=host,
        environment=environment, tls_version=get_tls_version(environment)
    )
    if verbose:
        version_info = six.iteritems(client.version())
        log.info(get_version_info('full'))
        log.info("Docker base_url: %s", client.base_url)
        log.info("Docker version: %s",
                 ", ".join("%s=%s" % item for item in version_info))
        return verbose_proxy.VerboseProxy('docker', client)
    return client


def get_project(project_dir, config_path=None, project_name=None, verbose=False,
                context=None, environment=None, override_dir=None,
                compatibility=False, interpolate=True, environment_file=None):
                host=None, tls_config=None, environment=None, override_dir=None,
                compatibility=False):
    if not environment:
        environment = Environment.from_env_file(project_dir)
    config_details = config.find(project_dir, config_path, environment, override_dir)
    project_name = get_project_name(
        config_details.working_dir, project_name, environment
    )
    config_data = config.load(config_details, compatibility, interpolate)
    config_data = config.load(config_details, compatibility)

    api_version = environment.get(
        'COMPOSE_API_VERSION',
        API_VERSIONS[config_data.version])

    client = get_client(
        verbose=verbose, version=api_version, context=context, environment=environment
        verbose=verbose, version=api_version, tls_config=tls_config,
        host=host, environment=environment
    )

    with errors.handle_connection_errors(client):
        return Project.from_config(
            project_name,
            config_data,
            client,
            environment.get('DOCKER_DEFAULT_PLATFORM'),
            execution_context_labels(config_details, environment_file),
            project_name, config_data, client, environment.get('DOCKER_DEFAULT_PLATFORM')
        )


def execution_context_labels(config_details, environment_file):
    extra_labels = [
        '{0}={1}'.format(LABEL_WORKING_DIR, os.path.abspath(config_details.working_dir))
    ]

    if not use_config_from_stdin(config_details):
        extra_labels.append('{0}={1}'.format(LABEL_CONFIG_FILES, config_files_label(config_details)))

    if environment_file is not None:
        extra_labels.append('{0}={1}'.format(LABEL_ENVIRONMENT_FILE,
                                             os.path.normpath(environment_file)))
    return extra_labels


def use_config_from_stdin(config_details):
    for c in config_details.config_files:
        if not c.filename:
            return True
    return False


def config_files_label(config_details):
    return ",".join(
        map(str, (os.path.normpath(c.filename) for c in config_details.config_files)))


def get_project_name(working_dir, project_name=None, environment=None):
    def normalize_name(name):
        return re.sub(r'[^-_a-z0-9]', '', name.lower())
@@ -193,13 +144,3 @@ def get_project_name(working_dir, project_name=None, environment=None):
        return normalize_name(project)

    return 'default'


def compatibility_from_options(working_dir, options=None, environment=None):
    """Get compose v3 compatibility from --compatibility option
    or from COMPOSE_COMPATIBILITY environment variable."""

    compatibility_option = options.get('--compatibility')
    compatibility_environment = environment.get_boolean('COMPOSE_COMPATIBILITY')

    return compatibility_option or compatibility_environment
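`compatibility_from_options` above treats the `--compatibility` flag and the `COMPOSE_COMPATIBILITY` variable as equivalent, and `project_from_options` threads `--env-file` into `Environment.from_env_file` (both on the 1.25+ side of this diff). A sketch; the env-file path is illustrative:

```sh
# Equivalent ways to enable v3 compatibility mode.
docker-compose --compatibility up -d
COMPOSE_COMPATIBILITY=true docker-compose up -d

# Load variables from an explicit env file instead of ./.env.
docker-compose --env-file ./config/.env.dev up -d
```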
compose/cli/docker_client.py
@@ -5,22 +5,17 @@ import logging
import os.path
import ssl

import six
from docker import APIClient
from docker import Context
from docker import ContextAPI
from docker import TLSConfig
from docker.errors import TLSParameterError
from docker.tls import TLSConfig
from docker.utils import kwargs_from_env
from docker.utils.config import home_dir

from . import verbose_proxy
from ..config.environment import Environment
from ..const import HTTP_TIMEOUT
from ..utils import unquote_path
from .errors import UserError
from .utils import generate_user_agent
from .utils import get_version_info

log = logging.getLogger(__name__)

@@ -29,33 +24,6 @@ def default_cert_path():
    return os.path.join(home_dir(), '.docker')


def make_context(host, options, environment):
    tls = tls_config_from_options(options, environment)
    ctx = Context("compose", host=host, tls=tls.verify if tls else False)
    if tls:
        ctx.set_endpoint("docker", host, tls, skip_tls_verify=not tls.verify)
    return ctx


def load_context(name=None):
    return ContextAPI.get_context(name)


def get_client(environment, verbose=False, version=None, context=None):
    client = docker_client(
        version=version, context=context,
        environment=environment, tls_version=get_tls_version(environment)
    )
    if verbose:
        version_info = six.iteritems(client.version())
        log.info(get_version_info('full'))
        log.info("Docker base_url: %s", client.base_url)
        log.info("Docker version: %s",
                 ", ".join("%s=%s" % item for item in version_info))
        return verbose_proxy.VerboseProxy('docker', client)
    return client


def get_tls_version(environment):
    compose_tls_version = environment.get('COMPOSE_TLS_VERSION', None)
    if not compose_tls_version:
@@ -63,7 +31,7 @@ def get_tls_version(environment):

    tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
    if not hasattr(ssl, tls_attr_name):
        log.warning(
        log.warn(
            'The "{}" protocol is unavailable. You may need to update your '
            'version of Python or OpenSSL. Falling back to TLSv1 (default).'
            .format(compose_tls_version)
@@ -119,7 +87,8 @@ def tls_config_from_options(options, environment=None):
    return None


def docker_client(environment, version=None, context=None, tls_version=None):
def docker_client(environment, version=None, tls_config=None, host=None,
                  tls_version=None):
    """
    Returns a docker-py client configured using environment variables
    according to the same logic as the official Docker client.
@@ -132,26 +101,10 @@ def docker_client(environment, version=None, context=None, tls_version=None):
        "and DOCKER_CERT_PATH are set correctly.\n"
        "You might need to run `eval \"$(docker-machine env default)\"`")

    if not context:
        # check env for DOCKER_HOST and certs path
        host = kwargs.get("base_url", None)
        tls = kwargs.get("tls", None)
        verify = False if not tls else tls.verify
        if host:
            context = Context("compose", host=host, tls=verify)
        else:
            context = ContextAPI.get_current_context()
        if tls:
            context.set_endpoint("docker", host=host, tls_cfg=tls, skip_tls_verify=not verify)

    if not context.is_docker_host():
        raise UserError(
            "The platform targeted with the current context is not supported.\n"
            "Make sure the context in use targets a Docker Engine.\n")

    kwargs['base_url'] = context.Host
    if context.TLSConfig:
        kwargs['tls'] = context.TLSConfig
    if host:
        kwargs['base_url'] = host
    if tls_config:
        kwargs['tls'] = tls_config

    if version:
        kwargs['version'] = version
@@ -2,37 +2,25 @@ from __future__ import absolute_import
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import logging
|
||||
import shutil
|
||||
import os
|
||||
|
||||
import six
|
||||
import texttable
|
||||
|
||||
from compose.cli import colors
|
||||
|
||||
if hasattr(shutil, "get_terminal_size"):
|
||||
from shutil import get_terminal_size
|
||||
else:
|
||||
from backports.shutil_get_terminal_size import get_terminal_size
|
||||
|
||||
|
||||
def get_tty_width():
|
||||
try:
|
||||
# get_terminal_size can't determine the size if compose is piped
|
||||
# to another command. But in such case it doesn't make sense to
|
||||
# try format the output by terminal size as this output is consumed
|
||||
# by another command. So let's pretend we have a huge terminal so
|
||||
# output is single-lined
|
||||
width, _ = get_terminal_size(fallback=(999, 0))
|
||||
return int(width)
|
||||
except OSError:
|
||||
tty_size = os.popen('stty size 2> /dev/null', 'r').read().split()
|
||||
if len(tty_size) != 2:
|
||||
return 0
|
||||
_, width = tty_size
|
||||
return int(width)
|
||||
|
||||
|
||||
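When output is piped, get_terminal_size cannot query a window size and falls back to the (999, 0) tuple above, so each row stays on one line. A minimal demonstration of the fallback (run with stdout redirected to observe the fallback width):

from shutil import get_terminal_size

# In a real terminal this reports the actual window size; when stdout is
# not a TTY (e.g. piped to another command) the fallback tuple is used.
columns, lines = get_terminal_size(fallback=(999, 0))
print(columns)
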
class Formatter:
class Formatter(object):
    """Format tabular data for printing."""

    @staticmethod
    def table(headers, rows):
    def table(self, headers, rows):
        table = texttable.Texttable(max_width=get_tty_width())
        table.set_cols_dtype(['t' for h in headers])
        table.add_rows([headers] + rows)

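On the 1.26 side table is a static method (Formatter.table(...)); on the 1.24 side it is an instance method (Formatter().table(...)), and the call sites later in this diff change accordingly. A hedged sketch of the texttable wiring both variants share, with a fixed max_width standing in for get_tty_width():

import texttable

def render(headers, rows, max_width=80):
    # Same setup as Formatter.table: all text columns,
    # header row first, then the data rows.
    table = texttable.Texttable(max_width=max_width)
    table.set_cols_dtype(['t' for _ in headers])
    table.add_rows([headers] + rows)
    return table.draw()

print(render(['Name', 'State'], [['web_1', 'Up'], ['db_1', 'Exit 0']]))
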
@@ -134,10 +134,7 @@ def build_thread(container, presenter, queue, log_args):
def build_thread_map(initial_containers, presenters, thread_args):
    return {
        container.id: build_thread(container, next(presenters), *thread_args)
        # Container order is unspecified, so they are sorted by name in order to make
        # container:presenter (log color) assignment deterministic when given a list of containers
        # with the same names.
        for container in sorted(initial_containers, key=lambda c: c.name)
        for container in initial_containers
    }


@@ -233,13 +230,7 @@ def watch_events(thread_map, event_stream, presenters, thread_args):

        # Container crashed so we should reattach to it
        if event['id'] in crashed_containers:
            container = event['container']
            if not container.is_restarting:
                try:
                    container.attach_log_stream()
                except APIError:
                    # Just ignore errors when reattaching to already crashed containers
                    pass
            event['container'].attach_log_stream()
            crashed_containers.remove(event['id'])

            thread_map[event['id']] = build_thread(

@@ -6,7 +6,6 @@ import contextlib
import functools
import json
import logging
import os
import pipes
import re
import subprocess
@@ -15,12 +14,14 @@ from distutils.spawn import find_executable
from inspect import getdoc
from operator import attrgetter

import docker.errors
import docker.utils
import docker

from . import errors
from . import signals
from .. import __version__
from ..bundle import get_image_digests
from ..bundle import MissingDigests
from ..bundle import serialize_bundle
from ..config import ConfigurationError
from ..config import parse_environment
from ..config import parse_labels
@@ -32,8 +33,6 @@ from ..const import COMPOSEFILE_V2_2 as V2_2
from ..const import IS_WINDOWS_PLATFORM
from ..errors import StreamParseError
from ..progress_stream import StreamOutputError
from ..project import get_image_digests
from ..project import MissingDigests
from ..project import NoSuchService
from ..project import OneOffFilter
from ..project import ProjectError
@@ -103,9 +102,9 @@ def dispatch():
    options, handler, command_options = dispatcher.parse(sys.argv[1:])
    setup_console_handler(console_handler,
                          options.get('--verbose'),
                          set_no_color_if_clicolor(options.get('--no-ansi')),
                          options.get('--no-ansi'),
                          options.get("--log-level"))
    setup_parallel_logger(set_no_color_if_clicolor(options.get('--no-ansi')))
    setup_parallel_logger(options.get('--no-ansi'))
    if options.get('--no-ansi'):
        command_options['--no-color'] = True
    return functools.partial(perform_command, options, handler, command_options)
@@ -192,7 +191,6 @@ class TopLevelCommand(object):
                                  (default: docker-compose.yml)
      -p, --project-name NAME     Specify an alternate project name
                                  (default: directory name)
      -c, --context NAME          Specify a context name
      --verbose                   Show more output
      --log-level LEVEL           Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
      --no-ansi                   Do not print ANSI control characters
@@ -210,10 +208,10 @@ class TopLevelCommand(object):
                                  (default: the path of the Compose file)
      --compatibility             If set, Compose will attempt to convert keys
                                  in v3 files to their non-Swarm equivalent
      --env-file PATH             Specify an alternate environment file

    Commands:
      build              Build or rebuild services
      bundle             Generate a Docker bundle from the Compose file
      config             Validate and view the Compose file
      create             Create services
      down               Stop and remove containers, networks, images, and volumes
@@ -248,11 +246,6 @@ class TopLevelCommand(object):
    def project_dir(self):
        return self.toplevel_options.get('--project-directory') or '.'

    @property
    def toplevel_environment(self):
        environment_file = self.toplevel_options.get('--env-file')
        return Environment.from_env_file(self.project_dir, environment_file)

    def build(self, options):
        """
        Build or rebuild services.
@@ -264,18 +257,13 @@ class TopLevelCommand(object):
        Usage: build [options] [--build-arg key=val...] [SERVICE...]

        Options:
            --build-arg key=val     Set build-time variables for services.
            --compress              Compress the build context using gzip.
            --force-rm              Always remove intermediate containers.
            -m, --memory MEM        Set memory limit for the build container.
            --no-cache              Do not use cache when building the image.
            --no-rm                 Do not remove intermediate containers after a successful build.
            --parallel              Build images in parallel.
            --progress string       Set type of progress output (auto, plain, tty).
                                    EXPERIMENTAL flag for native builder.
                                    To enable, run with COMPOSE_DOCKER_CLI_BUILD=1)
            --pull                  Always attempt to pull a newer version of the image.
            -q, --quiet             Don't print anything to STDOUT
            -m, --memory MEM        Sets memory limit for the build container.
            --build-arg key=val     Set build-time variables for services.
            --parallel              Build images in parallel.
        """
        service_names = options['SERVICE']
        build_args = options.get('--build-arg', None)
@@ -285,9 +273,8 @@ class TopLevelCommand(object):
                    '--build-arg is only supported when services are specified for API version < 1.25.'
                    ' Please use a Compose file version > 2.2 or specify which services to build.'
                )
            build_args = resolve_build_args(build_args, self.toplevel_environment)

        native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD')
            environment = Environment.from_env_file(self.project_dir)
            build_args = resolve_build_args(build_args, environment)

        self.project.build(
            service_names=options['SERVICE'],
@@ -295,15 +282,43 @@ class TopLevelCommand(object):
            pull=bool(options.get('--pull', False)),
            force_rm=bool(options.get('--force-rm', False)),
            memory=options.get('--memory'),
            rm=not bool(options.get('--no-rm', False)),
            build_args=build_args,
            gzip=options.get('--compress', False),
            parallel_build=options.get('--parallel', False),
            silent=options.get('--quiet', False),
            cli=native_builder,
            progress=options.get('--progress'),
        )

    def bundle(self, options):
        """
        Generate a Distributed Application Bundle (DAB) from the Compose file.

        Images must have digests stored, which requires interaction with a
        Docker registry. If digests aren't stored for all images, you can fetch
        them with `docker-compose pull` or `docker-compose push`. To push images
        automatically when bundling, pass `--push-images`. Only services with
        a `build` option specified will have their images pushed.

        Usage: bundle [options]

        Options:
            --push-images              Automatically push images for any services
                                       which have a `build` option specified.

            -o, --output PATH          Path to write the bundle file to.
                                       Defaults to "<project name>.dab".
        """
        compose_config = get_config_from_options('.', self.toplevel_options)

        output = options["--output"]
        if not output:
            output = "{}.dab".format(self.project.name)

        image_digests = image_digests_for_project(self.project, options['--push-images'])

        with open(output, 'w') as f:
            f.write(serialize_bundle(compose_config, image_digests))

        log.info("Wrote bundle to {}".format(output))

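The bundle flow is: resolve image digests (optionally pushing locally built images first), then serialize the config plus digests to a .dab file. A hedged sketch of the same steps, assuming a project and compose_config have already been constructed:

# Hypothetical wiring of the steps the bundle command performs.
def write_bundle(project, compose_config, output=None, push_images=False):
    output = output or "{}.dab".format(project.name)
    # Raises with pull/push hints when digests are missing.
    image_digests = image_digests_for_project(project, push_images)
    with open(output, 'w') as f:
        f.write(serialize_bundle(compose_config, image_digests))
    return output
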
    def config(self, options):
        """
        Validate and view the Compose file.
@@ -312,7 +327,6 @@ class TopLevelCommand(object):

        Options:
            --resolve-image-digests  Pin image tags to digests.
            --no-interpolate         Don't interpolate environment variables
            -q, --quiet              Only validate the configuration, don't print
                                     anything.
            --services               Print the service names, one per line.
@@ -322,12 +336,11 @@ class TopLevelCommand(object):
                                     or use the wildcard symbol to display all services
        """

        additional_options = {'--no-interpolate': options.get('--no-interpolate')}
        compose_config = get_config_from_options('.', self.toplevel_options, additional_options)
        compose_config = get_config_from_options('.', self.toplevel_options)
        image_digests = None

        if options['--resolve-image-digests']:
            self.project = project_from_options('.', self.toplevel_options, additional_options)
            self.project = project_from_options('.', self.toplevel_options)
            with errors.handle_connection_errors(self.project.client):
                image_digests = image_digests_for_project(self.project)

@@ -344,14 +357,14 @@ class TopLevelCommand(object):

        if options['--hash'] is not None:
            h = options['--hash']
            self.project = project_from_options('.', self.toplevel_options, additional_options)
            self.project = project_from_options('.', self.toplevel_options)
            services = [svc for svc in options['--hash'].split(',')] if h != '*' else None
            with errors.handle_connection_errors(self.project.client):
                for service in self.project.get_services(services):
                    print('{} {}'.format(service.name, service.config_hash))
                return

        print(serialize_config(compose_config, image_digests, not options['--no-interpolate']))
        print(serialize_config(compose_config, image_digests))

    def create(self, options):
        """
@@ -370,7 +383,7 @@ class TopLevelCommand(object):
        """
        service_names = options['SERVICE']

        log.warning(
        log.warn(
            'The create command is deprecated. '
            'Use the up command with the --no-start flag instead.'
        )
@@ -409,7 +422,8 @@ class TopLevelCommand(object):
            -t, --timeout TIMEOUT   Specify a shutdown timeout in seconds.
                                    (default: 10)
        """
        ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
        environment = Environment.from_env_file(self.project_dir)
        ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')

        if ignore_orphans and options['--remove-orphans']:
            raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
@@ -466,7 +480,8 @@ class TopLevelCommand(object):
                              not supported in API < 1.25)
            -w, --workdir DIR Path to workdir directory for this command.
        """
        use_cli = not self.toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
        environment = Environment.from_env_file(self.project_dir)
        use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
        index = int(options.get('--index'))
        service = self.project.get_service(options['SERVICE'])
        detach = options.get('--detach')
@@ -489,7 +504,7 @@ class TopLevelCommand(object):
        if IS_WINDOWS_PLATFORM or use_cli and not detach:
            sys.exit(call_docker(
                build_exec_command(options, container.id, command),
                self.toplevel_options, self.toplevel_environment)
                self.toplevel_options)
            )

        create_exec_options = {
@@ -589,7 +604,7 @@ class TopLevelCommand(object):
                image_id,
                size
            ])
        print(Formatter.table(headers, rows))
        print(Formatter().table(headers, rows))

    def kill(self, options):
        """
@@ -635,7 +650,7 @@ class TopLevelCommand(object):
        log_printer_from_project(
            self.project,
            containers,
            set_no_color_if_clicolor(options['--no-color']),
            options['--no-color'],
            log_args,
            event_stream=self.project.events(service_names=options['SERVICE'])).run()

@@ -694,8 +709,7 @@ class TopLevelCommand(object):

        if options['--all']:
            containers = sorted(self.project.containers(service_names=options['SERVICE'],
                                                        one_off=OneOffFilter.include, stopped=True),
                                key=attrgetter('name'))
                                                        one_off=OneOffFilter.include, stopped=True))
        else:
            containers = sorted(
                self.project.containers(service_names=options['SERVICE'], stopped=True) +
@@ -723,7 +737,7 @@ class TopLevelCommand(object):
                container.human_readable_state,
                container.human_readable_ports,
            ])
        print(Formatter.table(headers, rows))
        print(Formatter().table(headers, rows))

    def pull(self, options):
        """
@@ -739,7 +753,7 @@ class TopLevelCommand(object):
            --include-deps          Also pull services declared as dependencies
        """
        if options.get('--parallel'):
            log.warning('--parallel option is deprecated and will be removed in future versions.')
            log.warn('--parallel option is deprecated and will be removed in future versions.')
        self.project.pull(
            service_names=options['SERVICE'],
            ignore_pull_failures=options.get('--ignore-pull-failures'),
@@ -780,7 +794,7 @@ class TopLevelCommand(object):
            -a, --all               Deprecated - no effect.
        """
        if options.get('--all'):
            log.warning(
            log.warn(
                '--all flag is obsolete. This is now the default behavior '
                'of `docker-compose rm`'
            )
@@ -858,12 +872,10 @@ class TopLevelCommand(object):
        else:
            command = service.options.get('command')

        options['stdin_open'] = service.options.get('stdin_open', True)

        container_options = build_one_off_container_options(options, detach, command)
        run_one_off_container(
            container_options, self.project, service, options,
            self.toplevel_options, self.toplevel_environment
            self.toplevel_options, self.project_dir
        )

    def scale(self, options):
@@ -892,7 +904,7 @@ class TopLevelCommand(object):
                'Use the up command with the --scale flag instead.'
            )
        else:
            log.warning(
            log.warn(
                'The scale command is deprecated. '
                'Use the up command with the --scale flag instead.'
            )
@@ -963,7 +975,7 @@ class TopLevelCommand(object):
                rows.append(process)

            print(container.name)
            print(Formatter.table(headers, rows))
            print(Formatter().table(headers, rows))

    def unpause(self, options):
        """
@@ -1013,7 +1025,6 @@ class TopLevelCommand(object):
            --build                    Build images before starting containers.
            --abort-on-container-exit  Stops all containers if any container was
                                       stopped. Incompatible with -d.
            --attach-dependencies      Attach to dependent containers
            -t, --timeout TIMEOUT      Use this timeout in seconds for container
                                       shutdown when attached or when containers are
                                       already running. (default: 10)
@@ -1035,23 +1046,20 @@ class TopLevelCommand(object):
        remove_orphans = options['--remove-orphans']
        detached = options.get('--detach')
        no_start = options.get('--no-start')
        attach_dependencies = options.get('--attach-dependencies')

        if detached and (cascade_stop or exit_value_from or attach_dependencies):
            raise UserError(
                "-d cannot be combined with --abort-on-container-exit or --attach-dependencies.")
        if detached and (cascade_stop or exit_value_from):
            raise UserError("--abort-on-container-exit and -d cannot be combined.")

        ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
        environment = Environment.from_env_file(self.project_dir)
        ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')

        if ignore_orphans and remove_orphans:
            raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")

        opts = ['--detach', '--abort-on-container-exit', '--exit-code-from', '--attach-dependencies']
        opts = ['--detach', '--abort-on-container-exit', '--exit-code-from']
        for excluded in [x for x in opts if options.get(x) and no_start]:
            raise UserError('--no-start and {} cannot be combined.'.format(excluded))

        native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD')

        with up_shutdown_context(self.project, service_names, timeout, detached):
            warn_for_swarm_mode(self.project.client)

@@ -1071,7 +1079,6 @@ class TopLevelCommand(object):
                reset_container_image=rebuild,
                renew_anonymous_volumes=options.get('--renew-anon-volumes'),
                silent=options.get('--quiet-pull'),
                cli=native_builder,
            )

            try:
@@ -1080,7 +1087,7 @@ class TopLevelCommand(object):
                log.error(
                    "The image for the service you're trying to recreate has been removed. "
                    "If you continue, volume data could be lost. Consider backing up your data "
                    "before continuing.\n"
                    "before continuing.\n".format(e.explanation)
                )
                res = yesno("Continue with the new image? [yN]", False)
                if res is None or not res:
@@ -1091,15 +1098,12 @@ class TopLevelCommand(object):
            if detached or no_start:
                return

            attached_containers = filter_attached_containers(
                to_attach,
                service_names,
                attach_dependencies)
            attached_containers = filter_containers_to_service_names(to_attach, service_names)

            log_printer = log_printer_from_project(
                self.project,
                attached_containers,
                set_no_color_if_clicolor(options['--no-color']),
                options['--no-color'],
                {'follow': True},
                cascade_stop,
                event_stream=self.project.events(service_names=service_names))
@@ -1190,10 +1194,12 @@ def timeout_from_opts(options):
    return None if timeout is None else int(timeout)


def image_digests_for_project(project):
def image_digests_for_project(project, allow_push=False):
    try:
        return get_image_digests(project)

        return get_image_digests(
            project,
            allow_push=allow_push
        )
    except MissingDigests as e:
        def list_images(images):
            return "\n".join("    {}".format(name) for name in sorted(images))
@@ -1202,7 +1208,7 @@ def image_digests_for_project(project):

        if e.needs_push:
            command_hint = (
                "Use `docker push {}` to push them. "
                "Use `docker-compose push {}` to push them. "
                .format(" ".join(sorted(e.needs_push)))
            )
            paras += [
@@ -1213,7 +1219,7 @@ def image_digests_for_project(project):

        if e.needs_pull:
            command_hint = (
                "Use `docker pull {}` to pull them. "
                "Use `docker-compose pull {}` to pull them. "
                .format(" ".join(sorted(e.needs_pull)))
            )

@@ -1230,7 +1236,7 @@ def exitval_from_opts(options, project):
    exit_value_from = options.get('--exit-code-from')
    if exit_value_from:
        if not options.get('--abort-on-container-exit'):
            log.warning('using --exit-code-from implies --abort-on-container-exit')
            log.warn('using --exit-code-from implies --abort-on-container-exit')
            options['--abort-on-container-exit'] = True
        if exit_value_from not in [s.name for s in project.get_services()]:
            log.error('No service named "%s" was found in your compose file.',
@@ -1265,7 +1271,7 @@ def build_one_off_container_options(options, detach, command):
    container_options = {
        'command': command,
        'tty': not (detach or options['-T'] or not sys.stdin.isatty()),
        'stdin_open': options.get('stdin_open'),
        'stdin_open': not detach,
        'detach': detach,
    }

@@ -1308,7 +1314,7 @@ def build_one_off_container_options(options, detach, command):


def run_one_off_container(container_options, project, service, options, toplevel_options,
                          toplevel_environment):
                          project_dir='.'):
    if not options['--no-deps']:
        deps = service.get_dependency_names()
        if deps:
@@ -1337,7 +1343,8 @@ def run_one_off_container(container_options, project, service, options, toplevel
    if options['--rm']:
        project.client.remove_container(container.id, force=True, v=True)

    use_cli = not toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
    environment = Environment.from_env_file(project_dir)
    use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')

    signals.set_signal_handler_to_shutdown()
    signals.set_signal_handler_to_hang_up()
@@ -1346,8 +1353,8 @@ def run_one_off_container(container_options, project, service, options, toplevel
        if IS_WINDOWS_PLATFORM or use_cli:
            service.connect_container_to_networks(container, use_network_aliases)
            exit_code = call_docker(
                get_docker_start_call(container_options, container.id),
                toplevel_options, toplevel_environment
                ["start", "--attach", "--interactive", container.id],
                toplevel_options
            )
        else:
            operation = RunOperation(
@@ -1373,16 +1380,6 @@ def run_one_off_container(container_options, project, service, options, toplevel
    sys.exit(exit_code)


def get_docker_start_call(container_options, container_id):
    docker_call = ["start"]
    if not container_options.get('detach'):
        docker_call.append("--attach")
    if container_options.get('stdin_open'):
        docker_call.append("--interactive")
    docker_call.append(container_id)
    return docker_call


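get_docker_start_call maps the one-off container options onto docker CLI flags, where the 1.24 side always hardcoded --attach --interactive. A quick illustration with hypothetical inputs:

def get_docker_start_call(container_options, container_id):
    docker_call = ["start"]
    if not container_options.get('detach'):
        docker_call.append("--attach")
    if container_options.get('stdin_open'):
        docker_call.append("--interactive")
    docker_call.append(container_id)
    return docker_call

# Interactive foreground run attaches and keeps stdin open:
assert get_docker_start_call({'detach': False, 'stdin_open': True}, 'abc123') == \
    ["start", "--attach", "--interactive", "abc123"]
# Detached run is just `docker start <id>`:
assert get_docker_start_call({'detach': True, 'stdin_open': False}, 'abc123') == \
    ["start", "abc123"]
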
def log_printer_from_project(
    project,
    containers,
@@ -1399,8 +1396,8 @@ def log_printer_from_project(
        log_args=log_args)


def filter_attached_containers(containers, service_names, attach_dependencies=False):
    if attach_dependencies or not service_names:
def filter_containers_to_service_names(containers, service_names):
    if not service_names:
        return containers

    return [
@@ -1437,7 +1434,7 @@ def exit_if(condition, message, exit_code):
        raise SystemExit(exit_code)


def call_docker(args, dockeropts, environment):
def call_docker(args, dockeropts):
    executable_path = find_executable('docker')
    if not executable_path:
        raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))
@@ -1467,12 +1464,7 @@ def call_docker(args, dockeropts, environment):
    args = [executable_path] + tls_options + args
    log.debug(" ".join(map(pipes.quote, args)))

    filtered_env = {}
    for k, v in environment.items():
        if v is not None:
            filtered_env[k] = environment[k]

    return subprocess.call(args, env=filtered_env)
    return subprocess.call(args)


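The 1.26 side filters out None-valued entries before handing the environment to the child process, since every env value passed to subprocess must be a string. A minimal sketch of that filtering:

import subprocess

def call_with_clean_env(args, environment):
    # Drop unset keys so subprocess only sees string-valued entries.
    filtered_env = {k: v for k, v in environment.items() if v is not None}
    return subprocess.call(args, env=filtered_env)

# e.g. call_with_clean_env(['docker', 'version'],
#                          {'DOCKER_HOST': None, 'PATH': '/usr/bin'})
# passes only PATH through to the docker binary.
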
def parse_scale_args(options):
@@ -1573,14 +1565,10 @@ def warn_for_swarm_mode(client):
            # UCP does multi-node scheduling with traditional Compose files.
            return

        log.warning(
        log.warn(
            "The Docker Engine you're using is running in swarm mode.\n\n"
            "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
            "All containers will be scheduled on the current node.\n\n"
            "To deploy your application across the swarm, "
            "use `docker stack deploy`.\n"
        )


def set_no_color_if_clicolor(no_color_flag):
    return no_color_flag or os.environ.get('CLICOLOR') == "0"

@@ -9,7 +9,6 @@ import ssl
import subprocess
import sys

import distro
import docker
import six

@@ -74,7 +73,7 @@ def is_mac():


def is_ubuntu():
    return platform.system() == 'Linux' and distro.linux_distribution()[0] == 'Ubuntu'
    return platform.system() == 'Linux' and platform.linux_distribution()[0] == 'Ubuntu'


def is_windows():
@@ -134,12 +133,12 @@ def generate_user_agent():

def human_readable_file_size(size):
    suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', ]
    order = int(math.log(size, 1000)) if size else 0
    order = int(math.log(size, 2) / 10) if size else 0
    if order >= len(suffixes):
        order = len(suffixes) - 1

    return '{0:.4g} {1}'.format(
        size / pow(10, order * 3),
    return '{0:.3g} {1}'.format(
        size / float(1 << (order * 10)),
        suffixes[order]
    )


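The two variants disagree on units: 1.26 uses decimal multiples (kB = 1000 B), while 1.24 divides by binary multiples (1024 B) yet still prints SI suffixes. A worked comparison of the same byte count:

import math

size = 2000000  # bytes

# Decimal (1.26.x): order = int(math.log(size, 1000)) = 2 -> "2 MB"
decimal = '{0:.4g} {1}'.format(size / pow(10, 2 * 3), 'MB')

# Binary (1.24.x): order = int(math.log(size, 2) / 10) = 2 -> "1.91 MB"
binary = '{0:.3g} {1}'.format(size / float(1 << (2 * 10)), 'MB')

print(decimal, '|', binary)  # 2 MB | 1.91 MB
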
@@ -5,7 +5,6 @@ import functools
import io
import logging
import os
import re
import string
import sys
from collections import namedtuple
@@ -199,9 +198,9 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
        version = self.config['version']

        if isinstance(version, dict):
            log.warning('Unexpected type for "version" key in "{}". Assuming '
                        '"version" is the name of a service, and defaulting to '
                        'Compose file version 1.'.format(self.filename))
            log.warn('Unexpected type for "version" key in "{}". Assuming '
                     '"version" is the name of a service, and defaulting to '
                     'Compose file version 1.'.format(self.filename))
            return V1

        if not isinstance(version, six.string_types):
@@ -215,12 +214,6 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
                .format(self.filename, VERSION_EXPLANATION)
            )

        version_pattern = re.compile(r"^[2-9]+(\.\d+)?$")
        if not version_pattern.match(version):
            raise ConfigurationError(
                'Version "{}" in "{}" is invalid.'
                .format(version, self.filename))

        if version == '2':
            return const.COMPOSEFILE_V2_0

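The version pattern accepts a major digit from 2 to 9 with an optional .minor part; anything else is rejected before the version constants are resolved. A small check against the same regex:

import re

version_pattern = re.compile(r"^[2-9]+(\.\d+)?$")

for candidate in ('2', '3.7', '3.8', '1', 'latest', '3.7.1'):
    print(candidate, bool(version_pattern.match(candidate)))
# '2', '3.7' and '3.8' match; '1', 'latest' and '3.7.1' do not.
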
@@ -325,8 +318,8 @@ def get_default_config_files(base_dir):
    winner = candidates[0]

    if len(candidates) > 1:
        log.warning("Found multiple config files with supported names: %s", ", ".join(candidates))
        log.warning("Using %s\n", winner)
        log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
        log.warn("Using %s\n", winner)

    return [os.path.join(path, winner)] + get_default_override_file(path)

@@ -369,7 +362,7 @@ def check_swarm_only_config(service_dicts, compatibility=False):
    def check_swarm_only_key(service_dicts, key):
        services = [s for s in service_dicts if s.get(key)]
        if services:
            log.warning(
            log.warn(
                warning_template.format(
                    services=", ".join(sorted(s['name'] for s in services)),
                    key=key
@@ -380,7 +373,7 @@ def check_swarm_only_config(service_dicts, compatibility=False):
        check_swarm_only_key(service_dicts, 'configs')


def load(config_details, compatibility=False, interpolate=True):
def load(config_details, compatibility=False):
    """Load the configuration from a working directory and a list of
    configuration files. Files are loaded in order, and merged on top
    of each other to create the final configuration.
@@ -390,7 +383,7 @@ def load(config_details, compatibility=False, interpolate=True):
    validate_config_version(config_details.config_files)

    processed_files = [
        process_config_file(config_file, config_details.environment, interpolate=interpolate)
        process_config_file(config_file, config_details.environment)
        for config_file in config_details.config_files
    ]
    config_details = config_details._replace(config_files=processed_files)
@@ -408,7 +401,7 @@ def load(config_details, compatibility=False, interpolate=True):
    configs = load_mapping(
        config_details.config_files, 'get_configs', 'Config', config_details.working_dir
    )
    service_dicts = load_services(config_details, main_file, compatibility, interpolate=interpolate)
    service_dicts = load_services(config_details, main_file, compatibility)

    if main_file.version != V1:
        for service_dict in service_dicts:
@@ -460,7 +453,7 @@ def validate_external(entity_type, name, config, version):
            entity_type, name, ', '.join(k for k in config if k != 'external')))


def load_services(config_details, config_file, compatibility=False, interpolate=True):
def load_services(config_details, config_file, compatibility=False):
    def build_service(service_name, service_dict, service_names):
        service_config = ServiceConfig.with_abs_paths(
            config_details.working_dir,
@@ -479,8 +472,7 @@ def load_services(config_details, config_file, compatibility=False, interpolate=
            service_names,
            config_file.version,
            config_details.environment,
            compatibility,
            interpolate
            compatibility
        )
        return service_dict

@@ -505,12 +497,15 @@ def load_services(config_details, config_file, compatibility=False, interpolate=
        file.get_service_dicts() for file in config_details.config_files
    ]

    service_config = functools.reduce(merge_services, service_configs)
    service_config = service_configs[0]
    for next_config in service_configs[1:]:
        service_config = merge_services(service_config, next_config)

    return build_services(service_config)


def interpolate_config_section(config_file, config, section, environment):
    validate_config_section(config_file.filename, config, section)
    return interpolate_environment_variables(
        config_file.version,
        config,
@@ -519,60 +514,38 @@ def interpolate_config_section(config_file, config, section, environment):
    )


def process_config_section(config_file, config, section, environment, interpolate):
    validate_config_section(config_file.filename, config, section)
    if interpolate:
        return interpolate_environment_variables(
            config_file.version,
            config,
            section,
            environment
        )
    else:
        return config


def process_config_file(config_file, environment, service_name=None, interpolate=True):
    services = process_config_section(
def process_config_file(config_file, environment, service_name=None):
    services = interpolate_config_section(
        config_file,
        config_file.get_service_dicts(),
        'service',
        environment,
        interpolate,
    )
        environment)

    if config_file.version > V1:
        processed_config = dict(config_file.config)
        processed_config['services'] = services
        processed_config['volumes'] = process_config_section(
        processed_config['volumes'] = interpolate_config_section(
            config_file,
            config_file.get_volumes(),
            'volume',
            environment,
            interpolate,
        )
        processed_config['networks'] = process_config_section(
            environment)
        processed_config['networks'] = interpolate_config_section(
            config_file,
            config_file.get_networks(),
            'network',
            environment,
            interpolate,
        )
            environment)
        if config_file.version >= const.COMPOSEFILE_V3_1:
            processed_config['secrets'] = process_config_section(
            processed_config['secrets'] = interpolate_config_section(
                config_file,
                config_file.get_secrets(),
                'secret',
                environment,
                interpolate,
                environment
            )
        if config_file.version >= const.COMPOSEFILE_V3_3:
            processed_config['configs'] = process_config_section(
            processed_config['configs'] = interpolate_config_section(
                config_file,
                config_file.get_configs(),
                'config',
                environment,
                interpolate,
                environment
            )
    else:
        processed_config = services
@@ -621,7 +594,7 @@ class ServiceExtendsResolver(object):
        config_path = self.get_extended_config_path(extends)
        service_name = extends['service']

        if config_path == os.path.abspath(self.config_file.filename):
        if config_path == self.config_file.filename:
            try:
                service_config = self.config_file.get_service(service_name)
            except KeyError:
@@ -678,13 +651,13 @@ class ServiceExtendsResolver(object):
        return filename


def resolve_environment(service_dict, environment=None, interpolate=True):
def resolve_environment(service_dict, environment=None):
    """Unpack any environment variables from an env_file, if set.
    Interpolate environment values if set.
    """
    env = {}
    for env_file in service_dict.get('env_file', []):
        env.update(env_vars_from_file(env_file, interpolate))
        env.update(env_vars_from_file(env_file))

    env.update(parse_environment(service_dict.get('environment')))
    return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(env))
@@ -880,12 +853,11 @@ def finalize_service_volumes(service_dict, environment):
    return service_dict


def finalize_service(service_config, service_names, version, environment, compatibility,
                     interpolate=True):
def finalize_service(service_config, service_names, version, environment, compatibility):
    service_dict = dict(service_config.config)

    if 'environment' in service_dict or 'env_file' in service_dict:
        service_dict['environment'] = resolve_environment(service_dict, environment, interpolate)
        service_dict['environment'] = resolve_environment(service_dict, environment)
        service_dict.pop('env_file', None)

    if 'volumes_from' in service_dict:
@@ -928,7 +900,7 @@ def finalize_service(service_config, service_names, version, environment, compat
            service_dict
        )
        if ignored_keys:
            log.warning(
            log.warn(
                'The following deploy sub-keys are not supported in compatibility mode and have'
                ' been ignored: {}'.format(', '.join(ignored_keys))
            )
@@ -990,17 +962,12 @@ def translate_deploy_keys_to_container_config(service_dict):

    deploy_dict = service_dict['deploy']
    ignored_keys = [
        k for k in ['endpoint_mode', 'labels', 'update_config', 'rollback_config']
        k for k in ['endpoint_mode', 'labels', 'update_config', 'rollback_config', 'placement']
        if k in deploy_dict
    ]

    if 'replicas' in deploy_dict and deploy_dict.get('mode', 'replicated') == 'replicated':
        scale = deploy_dict.get('replicas', 1)
        max_replicas = deploy_dict.get('placement', {}).get('max_replicas_per_node', scale)
        service_dict['scale'] = min(scale, max_replicas)
        if max_replicas < scale:
            log.warning("Scale is limited to {} ('max_replicas_per_node' field).".format(
                max_replicas))
        service_dict['scale'] = deploy_dict['replicas']

    if 'restart_policy' in deploy_dict:
        service_dict['restart'] = {

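In compatibility mode the 1.26 side caps scale at placement.max_replicas_per_node rather than taking replicas verbatim. A worked example of the clamping, as a standalone sketch of the same logic:

deploy_dict = {
    'mode': 'replicated',
    'replicas': 5,
    'placement': {'max_replicas_per_node': 2},
}

scale = deploy_dict.get('replicas', 1)
max_replicas = deploy_dict.get('placement', {}).get('max_replicas_per_node', scale)
print(min(scale, max_replicas))  # 2, with a warning that scale was limited
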
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,11 +1,12 @@
from __future__ import absolute_import
from __future__ import unicode_literals

import codecs
import contextlib
import logging
import os
import re

import dotenv
import six

from ..const import IS_WINDOWS_PLATFORM
@@ -25,12 +26,12 @@ def split_env(env):
        key = env
    if re.search(r'\s', key):
        raise ConfigurationError(
            "environment variable name '{}' may not contain whitespace.".format(key)
            "environment variable name '{}' may not contains whitespace.".format(key)
        )
    return key, value


def env_vars_from_file(filename, interpolate=True):
def env_vars_from_file(filename):
    """
    Read in a line delimited file of environment variables.
    """
@@ -38,10 +39,16 @@ def env_vars_from_file(filename, interpolate=True):
        raise EnvFileNotFound("Couldn't find env file: {}".format(filename))
    elif not os.path.isfile(filename):
        raise EnvFileNotFound("{} is not a file.".format(filename))

    env = dotenv.dotenv_values(dotenv_path=filename, encoding='utf-8-sig', interpolate=interpolate)
    for k, v in env.items():
        env[k] = v if interpolate else v.replace('$', '$$')
    env = {}
    with contextlib.closing(codecs.open(filename, 'r', 'utf-8-sig')) as fileobj:
        for line in fileobj:
            line = line.strip()
            if line and not line.startswith('#'):
                try:
                    k, v = split_env(line)
                    env[k] = v
                except ConfigurationError as e:
                    raise ConfigurationError('In file {}: {}'.format(filename, e.msg))
    return env


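The 1.26 side delegates .env parsing to python-dotenv, doubling $ when interpolation is off so the raw values survive Compose's own interpolation pass; the 1.24 side parses line by line with split_env. A hedged sketch of the dotenv-based path (the .env path and contents are hypothetical):

import dotenv

def read_env(filename, interpolate=True):
    env = dotenv.dotenv_values(
        dotenv_path=filename, encoding='utf-8-sig', interpolate=interpolate
    )
    # With interpolation disabled, escape $ so a later interpolation
    # pass leaves the raw value untouched.
    return {k: v if interpolate else v.replace('$', '$$') for k, v in env.items()}

# Given a .env containing `GREETING=$USER says hi`:
#   read_env('.env', interpolate=False)['GREETING'] == '$$USER says hi'
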
@@ -49,18 +56,14 @@ class Environment(dict):
    def __init__(self, *args, **kwargs):
        super(Environment, self).__init__(*args, **kwargs)
        self.missing_keys = []
        self.silent = False

    @classmethod
    def from_env_file(cls, base_dir, env_file=None):
    def from_env_file(cls, base_dir):
        def _initialize():
            result = cls()
            if base_dir is None:
                return result
            if env_file:
                env_file_path = os.path.join(base_dir, env_file)
            else:
                env_file_path = os.path.join(base_dir, '.env')
            env_file_path = os.path.join(base_dir, '.env')
            try:
                return cls(env_vars_from_file(env_file_path))
            except EnvFileNotFound:
@@ -92,8 +95,8 @@ class Environment(dict):
            return super(Environment, self).__getitem__(key.upper())
        except KeyError:
            pass
        if not self.silent and key not in self.missing_keys:
            log.warning(
        if key not in self.missing_keys:
            log.warn(
                "The {} variable is not set. Defaulting to a blank string."
                .format(key)
            )

@@ -64,12 +64,12 @@ def interpolate_value(name, config_key, value, section, interpolator):
                string=e.string))
    except UnsetRequiredSubstitution as e:
        raise ConfigurationError(
            'Missing mandatory value for "{config_key}" option interpolating {value} '
            'in {section} "{name}": {err}'.format(config_key=config_key,
                                                  value=value,
                                                  name=name,
                                                  section=section,
                                                  err=e.err)
            'Missing mandatory value for "{config_key}" option in {section} "{name}": {err}'.format(
                config_key=config_key,
                name=name,
                section=section,
                err=e.err
            )
        )



@@ -24,12 +24,14 @@ def serialize_dict_type(dumper, data):


def serialize_string(dumper, data):
    """ Ensure boolean-like strings are quoted in the output """
    """ Ensure boolean-like strings are quoted in the output and escape $ characters """
    representer = dumper.represent_str if six.PY3 else dumper.represent_unicode

    if isinstance(data, six.binary_type):
        data = data.decode('utf-8')

    data = data.replace('$', '$$')

    if data.lower() in ('y', 'n', 'yes', 'no', 'on', 'off', 'true', 'false'):
        # Empirically only y/n appears to be an issue, but this might change
        # depending on which PyYaml version is being used. Err on safe side.
@@ -37,12 +39,6 @@ def serialize_string(dumper, data):
    return representer(data)


def serialize_string_escape_dollar(dumper, data):
    """ Ensure boolean-like strings are quoted in the output and escape $ characters """
    data = data.replace('$', '$$')
    return serialize_string(dumper, data)


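Escaping $ as $$ keeps serialized configs round-trippable: when the dumped YAML is fed back through Compose, interpolation collapses $$ back to a literal $. A tiny illustration of the escaping step on its own:

def escape_dollar(data):
    # '$' would be treated as an interpolation marker on re-load;
    # '$$' is the Compose escape for a literal dollar sign.
    return data.replace('$', '$$')

print(escape_dollar('PASSWORD=$ecret'))  # PASSWORD=$$ecret
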
yaml.SafeDumper.add_representer(types.MountSpec, serialize_dict_type)
yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
@@ -50,6 +46,8 @@ yaml.SafeDumper.add_representer(types.SecurityOpt, serialize_config_type)
yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
yaml.SafeDumper.add_representer(types.ServiceConfig, serialize_dict_type)
yaml.SafeDumper.add_representer(types.ServicePort, serialize_dict_type)
yaml.SafeDumper.add_representer(str, serialize_string)
yaml.SafeDumper.add_representer(six.text_type, serialize_string)


def denormalize_config(config, image_digests=None):
@@ -95,13 +93,7 @@ def v3_introduced_name_key(key):
    return V3_5


def serialize_config(config, image_digests=None, escape_dollar=True):
    if escape_dollar:
        yaml.SafeDumper.add_representer(str, serialize_string_escape_dollar)
        yaml.SafeDumper.add_representer(six.text_type, serialize_string_escape_dollar)
    else:
        yaml.SafeDumper.add_representer(str, serialize_string)
        yaml.SafeDumper.add_representer(six.text_type, serialize_string)
def serialize_config(config, image_digests=None):
    return yaml.safe_dump(
        denormalize_config(config, image_digests),
        default_flow_style=False,

@@ -11,9 +11,6 @@ IS_WINDOWS_PLATFORM = (sys.platform == "win32")
LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
LABEL_ONE_OFF = 'com.docker.compose.oneoff'
LABEL_PROJECT = 'com.docker.compose.project'
LABEL_WORKING_DIR = 'com.docker.compose.project.working_dir'
LABEL_CONFIG_FILES = 'com.docker.compose.project.config_files'
LABEL_ENVIRONMENT_FILE = 'com.docker.compose.project.environment_file'
LABEL_SERVICE = 'com.docker.compose.service'
LABEL_NETWORK = 'com.docker.compose.network'
LABEL_VERSION = 'com.docker.compose.version'
@@ -41,10 +38,7 @@ COMPOSEFILE_V3_4 = ComposeVersion('3.4')
COMPOSEFILE_V3_5 = ComposeVersion('3.5')
COMPOSEFILE_V3_6 = ComposeVersion('3.6')
COMPOSEFILE_V3_7 = ComposeVersion('3.7')
COMPOSEFILE_V3_8 = ComposeVersion('3.8')

# minimum DOCKER ENGINE API version needed to support
# features for each compose schema version
API_VERSIONS = {
    COMPOSEFILE_V1: '1.21',
    COMPOSEFILE_V2_0: '1.22',
@@ -60,7 +54,6 @@ API_VERSIONS = {
    COMPOSEFILE_V3_5: '1.30',
    COMPOSEFILE_V3_6: '1.36',
    COMPOSEFILE_V3_7: '1.38',
    COMPOSEFILE_V3_8: '1.38',
}

API_VERSION_TO_ENGINE_VERSION = {
@@ -78,5 +71,4 @@ API_VERSION_TO_ENGINE_VERSION = {
    API_VERSIONS[COMPOSEFILE_V3_5]: '17.06.0',
    API_VERSIONS[COMPOSEFILE_V3_6]: '18.02.0',
    API_VERSIONS[COMPOSEFILE_V3_7]: '18.06.0',
    API_VERSIONS[COMPOSEFILE_V3_8]: '18.06.0',
}

@@ -226,12 +226,12 @@ def check_remote_network_config(remote, local):
        raise NetworkConfigChangedError(local.true_name, 'enable_ipv6')

    local_labels = local.labels or {}
    remote_labels = remote.get('Labels') or {}
    remote_labels = remote.get('Labels', {})
    for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
        if k.startswith('com.docker.'):  # We are only interested in user-specified labels
            continue
        if remote_labels.get(k) != local_labels.get(k):
            log.warning(
            log.warn(
                'Network {}: label "{}" has changed. It may need to be'
                ' recreated.'.format(local.true_name, k)
            )
@@ -276,7 +276,7 @@ class ProjectNetworks(object):
        }
        unused = set(networks) - set(service_networks) - {'default'}
        if unused:
            log.warning(
            log.warn(
                "Some networks were defined but are not used by any service: "
                "{}".format(", ".join(unused)))
        return cls(service_networks, use_networking)
@@ -288,7 +288,7 @@ class ProjectNetworks(object):
        try:
            network.remove()
        except NotFound:
            log.warning("Network %s not found.", network.true_name)
            log.warn("Network %s not found.", network.true_name)

    def initialize(self):
        if not self.use_networking:

@@ -114,13 +114,3 @@ def get_digest_from_push(events):
    if digest:
        return digest
    return None


def read_status(event):
    status = event['status'].lower()
    if 'progressDetail' in event:
        detail = event['progressDetail']
        if 'current' in detail and 'total' in detail:
            percentage = float(detail['current']) / float(detail['total'])
            status = '{} ({:.1%})'.format(status, percentage)
    return status

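read_status turns a raw pull event into a one-line status, appending a percentage when progressDetail carries current/total counters. A worked example (the event payload is a hypothetical sample):

def read_status(event):
    status = event['status'].lower()
    if 'progressDetail' in event:
        detail = event['progressDetail']
        if 'current' in detail and 'total' in detail:
            percentage = float(detail['current']) / float(detail['total'])
            status = '{} ({:.1%})'.format(status, percentage)
    return status

event = {'status': 'Downloading', 'progressDetail': {'current': 512, 'total': 2048}}
print(read_status(event))  # downloading (25.0%)
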
@@ -6,17 +6,13 @@ import logging
import operator
import re
from functools import reduce
from os import path

import enum
import six
from docker.errors import APIError
from docker.errors import ImageNotFound
from docker.errors import NotFound
from docker.utils import version_lt

from . import parallel
from .cli.errors import UserError
from .config import ConfigurationError
from .config.config import V1
from .config.sort_services import get_container_name_from_network_mode
@@ -28,13 +24,11 @@ from .container import Container
from .network import build_networks
from .network import get_networks
from .network import ProjectNetworks
from .progress_stream import read_status
from .service import BuildAction
from .service import ContainerNetworkMode
from .service import ContainerPidMode
from .service import ConvergenceStrategy
from .service import NetworkMode
from .service import NoSuchImageError
from .service import parse_repository_tag
from .service import PidMode
from .service import Service
@@ -44,6 +38,7 @@ from .utils import microseconds_from_time_nano
from .utils import truncate_string
from .volume import ProjectVolumes


log = logging.getLogger(__name__)


@@ -87,11 +82,10 @@ class Project(object):
        return labels

    @classmethod
    def from_config(cls, name, config_data, client, default_platform=None, extra_labels=None):
    def from_config(cls, name, config_data, client, default_platform=None):
        """
        Construct a Project from a config.Config object.
        """
        extra_labels = extra_labels or []
        use_networking = (config_data.version and config_data.version != V1)
        networks = build_networks(name, config_data, client)
        project_networks = ProjectNetworks.from_services(
@@ -141,7 +135,6 @@ class Project(object):
                    pid_mode=pid_mode,
                    platform=service_dict.pop('platform', None),
                    default_platform=default_platform,
                    extra_labels=extra_labels,
                    **service_dict)
            )

@@ -362,27 +355,17 @@ class Project(object):
        return containers

    def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, memory=None,
              build_args=None, gzip=False, parallel_build=False, rm=True, silent=False, cli=False,
              progress=None):
              build_args=None, gzip=False, parallel_build=False):

        services = []
        for service in self.get_services(service_names):
            if service.can_be_built():
                services.append(service)
            elif not silent:
            else:
                log.info('%s uses an image, skipping' % service.name)

        if cli:
            log.warning("Native build is an experimental feature and could change at any time")
            if parallel_build:
                log.warning("Flag '--parallel' is ignored when building with "
                            "COMPOSE_DOCKER_CLI_BUILD=1")
            if gzip:
                log.warning("Flag '--compress' is ignored when building with "
                            "COMPOSE_DOCKER_CLI_BUILD=1")

        def build_service(service):
            service.build(no_cache, pull, force_rm, memory, build_args, gzip, rm, silent, cli, progress)
            service.build(no_cache, pull, force_rm, memory, build_args, gzip)

        if parallel_build:
            _, errors = parallel.parallel_execute(
@@ -527,12 +510,8 @@ class Project(object):
            reset_container_image=False,
            renew_anonymous_volumes=False,
            silent=False,
            cli=False,
    ):

        if cli:
            log.warning("Native build is an experimental feature and could change at any time")

        self.initialize()
        if not ignore_orphans:
            self.find_orphan_containers(remove_orphans)
@@ -545,7 +524,7 @@ class Project(object):
            include_deps=start_deps)

        for svc in services:
            svc.ensure_image_exists(do_build=do_build, silent=silent, cli=cli)
            svc.ensure_image_exists(do_build=do_build, silent=silent)
        plans = self._get_convergence_plans(
            services, strategy, always_recreate_deps=always_recreate_deps)

@@ -608,10 +587,8 @@ class Project(object):
                          ", ".join(updated_dependencies))
                containers_stopped = any(
                    service.containers(stopped=True, filters={'status': ['created', 'exited']}))
                service_has_links = any(service.get_link_names())
                container_has_links = any(c.get('HostConfig.Links') for c in service.containers())
                should_recreate_for_links = service_has_links ^ container_has_links
                if always_recreate_deps or containers_stopped or should_recreate_for_links:
                has_links = any(c.get('HostConfig.Links') for c in service.containers())
                if always_recreate_deps or containers_stopped or not has_links:
                    plan = service.convergence_plan(ConvergenceStrategy.always)
                else:
                    plan = service.convergence_plan(strategy)

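The 1.26 side recreates a dependency only when the declared links and the running container's links disagree (an XOR), where 1.24 recreated whenever the live container simply lacked links. A truth-table sketch of the newer condition:

def should_recreate_for_links(service_has_links, container_has_links):
    # Recreate when the declaration and the live container disagree.
    return service_has_links ^ container_has_links

for declared in (False, True):
    for live in (False, True):
        print(declared, live, '->', should_recreate_for_links(declared, live))
# Only (False, True) and (True, False) trigger recreation.
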
@@ -625,68 +602,46 @@ class Project(object):
|
||||
def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False,
|
||||
include_deps=False):
|
||||
services = self.get_services(service_names, include_deps)
|
||||
msg = not silent and 'Pulling' or None
|
||||
|
||||
if parallel_pull:
|
||||
self.parallel_pull(services, silent=silent)
|
||||
def pull_service(service):
|
||||
strm = service.pull(ignore_pull_failures, True, stream=True)
|
||||
if strm is None: # Attempting to pull service with no `image` key is a no-op
|
||||
return
|
||||
|
||||
else:
|
||||
must_build = []
|
||||
for service in services:
|
||||
try:
|
||||
service.pull(ignore_pull_failures, silent=silent)
|
||||
except (ImageNotFound, NotFound):
|
||||
if service.can_be_built():
|
||||
must_build.append(service.name)
|
||||
else:
|
||||
raise
|
||||
|
||||
if len(must_build):
|
||||
log.warning('Some service image(s) must be built from source by running:\n'
|
||||
' docker-compose build {}'
|
||||
.format(' '.join(must_build)))
|
||||
|
||||
def parallel_pull(self, services, ignore_pull_failures=False, silent=False):
|
||||
msg = 'Pulling' if not silent else None
|
||||
must_build = []
|
||||
|
||||
def pull_service(service):
|
||||
strm = service.pull(ignore_pull_failures, True, stream=True)
|
||||
|
||||
if strm is None: # Attempting to pull service with no `image` key is a no-op
|
||||
return
|
||||
|
||||
try:
|
||||
writer = parallel.get_stream_writer()
|
||||
|
||||
for event in strm:
|
||||
if 'status' not in event:
|
||||
continue
|
||||
status = read_status(event)
|
||||
status = event['status'].lower()
|
||||
if 'progressDetail' in event:
|
||||
detail = event['progressDetail']
|
||||
if 'current' in detail and 'total' in detail:
|
||||
percentage = float(detail['current']) / float(detail['total'])
|
||||
status = '{} ({:.1%})'.format(status, percentage)
|
||||
|
||||
writer.write(
|
||||
msg, service.name, truncate_string(status), lambda s: s
|
||||
)
|
||||
except (ImageNotFound, NotFound):
|
||||
if service.can_be_built():
|
||||
must_build.append(service.name)
|
||||
else:
|
||||
raise
|
||||
|
||||
_, errors = parallel.parallel_execute(
|
||||
services,
|
||||
pull_service,
|
||||
operator.attrgetter('name'),
|
||||
msg,
|
||||
limit=5,
|
||||
)
|
||||
_, errors = parallel.parallel_execute(
|
||||
services,
|
||||
pull_service,
|
||||
operator.attrgetter('name'),
|
||||
msg,
|
||||
limit=5,
|
||||
)
|
||||
if len(errors):
|
||||
combined_errors = '\n'.join([
|
||||
e.decode('utf-8') if isinstance(e, six.binary_type) else e for e in errors.values()
|
||||
])
|
||||
raise ProjectError(combined_errors)
|
||||
|
||||
if len(must_build):
|
||||
log.warning('Some service image(s) must be built from source by running:\n'
|
||||
' docker-compose build {}'
|
||||
.format(' '.join(must_build)))
|
||||
if len(errors):
|
||||
combined_errors = '\n'.join([
|
||||
e.decode('utf-8') if isinstance(e, six.binary_type) else e for e in errors.values()
|
||||
])
|
||||
raise ProjectError(combined_errors)
|
||||
else:
|
||||
for service in services:
|
||||
service.pull(ignore_pull_failures, silent=silent)
|
||||
|
||||
def push(self, service_names=None, ignore_push_failures=False):
unique_images = set()
@@ -731,7 +686,7 @@ class Project(object):

def find_orphan_containers(self, remove_orphans):
def _find():
containers = set(self._labeled_containers() + self._labeled_containers(stopped=True))
containers = self._labeled_containers()
for ctnr in containers:
service_name = ctnr.labels.get(LABEL_SERVICE)
if service_name not in self.service_names:
@@ -742,10 +697,7 @@ class Project(object):
if remove_orphans:
for ctnr in orphans:
log.info('Removing orphan container "{0}"'.format(ctnr.name))
try:
ctnr.kill()
except APIError:
pass
ctnr.kill()
ctnr.remove(force=True)
else:
log.warning(
@@ -773,11 +725,10 @@ class Project(object):

def build_container_operation_with_timeout_func(self, operation, options):
def container_operation_with_timeout(container):
_options = options.copy()
if _options.get('timeout') is None:
if options.get('timeout') is None:
service = self.get_service(container.service)
_options['timeout'] = service.stop_timeout(None)
return getattr(container, operation)(**_options)
options['timeout'] = service.stop_timeout(None)
return getattr(container, operation)(**options)
return container_operation_with_timeout

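The hunk above changes whether `options` is copied before the per-container timeout is filled in; without the copy, the first container's stop timeout can leak into every later call on the shared dict. A hedged, self-contained sketch of the copying variant, with stand-in objects instead of real Compose services:

```
# Illustrative closure pattern only; container/service names are made up.
def build_op_with_timeout(get_timeout, options):
    def op(container, operation):
        _options = options.copy()          # per-call copy, as in the 1.26.x side
        if _options.get('timeout') is None:
            _options['timeout'] = get_timeout(container)
        return (operation, _options)
    return op

op = build_op_with_timeout(lambda c: 10, {'timeout': None})
print(op('db', 'stop'))   # ('stop', {'timeout': 10})
print(op('web', 'stop'))  # the shared options dict stays untouched between calls
```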
@@ -820,13 +771,13 @@ def get_secrets(service, service_secrets, secret_defs):
.format(service=service, secret=secret.source))

if secret_def.get('external'):
log.warning("Service \"{service}\" uses secret \"{secret}\" which is external. "
"External secrets are not available to containers created by "
"docker-compose.".format(service=service, secret=secret.source))
log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
"External secrets are not available to containers created by "
"docker-compose.".format(service=service, secret=secret.source))
continue

if secret.uid or secret.gid or secret.mode:
log.warning(
log.warn(
"Service \"{service}\" uses secret \"{secret}\" with uid, "
"gid, or mode. These fields are not supported by this "
"implementation of the Compose file".format(
@@ -834,104 +785,11 @@ def get_secrets(service, service_secrets, secret_defs):
)
)

secret_file = secret_def.get('file')
if not path.isfile(str(secret_file)):
log.warning(
"Service \"{service}\" uses an undefined secret file \"{secret_file}\", "
"the following file should be created \"{secret_file}\"".format(
service=service, secret_file=secret_file
)
)
secrets.append({'secret': secret, 'file': secret_file})
secrets.append({'secret': secret, 'file': secret_def.get('file')})

return secrets


def get_image_digests(project):
digests = {}
needs_push = set()
needs_pull = set()

for service in project.services:
try:
digests[service.name] = get_image_digest(service)
except NeedsPush as e:
needs_push.add(e.image_name)
except NeedsPull as e:
needs_pull.add(e.service_name)

if needs_push or needs_pull:
raise MissingDigests(needs_push, needs_pull)

return digests


def get_image_digest(service):
if 'image' not in service.options:
raise UserError(
"Service '{s.name}' doesn't define an image tag. An image name is "
"required to generate a proper image digest. Specify an image repo "
"and tag with the 'image' option.".format(s=service))

_, _, separator = parse_repository_tag(service.options['image'])
# Compose file already uses a digest, no lookup required
if separator == '@':
return service.options['image']

digest = get_digest(service)

if digest:
return digest

if 'build' not in service.options:
raise NeedsPull(service.image_name, service.name)

raise NeedsPush(service.image_name)


def get_digest(service):
digest = None
try:
image = service.image()
# TODO: pick a digest based on the image tag if there are multiple
# digests
if image['RepoDigests']:
digest = image['RepoDigests'][0]
except NoSuchImageError:
try:
# Fetch the image digest from the registry
distribution = service.get_image_registry_data()

if distribution['Descriptor']['digest']:
digest = '{image_name}@{digest}'.format(
image_name=service.image_name,
digest=distribution['Descriptor']['digest']
)
except NoSuchImageError:
raise UserError(
"Digest not found for service '{service}'. "
"Repository does not exist or may require 'docker login'"
.format(service=service.name))
return digest

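The digest short-circuit above skips the registry lookup when the image reference already pins a digest with `@`. A hedged sketch of that check; this helper is hypothetical and is not the `parse_repository_tag` Compose actually calls:

```
# "repo@sha256:..." pins a digest; "repo:tag" does not. Only the final
# path segment is inspected so registry host:port is not mistaken for a tag.
def uses_digest(image_ref):
    return '@' in image_ref.rsplit('/', 1)[-1]

print(uses_digest('alpine:3.10'))                # False -> lookup needed
print(uses_digest('alpine@sha256:' + '0' * 64))  # True  -> use as-is
```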
class MissingDigests(Exception):
def __init__(self, needs_push, needs_pull):
self.needs_push = needs_push
self.needs_pull = needs_pull


class NeedsPush(Exception):
def __init__(self, image_name):
self.image_name = image_name


class NeedsPull(Exception):
def __init__(self, image_name, service_name):
self.image_name = image_name
self.service_name = service_name


class NoSuchService(Exception):
def __init__(self, name):
if isinstance(name, six.binary_type):

@@ -2,12 +2,10 @@ from __future__ import absolute_import
from __future__ import unicode_literals

import itertools
import json
import logging
import os
import re
import sys
import tempfile
from collections import namedtuple
from collections import OrderedDict
from operator import attrgetter
@@ -60,15 +58,11 @@ from .utils import parse_bytes
from .utils import parse_seconds_float
from .utils import truncate_id
from .utils import unique_everseen
from compose.cli.utils import binarystr_to_unicode

if six.PY2:
import subprocess32 as subprocess
else:
import subprocess

log = logging.getLogger(__name__)


HOST_CONFIG_KEYS = [
'cap_add',
'cap_drop',
@@ -137,6 +131,7 @@ class NoSuchImageError(Exception):

ServiceName = namedtuple('ServiceName', 'project service number')


ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')


@@ -172,21 +167,20 @@ class BuildAction(enum.Enum):

class Service(object):
def __init__(
self,
name,
client=None,
project='default',
use_networking=False,
links=None,
volumes_from=None,
network_mode=None,
networks=None,
secrets=None,
scale=1,
pid_mode=None,
default_platform=None,
extra_labels=None,
**options
self,
name,
client=None,
project='default',
use_networking=False,
links=None,
volumes_from=None,
network_mode=None,
networks=None,
secrets=None,
scale=None,
pid_mode=None,
default_platform=None,
**options
):
self.name = name
self.client = client
@@ -198,10 +192,9 @@ class Service(object):
self.pid_mode = pid_mode or PidMode(None)
self.networks = networks or {}
self.secrets = secrets or []
self.scale_num = scale
self.scale_num = scale or 1
self.default_platform = default_platform
self.options = options
self.extra_labels = extra_labels or []

def __repr__(self):
return '<Service: {}>'.format(self.name)
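Hedged note on the default-handling difference visible in the `__init__` hunk above: the `scale=None` / `scale or 1` style coerces an explicit `scale=0` to `1`, while a plain `scale=1` default preserves `0`. A quick demonstration:

```
# Illustration only: the two default styles disagree on scale=0.
def old_style(scale=None):
    return scale or 1

def new_style(scale=1):
    return scale

print(old_style(0), new_style(0))  # 1 0
```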
@@ -216,7 +209,7 @@ class Service(object):
for container in self.client.containers(
all=stopped,
filters=filters)])
)
)
if result:
return result

@@ -248,15 +241,15 @@ class Service(object):

def show_scale_warnings(self, desired_num):
if self.custom_container_name and desired_num > 1:
log.warning('The "%s" service is using the custom container name "%s". '
'Docker requires each container to have a unique name. '
'Remove the custom name to scale the service.'
% (self.name, self.custom_container_name))
log.warn('The "%s" service is using the custom container name "%s". '
'Docker requires each container to have a unique name. '
'Remove the custom name to scale the service.'
% (self.name, self.custom_container_name))

if self.specifies_host_port() and desired_num > 1:
log.warning('The "%s" service specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.'
% self.name)
log.warn('The "%s" service specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.'
% self.name)

def scale(self, desired_num, timeout=None):
"""
@@ -344,11 +337,11 @@ class Service(object):
return Container.create(self.client, **container_options)
except APIError as ex:
raise OperationFailedError("Cannot create container for service %s: %s" %
(self.name, binarystr_to_unicode(ex.explanation)))
(self.name, ex.explanation))

def ensure_image_exists(self, do_build=BuildAction.none, silent=False, cli=False):
def ensure_image_exists(self, do_build=BuildAction.none, silent=False):
if self.can_be_built() and do_build == BuildAction.force:
self.build(cli=cli)
self.build()
return

try:
@@ -364,18 +357,12 @@ class Service(object):
if do_build == BuildAction.skip:
raise NeedsBuildError(self)

self.build(cli=cli)
log.warning(
self.build()
log.warn(
"Image for service {} was built because it did not already exist. To "
"rebuild this image you must use `docker-compose build` or "
"`docker-compose up --build`.".format(self.name))

def get_image_registry_data(self):
try:
return self.client.inspect_distribution(self.image_name)
except APIError:
raise NoSuchImageError("Image '{}' not found".format(self.image_name))

def image(self):
try:
return self.client.inspect_image(self.image_name)
@@ -405,8 +392,8 @@ class Service(object):
return ConvergencePlan('start', containers)

if (
strategy is ConvergenceStrategy.always or
self._containers_have_diverged(containers)
strategy is ConvergenceStrategy.always or
self._containers_have_diverged(containers)
):
return ConvergencePlan('recreate', containers)

@@ -483,7 +470,6 @@ class Service(object):
container, timeout=timeout, attach_logs=not detached,
start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
)

containers, errors = parallel_execute(
containers,
recreate,
@@ -625,10 +611,7 @@ class Service(object):
try:
container.start()
except APIError as ex:
expl = binarystr_to_unicode(ex.explanation)
if "driver failed programming external connectivity" in expl:
log.warn("Host is already in use by another container")
raise OperationFailedError("Cannot start service %s: %s" % (self.name, expl))
raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
return container

@property
@@ -697,7 +680,6 @@ class Service(object):
'links': self.get_link_names(),
'net': self.network_mode.id,
'networks': self.networks,
'secrets': self.secrets,
'volumes_from': [
(v.source.name, v.mode)
for v in self.volumes_from if isinstance(v.source, Service)
@@ -708,11 +690,11 @@ class Service(object):
net_name = self.network_mode.service_name
pid_namespace = self.pid_mode.service_name
return (
self.get_linked_service_names() +
self.get_volumes_from_names() +
([net_name] if net_name else []) +
([pid_namespace] if pid_namespace else []) +
list(self.options.get('depends_on', {}).keys())
self.get_linked_service_names() +
self.get_volumes_from_names() +
([net_name] if net_name else []) +
([pid_namespace] if pid_namespace else []) +
list(self.options.get('depends_on', {}).keys())
)

def get_dependency_configs(self):
@@ -902,7 +884,7 @@ class Service(object):

container_options['labels'] = build_container_labels(
container_options.get('labels', {}),
self.labels(one_off=one_off) + self.extra_labels,
self.labels(one_off=one_off),
number,
self.config_hash if add_config_hash else None,
slug
@@ -1061,11 +1043,8 @@ class Service(object):
return [build_spec(secret) for secret in self.secrets]

def build(self, no_cache=False, pull=False, force_rm=False, memory=None, build_args_override=None,
gzip=False, rm=True, silent=False, cli=False, progress=None):
output_stream = open(os.devnull, 'w')
if not silent:
output_stream = sys.stdout
log.info('Building %s' % self.name)
gzip=False):
log.info('Building %s' % self.name)

build_opts = self.options.get('build', {})

@@ -1082,16 +1061,15 @@ class Service(object):
'Impossible to perform platform-targeted builds for API version < 1.35'
)

builder = self.client if not cli else _CLIBuilder(progress)
build_output = builder.build(
build_output = self.client.build(
path=path,
tag=self.image_name,
rm=rm,
rm=True,
forcerm=force_rm,
pull=pull,
nocache=no_cache,
dockerfile=build_opts.get('dockerfile', None),
cache_from=self.get_cache_from(build_opts),
cache_from=build_opts.get('cache_from', None),
labels=build_opts.get('labels', None),
buildargs=build_args,
network_mode=build_opts.get('network', None),
@@ -1107,7 +1085,7 @@ class Service(object):
)

try:
all_events = list(stream_output(build_output, output_stream))
all_events = list(stream_output(build_output, sys.stdout))
except StreamOutputError as e:
raise BuildError(self, six.text_type(e))

@@ -1129,12 +1107,6 @@ class Service(object):

return image_id

def get_cache_from(self, build_opts):
cache_from = build_opts.get('cache_from', None)
if cache_from is not None:
cache_from = [tag for tag in cache_from if tag]
return cache_from

def can_be_built(self):
return 'build' in self.options

@@ -1157,7 +1129,7 @@ class Service(object):
container_name = build_container_name(
self.project, service_name, number, slug,
)
ext_links_origins = [link.split(':')[0] for link in self.options.get('external_links', [])]
ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
if container_name in ext_links_origins:
raise DependencyError(
'Service {0} has a self-referential external link: {1}'.format(
@@ -1344,7 +1316,7 @@ class ServicePidMode(PidMode):
if containers:
return 'container:' + containers[0].id

log.warning(
log.warn(
"Service %s is trying to use reuse the PID namespace "
"of another service that is not running." % (self.service_name)
)
@@ -1407,8 +1379,8 @@ class ServiceNetworkMode(object):
if containers:
return 'container:' + containers[0].id

log.warning("Service %s is trying to use reuse the network stack "
"of another service that is not running." % (self.id))
log.warn("Service %s is trying to use reuse the network stack "
"of another service that is not running." % (self.id))
return None


@@ -1555,11 +1527,11 @@ def warn_on_masked_volume(volumes_option, container_volumes, service):

for volume in volumes_option:
if (
volume.external and
volume.internal in container_volumes and
container_volumes.get(volume.internal) != volume.external
volume.external and
volume.internal in container_volumes and
container_volumes.get(volume.internal) != volume.external
):
log.warning((
log.warn((
"Service \"{service}\" is using volume \"{volume}\" from the "
"previous container. Host mapping \"{host_path}\" has no effect. "
"Remove the existing containers (with `docker-compose rm {service}`) "
@@ -1604,7 +1576,6 @@ def build_mount(mount_spec):
read_only=mount_spec.read_only, consistency=mount_spec.consistency, **kwargs
)


# Labels


@@ -1659,7 +1630,6 @@ def format_environment(environment):
if isinstance(value, six.binary_type):
value = value.decode('utf-8')
return '{key}={value}'.format(key=key, value=value)

return [format_env(*item) for item in environment.items()]


@@ -1716,140 +1686,3 @@ def rewrite_build_path(path):
path = WINDOWS_LONGPATH_PREFIX + os.path.normpath(path)

return path


class _CLIBuilder(object):
def __init__(self, progress):
self._progress = progress

def build(self, path, tag=None, quiet=False, fileobj=None,
nocache=False, rm=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
decode=False, buildargs=None, gzip=False, shmsize=None,
labels=None, cache_from=None, target=None, network_mode=None,
squash=None, extra_hosts=None, platform=None, isolation=None,
use_config_proxy=True):
"""
Args:
path (str): Path to the directory containing the Dockerfile
buildargs (dict): A dictionary of build arguments
cache_from (:py:class:`list`): A list of images used for build
cache resolution
container_limits (dict): A dictionary of limits applied to each
container created by the build process. Valid keys:
- memory (int): set memory limit for build
- memswap (int): Total memory (memory + swap), -1 to disable
swap
- cpushares (int): CPU shares (relative weight)
- cpusetcpus (str): CPUs in which to allow execution, e.g.,
``"0-3"``, ``"0,1"``
custom_context (bool): Optional if using ``fileobj``
decode (bool): If set to ``True``, the returned stream will be
decoded into dicts on the fly. Default ``False``
dockerfile (str): path within the build context to the Dockerfile
encoding (str): The encoding for a stream. Set to ``gzip`` for
compressing
extra_hosts (dict): Extra hosts to add to /etc/hosts in building
containers, as a mapping of hostname to IP address.
fileobj: A file object to use as the Dockerfile. (Or a file-like
object)
forcerm (bool): Always remove intermediate containers, even after
unsuccessful builds
isolation (str): Isolation technology used during build.
Default: `None`.
labels (dict): A dictionary of labels to set on the image
network_mode (str): networking mode for the run commands during
build
nocache (bool): Don't use the cache when set to ``True``
platform (str): Platform in the format ``os[/arch[/variant]]``
pull (bool): Downloads any updates to the FROM image in Dockerfiles
quiet (bool): Whether to return the status
rm (bool): Remove intermediate containers. The ``docker build``
command now defaults to ``--rm=true``, but we have kept the old
default of `False` to preserve backward compatibility
shmsize (int): Size of `/dev/shm` in bytes. The size must be
greater than 0. If omitted the system uses 64MB
squash (bool): Squash the resulting images layers into a
single layer.
tag (str): A tag to add to the final image
target (str): Name of the build-stage to build in a multi-stage
Dockerfile
timeout (int): HTTP timeout
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
Returns:
A generator for the build output.
"""
if dockerfile:
dockerfile = os.path.join(path, dockerfile)
iidfile = tempfile.mktemp()

command_builder = _CommandBuilder()
command_builder.add_params("--build-arg", buildargs)
command_builder.add_list("--cache-from", cache_from)
command_builder.add_arg("--file", dockerfile)
command_builder.add_flag("--force-rm", forcerm)
command_builder.add_params("--label", labels)
command_builder.add_arg("--memory", container_limits.get("memory"))
command_builder.add_flag("--no-cache", nocache)
command_builder.add_arg("--progress", self._progress)
command_builder.add_flag("--pull", pull)
command_builder.add_arg("--tag", tag)
command_builder.add_arg("--target", target)
command_builder.add_arg("--iidfile", iidfile)
args = command_builder.build([path])

magic_word = "Successfully built "
appear = False
with subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True) as p:
while True:
line = p.stdout.readline()
if not line:
break
# Fix non ascii chars on Python2. To remove when #6890 is complete.
if six.PY2:
magic_word = str(magic_word)
if line.startswith(magic_word):
appear = True
yield json.dumps({"stream": line})

with open(iidfile) as f:
line = f.readline()
image_id = line.split(":")[1].strip()
os.remove(iidfile)

# In case of `DOCKER_BUILDKIT=1`
# there is no success message already present in the output.
# Since that's the way `Service::build` gets the `image_id`
# it has to be added `manually`
if not appear:
yield json.dumps({"stream": "{}{}\n".format(magic_word, image_id)})


class _CommandBuilder(object):
def __init__(self):
self._args = ["docker", "build"]

def add_arg(self, name, value):
if value:
self._args.extend([name, str(value)])

def add_flag(self, name, flag):
if flag:
self._args.extend([name])

def add_params(self, name, params):
if params:
for key, val in params.items():
self._args.extend([name, "{}={}".format(key, val)])

def add_list(self, name, values):
if values:
for val in values:
self._args.extend([name, val])

def build(self, args):
return self._args + args
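A usage sketch for the `_CommandBuilder` above; the class is re-declared minimally here so the snippet runs on its own, and the argument values are made up for illustration:

```
class CommandBuilder(object):  # trimmed copy of _CommandBuilder above
    def __init__(self):
        self._args = ["docker", "build"]

    def add_arg(self, name, value):
        if value:
            self._args.extend([name, str(value)])

    def add_flag(self, name, flag):
        if flag:
            self._args.extend([name])

    def add_params(self, name, params):
        if params:
            for key, val in params.items():
                self._args.extend([name, "{}={}".format(key, val)])

    def build(self, args):
        return self._args + args

builder = CommandBuilder()
builder.add_params("--build-arg", {"HTTP_PROXY": "http://proxy:3128"})
builder.add_arg("--tag", "myapp:latest")   # hypothetical tag
builder.add_flag("--no-cache", True)
builder.add_flag("--pull", False)          # falsy flag is omitted entirely
print(builder.build(["."]))
# ['docker', 'build', '--build-arg', 'HTTP_PROXY=http://proxy:3128',
#  '--tag', 'myapp:latest', '--no-cache', '.']
```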
@@ -127,7 +127,7 @@ class ProjectVolumes(object):
try:
volume.remove()
except NotFound:
log.warning("Volume %s not found.", volume.true_name)
log.warn("Volume %s not found.", volume.true_name)

def initialize(self):
try:
@@ -209,7 +209,7 @@ def check_remote_volume_config(remote, local):
if k.startswith('com.docker.'): # We are only interested in user-specified labels
continue
if remote_labels.get(k) != local_labels.get(k):
log.warning(
log.warn(
'Volume {}: label "{}" has changed. It may need to be'
' recreated.'.format(local.name, k)
)

@@ -110,14 +110,11 @@ _docker_compose_build() {
__docker_compose_nospace
return
;;
--memory|-m)
return
;;
esac

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory -m --no-cache --no-rm --pull --parallel -q --quiet" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory --no-cache --pull --parallel" -- "$cur" ) )
;;
*)
__docker_compose_complete_services --filter source=build
@@ -126,6 +123,18 @@ _docker_compose_build() {
}


_docker_compose_bundle() {
case "$prev" in
--output|-o)
_filedir
return
;;
esac

COMPREPLY=( $( compgen -W "--push-images --help --output -o" -- "$cur" ) )
}


_docker_compose_config() {
case "$prev" in
--hash)
@@ -138,7 +147,7 @@ _docker_compose_config() {
;;
esac

COMPREPLY=( $( compgen -W "--hash --help --no-interpolate --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--hash --help --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) )
}


@@ -172,10 +181,6 @@ _docker_compose_docker_compose() {
_filedir -d
return
;;
--env-file)
_filedir
return
;;
$(__docker_compose_to_extglob "$daemon_options_with_args") )
return
;;
@@ -545,7 +550,7 @@ _docker_compose_up() {

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--abort-on-container-exit --always-recreate-deps --attach-dependencies --build -d --detach --exit-code-from --force-recreate --help --no-build --no-color --no-deps --no-recreate --no-start --renew-anon-volumes -V --remove-orphans --scale --timeout -t" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--abort-on-container-exit --always-recreate-deps --build -d --detach --exit-code-from --force-recreate --help --no-build --no-color --no-deps --no-recreate --no-start --renew-anon-volumes -V --remove-orphans --scale --timeout -t" -- "$cur" ) )
;;
*)
__docker_compose_complete_services
@@ -569,6 +574,7 @@ _docker_compose() {

local commands=(
build
bundle
config
create
down
@@ -603,7 +609,6 @@ _docker_compose() {
--tlsverify
"
local daemon_options_with_args="
--env-file
--file -f
--host -H
--project-directory

@@ -12,7 +12,6 @@ end

complete -c docker-compose -s f -l file -r -d 'Specify an alternate compose file'
complete -c docker-compose -s p -l project-name -x -d 'Specify an alternate project name'
complete -c docker-compose -l env-file -r -d 'Specify an alternate environment file (default: .env)'
complete -c docker-compose -l verbose -d 'Show more output'
complete -c docker-compose -s H -l host -x -d 'Daemon socket to connect to'
complete -c docker-compose -l tls -d 'Use TLS; implied by --tlsverify'

@@ -113,7 +113,6 @@ __docker-compose_subcommand() {
$opts_help \
"*--build-arg=[Set build-time variables for one service.]:<varname>=<value>: " \
'--force-rm[Always remove intermediate containers.]' \
'(--quiet -q)'{--quiet,-q}'[Curb build output]' \
'(--memory -m)'{--memory,-m}'[Memory limit for the build container.]' \
'--no-cache[Do not use cache when building the image.]' \
'--pull[Always attempt to pull a newer version of the image.]' \
@@ -121,6 +120,12 @@ __docker-compose_subcommand() {
'--parallel[Build images in parallel.]' \
'*:services:__docker-compose_services_from_build' && ret=0
;;
(bundle)
_arguments \
$opts_help \
'--push-images[Automatically push images for any services which have a `build` option specified.]' \
'(--output -o)'{--output,-o}'[Path to write the bundle file to. Defaults to "<project name>.dab".]:file:_files' && ret=0
;;
(config)
_arguments \
$opts_help \
@@ -284,7 +289,7 @@ __docker-compose_subcommand() {
(up)
_arguments \
$opts_help \
'(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names. Incompatible with --abort-on-container-exit and --attach-dependencies.]' \
'(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names. Incompatible with --abort-on-container-exit.]' \
$opts_no_color \
$opts_no_deps \
$opts_force_recreate \
@@ -292,7 +297,6 @@ __docker-compose_subcommand() {
$opts_no_build \
"(--no-build)--build[Build images before starting containers.]" \
"(-d)--abort-on-container-exit[Stops all containers if any container was stopped. Incompatible with -d.]" \
"(-d)--attach-dependencies[Attach to dependent containers. Incompatible with -d.]" \
'(-t --timeout)'{-t,--timeout}"[Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)]:seconds: " \
'--scale[SERVICE=NUM Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.]:service scale SERVICE=NUM: ' \
'--exit-code-from=[Return the exit code of the selected service container. Implies --abort-on-container-exit]:service:__docker-compose_services' \
@@ -336,7 +340,6 @@ _docker-compose() {
'(- :)'{-h,--help}'[Get help]' \
'*'{-f,--file}"[${file_description}]:file:_files -g '*.yml'" \
'(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
'--env-file[Specify an alternate environment file (default: .env)]:env-file:_files' \
"--compatibility[If set, Compose will attempt to convert keys in v3 files to their non-Swarm equivalent]" \
'(- :)'{-v,--version}'[Print version and exit]' \
'--verbose[Show more output]' \
@@ -355,7 +358,6 @@ _docker-compose() {
local -a relevant_compose_flags relevant_compose_repeatable_flags relevant_docker_flags compose_options docker_options

relevant_compose_flags=(
"--env-file"
"--file" "-f"
"--host" "-H"
"--project-name" "-p"

@@ -44,7 +44,7 @@ def warn_for_links(name, service):
links = service.get('links')
if links:
example_service = links[0].partition(':')[0]
log.warning(
log.warn(
"Service {name} has links, which no longer create environment "
"variables such as {example_service_upper}_PORT. "
"If you are using those in your application code, you should "
@@ -57,7 +57,7 @@ def warn_for_links(name, service):
def warn_for_external_links(name, service):
external_links = service.get('external_links')
if external_links:
log.warning(
log.warn(
"Service {name} has external_links: {ext}, which now work "
"slightly differently. In particular, two containers must be "
"connected to at least one network in common in order to "
@@ -107,7 +107,7 @@ def rewrite_volumes_from(service, service_names):
def create_volumes_section(data):
named_volumes = get_named_volumes(data['services'])
if named_volumes:
log.warning(
log.warn(
"Named volumes ({names}) must be explicitly declared. Creating a "
"'volumes' section with declarations.\n\n"
"For backwards-compatibility, they've been declared as external. "

@@ -1,20 +0,0 @@
#!/bin/sh
set -e

# first arg is `-f` or `--some-option`
if [ "${1#-}" != "$1" ]; then
set -- docker-compose "$@"
fi

# if our command is a valid Docker subcommand, let's invoke it through Docker instead
# (this allows for "docker run docker ps", etc)
if docker-compose help "$1" > /dev/null 2>&1; then
set -- docker-compose "$@"
fi

# if we have "--link some-docker:docker" and not DOCKER_HOST, let's set DOCKER_HOST automatically
if [ -z "$DOCKER_HOST" -a "$DOCKER_PORT_2375_TCP" ]; then
export DOCKER_HOST='tcp://docker:2375'
fi

exec "$@"
@@ -87,11 +87,6 @@ exe = EXE(pyz,
'compose/config/config_schema_v3.7.json',
'DATA'
),
(
'compose/config/config_schema_v3.8.json',
'compose/config/config_schema_v3.8.json',
'DATA'
),
(
'compose/GITSHA',
'compose/GITSHA',

@@ -1,113 +0,0 @@
# -*- mode: python ; coding: utf-8 -*-

block_cipher = None

a = Analysis(['bin/docker-compose'],
pathex=['.'],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
cipher=block_cipher)

pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)

exe = EXE(pyz,
a.scripts,
exclude_binaries=True,
name='docker-compose',
debug=False,
strip=False,
upx=True,
console=True,
bootloader_ignore_signals=True)
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
[
(
'compose/config/config_schema_v1.json',
'compose/config/config_schema_v1.json',
'DATA'
),
(
'compose/config/config_schema_v2.0.json',
'compose/config/config_schema_v2.0.json',
'DATA'
),
(
'compose/config/config_schema_v2.1.json',
'compose/config/config_schema_v2.1.json',
'DATA'
),
(
'compose/config/config_schema_v2.2.json',
'compose/config/config_schema_v2.2.json',
'DATA'
),
(
'compose/config/config_schema_v2.3.json',
'compose/config/config_schema_v2.3.json',
'DATA'
),
(
'compose/config/config_schema_v2.4.json',
'compose/config/config_schema_v2.4.json',
'DATA'
),
(
'compose/config/config_schema_v3.0.json',
'compose/config/config_schema_v3.0.json',
'DATA'
),
(
'compose/config/config_schema_v3.1.json',
'compose/config/config_schema_v3.1.json',
'DATA'
),
(
'compose/config/config_schema_v3.2.json',
'compose/config/config_schema_v3.2.json',
'DATA'
),
(
'compose/config/config_schema_v3.3.json',
'compose/config/config_schema_v3.3.json',
'DATA'
),
(
'compose/config/config_schema_v3.4.json',
'compose/config/config_schema_v3.4.json',
'DATA'
),
(
'compose/config/config_schema_v3.5.json',
'compose/config/config_schema_v3.5.json',
'DATA'
),
(
'compose/config/config_schema_v3.6.json',
'compose/config/config_schema_v3.6.json',
'DATA'
),
(
'compose/config/config_schema_v3.7.json',
'compose/config/config_schema_v3.7.json',
'DATA'
),
(
'compose/config/config_schema_v3.8.json',
'compose/config/config_schema_v3.8.json',
'DATA'
),
(
'compose/GITSHA',
'compose/GITSHA',
'DATA'
)
],
strip=False,
upx=True,
upx_exclude=[],
name='docker-compose-Darwin-x86_64')
@@ -6,9 +6,11 @@ The documentation for Compose has been merged into
The docs for Compose are now here:
https://github.com/docker/docker.github.io/tree/master/compose

Please submit pull requests for unreleased features/changes on the `master` branch (https://github.com/docker/docker.github.io/tree/master), please prefix the PR title with `[WIP]` to indicate that it relates to an unreleased change.
Please submit pull requests for unpublished features on the `vnext-compose` branch (https://github.com/docker/docker.github.io/tree/vnext-compose).

If you submit a PR to this codebase that has a docs impact, create a second docs PR on `docker.github.io`. Use the docs PR template provided.
If you submit a PR to this codebase that has a docs impact, create a second docs PR on `docker.github.io`. Use the docs PR template provided (coming soon - watch this space).

PRs for typos, additional information, etc. for already-published features should be labeled as `okay-to-publish` (we are still settling on a naming convention, will provide a label soon). You can submit these PRs either to `vnext-compose` or directly to `master` on `docker.github.io`

As always, the docs remain open-source and we appreciate your feedback and
pull requests!

@@ -1,13 +0,0 @@
#!/bin/sh

# From http://wiki.musl-libc.org/wiki/FAQ#Q:_where_is_ldd_.3F
#
# Musl's dynlinker comes with ldd functionality built in. just create a
# symlink from ld-musl-$ARCH.so to /bin/ldd. If the dynlinker was started
# as "ldd", it will detect that and print the appropriate DSO information.
#
# Instead, this string replaced "ldd" with the package so that pyinstaller
# can find the actual lib.
exec /usr/bin/ldd "$@" | \
sed -r 's/([^[:space:]]+) => ldd/\1 => \/lib\/\1/g' | \
sed -r 's/ldd \(.*\)//g'
@@ -1 +1 @@
pyinstaller==3.6
pyinstaller==3.3.1

@@ -1,9 +1,5 @@
Click==7.0
coverage==5.0.3
ddt==1.2.2
flake8==3.7.9
gitpython==2.1.15
mock==3.0.5
pytest==5.3.4; python_version >= '3.5'
pytest==4.6.5; python_version < '3.5'
pytest-cov==2.8.1
coverage==4.4.2
flake8==3.5.0
mock==2.0.0
pytest==3.6.3
pytest-cov==2.5.1

@@ -1,29 +1,24 @@
backports.shutil_get_terminal_size==1.0.0
backports.ssl-match-hostname==3.5.0.1; python_version < '3'
cached-property==1.5.1
certifi==2020.4.5.1
cached-property==1.3.0
certifi==2017.4.17
chardet==3.0.4
colorama==0.4.3; sys_platform == 'win32'
distro==1.5.0
docker==4.2.2
colorama==0.4.0; sys_platform == 'win32'
docker==3.7.3
docker-pycreds==0.4.0
dockerpty==0.4.1
docopt==0.6.2
enum34==1.1.6; python_version < '3.4'
functools32==3.2.3.post2; python_version < '3.2'
idna==2.8
ipaddress==1.0.23
jsonschema==3.2.0
paramiko==2.7.1
idna==2.5
ipaddress==1.0.18
jsonschema==2.6.0
paramiko==2.4.2
pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
PySocks==1.7.1
python-dotenv==0.13.0
PyYAML==5.3
requests==2.22.0
six==1.12.0
subprocess32==3.5.4; python_version < '3.2'
texttable==1.6.2
urllib3==1.25.7; python_version == '3.3'
wcwidth==0.1.9
websocket-client==0.57.0
PySocks==1.6.7
PyYAML==4.2b1
requests==2.20.0
six==1.10.0
texttable==0.9.1
urllib3==1.21.1; python_version == '3.3'
websocket-client==0.56.0

@@ -7,14 +7,11 @@ if [ -z "$1" ]; then
exit 1
fi

TAG="$1"
TAG=$1

VERSION="$(python setup.py --version)"

DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA
./script/build/write-git-sha
python setup.py sdist bdist_wheel

docker build \
--build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}" \
-t "${TAG}" .
./script/build/linux
docker build -t docker/compose:$TAG -f Dockerfile.run .

@@ -4,15 +4,10 @@

./script/clean

DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
TAG="docker/compose:tmp-glibc-linux-binary-${DOCKER_COMPOSE_GITSHA}"

docker build -t "${TAG}" . \
--build-arg BUILD_PLATFORM=debian \
--build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}"
TMP_CONTAINER=$(docker create "${TAG}")
mkdir -p dist
ARCH=$(uname -m)
docker cp "${TMP_CONTAINER}":/usr/local/bin/docker-compose "dist/docker-compose-Linux-${ARCH}"
docker container rm -f "${TMP_CONTAINER}"
docker image rm -f "${TAG}"
TAG="docker-compose"
docker build -t "$TAG" .
docker run \
--rm --entrypoint="script/build/linux-entrypoint" \
-v $(pwd)/dist:/code/dist \
-v $(pwd)/.git:/code/.git \
"$TAG"

@@ -2,39 +2,14 @@

set -ex

CODE_PATH=/code
VENV="${CODE_PATH}"/.tox/py37
TARGET=dist/docker-compose-$(uname -s)-$(uname -m)
VENV=/code/.tox/py36

cd "${CODE_PATH}"
mkdir -p dist
chmod 777 dist
mkdir -p `pwd`/dist
chmod 777 `pwd`/dist

"${VENV}"/bin/pip3 install -q -r requirements-build.txt

# TODO(ulyssessouza) To check if really needed
if [ -z "${DOCKER_COMPOSE_GITSHA}" ]; then
DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
fi
echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA

export PATH="${CODE_PATH}/pyinstaller:${PATH}"

if [ ! -z "${BUILD_BOOTLOADER}" ]; then
# Build bootloader for alpine; develop is the main branch
git clone --single-branch --branch develop https://github.com/pyinstaller/pyinstaller.git /tmp/pyinstaller
cd /tmp/pyinstaller/bootloader
# Checkout commit corresponding to version in requirements-build
git checkout v3.6
"${VENV}"/bin/python3 ./waf configure --no-lsb all
"${VENV}"/bin/pip3 install ..
cd "${CODE_PATH}"
rm -Rf /tmp/pyinstaller
else
echo "NOT compiling bootloader!!!"
fi

"${VENV}"/bin/pyinstaller --exclude-module pycrypto --exclude-module PyInstaller docker-compose.spec
ls -la dist/
ldd dist/docker-compose
mv dist/docker-compose /usr/local/bin
docker-compose version
$VENV/bin/pip install -q -r requirements-build.txt
./script/build/write-git-sha
su -c "$VENV/bin/pyinstaller docker-compose.spec" user
mv dist/docker-compose $TARGET
$TARGET version

@@ -5,20 +5,11 @@ TOOLCHAIN_PATH="$(realpath $(dirname $0)/../../build/toolchain)"

rm -rf venv

virtualenv -p "${TOOLCHAIN_PATH}"/bin/python3 venv
virtualenv -p ${TOOLCHAIN_PATH}/bin/python3 venv
venv/bin/pip install -r requirements.txt
venv/bin/pip install -r requirements-build.txt
venv/bin/pip install --no-deps .
DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA

# Build as a folder for macOS Catalina.
venv/bin/pyinstaller docker-compose_darwin.spec
dist/docker-compose-Darwin-x86_64/docker-compose version
(cd dist/docker-compose-Darwin-x86_64/ && tar zcvf ../docker-compose-Darwin-x86_64.tgz .)
rm -rf dist/docker-compose-Darwin-x86_64

# Build static binary for legacy.
./script/build/write-git-sha
venv/bin/pyinstaller docker-compose.spec
mv dist/docker-compose dist/docker-compose-Darwin-x86_64
dist/docker-compose-Darwin-x86_64 version

@@ -7,12 +7,11 @@ if [ -z "$1" ]; then
exit 1
fi

TAG="$1"
IMAGE="docker/compose-tests"
TAG=$1

DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
docker build -t "${IMAGE}:${TAG}" . \
--target build \
--build-arg BUILD_PLATFORM="debian" \
--build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}"
docker tag "${IMAGE}":"${TAG}" "${IMAGE}":latest
docker build -t docker-compose-tests:tmp .
ctnr_id=$(docker create --entrypoint=tox docker-compose-tests:tmp)
docker commit $ctnr_id docker/compose-tests:latest
docker tag docker/compose-tests:latest docker/compose-tests:$TAG
docker rm -f $ctnr_id
docker rmi -f docker-compose-tests:tmp

@@ -6,17 +6,17 @@
#
# http://git-scm.com/download/win
#
# 2. Install Python 3.7.x:
# 2. Install Python 3.6.4:
#
# https://www.python.org/downloads/
#
# 3. Append ";C:\Python37;C:\Python37\Scripts" to the "Path" environment variable:
# 3. Append ";C:\Python36;C:\Python36\Scripts" to the "Path" environment variable:
#
# https://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/sysdm_advancd_environmnt_addchange_variable.mspx?mfr=true
#
# 4. In Powershell, run the following commands:
#
# $ pip install 'virtualenv==16.2.0'
# $ pip install 'virtualenv>=15.1.0'
# $ Set-ExecutionPolicy -Scope CurrentUser RemoteSigned
#
# 5. Clone the repository:
@@ -39,7 +39,7 @@ if (Test-Path venv) {
Get-ChildItem -Recurse -Include *.pyc | foreach ($_) { Remove-Item $_.FullName }

# Create virtualenv
virtualenv -p C:\Python37\python.exe .\venv
virtualenv .\venv

# pip and pyinstaller generate lots of warnings, so we need to ignore them
$ErrorActionPreference = "Continue"

@@ -9,4 +9,4 @@ if [[ "${?}" != "0" ]]; then
echo "Couldn't get revision of the git repository. Setting to 'unknown' instead"
DOCKER_COMPOSE_GITSHA="unknown"
fi
echo "${DOCKER_COMPOSE_GITSHA}"
echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA

@@ -1,5 +1,7 @@
#!/bin/bash

set -x

curl -f -u$BINTRAY_USERNAME:$BINTRAY_API_KEY -X GET \
https://api.bintray.com/repos/docker-compose/${CIRCLE_BRANCH}

@@ -25,11 +27,3 @@ curl -f -T dist/docker-compose-${OS_NAME}-x86_64 -u$BINTRAY_USERNAME:$BINTRAY_AP
-H "X-Bintray-Package: ${PKG_NAME}" -H "X-Bintray-Version: $CIRCLE_BRANCH" \
-H "X-Bintray-Override: 1" -H "X-Bintray-Publish: 1" -X PUT \
https://api.bintray.com/content/docker-compose/${CIRCLE_BRANCH}/docker-compose-${OS_NAME}-x86_64 || exit 1

# Upload folder format of docker-compose for macOS in addition to binary.
if [ "${OS_NAME}" == "Darwin" ]; then
curl -f -T dist/docker-compose-${OS_NAME}-x86_64.tgz -u$BINTRAY_USERNAME:$BINTRAY_API_KEY \
-H "X-Bintray-Package: ${PKG_NAME}" -H "X-Bintray-Version: $CIRCLE_BRANCH" \
-H "X-Bintray-Override: 1" -H "X-Bintray-Publish: 1" -X PUT \
https://api.bintray.com/content/docker-compose/${CIRCLE_BRANCH}/docker-compose-${OS_NAME}-x86_64.tgz || exit 1
fi

@@ -1,18 +1,199 @@
# Release HOWTO

The release process is fully automated by `Release.Jenkinsfile`.
This file describes the process of making a public release of `docker-compose`.
Please read it carefully before proceeding!

## Usage
## Prerequisites

1. In the appropriate branch, run `./scripts/release/release tag <version>`
The following things are required to bring a release to a successful conclusion

By appropriate, we mean for a version `1.26.0` or `1.26.0-rc1` you should run the script in the `1.26.x` branch.
### Local Docker engine (Linux Containers)

The script should check the above then ask for changelog modifications.
The release script builds images that will be part of the release.

After the executions, you should have a commit with the proper bumps for `docker-compose version` and `run.sh`
### Docker Hub account

2. Run `git push --tags upstream <version_branch>`
This should trigger a new CI build on the new tag. When the CI finishes with the tests and builds a new draft release would be available on github's releases page.
You should be logged into a Docker Hub account that allows pushing to the
following repositories:

3. Check and confirm the release on github's release page.
- docker/compose
- docker/compose-tests

### Python

The release script is written in Python and requires Python 3.3 at minimum.

### A Github account and Github API token

Your Github account needs to have write access on the `docker/compose` repo.
To generate a Github token, head over to the
[Personal access tokens](https://github.com/settings/tokens) page in your
Github settings and select "Generate new token". Your token should include
(at minimum) the following scopes:

- `repo:status`
- `public_repo`

This API token should be exposed to the release script through the
`GITHUB_TOKEN` environment variable.

### A Bintray account and Bintray API key

Your Bintray account will need to be an admin member of the
[docker-compose organization](https://bintray.com/docker-compose).
Additionally, you should generate a personal API key. To do so, click your
username in the top-right hand corner and select "Edit profile" ; on the new
page, select "API key" in the left-side menu.

This API key should be exposed to the release script through the
`BINTRAY_TOKEN` environment variable.

### A PyPi account

Said account needs to be a member of the maintainers group for the
[`docker-compose` project](https://pypi.org/project/docker-compose/).

Moreover, the `~/.pypirc` file should exist on your host and contain the
relevant pypi credentials.

The following is a sample `.pypirc` provided as a guideline:

```
[distutils]
index-servers =
pypi

[pypi]
username = user
password = pass
```

## Start a feature release

A feature release is a release that includes all changes present in the
`master` branch when initiated. It's typically versioned `X.Y.0-rc1`, where
Y is the minor version of the previous release incremented by one. A series
of one or more Release Candidates (RCs) should be made available to the public
to find and squash potential bugs.

From the root of the Compose repository, run the following command:
```
./script/release/release.sh -b <BINTRAY_USERNAME> start X.Y.0-rc1
```

After a short initialization period, the script will invite you to edit the
`CHANGELOG.md` file. Do so by being careful to respect the same format as
previous releases. Once done, the script will display a `diff` of the staged
changes for the bump commit. Once you validate these, a bump commit will be
created on the newly created release branch and pushed remotely.

The release tool then waits for the CI to conclude before proceeding.
If failures are reported, the release will be aborted until these are fixed.
Please refer to the "Resume a draft release" section below for more details.

Once all resources have been prepared, the release script will exit with a
message resembling this one:

```
You're almost done! Please verify that everything is in order and you are ready
to make the release public, then run the following command:
./script/release/release.sh -b user finalize X.Y.0-rc1
```

Once you are ready to finalize the release (making binaries and other versioned
assets public), proceed to the "Finalize a release" section of this guide.

## Start a patch release

A patch release is a release that builds off a previous release with discrete
additions. This can be an RC release after RC1 (`X.Y.0-rcZ`, `Z > 1`), a GA release
based off the final RC (`X.Y.0`), or a bugfix release based off a previous
GA release (`X.Y.Z`, `Z > 0`).

From the root of the Compose repository, run the following command:
```
./script/release/release.sh -b <BINTRAY_USERNAME> start --patch=BASE_VERSION RELEASE_VERSION
```

The process of starting a patch release is identical to starting a feature
release except for one difference ; at the beginning, the script will ask for
PR numbers you wish to cherry-pick into the release. These numbers should
correspond to existing PRs on the docker/compose repository. Multiple numbers
should be separated by whitespace.

Once you are ready to finalize the release (making binaries and other versioned
assets public), proceed to the "Finalize a release" section of this guide.

## Finalize a release

Once you're ready to make your release public, you may execute the following
command from the root of the Compose repository:
```
./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEASE_VERSION
```

Note that this command will create and publish versioned assets to the public.
As a result, it can not be reverted. The command will perform some basic
sanity checks before doing so, but it is your responsibility to ensure
everything is in order before pushing the button.

After the command exits, you should make sure:

- The `docker/compose:VERSION` image is available on Docker Hub and functional
- The `pip install -U docker-compose==VERSION` command correctly installs the
specified version
- The install command on the Github release page installs the new release

## Resume a draft release

"Resuming" a release lets you address the following situations occurring before
a release is made final:

- Cherry-pick additional PRs to include in the release
- Resume a release that was aborted because of CI failures after they've been
addressed
- Rebuild / redownload assets after manual changes have been made to the
release branch
- etc.

From the root of the Compose repository, run the following command:
```
./script/release/release.sh -b <BINTRAY_USERNAME> resume RELEASE_VERSION
```

The release tool will attempt to determine what steps it's already been through
for the specified release and pick up where it left off. Some steps are
executed again no matter what as it's assumed they'll produce different
results, like building images or downloading binaries.

## Cancel a draft release

If issues snuck into your release branch, it is sometimes easier to start from
scratch. Before a release has been finalized, it is possible to cancel it using
the following command:
```
./script/release/release.sh -b <BINTRAY_USERNAME> cancel RELEASE_VERSION
```

This will remove the release branch with this release (locally and remotely),
close the associated PR, remove the release page draft on Github and delete
the Bintray repository for it, allowing you to start fresh.

## Manual operations

Some common, release-related operations are not covered by this tool and should
be handled manually by the operator:

- After any release:
- Announce new release on Slack
- After a GA release:
- Close the release milestone
- Merge back `CHANGELOG.md` changes from the `release` branch into `master`
- Bump the version in `compose/__init__.py` to the *next* minor version
number with `dev` appended. For example, if you just released `1.4.0`,
update it to `1.5.0dev`

## Advanced options

You can consult the full list of options for the release tool by executing
`./script/release/release.sh --help`.

@@ -1,7 +0,0 @@
from __future__ import absolute_import
from __future__ import unicode_literals

import os


REPO_ROOT = os.path.join(os.path.dirname(__file__), '..', '..')
@@ -1,42 +0,0 @@
#!/bin/bash

set -e
set -x

## Usage :
## changelog PREVIOUS_TAG..HEAD

# configure refs so we get pull-requests metadata
git config --add remote.origin.fetch +refs/pull/*/head:refs/remotes/origin/pull/*
git fetch origin

RANGE=${1:-"$(git describe --tags --abbrev=0 HEAD^)..HEAD"}
echo "Generate changelog for range ${RANGE}"
echo

pullrequests() {
    for commit in $(git log ${RANGE} --format='format:%H'); do
        # Get the oldest remotes/origin/pull/* branch to include this commit, i.e. the one to introduce it
        git branch -a --sort=committerdate --contains $commit --list 'origin/pull/*' | head -1 | cut -d'/' -f4
    done
}

changes=$(pullrequests | uniq)

echo "pull requests merged within range:"
echo $changes

echo '#Features' > FEATURES.md
echo '#Bugs' > BUGS.md
for pr in $changes; do
    curl -fs -H "Authorization: token ${GITHUB_TOKEN}" https://api.github.com/repos/docker/compose/pulls/${pr} -o PR.json

    cat PR.json | jq -r ' select( .labels[].name | contains("kind/feature") ) | "- "+.title' >> FEATURES.md
    cat PR.json | jq -r ' select( .labels[].name | contains("kind/bug") ) | "- "+.title' >> BUGS.md
done

echo ${TAG_NAME} > CHANGELOG.md
echo >> CHANGELOG.md
cat FEATURES.md >> CHANGELOG.md
echo >> CHANGELOG.md
cat BUGS.md >> CHANGELOG.md
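A hypothetical invocation of the script above, assuming it is saved as
`script/release/changelog`; the token, tag name, and range are placeholders:

```
# GITHUB_TOKEN must be a valid API token; TAG_NAME names the changelog header
GITHUB_TOKEN=<token> TAG_NAME=1.24.1 ./script/release/changelog 1.24.0..HEAD
```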
74
script/release/push-release
Executable file
@@ -0,0 +1,74 @@
#!/bin/bash
#
# Create the official release
#

. "$(dirname "${BASH_SOURCE[0]}")/utils.sh"

function usage() {
    >&2 cat << EOM
Publish a release by building all artifacts and pushing them.

This script requires that 'git config branch.${BRANCH}.release' is set to the
release version for the release branch.

EOM
    exit 1
}

BRANCH="$(git rev-parse --abbrev-ref HEAD)"
VERSION="$(git config "branch.${BRANCH}.release")" || usage

if [ -z "$(command -v jq 2> /dev/null)" ]; then
    >&2 echo "$0 requires https://stedolan.github.io/jq/"
    >&2 echo "Please install it and make sure it is available on your \$PATH."
    exit 2
fi


API=https://api.github.com/repos
REPO=docker/compose
GITHUB_REPO=git@github.com:$REPO

# Check the build status is green
sha=$(git rev-parse HEAD)
url=$API/$REPO/statuses/$sha
build_status=$(curl -s $url | jq -r '.[0].state')
if [ -n "$SKIP_BUILD_CHECK" ]; then
    echo "Skipping build status check..."
elif [[ "$build_status" != "success" ]]; then
    >&2 echo "Build status is $build_status, but it should be success."
    exit -1
fi

echo "Tagging the release as $VERSION"
git tag $VERSION
git push $GITHUB_REPO $VERSION

echo "Uploading the docker image"
docker push docker/compose:$VERSION

echo "Uploading the compose-tests image"
docker push docker/compose-tests:latest
docker push docker/compose-tests:$VERSION

echo "Uploading package to PyPI"
./script/build/write-git-sha
python setup.py sdist bdist_wheel
if [ "$(command -v twine 2> /dev/null)" ]; then
    twine upload ./dist/docker-compose-${VERSION/-/}.tar.gz ./dist/docker_compose-${VERSION/-/}-py2.py3-none-any.whl
else
    python setup.py upload
fi

echo "Testing pip package"
deactivate || true
virtualenv venv-test
source venv-test/bin/activate
pip install docker-compose==$VERSION
docker-compose version
deactivate
rm -rf venv-test

echo "Now publish the github release, and test the downloads."
echo "Email maintainers@dockerproject.org and engineering@docker.com about the new release."
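A hypothetical run of this script, assuming you are on the release branch;
the branch and version values are placeholders:

```
# The script reads the release version from the branch's git config
git config branch.$(git rev-parse --abbrev-ref HEAD).release 1.24.1
./script/release/push-release
```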
38
script/release/rebase-bump-commit
Executable file
@@ -0,0 +1,38 @@
#!/bin/bash
#
# Move the "bump to <version>" commit to the HEAD of the branch
#

. "$(dirname "${BASH_SOURCE[0]}")/utils.sh"

function usage() {
    >&2 cat << EOM
Move the "bump to <version>" commit to the HEAD of the branch

This script requires that 'git config branch.${BRANCH}.release' is set to the
release version for the release branch.

EOM
    exit 1
}


BRANCH="$(git rev-parse --abbrev-ref HEAD)"
VERSION="$(git config "branch.${BRANCH}.release")" || usage


COMMIT_MSG="Bump $VERSION"
sha="$(git log --grep "$COMMIT_MSG\$" --format="%H")"
if [ -z "$sha" ]; then
    >&2 echo "No commit with message \"$COMMIT_MSG\""
    exit 2
fi
if [[ "$sha" == "$(git rev-parse HEAD)" ]]; then
    >&2 echo "Bump commit already at HEAD"
    exit 0
fi

commits=$(git log --format="%H" "$sha..HEAD" | wc -l | xargs echo)

git rebase --onto $sha~1 HEAD~$commits $BRANCH
git cherry-pick $sha
@@ -1,126 +1,383 @@
#!/usr/bin/env python3
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals

import re
import argparse
import os
import shutil
import sys
import time

import click
from git import Repo
from utils import update_init_py_version
from utils import update_run_sh_version
from utils import yesno

VALID_VERSION_PATTERN = re.compile(r"^\d+\.\d+\.\d+(-rc\d+)?$")
from jinja2 import Template
from release.bintray import BintrayAPI
from release.const import BINTRAY_ORG
from release.const import NAME
from release.const import REPO_ROOT
from release.downloader import BinaryDownloader
from release.images import ImageManager
from release.pypi import check_pypirc
from release.pypi import pypi_upload
from release.repository import delete_assets
from release.repository import get_contributors
from release.repository import Repository
from release.repository import upload_assets
from release.utils import branch_name
from release.utils import compatibility_matrix
from release.utils import read_release_notes_from_changelog
from release.utils import ScriptError
from release.utils import update_init_py_version
from release.utils import update_run_sh_version
from release.utils import yesno


class Version(str):
    def matching_groups(self):
        match = VALID_VERSION_PATTERN.match(self)
        if not match:
            return False
def create_initial_branch(repository, args):
    release_branch = repository.create_release_branch(args.release, args.base)
    if args.base and args.cherries:
        print('Detected patch version.')
        cherries = input('Indicate (space-separated) PR numbers to cherry-pick then press Enter:\n')
        repository.cherry_pick_prs(release_branch, cherries.split())

        return match.groups()

    def is_ga_version(self):
        groups = self.matching_groups()
        if not groups:
            return False

        rc_suffix = groups[1]
        return not rc_suffix

    def validate(self):
        return len(self.matching_groups()) > 0

    def branch_name(self):
        if not self.validate():
            return None

        rc_part = self.matching_groups()[0]
        ver = self
        if rc_part:
            ver = ver[:-len(rc_part)]

        tokens = ver.split(".")
        tokens[-1] = 'x'

        return ".".join(tokens)
    return create_bump_commit(repository, release_branch, args.bintray_user, args.bintray_org)


def create_bump_commit(repository, version):
    print('Creating bump commit...')
    repository.commit('-a', '-s', '-m "Bump {}"'.format(version), '--no-verify')


def validate_environment(version, repository):
    if not version.validate():
        print('Version "{}" has an invalid format. This should follow D+.D+.D+(-rcD+). '
              'Like: 1.26.0 or 1.26.0-rc1'.format(version))
        return False

    expected_branch = version.branch_name()
    if str(repository.active_branch) != expected_branch:
        print('Cannot tag in this branch with version "{}". '
              'Please checkout "{}" to tag'.format(version, version.branch_name()))
        return False
    return True


@click.group()
def cli():
    pass


@cli.command()
@click.argument('version')
def tag(version):
    """
    Updates the version related files and tag
    """
    repo = Repo(".")
    version = Version(version)
    if not validate_environment(version, repo):
        return

    update_init_py_version(version)
    update_run_sh_version(version)
def create_bump_commit(repository, release_branch, bintray_user, bintray_org):
    with release_branch.config_reader() as cfg:
        release = cfg.get('release')
    print('Updating version info in __init__.py and run.sh')
    update_run_sh_version(release)
    update_init_py_version(release)

    input('Please add the release notes to the CHANGELOG.md file, then press Enter to continue.')
    proceed = False
    proceed = None
    while not proceed:
        print(repo.git.diff())
        print(repository.diff())
        proceed = yesno('Are these changes ok? y/N ', default=False)

    if repo.git.diff():
        create_bump_commit(repo.git, version)
    if repository.diff():
        repository.create_bump_commit(release_branch, release)
    repository.push_branch_to_remote(release_branch)

    bintray_api = BintrayAPI(os.environ['BINTRAY_TOKEN'], bintray_user)
    if not bintray_api.repository_exists(bintray_org, release_branch.name):
        print('Creating data repository {} on bintray'.format(release_branch.name))
        bintray_api.create_repository(bintray_org, release_branch.name, 'generic')
    else:
        print('No changes to commit. Exiting...')
        return

    repo.create_tag(version)

    print('Please, check the changes. If everything is OK, you just need to push with:\n'
          '$ git push --tags upstream {}'.format(version.branch_name()))
        print('Bintray repository {} already exists. Skipping'.format(release_branch.name))


@cli.command()
@click.argument('version')
def push_latest(version):
    """
    TODO Pushes the latest tag pointing to a certain GA version
    """
    raise NotImplementedError
def monitor_pr_status(pr_data):
    print('Waiting for CI to complete...')
    last_commit = pr_data.get_commits().reversed[0]
    while True:
        status = last_commit.get_combined_status()
        if status.state == 'pending' or status.state == 'failure':
            summary = {
                'pending': 0,
                'success': 0,
                'failure': 0,
                'error': 0,
            }
            for detail in status.statuses:
                if detail.context == 'dco-signed':
                    # dco-signed check breaks on merge remote-tracking ; ignore it
                    continue
                if detail.state in summary:
                    summary[detail.state] += 1
            print(
                '{pending} pending, {success} successes, {failure} failures, '
                '{error} errors'.format(**summary)
            )
            if summary['failure'] > 0 or summary['error'] > 0:
                raise ScriptError('CI failures detected!')
            elif summary['pending'] == 0 and summary['success'] > 0:
                # This check assumes at least 1 non-DCO CI check to avoid race conditions.
                # If testing on a repo without CI, use --skip-ci-check to avoid looping eternally
                return True
            time.sleep(30)
        elif status.state == 'success':
            print('{} successes: all clear!'.format(status.total_count))
            return True


@cli.command()
@click.argument('version')
def ghtemplate(version):
    """
    TODO Generates the github release page content
    """
    version = Version(version)
    raise NotImplementedError
def check_pr_mergeable(pr_data):
    if pr_data.mergeable is False:
        # mergeable can also be null, in which case the warning would be a false positive.
        print(
            'WARNING!! PR #{} can not currently be merged. You will need to '
            'resolve the conflicts manually before finalizing the release.'.format(pr_data.number)
        )

    return pr_data.mergeable is True


def create_release_draft(repository, version, pr_data, files):
    print('Creating Github release draft')
    with open(os.path.join(os.path.dirname(__file__), 'release.md.tmpl'), 'r') as f:
        template = Template(f.read())
    print('Rendering release notes based on template')
    release_notes = template.render(
        version=version,
        compat_matrix=compatibility_matrix(),
        integrity=files,
        contributors=get_contributors(pr_data),
        changelog=read_release_notes_from_changelog(),
    )
    gh_release = repository.create_release(
        version, release_notes, draft=True, prerelease='-rc' in version,
        target_commitish='release'
    )
    print('Release draft initialized')
    return gh_release


def print_final_instructions(args):
    print(
        "You're almost done! Please verify that everything is in order and "
        "you are ready to make the release public, then run the following "
        "command:\n{exe} -b {user} finalize {version}".format(
            exe='./script/release/release.sh', user=args.bintray_user, version=args.release
        )
    )


def distclean():
    print('Running distclean...')
    dirs = [
        os.path.join(REPO_ROOT, 'build'), os.path.join(REPO_ROOT, 'dist'),
        os.path.join(REPO_ROOT, 'docker-compose.egg-info')
    ]
    files = []
    for base, dirnames, fnames in os.walk(REPO_ROOT):
        for fname in fnames:
            path = os.path.normpath(os.path.join(base, fname))
            if fname.endswith('.pyc'):
                files.append(path)
            elif fname.startswith('.coverage.'):
                files.append(path)
        for dirname in dirnames:
            path = os.path.normpath(os.path.join(base, dirname))
            if dirname == '__pycache__':
                dirs.append(path)
            elif dirname == '.coverage-binfiles':
                dirs.append(path)

    for file in files:
        os.unlink(file)

    for folder in dirs:
        shutil.rmtree(folder, ignore_errors=True)


def resume(args):
    try:
        distclean()
        repository = Repository(REPO_ROOT, args.repo)
        br_name = branch_name(args.release)
        if not repository.branch_exists(br_name):
            raise ScriptError('No local branch exists for this release.')
        gh_release = repository.find_release(args.release)
        if gh_release and not gh_release.draft:
            print('WARNING!! Found non-draft (public) release for this version!')
            proceed = yesno(
                'Are you sure you wish to proceed? Modifying an already '
                'released version is dangerous! y/N ', default=False
            )
            if proceed.lower() is not True:
                raise ScriptError('Aborting release')

        release_branch = repository.checkout_branch(br_name)
        if args.cherries:
            cherries = input('Indicate (space-separated) PR numbers to cherry-pick then press Enter:\n')
            repository.cherry_pick_prs(release_branch, cherries.split())

        create_bump_commit(repository, release_branch, args.bintray_user, args.bintray_org)
        pr_data = repository.find_release_pr(args.release)
        if not pr_data:
            pr_data = repository.create_release_pull_request(args.release)
        check_pr_mergeable(pr_data)
        if not args.skip_ci:
            monitor_pr_status(pr_data)
        downloader = BinaryDownloader(args.destination)
        files = downloader.download_all(args.release)
        if not gh_release:
            gh_release = create_release_draft(repository, args.release, pr_data, files)
        delete_assets(gh_release)
        upload_assets(gh_release, files)
        img_manager = ImageManager(args.release)
        img_manager.build_images(repository, files)
    except ScriptError as e:
        print(e)
        return 1

    print_final_instructions(args)
    return 0


def cancel(args):
    try:
        repository = Repository(REPO_ROOT, args.repo)
        repository.close_release_pr(args.release)
        repository.remove_release(args.release)
        repository.remove_bump_branch(args.release)
        bintray_api = BintrayAPI(os.environ['BINTRAY_TOKEN'], args.bintray_user)
        print('Removing Bintray data repository for {}'.format(args.release))
        bintray_api.delete_repository(args.bintray_org, branch_name(args.release))
        distclean()
    except ScriptError as e:
        print(e)
        return 1
    print('Release cancellation complete.')
    return 0


def start(args):
    distclean()
    try:
        repository = Repository(REPO_ROOT, args.repo)
        create_initial_branch(repository, args)
        pr_data = repository.create_release_pull_request(args.release)
        check_pr_mergeable(pr_data)
        if not args.skip_ci:
            monitor_pr_status(pr_data)
        downloader = BinaryDownloader(args.destination)
        files = downloader.download_all(args.release)
        gh_release = create_release_draft(repository, args.release, pr_data, files)
        upload_assets(gh_release, files)
        img_manager = ImageManager(args.release)
        img_manager.build_images(repository, files)
    except ScriptError as e:
        print(e)
        return 1

    print_final_instructions(args)
    return 0


def finalize(args):
    distclean()
    try:
        check_pypirc()
        repository = Repository(REPO_ROOT, args.repo)
        img_manager = ImageManager(args.release)
        pr_data = repository.find_release_pr(args.release)
        if not pr_data:
            raise ScriptError('No PR found for {}'.format(args.release))
        if not check_pr_mergeable(pr_data):
            raise ScriptError('Can not finalize release with an unmergeable PR')
        if not img_manager.check_images():
            raise ScriptError('Missing release image')
        br_name = branch_name(args.release)
        if not repository.branch_exists(br_name):
            raise ScriptError('No local branch exists for this release.')
        gh_release = repository.find_release(args.release)
        if not gh_release:
            raise ScriptError('No Github release draft for this version')

        repository.checkout_branch(br_name)

        os.system('python {setup_script} sdist bdist_wheel'.format(
            setup_script=os.path.join(REPO_ROOT, 'setup.py')))

        merge_status = pr_data.merge()
        if not merge_status.merged and not args.finalize_resume:
            raise ScriptError(
                'Unable to merge PR #{}: {}'.format(pr_data.number, merge_status.message)
            )

        pypi_upload(args)

        img_manager.push_images()
        repository.publish_release(gh_release)
    except ScriptError as e:
        print(e)
        return 1

    return 0


ACTIONS = [
    'start',
    'cancel',
    'resume',
    'finalize',
]

EPILOG = '''Example uses:
* Start a new feature release (includes all changes currently in master)
    release.sh -b user start 1.23.0
* Start a new patch release
    release.sh -b user --patch 1.21.0 start 1.21.1
* Cancel / rollback an existing release draft
    release.sh -b user cancel 1.23.0
* Restart a previously aborted patch release
    release.sh -b user -p 1.21.0 resume 1.21.1
'''


def main():
    if 'GITHUB_TOKEN' not in os.environ:
        print('GITHUB_TOKEN environment variable must be set')
        return 1

    if 'BINTRAY_TOKEN' not in os.environ:
        print('BINTRAY_TOKEN environment variable must be set')
        return 1

    parser = argparse.ArgumentParser(
        description='Orchestrate a new release of docker/compose. This tool assumes that you have '
                    'obtained a Github API token and Bintray API key and set the GITHUB_TOKEN and '
                    'BINTRAY_TOKEN environment variables accordingly.',
        epilog=EPILOG, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'action', choices=ACTIONS, help='The action to be performed for this release'
    )
    parser.add_argument('release', help='Release number, e.g. 1.9.0-rc1, 2.1.1')
    parser.add_argument(
        '--patch', '-p', dest='base',
        help='Which version is being patched by this release'
    )
    parser.add_argument(
        '--repo', '-r', dest='repo', default=NAME,
        help='Start a release for the given repo (default: {})'.format(NAME)
    )
    parser.add_argument(
        '-b', dest='bintray_user', required=True, metavar='USER',
        help='Username associated with the Bintray API key'
    )
    parser.add_argument(
        '--bintray-org', dest='bintray_org', metavar='ORG', default=BINTRAY_ORG,
        help='Organization name on bintray where the data repository will be created.'
    )
    parser.add_argument(
        '--destination', '-o', metavar='DIR', default='binaries',
        help='Directory where release binaries will be downloaded relative to the project root'
    )
    parser.add_argument(
        '--no-cherries', '-C', dest='cherries', action='store_false',
        help='If set, the program will not prompt the user for PR numbers to cherry-pick'
    )
    parser.add_argument(
        '--skip-ci-checks', dest='skip_ci', action='store_true',
        help='If set, the program will not wait for CI jobs to complete'
    )
    parser.add_argument(
        '--finalize-resume', dest='finalize_resume', action='store_true',
        help='If set, finalize will continue through steps that have already been completed.'
    )
    args = parser.parse_args()

    if args.action == 'start':
        return start(args)
    elif args.action == 'resume':
        return resume(args)
    elif args.action == 'cancel':
        return cancel(args)
    elif args.action == 'finalize':
        return finalize(args)

    print('Unexpected action "{}"'.format(args.action), file=sys.stderr)
    return 1


if __name__ == '__main__':
    cli()
    sys.exit(main())
13
script/release/release.sh
Executable file
@@ -0,0 +1,13 @@
#!/bin/sh

if test -d ${VENV_DIR:-./.release-venv}; then
    true
else
    ./script/release/setup-venv.sh
fi

if test -z "$*"; then
    args="--help"
fi

${VENV_DIR:-./.release-venv}/bin/python ./script/release/release.py "$@"
0
script/release/release/__init__.py
Normal file
50
script/release/release/bintray.py
Normal file
@@ -0,0 +1,50 @@
from __future__ import absolute_import
from __future__ import unicode_literals

import json

import requests

from .const import NAME


class BintrayAPI(requests.Session):
    def __init__(self, api_key, user, *args, **kwargs):
        super(BintrayAPI, self).__init__(*args, **kwargs)
        self.auth = (user, api_key)
        self.base_url = 'https://api.bintray.com/'

    def create_repository(self, subject, repo_name, repo_type='generic'):
        url = '{base}repos/{subject}/{repo_name}'.format(
            base=self.base_url, subject=subject, repo_name=repo_name,
        )
        data = {
            'name': repo_name,
            'type': repo_type,
            'private': False,
            'desc': 'Automated release for {}: {}'.format(NAME, repo_name),
            'labels': ['docker-compose', 'docker', 'release-bot'],
        }
        return self.post_json(url, data)

    def repository_exists(self, subject, repo_name):
        url = '{base}/repos/{subject}/{repo_name}'.format(
            base=self.base_url, subject=subject, repo_name=repo_name,
        )
        result = self.get(url)
        if result.status_code == 404:
            return False
        result.raise_for_status()
        return True

    def delete_repository(self, subject, repo_name):
        url = '{base}repos/{subject}/{repo_name}'.format(
            base=self.base_url, subject=subject, repo_name=repo_name,
        )
        return self.delete(url)

    def post_json(self, url, data, **kwargs):
        if 'headers' not in kwargs:
            kwargs['headers'] = {}
        kwargs['headers']['Content-Type'] = 'application/json'
        return self.post(url, data=json.dumps(data), **kwargs)
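For illustration, a minimal sketch of how the release tooling drives this
class; the user name and repository name are placeholders (the tool derives
the latter from the release branch):

```
import os

# Hypothetical values; BINTRAY_TOKEN must be set in the environment
api = BintrayAPI(os.environ['BINTRAY_TOKEN'], 'some-bintray-user')
if not api.repository_exists('docker-compose', 'bump-1.24.1'):
    api.create_repository('docker-compose', 'bump-1.24.1', 'generic')
```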
9
script/release/release/const.py
Normal file
@@ -0,0 +1,9 @@
from __future__ import absolute_import
from __future__ import unicode_literals

import os


REPO_ROOT = os.path.join(os.path.dirname(__file__), '..', '..', '..')
NAME = 'docker/compose'
BINTRAY_ORG = 'docker-compose'
72
script/release/release/downloader.py
Normal file
@@ -0,0 +1,72 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals

import hashlib
import os

import requests

from .const import BINTRAY_ORG
from .const import NAME
from .const import REPO_ROOT
from .utils import branch_name


class BinaryDownloader(requests.Session):
    base_bintray_url = 'https://dl.bintray.com/{}'.format(BINTRAY_ORG)
    base_appveyor_url = 'https://ci.appveyor.com/api/projects/{}/artifacts/'.format(NAME)

    def __init__(self, destination, *args, **kwargs):
        super(BinaryDownloader, self).__init__(*args, **kwargs)
        self.destination = destination
        os.makedirs(self.destination, exist_ok=True)

    def download_from_bintray(self, repo_name, filename):
        print('Downloading {} from bintray'.format(filename))
        url = '{base}/{repo_name}/{filename}'.format(
            base=self.base_bintray_url, repo_name=repo_name, filename=filename
        )
        full_dest = os.path.join(REPO_ROOT, self.destination, filename)
        return self._download(url, full_dest)

    def download_from_appveyor(self, branch_name, filename):
        print('Downloading {} from appveyor'.format(filename))
        url = '{base}/dist%2F{filename}?branch={branch_name}'.format(
            base=self.base_appveyor_url, filename=filename, branch_name=branch_name
        )
        full_dest = os.path.join(REPO_ROOT, self.destination, filename)
        return self._download(url, full_dest)

    def _download(self, url, full_dest):
        m = hashlib.sha256()
        with open(full_dest, 'wb') as f:
            r = self.get(url, stream=True)
            for chunk in r.iter_content(chunk_size=1024 * 600, decode_unicode=False):
                print('.', end='', flush=True)
                m.update(chunk)
                f.write(chunk)

        print(' download complete')
        hex_digest = m.hexdigest()
        with open(full_dest + '.sha256', 'w') as f:
            f.write('{} {}\n'.format(hex_digest, os.path.basename(full_dest)))
        return full_dest, hex_digest

    def download_all(self, version):
        files = {
            'docker-compose-Darwin-x86_64': None,
            'docker-compose-Linux-x86_64': None,
            'docker-compose-Windows-x86_64.exe': None,
        }

        for filename in files.keys():
            if 'Windows' in filename:
                files[filename] = self.download_from_appveyor(
                    branch_name(version), filename
                )
            else:
                files[filename] = self.download_from_bintray(
                    branch_name(version), filename
                )
        return files
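A sketch of driving the downloader directly; the version is a placeholder and
assumes the corresponding `bump-1.24.1` Bintray repository and AppVeyor
branch exist:

```
downloader = BinaryDownloader('binaries')
files = downloader.download_all('1.24.1')
# files maps each binary name to a (local path, sha256 hex digest) tuple
```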
92
script/release/release/images.py
Normal file
@@ -0,0 +1,92 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals

import base64
import json
import os
import shutil

import docker

from .const import REPO_ROOT
from .utils import ScriptError


class ImageManager(object):
    def __init__(self, version):
        self.docker_client = docker.APIClient(**docker.utils.kwargs_from_env())
        self.version = version
        if 'HUB_CREDENTIALS' in os.environ:
            print('HUB_CREDENTIALS found in environment, issuing login')
            credentials = json.loads(base64.urlsafe_b64decode(os.environ['HUB_CREDENTIALS']))
            self.docker_client.login(
                username=credentials['Username'], password=credentials['Password']
            )

    def build_images(self, repository, files):
        print("Building release images...")
        repository.write_git_sha()
        distdir = os.path.join(REPO_ROOT, 'dist')
        os.makedirs(distdir, exist_ok=True)
        shutil.copy(files['docker-compose-Linux-x86_64'][0], distdir)
        os.chmod(os.path.join(distdir, 'docker-compose-Linux-x86_64'), 0o755)
        print('Building docker/compose image')
        logstream = self.docker_client.build(
            REPO_ROOT, tag='docker/compose:{}'.format(self.version), dockerfile='Dockerfile.run',
            decode=True
        )
        for chunk in logstream:
            if 'error' in chunk:
                raise ScriptError('Build error: {}'.format(chunk['error']))
            if 'stream' in chunk:
                print(chunk['stream'], end='')

        print('Building test image (for UCP e2e)')
        logstream = self.docker_client.build(
            REPO_ROOT, tag='docker-compose-tests:tmp', decode=True
        )
        for chunk in logstream:
            if 'error' in chunk:
                raise ScriptError('Build error: {}'.format(chunk['error']))
            if 'stream' in chunk:
                print(chunk['stream'], end='')

        container = self.docker_client.create_container(
            'docker-compose-tests:tmp', entrypoint='tox'
        )
        self.docker_client.commit(container, 'docker/compose-tests', 'latest')
        self.docker_client.tag(
            'docker/compose-tests:latest', 'docker/compose-tests:{}'.format(self.version)
        )
        self.docker_client.remove_container(container, force=True)
        self.docker_client.remove_image('docker-compose-tests:tmp', force=True)

    @property
    def image_names(self):
        return [
            'docker/compose-tests:latest',
            'docker/compose-tests:{}'.format(self.version),
            'docker/compose:{}'.format(self.version)
        ]

    def check_images(self):
        for name in self.image_names:
            try:
                self.docker_client.inspect_image(name)
            except docker.errors.ImageNotFound:
                print('Expected image {} was not found'.format(name))
                return False
        return True

    def push_images(self):
        for name in self.image_names:
            print('Pushing {} to Docker Hub'.format(name))
            logstream = self.docker_client.push(name, stream=True, decode=True)
            for chunk in logstream:
                if 'status' in chunk:
                    print(chunk['status'])
                if 'error' in chunk:
                    raise ScriptError(
                        'Error pushing {name}: {err}'.format(name=name, err=chunk['error'])
                    )
44
script/release/release/pypi.py
Normal file
@@ -0,0 +1,44 @@
from __future__ import absolute_import
from __future__ import unicode_literals

from configparser import Error
from requests.exceptions import HTTPError
from twine.commands.upload import main as twine_upload
from twine.utils import get_config

from .utils import ScriptError


def pypi_upload(args):
    print('Uploading to PyPi')
    try:
        rel = args.release.replace('-rc', 'rc')
        twine_upload([
            'dist/docker_compose-{}*.whl'.format(rel),
            'dist/docker-compose-{}*.tar.gz'.format(rel)
        ])
    except HTTPError as e:
        if e.response.status_code == 400 and 'File already exists' in str(e):
            if not args.finalize_resume:
                raise ScriptError(
                    'Package already uploaded on PyPi.'
                )
            print('Skipping PyPi upload - package already uploaded')
        else:
            raise ScriptError('Unexpected HTTP error uploading package to PyPi: {}'.format(e))


def check_pypirc():
    try:
        config = get_config()
    except Error as e:
        raise ScriptError('Failed to parse .pypirc file: {}'.format(e))

    if config is None:
        raise ScriptError('Failed to parse .pypirc file')

    if 'pypi' not in config:
        raise ScriptError('Missing [pypi] section in .pypirc file')

    if not (config['pypi'].get('username') and config['pypi'].get('password')):
        raise ScriptError('Missing login/password pair for pypi repo')
245
script/release/release/repository.py
Normal file
@@ -0,0 +1,245 @@
from __future__ import absolute_import
from __future__ import unicode_literals

import os
import tempfile

import requests
from git import GitCommandError
from git import Repo
from github import Github

from .const import NAME
from .const import REPO_ROOT
from .utils import branch_name
from .utils import read_release_notes_from_changelog
from .utils import ScriptError


class Repository(object):
    def __init__(self, root=None, gh_name=None):
        if root is None:
            root = REPO_ROOT
        if gh_name is None:
            gh_name = NAME
        self.git_repo = Repo(root)
        self.gh_client = Github(os.environ['GITHUB_TOKEN'])
        self.gh_repo = self.gh_client.get_repo(gh_name)

    def create_release_branch(self, version, base=None):
        print('Creating release branch {} based on {}...'.format(version, base or 'master'))
        remote = self.find_remote(self.gh_repo.full_name)
        br_name = branch_name(version)
        remote.fetch()
        if self.branch_exists(br_name):
            raise ScriptError(
                "Branch {} already exists locally. Please remove it before "
                "running the release script, or use `resume` instead.".format(
                    br_name
                )
            )
        if base is not None:
            base = self.git_repo.tag('refs/tags/{}'.format(base))
        else:
            base = 'refs/remotes/{}/master'.format(remote.name)
        release_branch = self.git_repo.create_head(br_name, commit=base)
        release_branch.checkout()
        self.git_repo.git.merge('--strategy=ours', '--no-edit', '{}/release'.format(remote.name))
        with release_branch.config_writer() as cfg:
            cfg.set_value('release', version)
        return release_branch

    def find_remote(self, remote_name=None):
        if not remote_name:
            remote_name = self.gh_repo.full_name
        for remote in self.git_repo.remotes:
            for url in remote.urls:
                if remote_name in url:
                    return remote
        return None

    def create_bump_commit(self, bump_branch, version):
        print('Creating bump commit...')
        bump_branch.checkout()
        self.git_repo.git.commit('-a', '-s', '-m "Bump {}"'.format(version), '--no-verify')

    def diff(self):
        return self.git_repo.git.diff()

    def checkout_branch(self, name):
        return self.git_repo.branches[name].checkout()

    def push_branch_to_remote(self, branch, remote_name=None):
        print('Pushing branch {} to remote...'.format(branch.name))
        remote = self.find_remote(remote_name)
        remote.push(refspec=branch, force=True)

    def branch_exists(self, name):
        return name in [h.name for h in self.git_repo.heads]

    def create_release_pull_request(self, version):
        return self.gh_repo.create_pull(
            title='Bump {}'.format(version),
            body='Automated release for docker-compose {}\n\n{}'.format(
                version, read_release_notes_from_changelog()
            ),
            base='release',
            head=branch_name(version),
        )

    def create_release(self, version, release_notes, **kwargs):
        return self.gh_repo.create_git_release(
            tag=version, name=version, message=release_notes, **kwargs
        )

    def find_release(self, version):
        print('Retrieving release draft for {}'.format(version))
        releases = self.gh_repo.get_releases()
        for release in releases:
            if release.tag_name == version and release.title == version:
                return release
        return None

    def publish_release(self, release):
        release.update_release(
            name=release.title,
            message=release.body,
            draft=False,
            prerelease=release.prerelease
        )

    def remove_release(self, version):
        print('Removing release draft for {}'.format(version))
        releases = self.gh_repo.get_releases()
        for release in releases:
            if release.tag_name == version and release.title == version:
                if not release.draft:
                    print(
                        'The release at {} is no longer a draft. If you TRULY intend '
                        'to remove it, please do so manually.'.format(release.url)
                    )
                    continue
                release.delete_release()

    def remove_bump_branch(self, version, remote_name=None):
        name = branch_name(version)
        if not self.branch_exists(name):
            return False
        print('Removing local branch "{}"'.format(name))
        if self.git_repo.active_branch.name == name:
            print('Active branch is about to be deleted. Checking out to master...')
            try:
                self.checkout_branch('master')
            except GitCommandError:
                raise ScriptError(
                    'Unable to checkout master. Try stashing local changes before proceeding.'
                )
        self.git_repo.branches[name].delete(self.git_repo, name, force=True)
        print('Removing remote branch "{}"'.format(name))
        remote = self.find_remote(remote_name)
        try:
            remote.push(name, delete=True)
        except GitCommandError as e:
            if 'remote ref does not exist' in str(e):
                return False
            raise ScriptError(
                'Error trying to remove remote branch: {}'.format(e)
            )
        return True

    def find_release_pr(self, version):
        print('Retrieving release PR for {}'.format(version))
        name = branch_name(version)
        open_prs = self.gh_repo.get_pulls(state='open')
        for pr in open_prs:
            if pr.head.ref == name:
                print('Found matching PR #{}'.format(pr.number))
                return pr
        print('No open PR for this release branch.')
        return None

    def close_release_pr(self, version):
        print('Retrieving and closing release PR for {}'.format(version))
        name = branch_name(version)
        open_prs = self.gh_repo.get_pulls(state='open')
        count = 0
        for pr in open_prs:
            if pr.head.ref == name:
                print('Found matching PR #{}'.format(pr.number))
                pr.edit(state='closed')
                count += 1
        if count == 0:
            print('No open PR for this release branch.')
        return count

    def write_git_sha(self):
        with open(os.path.join(REPO_ROOT, 'compose', 'GITSHA'), 'w') as f:
            f.write(self.git_repo.head.commit.hexsha[:7])

    def cherry_pick_prs(self, release_branch, ids):
        if not ids:
            return
        release_branch.checkout()
        for i in ids:
            try:
                i = int(i)
            except ValueError as e:
                raise ScriptError('Invalid PR id: {}'.format(e))
            print('Retrieving PR#{}'.format(i))
            pr = self.gh_repo.get_pull(i)
            patch_data = requests.get(pr.patch_url).text
            self.apply_patch(patch_data)

    def apply_patch(self, patch_data):
        with tempfile.NamedTemporaryFile(mode='w', prefix='_compose_cherry', encoding='utf-8') as f:
            f.write(patch_data)
            f.flush()
            self.git_repo.git.am('--3way', f.name)

    def get_prs_in_milestone(self, version):
        milestones = self.gh_repo.get_milestones(state='open')
        milestone = None
        for ms in milestones:
            if ms.title == version:
                milestone = ms
                break
        if not milestone:
            print('Didn\'t find a milestone matching "{}"'.format(version))
            return None

        issues = self.gh_repo.get_issues(milestone=milestone, state='all')
        prs = []
        for issue in issues:
            if issue.pull_request is not None:
                prs.append(issue.number)
        return sorted(prs)


def get_contributors(pr_data):
    commits = pr_data.get_commits()
    authors = {}
    for commit in commits:
        if not commit.author:
            continue
        author = commit.author.login
        authors[author] = authors.get(author, 0) + 1
    return [x[0] for x in sorted(list(authors.items()), key=lambda x: x[1])]


def upload_assets(gh_release, files):
    print('Uploading binaries and hash sums')
    for filename, filedata in files.items():
        print('Uploading {}...'.format(filename))
        gh_release.upload_asset(filedata[0], content_type='application/octet-stream')
        gh_release.upload_asset('{}.sha256'.format(filedata[0]), content_type='text/plain')
    print('Uploading run.sh...')
    gh_release.upload_asset(
        os.path.join(REPO_ROOT, 'script', 'run', 'run.sh'), content_type='text/plain'
    )


def delete_assets(gh_release):
    print('Removing previously uploaded assets')
    for asset in gh_release.get_assets():
        print('Deleting asset {}'.format(asset.name))
        asset.delete_asset()
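For illustration, a sketch of how the resume flow uses the cherry-pick helper
above; the branch name and PR numbers are placeholders, and `GITHUB_TOKEN`
must be set for the constructor to succeed:

```
repo = Repository()
branch = repo.checkout_branch('bump-1.24.1')   # hypothetical release branch
repo.cherry_pick_prs(branch, ['6543', '6544'])  # applies each PR's patch with git am --3way
```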
@@ -4,7 +4,36 @@ from __future__ import unicode_literals
import os
import re

from const import REPO_ROOT
from .const import REPO_ROOT
from compose import const as compose_const

section_header_re = re.compile(r'^[0-9]+\.[0-9]+\.[0-9]+ \([0-9]{4}-[01][0-9]-[0-3][0-9]\)$')


class ScriptError(Exception):
    pass


def branch_name(version):
    return 'bump-{}'.format(version)


def read_release_notes_from_changelog():
    with open(os.path.join(REPO_ROOT, 'CHANGELOG.md'), 'r') as f:
        lines = f.readlines()
    i = 0
    while i < len(lines):
        if section_header_re.match(lines[i]):
            break
        i += 1

    j = i + 1
    while j < len(lines):
        if section_header_re.match(lines[j]):
            break
        j += 1

    return ''.join(lines[i + 2:j - 1])


def update_init_py_version(version):
@@ -25,6 +54,15 @@ def update_run_sh_version(version):
    f.write(contents)


def compatibility_matrix():
    result = {}
    for engine_version in compose_const.API_VERSION_TO_ENGINE_VERSION.values():
        result[engine_version] = []
    for fmt, api_version in compose_const.API_VERSIONS.items():
        result[compose_const.API_VERSION_TO_ENGINE_VERSION[api_version]].append(fmt.vstring)
    return result


def yesno(prompt, default=None):
    """
    Prompt the user for a yes or no.
47
script/release/setup-venv.sh
Executable file
@@ -0,0 +1,47 @@
#!/bin/bash

debian_based() { test -f /etc/debian_version; }

if test -z $VENV_DIR; then
    VENV_DIR=./.release-venv
fi

if test -z $PYTHONBIN; then
    PYTHONBIN=$(which python3)
    if test -z $PYTHONBIN; then
        PYTHONBIN=$(which python)
    fi
fi

VERSION=$($PYTHONBIN -c "import sys; print('{}.{}'.format(*sys.version_info[0:2]))")
if test $(echo $VERSION | cut -d. -f1) -lt 3; then
    echo "Python 3.3 or above is required"
fi

if test $(echo $VERSION | cut -d. -f2) -lt 3; then
    echo "Python 3.3 or above is required"
fi

# Debian / Ubuntu workaround:
# https://askubuntu.com/questions/879437/ensurepip-is-disabled-in-debian-ubuntu-for-the-system-python
if debian_based; then
    VENV_FLAGS="$VENV_FLAGS --without-pip"
fi

$PYTHONBIN -m venv $VENV_DIR $VENV_FLAGS

VENV_PYTHONBIN=$VENV_DIR/bin/python

if debian_based; then
    curl https://bootstrap.pypa.io/get-pip.py -o $VENV_DIR/get-pip.py
    $VENV_PYTHONBIN $VENV_DIR/get-pip.py
fi

$VENV_PYTHONBIN -m pip install -U Jinja2==2.10 \
    PyGithub==1.39 \
    GitPython==2.1.9 \
    requests==2.18.4 \
    setuptools==40.6.2 \
    twine==1.11.0

$VENV_PYTHONBIN setup.py develop
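A hypothetical direct invocation, showing the two environment overrides the
script honors; both values below are just the defaults made explicit:

```
VENV_DIR=./.release-venv PYTHONBIN=$(which python3) ./script/release/setup-venv.sh
```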
@@ -15,7 +15,7 @@

set -e

VERSION="1.26.2"
VERSION="1.24.0"
IMAGE="docker/compose:$VERSION"


@@ -36,19 +36,19 @@ if [ "$(pwd)" != '/' ]; then
fi
if [ -n "$COMPOSE_FILE" ]; then
    COMPOSE_OPTIONS="$COMPOSE_OPTIONS -e COMPOSE_FILE=$COMPOSE_FILE"
    compose_dir=$(realpath "$(dirname "$COMPOSE_FILE")")
    compose_dir=$(realpath $(dirname $COMPOSE_FILE))
fi
# TODO: also check --file argument
if [ -n "$compose_dir" ]; then
    VOLUMES="$VOLUMES -v $compose_dir:$compose_dir"
fi
if [ -n "$HOME" ]; then
    VOLUMES="$VOLUMES -v $HOME:$HOME -e HOME" # Pass in HOME to share docker.config and allow ~/-relative paths to work.
    VOLUMES="$VOLUMES -v $HOME:$HOME -v $HOME:/root" # mount $HOME in /root to share docker.config
fi

# Only allocate tty if we detect one
if [ -t 0 ] && [ -t 1 ]; then
    DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -t"
if [ -t 0 -a -t 1 ]; then
    DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -t"
fi

# Always set -i to support piped and terminal input in run/exec
@@ -56,9 +56,8 @@ DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -i"


# Handle userns security
if docker info --format '{{json .SecurityOptions}}' 2>/dev/null | grep -q 'name=userns'; then
if [ ! -z "$(docker info 2>/dev/null | grep userns)" ]; then
    DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS --userns=host"
fi

# shellcheck disable=SC2086
exec docker run --rm $DOCKER_RUN_OPTIONS $DOCKER_ADDR $COMPOSE_OPTIONS $VOLUMES -w "$(pwd)" $IMAGE "$@"
@@ -13,13 +13,13 @@ if ! [ ${DEPLOYMENT_TARGET} == "$(macos_version)" ]; then
    SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62
fi

OPENSSL_VERSION=1.1.1g
OPENSSL_VERSION=1.1.0j
OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz
OPENSSL_SHA1=b213a293f2127ec3e323fb3cfc0c9807664fd997
OPENSSL_SHA1=dcad1efbacd9a4ed67d4514470af12bbe2a1d60a

PYTHON_VERSION=3.7.7
PYTHON_VERSION=3.6.8
PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz
PYTHON_SHA1=8e9968663a214aea29659ba9dfa959e8a7d82b39
PYTHON_SHA1=09fcc4edaef0915b4dedbfb462f1cd15f82d3a6f

#
# Install prerequisites.
@@ -36,7 +36,7 @@ if ! [ -x "$(command -v python3)" ]; then
    brew install python3
fi
if ! [ -x "$(command -v virtualenv)" ]; then
    pip3 install virtualenv==16.2.0
    pip install virtualenv
fi

#
@@ -50,7 +50,7 @@ mkdir -p ${TOOLCHAIN_PATH}
#
# Set macOS SDK.
#
if [[ ${SDK_FETCH} && ! -f ${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk/SDKSettings.plist ]]; then
if [ ${SDK_FETCH} ]; then
    SDK_PATH=${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk
    fetch_tarball ${SDK_URL} ${SDK_PATH} ${SDK_SHA1}
else
@@ -61,7 +61,7 @@ fi
# Build OpenSSL.
#
OPENSSL_SRC_PATH=${TOOLCHAIN_PATH}/openssl-${OPENSSL_VERSION}
if ! [[ $(${TOOLCHAIN_PATH}/bin/openssl version) == *"${OPENSSL_VERSION}"* ]]; then
if ! [ -f ${TOOLCHAIN_PATH}/bin/openssl ]; then
    rm -rf ${OPENSSL_SRC_PATH}
    fetch_tarball ${OPENSSL_URL} ${OPENSSL_SRC_PATH} ${OPENSSL_SHA1}
    (
@@ -77,7 +77,7 @@ fi
# Build Python.
#
PYTHON_SRC_PATH=${TOOLCHAIN_PATH}/Python-${PYTHON_VERSION}
if ! [[ $(${TOOLCHAIN_PATH}/bin/python3 --version) == *"${PYTHON_VERSION}"* ]]; then
if ! [ -f ${TOOLCHAIN_PATH}/bin/python3 ]; then
    rm -rf ${PYTHON_SRC_PATH}
    fetch_tarball ${PYTHON_URL} ${PYTHON_SRC_PATH} ${PYTHON_SHA1}
    (
@@ -87,10 +87,9 @@ if ! [[ $(${TOOLCHAIN_PATH}/bin/python3 --version) == *"${PYTHON_VERSION}"* ]];
    --datarootdir=${TOOLCHAIN_PATH}/share \
    --datadir=${TOOLCHAIN_PATH}/share \
    --enable-framework=${TOOLCHAIN_PATH}/Frameworks \
    --with-openssl=${TOOLCHAIN_PATH} \
    MACOSX_DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET} \
    CFLAGS="-isysroot ${SDK_PATH} -I${TOOLCHAIN_PATH}/include" \
    CPPFLAGS="-I${SDK_PATH}/usr/include -I${TOOLCHAIN_PATH}/include" \
    CPPFLAGS="-I${SDK_PATH}/usr/include -I${TOOLCHAIN_PATH}include" \
    LDFLAGS="-isysroot ${SDK_PATH} -L ${TOOLCHAIN_PATH}/lib"
    make -j 4
    make install PYTHONAPPSDIR=${TOOLCHAIN_PATH}
@@ -98,11 +97,6 @@ if ! [[ $(${TOOLCHAIN_PATH}/bin/python3 --version) == *"${PYTHON_VERSION}"* ]];
    )
fi

#
# Smoke test built Python.
#
openssl_version ${TOOLCHAIN_PATH}

echo ""
echo "*** Targeting macOS: ${DEPLOYMENT_TARGET}"
echo "*** Using SDK ${SDK_PATH}"
@@ -1,3 +0,0 @@
#!/usr/bin/env bash

pytest --conformity --binary ${1:-docker-compose} tests/acceptance/
@@ -8,7 +8,8 @@ set -e
docker run --rm \
    --tty \
    ${GIT_VOLUME} \
    "$TAG" tox -e pre-commit
    --entrypoint="tox" \
    "$TAG" -e pre-commit

get_versions="docker run --rm
    --entrypoint=/code/.tox/py27/bin/python
@@ -23,7 +24,7 @@ fi


BUILD_NUMBER=${BUILD_NUMBER-$USER}
PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py37}
PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py36}

for version in $DOCKER_VERSIONS; do
    >&2 echo "Running tests against Docker $version"

@@ -20,3 +20,6 @@ export DOCKER_DAEMON_ARGS="--storage-driver=$STORAGE_DRIVER"

GIT_VOLUME="--volumes-from=$(hostname)"
. script/test/all

>&2 echo "Building Linux binary"
. script/build/linux-entrypoint
@@ -3,18 +3,17 @@

set -ex

TAG="docker-compose:alpine-$(git rev-parse --short HEAD)"
TAG="docker-compose:$(git rev-parse --short HEAD)"

# By default use the Dockerfile, but can be overridden to use an alternative file
# e.g DOCKERFILE=Dockerfile.s390x script/test/default
# e.g DOCKERFILE=Dockerfile.armhf script/test/default
DOCKERFILE="${DOCKERFILE:-Dockerfile}"
DOCKER_BUILD_TARGET="${DOCKER_BUILD_TARGET:-build}"

rm -rf coverage-html
# Create the host directory so it's owned by $USER
mkdir -p coverage-html

docker build -f "${DOCKERFILE}" -t "${TAG}" --target "${DOCKER_BUILD_TARGET}" .
docker build -f ${DOCKERFILE} -t "$TAG" .

GIT_VOLUME="--volume=$(pwd)/.git:/code/.git"
. script/test/all
32
setup.py
@@ -31,38 +31,32 @@ def find_version(*file_paths):

install_requires = [
    'cached-property >= 1.2.0, < 2',
    'docopt >= 0.6.1, < 1',
    'PyYAML >= 3.10, < 6',
    'requests >= 2.20.0, < 3',
    'texttable >= 0.9.0, < 2',
    'websocket-client >= 0.32.0, < 1',
    'distro >= 1.5.0, < 2',
    'docker[ssh] >= 4.2.2, < 5',
    'dockerpty >= 0.4.1, < 1',
    'docopt >= 0.6.1, < 0.7',
    'PyYAML >= 3.10, < 4.3',
    'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.21',
    'texttable >= 0.9.0, < 0.10',
    'websocket-client >= 0.32.0, < 1.0',
    'docker[ssh] >= 3.7.0, < 4.0',
    'dockerpty >= 0.4.1, < 0.5',
    'six >= 1.3.0, < 2',
    'jsonschema >= 2.5.1, < 4',
    'python-dotenv >= 0.13.0, < 1',
    'jsonschema >= 2.5.1, < 3',
]


tests_require = [
    'ddt >= 1.2.2, < 2',
    'pytest < 6',
    'pytest',
]


if sys.version_info[:2] < (3, 4):
    tests_require.append('mock >= 1.0.1, < 4')
    tests_require.append('mock >= 1.0.1')

extras_require = {
    ':python_version < "3.2"': ['subprocess32 >= 3.5.4, < 4'],
    ':python_version < "3.4"': ['enum34 >= 1.0.4, < 2'],
    ':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5, < 4'],
    ':python_version < "3.3"': ['backports.shutil_get_terminal_size == 1.0.0',
                                'ipaddress >= 1.0.16, < 2'],
    ':sys_platform == "win32"': ['colorama >= 0.4, < 1'],
    ':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5'],
    ':python_version < "3.3"': ['ipaddress >= 1.0.16'],
    ':sys_platform == "win32"': ['colorama >= 0.4, < 0.5'],
    'socks': ['PySocks >= 1.5.6, != 1.5.7, < 2'],
    'tests': tests_require,
}
@@ -11,7 +11,6 @@ import subprocess
|
||||
import time
|
||||
from collections import Counter
|
||||
from collections import namedtuple
|
||||
from functools import reduce
|
||||
from operator import attrgetter
|
||||
|
||||
import pytest
|
||||
@@ -20,7 +19,6 @@ import yaml
|
||||
from docker import errors
|
||||
|
||||
from .. import mock
|
||||
from ..helpers import BUSYBOX_IMAGE_WITH_TAG
|
||||
from ..helpers import create_host_file
|
||||
from compose.cli.command import get_project
|
||||
from compose.config.errors import DuplicateOverrideFileFound
|
||||
@@ -38,37 +36,16 @@ from tests.integration.testcases import v2_2_only
|
||||
from tests.integration.testcases import v2_only
|
||||
from tests.integration.testcases import v3_only
|
||||
|
||||
DOCKER_COMPOSE_EXECUTABLE = 'docker-compose'
|
||||
|
||||
ProcessResult = namedtuple('ProcessResult', 'stdout stderr')
|
||||
|
||||
|
||||
BUILD_CACHE_TEXT = 'Using cache'
|
||||
BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:1.27.2'
|
||||
COMPOSE_COMPATIBILITY_DICT = {
|
||||
'version': '2.3',
|
||||
'volumes': {'foo': {'driver': 'default'}},
|
||||
'networks': {'bar': {}},
|
||||
'services': {
|
||||
'foo': {
|
||||
'command': '/bin/true',
|
||||
'image': 'alpine:3.10.1',
|
||||
'scale': 3,
|
||||
'restart': 'always:7',
|
||||
'mem_limit': '300M',
|
||||
'mem_reservation': '100M',
|
||||
'cpus': 0.7,
|
||||
'volumes': ['foo:/bar:rw'],
|
||||
'networks': {'bar': None},
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def start_process(base_dir, options):
|
||||
proc = subprocess.Popen(
|
||||
[DOCKER_COMPOSE_EXECUTABLE] + options,
|
||||
stdin=subprocess.PIPE,
|
||||
['docker-compose'] + options,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
cwd=base_dir)
|
||||
@@ -76,8 +53,8 @@ def start_process(base_dir, options):
|
||||
return proc
|
||||
|
||||
|
||||
def wait_on_process(proc, returncode=0, stdin=None):
|
||||
stdout, stderr = proc.communicate(input=stdin)
|
||||
def wait_on_process(proc, returncode=0):
|
||||
stdout, stderr = proc.communicate()
|
||||
if proc.returncode != returncode:
|
||||
print("Stderr: {}".format(stderr))
|
||||
print("Stdout: {}".format(stdout))
|
||||
@@ -85,12 +62,6 @@ def wait_on_process(proc, returncode=0, stdin=None):
|
||||
return ProcessResult(stdout.decode('utf-8'), stderr.decode('utf-8'))
|
||||
|
||||
|
||||
def dispatch(base_dir, options, project_options=None, returncode=0, stdin=None):
|
||||
project_options = project_options or []
|
||||
proc = start_process(base_dir, project_options + options)
|
||||
return wait_on_process(proc, returncode=returncode, stdin=stdin)


def wait_on_condition(condition, delay=0.1, timeout=40):
    start_time = time.time()
    while not condition():
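# The hunk is truncated here; the helper finishes with the usual
# poll-until-deadline loop, roughly (a sketch, not the verbatim source):
#
#         if time.time() - start_time > timeout:
#             raise AssertionError("Timeout: %s" % condition)
#         time.sleep(delay)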
@@ -177,8 +148,10 @@ class CLITestCase(DockerClientTestCase):
        self._project = get_project(self.base_dir, override_dir=self.override_dir)
        return self._project

    def dispatch(self, options, project_options=None, returncode=0, stdin=None):
        return dispatch(self.base_dir, options, project_options, returncode, stdin)
    def dispatch(self, options, project_options=None, returncode=0):
        project_options = project_options or []
        proc = start_process(self.base_dir, project_options + options)
        return wait_on_process(proc, returncode=returncode)

    def execute(self, container, cmd):
        # Remove once Hijack and CloseNotifier sign a peace treaty
@@ -197,13 +170,6 @@ class CLITestCase(DockerClientTestCase):
        # Prevent tearDown from trying to create a project
        self.base_dir = None

    def test_quiet_build(self):
        self.base_dir = 'tests/fixtures/build-args'
        result = self.dispatch(['build'], None)
        quietResult = self.dispatch(['build', '-q'], None)
        assert result.stdout != ""
        assert quietResult.stdout == ""

    def test_help_nonexistent(self):
        self.base_dir = 'tests/fixtures/no-composefile'
        result = self.dispatch(['help', 'foobar'], returncode=1)
@@ -262,17 +228,6 @@ class CLITestCase(DockerClientTestCase):
        self.base_dir = 'tests/fixtures/v2-full'
        assert self.dispatch(['config', '--quiet']).stdout == ''

    def test_config_stdin(self):
        config = b"""version: "3.7"
services:
  web:
    image: nginx
  other:
    image: alpine
"""
        result = self.dispatch(['-f', '-', 'config', '--services'], stdin=config)
        assert set(result.stdout.rstrip().split('\n')) == {'web', 'other'}

    def test_config_with_hash_option(self):
        self.base_dir = 'tests/fixtures/v2-full'
        result = self.dispatch(['config', '--hash=*'])
@@ -289,7 +244,7 @@ services:
        # assert there are no python objects encoded in the output
        assert '!!' not in result.stdout

        output = yaml.safe_load(result.stdout)
        output = yaml.load(result.stdout)
        expected = {
            'version': '2.0',
            'volumes': {'data': {'driver': 'local'}},
@@ -303,7 +258,7 @@ services:
                'volumes_from': ['service:other:rw'],
            },
            'other': {
                'image': BUSYBOX_IMAGE_WITH_TAG,
                'image': 'busybox:latest',
                'command': 'top',
                'volumes': ['/data'],
            },
@@ -314,7 +269,7 @@ services:
    def test_config_restart(self):
        self.base_dir = 'tests/fixtures/restart'
        result = self.dispatch(['config'])
        assert yaml.safe_load(result.stdout) == {
        assert yaml.load(result.stdout) == {
            'version': '2.0',
            'services': {
                'never': {
@@ -343,7 +298,7 @@ services:
    def test_config_external_network(self):
        self.base_dir = 'tests/fixtures/networks'
        result = self.dispatch(['-f', 'external-networks.yml', 'config'])
        json_result = yaml.safe_load(result.stdout)
        json_result = yaml.load(result.stdout)
        assert 'networks' in json_result
        assert json_result['networks'] == {
            'networks_foo': {
@@ -357,7 +312,7 @@ services:
    def test_config_with_dot_env(self):
        self.base_dir = 'tests/fixtures/default-env-file'
        result = self.dispatch(['config'])
        json_result = yaml.safe_load(result.stdout)
        json_result = yaml.load(result.stdout)
        assert json_result == {
            'services': {
                'web': {
@@ -369,30 +324,15 @@ services:
            'version': '2.4'
        }

    def test_config_with_env_file(self):
        self.base_dir = 'tests/fixtures/default-env-file'
        result = self.dispatch(['--env-file', '.env2', 'config'])
        json_result = yaml.safe_load(result.stdout)
        assert json_result == {
            'services': {
                'web': {
                    'command': 'false',
                    'image': 'alpine:latest',
                    'ports': ['5644/tcp', '9998/tcp']
                }
            },
            'version': '2.4'
        }
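
# The values asserted above come straight from the .env2 fixture that this
# same diff deletes further down (IMAGE=alpine:latest, COMMAND=false,
# PORT1=5644, PORT2=9998); `--env-file .env2` points variable interpolation
# at that file instead of the default .env.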

    def test_config_with_dot_env_and_override_dir(self):
        self.base_dir = 'tests/fixtures/default-env-file'
        result = self.dispatch(['--project-directory', 'alt/', 'config'])
        json_result = yaml.safe_load(result.stdout)
        json_result = yaml.load(result.stdout)
        assert json_result == {
            'services': {
                'web': {
                    'command': 'echo uwu',
                    'image': 'alpine:3.10.1',
                    'image': 'alpine:3.4',
                    'ports': ['3341/tcp', '4449/tcp']
                }
            },
@@ -402,7 +342,7 @@ services:
    def test_config_external_volume_v2(self):
        self.base_dir = 'tests/fixtures/volumes'
        result = self.dispatch(['-f', 'external-volumes-v2.yml', 'config'])
        json_result = yaml.safe_load(result.stdout)
        json_result = yaml.load(result.stdout)
        assert 'volumes' in json_result
        assert json_result['volumes'] == {
            'foo': {
@@ -418,7 +358,7 @@ services:
    def test_config_external_volume_v2_x(self):
        self.base_dir = 'tests/fixtures/volumes'
        result = self.dispatch(['-f', 'external-volumes-v2-x.yml', 'config'])
        json_result = yaml.safe_load(result.stdout)
        json_result = yaml.load(result.stdout)
        assert 'volumes' in json_result
        assert json_result['volumes'] == {
            'foo': {
@@ -434,7 +374,7 @@ services:
    def test_config_external_volume_v3_x(self):
        self.base_dir = 'tests/fixtures/volumes'
        result = self.dispatch(['-f', 'external-volumes-v3-x.yml', 'config'])
        json_result = yaml.safe_load(result.stdout)
        json_result = yaml.load(result.stdout)
        assert 'volumes' in json_result
        assert json_result['volumes'] == {
            'foo': {
@@ -450,7 +390,7 @@ services:
    def test_config_external_volume_v3_4(self):
        self.base_dir = 'tests/fixtures/volumes'
        result = self.dispatch(['-f', 'external-volumes-v3-4.yml', 'config'])
        json_result = yaml.safe_load(result.stdout)
        json_result = yaml.load(result.stdout)
        assert 'volumes' in json_result
        assert json_result['volumes'] == {
            'foo': {
@@ -466,7 +406,7 @@ services:
    def test_config_external_network_v3_5(self):
        self.base_dir = 'tests/fixtures/networks'
        result = self.dispatch(['-f', 'external-networks-v3-5.yml', 'config'])
        json_result = yaml.safe_load(result.stdout)
        json_result = yaml.load(result.stdout)
        assert 'networks' in json_result
        assert json_result['networks'] == {
            'foo': {
@@ -482,7 +422,7 @@ services:
    def test_config_v1(self):
        self.base_dir = 'tests/fixtures/v1-config'
        result = self.dispatch(['config'])
        assert yaml.safe_load(result.stdout) == {
        assert yaml.load(result.stdout) == {
            'version': '2.1',
            'services': {
                'net': {
@@ -507,7 +447,7 @@ services:
        self.base_dir = 'tests/fixtures/v3-full'
        result = self.dispatch(['config'])

        assert yaml.safe_load(result.stdout) == {
        assert yaml.load(result.stdout) == {
            'version': '3.5',
            'volumes': {
                'foobar': {
@@ -584,23 +524,24 @@ services:
        self.base_dir = 'tests/fixtures/compatibility-mode'
        result = self.dispatch(['--compatibility', 'config'])

        assert yaml.load(result.stdout) == COMPOSE_COMPATIBILITY_DICT

    @mock.patch.dict(os.environ)
    def test_config_compatibility_mode_from_env(self):
        self.base_dir = 'tests/fixtures/compatibility-mode'
        os.environ['COMPOSE_COMPATIBILITY'] = 'true'
        result = self.dispatch(['config'])

        assert yaml.load(result.stdout) == COMPOSE_COMPATIBILITY_DICT

    @mock.patch.dict(os.environ)
    def test_config_compatibility_mode_from_env_and_option_precedence(self):
        self.base_dir = 'tests/fixtures/compatibility-mode'
        os.environ['COMPOSE_COMPATIBILITY'] = 'false'
        result = self.dispatch(['--compatibility', 'config'])

        assert yaml.load(result.stdout) == COMPOSE_COMPATIBILITY_DICT
        assert yaml.load(result.stdout) == {
            'version': '2.3',
            'volumes': {'foo': {'driver': 'default'}},
            'networks': {'bar': {}},
            'services': {
                'foo': {
                    'command': '/bin/true',
                    'image': 'alpine:3.7',
                    'scale': 3,
                    'restart': 'always:7',
                    'mem_limit': '300M',
                    'mem_reservation': '100M',
                    'cpus': 0.7,
                    'volumes': ['foo:/bar:rw'],
                    'networks': {'bar': None},
                }
            },
        }

    def test_ps(self):
        self.project.get_service('simple').create_container()
@@ -675,7 +616,7 @@ services:
    def test_pull_with_digest(self):
        result = self.dispatch(['-f', 'digest.yml', 'pull', '--no-parallel'])

        assert 'Pulling simple ({})...'.format(BUSYBOX_IMAGE_WITH_TAG) in result.stderr
        assert 'Pulling simple (busybox:latest)...' in result.stderr
        assert ('Pulling digest (busybox@'
                'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b520'
                '04ee8502d)...') in result.stderr
@@ -686,7 +627,7 @@ services:
            'pull', '--ignore-pull-failures', '--no-parallel']
        )

        assert 'Pulling simple ({})...'.format(BUSYBOX_IMAGE_WITH_TAG) in result.stderr
        assert 'Pulling simple (busybox:latest)...' in result.stderr
        assert 'Pulling another (nonexisting-image:latest)...' in result.stderr
        assert ('repository nonexisting-image not found' in result.stderr or
                'image library/nonexisting-image:latest not found' in result.stderr or
@@ -713,14 +654,6 @@ services:
            result.stderr
        )

    def test_pull_can_build(self):
        result = self.dispatch([
            '-f', 'can-build-pull-failures.yml', 'pull'],
            returncode=0
        )
        assert 'Some service image(s) must be built from source' in result.stderr
        assert 'docker-compose build can_build' in result.stderr

    def test_pull_with_no_deps(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        result = self.dispatch(['pull', '--no-parallel', 'web'])
@@ -814,27 +747,6 @@ services:
        ]
        assert not containers

    @pytest.mark.xfail(True, reason='Flaky on local')
    def test_build_rm(self):
        containers = [
            Container.from_ps(self.project.client, c)
            for c in self.project.client.containers(all=True)
        ]

        assert not containers

        self.base_dir = 'tests/fixtures/simple-dockerfile'
        self.dispatch(['build', '--no-rm', 'simple'], returncode=0)

        containers = [
            Container.from_ps(self.project.client, c)
            for c in self.project.client.containers(all=True)
        ]
        assert containers

        for c in self.project.client.containers(all=True):
            self.addCleanup(self.project.client.remove_container, c, force=True)

    def test_build_shm_size_build_option(self):
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/build-shm-size'
@@ -874,6 +786,32 @@ services:
        )
        assert 'Favorite Touhou Character: hong.meiling' in result.stdout

    def test_bundle_with_digests(self):
        self.base_dir = 'tests/fixtures/bundle-with-digests/'
        tmpdir = pytest.ensuretemp('cli_test_bundle')
        self.addCleanup(tmpdir.remove)
        filename = str(tmpdir.join('example.dab'))

        self.dispatch(['bundle', '--output', filename])
        with open(filename, 'r') as fh:
            bundle = json.load(fh)

        assert bundle == {
            'Version': '0.1',
            'Services': {
                'web': {
                    'Image': ('dockercloud/hello-world@sha256:fe79a2cfbd17eefc3'
                              '44fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d'),
                    'Networks': ['default'],
                },
                'redis': {
                    'Image': ('redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d'
                              '374b2b7392de1e7d77be26ef8f7b'),
                    'Networks': ['default'],
                }
            },
        }

    def test_build_override_dir(self):
        self.base_dir = 'tests/fixtures/build-path-override-dir'
        self.override_dir = os.path.abspath('tests/fixtures')
@@ -1170,22 +1108,6 @@ services:
        ]
        assert len(remote_volumes) > 0

    @v2_only()
    def test_up_no_start_remove_orphans(self):
        self.base_dir = 'tests/fixtures/v2-simple'
        self.dispatch(['up', '--no-start'], None)

        services = self.project.get_services()

        stopped = reduce((lambda prev, next: prev.containers(
            stopped=True) + next.containers(stopped=True)), services)
        assert len(stopped) == 2

        self.dispatch(['-f', 'one-container.yml', 'up', '--no-start', '--remove-orphans'], None)
        stopped2 = reduce((lambda prev, next: prev.containers(
            stopped=True) + next.containers(stopped=True)), services)
        assert len(stopped2) == 1

    @v2_only()
    def test_up_no_ansi(self):
        self.base_dir = 'tests/fixtures/v2-simple'
@@ -1458,7 +1380,7 @@ services:
            if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]

        assert set([v['Name'].split('/')[-1] for v in volumes]) == {volume_with_label}
        assert set([v['Name'].split('/')[-1] for v in volumes]) == set([volume_with_label])
        assert 'label_key' in volumes[0]['Labels']
        assert volumes[0]['Labels']['label_key'] == 'label_val'

@@ -1573,26 +1495,6 @@ services:
        assert len(db.containers()) == 0
        assert len(console.containers()) == 0

    def test_up_with_attach_dependencies(self):
        self.base_dir = 'tests/fixtures/echo-services-dependencies'
        result = self.dispatch(['up', '--attach-dependencies', '--no-color', 'simple'], None)
        simple_name = self.project.get_service('simple').containers(stopped=True)[0].name_without_project
        another_name = self.project.get_service('another').containers(
            stopped=True
        )[0].name_without_project

        assert '{} | simple'.format(simple_name) in result.stdout
        assert '{} | another'.format(another_name) in result.stdout

    def test_up_handles_aborted_dependencies(self):
        self.base_dir = 'tests/fixtures/abort-on-container-exit-dependencies'
        proc = start_process(
            self.base_dir,
            ['up', 'simple', '--attach-dependencies', '--abort-on-container-exit'])
        wait_on_condition(ContainerCountCondition(self.project, 0))
        proc.wait()
        assert proc.returncode == 1

    def test_up_with_force_recreate(self):
        self.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
@@ -1713,17 +1615,6 @@ services:
        assert stderr == ""
        assert stdout == "/\n"

    @mock.patch.dict(os.environ)
    def test_exec_novalue_var_dotenv_file(self):
        os.environ['MYVAR'] = 'SUCCESS'
        self.base_dir = 'tests/fixtures/exec-novalue-var'
        self.dispatch(['up', '-d'])
        assert len(self.project.containers()) == 1

        stdout, stderr = self.dispatch(['exec', '-T', 'nginx', 'env'])
        assert 'CHECK_VAR=SUCCESS' in stdout
        assert not stderr

    def test_exec_detach_long_form(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        self.dispatch(['up', '--detach', 'console'])
@@ -2154,7 +2045,7 @@ services:
        for _, config in networks.items():
            # TODO: once we drop support for API <1.24, this can be changed to:
            # assert config['Aliases'] == [container.short_id]
            aliases = set(config['Aliases'] or []) - {container.short_id}
            aliases = set(config['Aliases'] or []) - set([container.short_id])
            assert not aliases

    @v2_only()
@@ -2174,7 +2065,7 @@ services:
        for _, config in networks.items():
            # TODO: once we drop support for API <1.24, this can be changed to:
            # assert config['Aliases'] == [container.short_id]
            aliases = set(config['Aliases'] or []) - {container.short_id}
            aliases = set(config['Aliases'] or []) - set([container.short_id])
            assert not aliases

        assert self.lookup(container, 'app')
@@ -2410,7 +2301,6 @@ services:
        assert 'another' in result.stdout
        assert 'exited with code 0' in result.stdout

    @pytest.mark.skip(reason="race condition between up and logs")
    def test_logs_follow_logs_from_new_containers(self):
        self.base_dir = 'tests/fixtures/logs-composefile'
        self.dispatch(['up', '-d', 'simple'])
@@ -2437,7 +2327,6 @@ services:
        assert '{} exited with code 0'.format(another_name) in result.stdout
        assert '{} exited with code 137'.format(simple_name) in result.stdout

    @pytest.mark.skip(reason="race condition between up and logs")
    def test_logs_follow_logs_from_restarted_containers(self):
        self.base_dir = 'tests/fixtures/logs-restart-composefile'
        proc = start_process(self.base_dir, ['up'])
@@ -2458,7 +2347,6 @@ services:
        ) == 3
        assert result.stdout.count('world') == 3

    @pytest.mark.skip(reason="race condition between up and logs")
    def test_logs_default(self):
        self.base_dir = 'tests/fixtures/logs-composefile'
        self.dispatch(['up', '-d'])
@@ -2585,12 +2473,10 @@ services:
        self.dispatch(['up', '-d'])
        assert len(project.get_service('web').containers()) == 2
        assert len(project.get_service('db').containers()) == 1
        assert len(project.get_service('worker').containers()) == 0

        self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'worker=1'])
        self.dispatch(['up', '-d', '--scale', 'web=3'])
        assert len(project.get_service('web').containers()) == 3
        assert len(project.get_service('db').containers()) == 1
        assert len(project.get_service('worker').containers()) == 1

    def test_up_scale_scale_down(self):
        self.base_dir = 'tests/fixtures/scale'
@@ -2599,26 +2485,22 @@ services:
        self.dispatch(['up', '-d'])
        assert len(project.get_service('web').containers()) == 2
        assert len(project.get_service('db').containers()) == 1
        assert len(project.get_service('worker').containers()) == 0

        self.dispatch(['up', '-d', '--scale', 'web=1'])
        assert len(project.get_service('web').containers()) == 1
        assert len(project.get_service('db').containers()) == 1
        assert len(project.get_service('worker').containers()) == 0

    def test_up_scale_reset(self):
        self.base_dir = 'tests/fixtures/scale'
        project = self.project

        self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3', '--scale', 'worker=3'])
        self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3'])
        assert len(project.get_service('web').containers()) == 3
        assert len(project.get_service('db').containers()) == 3
        assert len(project.get_service('worker').containers()) == 3

        self.dispatch(['up', '-d'])
        assert len(project.get_service('web').containers()) == 2
        assert len(project.get_service('db').containers()) == 1
        assert len(project.get_service('worker').containers()) == 0

    def test_up_scale_to_zero(self):
        self.base_dir = 'tests/fixtures/scale'
@@ -2627,12 +2509,10 @@ services:
        self.dispatch(['up', '-d'])
        assert len(project.get_service('web').containers()) == 2
        assert len(project.get_service('db').containers()) == 1
        assert len(project.get_service('worker').containers()) == 0

        self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0', '--scale', 'worker=0'])
        self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0'])
        assert len(project.get_service('web').containers()) == 0
        assert len(project.get_service('db').containers()) == 0
        assert len(project.get_service('worker').containers()) == 0
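
# Read together, the scale tests pin down the CLI contract: `--scale`
# overrides the file-defined scale for that invocation only, and a later
# plain `up -d` returns every service to the scale declared in the compose
# file (web=2, db=1 in this fixture, plus worker=0 on the 1.26.x side).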

    def test_port(self):
        self.base_dir = 'tests/fixtures/ports-composefile'
@@ -2784,7 +2664,7 @@ services:
        self.base_dir = 'tests/fixtures/extends'
        self.dispatch(['up', '-d'], None)

        assert set([s.name for s in self.project.services]) == {'mydb', 'myweb'}
        assert set([s.name for s in self.project.services]) == set(['mydb', 'myweb'])

        # Sort by name so we get [db, web]
        containers = sorted(
@@ -2796,9 +2676,15 @@ services:
        web = containers[1]
        db_name = containers[0].name_without_project

        assert set(get_links(web)) == {'db', db_name, 'extends_{}'.format(db_name)}
        assert set(get_links(web)) == set(
            ['db', db_name, 'extends_{}'.format(db_name)]
        )

        expected_env = {"FOO=1", "BAR=2", "BAZ=2"}
        expected_env = set([
            "FOO=1",
            "BAR=2",
            "BAZ=2",
        ])
        assert expected_env <= set(web.get('Config.Env'))

    def test_top_services_not_running(self):
@@ -2853,8 +2739,8 @@ services:
        result = self.dispatch(['images'])

        assert 'busybox' in result.stdout
        assert '_another_1' in result.stdout
        assert '_simple_1' in result.stdout
        assert 'multiple-composefiles_another_1' in result.stdout
        assert 'multiple-composefiles_simple_1' in result.stdout

    @mock.patch.dict(os.environ)
    def test_images_tagless_image(self):
@@ -2902,4 +2788,4 @@ services:

        assert re.search(r'foo1.+test[ \t]+dev', result.stdout) is not None
        assert re.search(r'foo2.+test[ \t]+prod', result.stdout) is not None
        assert re.search(r'foo3.+test[ \t]+latest', result.stdout) is not None
        assert re.search(r'foo3.+_foo3[ \t]+latest', result.stdout) is not None

@@ -1,48 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals

import os
import shutil
import unittest

from docker import ContextAPI

from tests.acceptance.cli_test import dispatch


class ContextTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.docker_dir = os.path.join(os.environ.get("HOME", "/tmp"), '.docker')
        if not os.path.exists(cls.docker_dir):
            os.makedirs(cls.docker_dir)
        f = open(os.path.join(cls.docker_dir, "config.json"), "w")
        f.write("{}")
        f.close()
        cls.docker_config = os.path.join(cls.docker_dir, "config.json")
        os.environ['DOCKER_CONFIG'] = cls.docker_config
        ContextAPI.create_context("testcontext", host="tcp://doesnotexist:8000")

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.docker_dir, ignore_errors=True)

    def setUp(self):
        self.base_dir = 'tests/fixtures/simple-composefile'
        self.override_dir = None

    def dispatch(self, options, project_options=None, returncode=0, stdin=None):
        return dispatch(self.base_dir, options, project_options, returncode, stdin)

    def test_help(self):
        result = self.dispatch(['help'], returncode=0)
        assert '-c, --context NAME' in result.stdout

    def test_fail_on_both_host_and_context_opt(self):
        result = self.dispatch(['-H', 'unix://', '-c', 'default', 'up'], returncode=1)
        assert '-H, --host and -c, --context are mutually exclusive' in result.stderr

    def test_fail_run_on_inexistent_context(self):
        result = self.dispatch(['-c', 'testcontext', 'up', '-d'], returncode=1)
        assert "Couldn't connect to Docker daemon" in result.stderr
@@ -1,243 +0,0 @@
from __future__ import absolute_import
from __future__ import unicode_literals

import pytest

import tests.acceptance.cli_test

# FIXME Skipping all the acceptance tests when in `--conformity`
non_conformity_tests = [
    "test_build_failed",
    "test_build_failed_forcerm",
    "test_build_log_level",
    "test_build_memory_build_option",
    "test_build_no_cache",
    "test_build_no_cache_pull",
    "test_build_override_dir",
    "test_build_override_dir_invalid_path",
    "test_build_parallel",
    "test_build_plain",
    "test_build_pull",
    "test_build_rm",
    "test_build_shm_size_build_option",
    "test_build_with_buildarg_cli_override",
    "test_build_with_buildarg_from_compose_file",
    "test_build_with_buildarg_old_api_version",
    "test_config_compatibility_mode",
    "test_config_compatibility_mode_from_env",
    "test_config_compatibility_mode_from_env_and_option_precedence",
    "test_config_default",
    "test_config_external_network",
    "test_config_external_network_v3_5",
    "test_config_external_volume_v2",
    "test_config_external_volume_v2_x",
    "test_config_external_volume_v3_4",
    "test_config_external_volume_v3_x",
    "test_config_list_services",
    "test_config_list_volumes",
    "test_config_quiet",
    "test_config_quiet_with_error",
    "test_config_restart",
    "test_config_stdin",
    "test_config_v1",
    "test_config_v3",
    "test_config_with_dot_env",
    "test_config_with_dot_env_and_override_dir",
    "test_config_with_env_file",
    "test_config_with_hash_option",
    "test_create",
    "test_create_with_force_recreate",
    "test_create_with_force_recreate_and_no_recreate",
    "test_create_with_no_recreate",
    "test_down",
    "test_down_invalid_rmi_flag",
    "test_down_signal",
    "test_down_timeout",
    "test_env_file_relative_to_compose_file",
    "test_events_human_readable",
    "test_events_json",
    "test_exec_custom_user",
    "test_exec_detach_long_form",
    "test_exec_novalue_var_dotenv_file",
    "test_exec_service_with_environment_overridden",
    "test_exec_without_tty",
    "test_exec_workdir",
    "test_exit_code_from_signal_stop",
    "test_expanded_port",
    "test_forward_exitval",
    "test_help",
    "test_help_nonexistent",
    "test_home_and_env_var_in_volume_path",
    "test_host_not_reachable",
    "test_host_not_reachable_volumes_from_container",
    "test_host_not_reachable_volumes_from_container",
    "test_images",
    "test_images_default_composefile",
    "test_images_tagless_image",
    "test_images_use_service_tag",
    "test_kill",
    "test_kill_signal_sigstop",
    "test_kill_stopped_service",
    "test_logs_default",
    "test_logs_follow",
    "test_logs_follow_logs_from_new_containers",
    "test_logs_follow_logs_from_restarted_containers",
    "test_logs_invalid_service_name",
    "test_logs_on_stopped_containers_exits",
    "test_logs_tail",
    "test_logs_timestamps",
    "test_pause_no_containers",
    "test_pause_unpause",
    "test_port",
    "test_port_with_scale",
    "test_ps",
    "test_ps_all",
    "test_ps_alternate_composefile",
    "test_ps_default_composefile",
    "test_ps_services_filter_option",
    "test_ps_services_filter_status",
    "test_pull",
    "test_pull_can_build",
    "test_pull_with_digest",
    "test_pull_with_ignore_pull_failures",
    "test_pull_with_include_deps",
    "test_pull_with_no_deps",
    "test_pull_with_parallel_failure",
    "test_pull_with_quiet",
    "test_quiet_build",
    "test_restart",
    "test_restart_no_containers",
    "test_restart_stopped_container",
    "test_rm",
    "test_rm_all",
    "test_rm_stop",
    "test_run_detached_connects_to_network",
    "test_run_does_not_recreate_linked_containers",
    "test_run_env_values_from_system",
    "test_run_handles_sighup",
    "test_run_handles_sigint",
    "test_run_handles_sigterm",
    "test_run_interactive_connects_to_network",
    "test_run_label_flag",
    "test_run_one_off_with_multiple_volumes",
    "test_run_one_off_with_volume",
    "test_run_one_off_with_volume_merge",
    "test_run_rm",
    "test_run_service_with_compose_file_entrypoint",
    "test_run_service_with_compose_file_entrypoint_and_command_overridden",
    "test_run_service_with_compose_file_entrypoint_and_empty_string_command",
    "test_run_service_with_compose_file_entrypoint_overridden",
    "test_run_service_with_dependencies",
    "test_run_service_with_dockerfile_entrypoint",
    "test_run_service_with_dockerfile_entrypoint_and_command_overridden",
    "test_run_service_with_dockerfile_entrypoint_overridden",
    "test_run_service_with_environment_overridden",
    "test_run_service_with_explicitly_mapped_ip_ports",
    "test_run_service_with_explicitly_mapped_ports",
    "test_run_service_with_links",
    "test_run_service_with_map_ports",
    "test_run_service_with_scaled_dependencies",
    "test_run_service_with_unset_entrypoint",
    "test_run_service_with_use_aliases",
    "test_run_service_with_user_overridden",
    "test_run_service_with_user_overridden_short_form",
    "test_run_service_with_workdir_overridden",
    "test_run_service_with_workdir_overridden_short_form",
    "test_run_service_without_links",
    "test_run_service_without_map_ports",
    "test_run_unicode_env_values_from_system",
    "test_run_with_custom_name",
    "test_run_with_expose_ports",
    "test_run_with_no_deps",
    "test_run_without_command",
    "test_scale",
    "test_scale_v2_2",
    "test_shorthand_host_opt",
    "test_shorthand_host_opt_interactive",
    "test_start_no_containers",
    "test_stop",
    "test_stop_signal",
    "test_top_processes_running",
    "test_top_services_not_running",
    "test_top_services_running",
    "test_unpause_no_containers",
    "test_up",
    "test_up_attached",
    "test_up_detached",
    "test_up_detached_long_form",
    "test_up_external_networks",
    "test_up_handles_abort_on_container_exit",
    "test_up_handles_abort_on_container_exit_code",
    "test_up_handles_aborted_dependencies",
    "test_up_handles_force_shutdown",
    "test_up_handles_sigint",
    "test_up_handles_sigterm",
    "test_up_logging",
    "test_up_logging_legacy",
    "test_up_missing_network",
    "test_up_no_ansi",
    "test_up_no_services",
    "test_up_no_start",
    "test_up_no_start_remove_orphans",
    "test_up_scale_reset",
    "test_up_scale_scale_down",
    "test_up_scale_scale_up",
    "test_up_scale_to_zero",
    "test_up_with_attach_dependencies",
    "test_up_with_default_network_config",
    "test_up_with_default_override_file",
    "test_up_with_duplicate_override_yaml_files",
    "test_up_with_extends",
    "test_up_with_external_default_network",
    "test_up_with_force_recreate",
    "test_up_with_force_recreate_and_no_recreate",
    "test_up_with_healthcheck",
    "test_up_with_ignore_remove_orphans",
    "test_up_with_links_v1",
    "test_up_with_multiple_files",
    "test_up_with_net_is_invalid",
    "test_up_with_net_v1",
    "test_up_with_network_aliases",
    "test_up_with_network_internal",
    "test_up_with_network_labels",
    "test_up_with_network_mode",
    "test_up_with_network_static_addresses",
    "test_up_with_networks",
    "test_up_with_no_deps",
    "test_up_with_no_recreate",
    "test_up_with_override_yaml",
    "test_up_with_pid_mode",
    "test_up_with_timeout",
    "test_up_with_volume_labels",
    "test_fail_on_both_host_and_context_opt",
    "test_fail_run_on_inexistent_context",
]


def pytest_addoption(parser):
    parser.addoption(
        "--conformity",
        action="store_true",
        default=False,
        help="Only runs tests that are not black listed as non conformity test. "
             "The conformity tests check for compatibility with the Compose spec."
    )
    parser.addoption(
        "--binary",
        default=tests.acceptance.cli_test.DOCKER_COMPOSE_EXECUTABLE,
        help="Forces the execution of a binary in the PATH. Default is `docker-compose`."
    )


def pytest_collection_modifyitems(config, items):
    if not config.getoption("--conformity"):
        return
    if config.getoption("--binary"):
        tests.acceptance.cli_test.DOCKER_COMPOSE_EXECUTABLE = config.getoption("--binary")

    print("Binary -> {}".format(tests.acceptance.cli_test.DOCKER_COMPOSE_EXECUTABLE))
    skip_non_conformity = pytest.mark.skip(reason="skipping because that's not a conformity test")
    for item in items:
        if item.name in non_conformity_tests:
            print("Skipping '{}' when running in compatibility mode".format(item.name))
            item.add_marker(skip_non_conformity)
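
Together, the two hooks give the conformity harness its entry point: --conformity skips every name blacklisted in non_conformity_tests, and --binary swaps in the executable under test. A sketch of an equivalent programmatic invocation, assuming it runs from the repository root so this conftest is collected (the binary path is hypothetical):

    import pytest

    # Run only the conformity subset against a specific docker-compose build.
    pytest.main(['tests/acceptance', '--conformity',
                 '--binary', '/usr/local/bin/docker-compose'])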

@@ -1,6 +1,6 @@
simple:
  image: busybox:1.31.0-uclibc
  image: busybox:latest
  command: top
another:
  image: busybox:1.31.0-uclibc
  image: busybox:latest
  command: top

@@ -1,6 +1,6 @@
simple:
  image: busybox:1.31.0-uclibc
  image: busybox:latest
  command: top
another:
  image: busybox:1.31.0-uclibc
  image: busybox:latest
  command: ls .

@@ -1,6 +1,6 @@
simple:
  image: busybox:1.31.0-uclibc
  image: busybox:latest
  command: top
another:
  image: busybox:1.31.0-uclibc
  image: busybox:latest
  command: ls /thecakeisalie

@@ -1,10 +0,0 @@
version: "2.0"
services:
  simple:
    image: busybox:1.31.0-uclibc
    command: top
    depends_on:
      - another
  another:
    image: busybox:1.31.0-uclibc
    command: ls /thecakeisalie

2 tests/fixtures/build-args/Dockerfile vendored
@@ -1,4 +1,4 @@
FROM busybox:1.31.0-uclibc
FROM busybox:latest
LABEL com.docker.compose.test_image=true
ARG favorite_th_character
RUN echo "Favorite Touhou Character: ${favorite_th_character}"

2 tests/fixtures/build-ctx/Dockerfile vendored
@@ -1,3 +1,3 @@
FROM busybox:1.31.0-uclibc
FROM busybox:latest
LABEL com.docker.compose.test_image=true
CMD echo "success"

2 tests/fixtures/build-memory/Dockerfile vendored
@@ -1,4 +1,4 @@
FROM busybox:1.31.0-uclibc
FROM busybox

# Report the memory (through the size of the group memory)
RUN echo "memory:" $(cat /sys/fs/cgroup/memory/memory.limit_in_bytes)

@@ -1,4 +1,4 @@

FROM busybox:1.31.0-uclibc
FROM busybox:latest
RUN echo a
CMD top

@@ -1,4 +1,4 @@

FROM busybox:1.31.0-uclibc
FROM busybox:latest
RUN echo b
CMD top

9 tests/fixtures/bundle-with-digests/docker-compose.yml vendored Normal file
@@ -0,0 +1,9 @@

version: '2.0'

services:
  web:
    image: dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d

  redis:
    image: redis@sha256:a84cb8f53a70e19f61ff2e1d5e73fb7ae62d374b2b7392de1e7d77be26ef8f7b
@@ -1,7 +1,7 @@
version: '3.5'
services:
  foo:
    image: alpine:3.10.1
    image: alpine:3.7
    command: /bin/true
    deploy:
      replicas: 3

4 tests/fixtures/default-env-file/.env2 vendored
@@ -1,4 +0,0 @@
IMAGE=alpine:latest
COMMAND=false
PORT1=5644
PORT2=9998

2 tests/fixtures/default-env-file/alt/.env vendored
@@ -1,4 +1,4 @@
IMAGE=alpine:3.10.1
IMAGE=alpine:3.4
COMMAND=echo uwu
PORT1=3341
PORT2=4449

@@ -1,4 +1,4 @@
FROM busybox:1.31.0-uclibc
FROM busybox:latest
LABEL com.docker.compose.test_image=true
VOLUME /data
CMD top

@@ -1,10 +1,10 @@

web:
  image: busybox:1.31.0-uclibc
  image: busybox:latest
  command: "sleep 100"
  links:
    - db

db:
  image: busybox:1.31.0-uclibc
  image: busybox:latest
  command: "sleep 200"

@@ -1,10 +0,0 @@
version: "2.0"
services:
  simple:
    image: busybox:1.31.0-uclibc
    command: echo simple
    depends_on:
      - another
  another:
    image: busybox:1.31.0-uclibc
    command: echo another
Some files were not shown because too many files have changed in this diff.