Mirror of https://github.com/docker/compose.git (synced 2026-02-10 02:29:25 +08:00)

Compare commits

195 Commits
| SHA1 |
|---|
| 00bd108aec |
| 792afb8d13 |
| 150449bbd2 |
| 8d0df18762 |
| 5b53f8e47f |
| c5fef61383 |
| ce3cb2b00c |
| d9e73db8e6 |
| d6b4d1c755 |
| 0baf24a269 |
| 0511b0c2b8 |
| 5bbdf3d84a |
| 52103cce74 |
| 020b57ca31 |
| bfa54081d4 |
| 0be8e4a676 |
| fd8ab2f7ac |
| b406b393bf |
| 0a9d1277c5 |
| c350f80d4b |
| 8a4095b507 |
| 0345461412 |
| 80856eacaf |
| d7b1972d5e |
| 7c42776770 |
| 3b0742fd57 |
| efd44de1b7 |
| bdb3f91eb4 |
| f94cb49062 |
| e7ed070690 |
| 8a1bf5d28b |
| 7ef392004f |
| f34f5b4d26 |
| b0484700da |
| f65fd02383 |
| cf8dc46560 |
| 2cfbe63533 |
| 8318f66330 |
| cb17c3c8a6 |
| 9174a99d27 |
| 4eb43c53fa |
| 150b88ab5d |
| 5159058c7e |
| 1ae191a936 |
| 3b2f3cdce3 |
| 47778f8b77 |
| 7d88edaf24 |
| 636c13f818 |
| 5a072b1ad5 |
| ddceb1ac9d |
| d48f28c72c |
| 2d16a05afa |
| bb94ea034e |
| 0938c7e96f |
| f429ee958a |
| e9ded2c518 |
| 54e6e0bd8f |
| 3bc871e64b |
| 6ff15d9472 |
| 49bc0603e3 |
| ce8a09b53f |
| 3dc8734897 |
| 852e192820 |
| d9e7859664 |
| e28b223650 |
| 1964693074 |
| dc74e6aa0e |
| b182cf6850 |
| f330b24632 |
| 8339269e13 |
| ee6aeed84e |
| 7a9dfa4284 |
| 29daae3d6e |
| 8dea7b5cae |
| bc6ad2e4a4 |
| e6a7694b8d |
| 46d936c750 |
| 15bc7850bb |
| 8a64ab56a0 |
| 1178c51e6a |
| 3b3fd3e56c |
| b1e10f559e |
| baea5a48f5 |
| cb3a6ce52b |
| 28f3802a07 |
| fd0e0a2cbd |
| e90df62bb0 |
| b0af2deb2b |
| be22bc735a |
| b5f5e27597 |
| b1334b8dfc |
| 25ca75db4d |
| 827e864ed0 |
| 28301fb1a4 |
| fa3e16c66b |
| edd76bfd70 |
| c496c23071 |
| 02284378bf |
| 10b290e682 |
| 3906a7a67c |
| 83671db3dd |
| 1a41678c58 |
| 035276e027 |
| db24023884 |
| c48f542962 |
| 42dc7a6a87 |
| 30b3b47383 |
| 061b52da9a |
| 04aa155878 |
| 2d4f8d31fc |
| e1f8603a62 |
| a2ce602f6c |
| 401334e03f |
| 93cf2b921a |
| 83b2433a27 |
| c8d06137b5 |
| c61b8aa5ac |
| ff3984e609 |
| 2efea2e9f5 |
| 6a3a95c4a8 |
| fee8a1c6c6 |
| 7ffe83dc95 |
| d20c2551f2 |
| 0e9a5b6b78 |
| 6887a3fc3e |
| 586fe87f98 |
| 6d66130266 |
| 0f83a8630e |
| 26cb941f79 |
| 43783d36e2 |
| 08e6bfc859 |
| 7a870e2449 |
| 8cd8f08d77 |
| cfe91becc7 |
| 9384e5f4d7 |
| 68bd0eb523 |
| 3fe665b93d |
| 508d71c5df |
| 58c5ea8217 |
| ec31d3c2ac |
| 599723f890 |
| 0a9b9fd8fe |
| 3c8a56dbf3 |
| e63ab14b1e |
| 32cf776ecd |
| 955784c406 |
| 2d22c2b5ce |
| 852c9e80b4 |
| 37850f7955 |
| 4bf2fe9fed |
| e21a8d6293 |
| f8b6459403 |
| be6c9565e3 |
| 60fe97416c |
| 629c9f62e9 |
| 7c3fe359b7 |
| d2aa15c06e |
| 6530880361 |
| 1bd8a773a7 |
| fed8ef6b79 |
| 419fcdd6c8 |
| 65b714c108 |
| 44dd232e97 |
| 83ad5e97b7 |
| b0a35ccc98 |
| f5480ee3ed |
| b4924dee83 |
| 2ca8ab914a |
| 3ec8c60657 |
| 06ec06472f |
| 466e1d3197 |
| 0d6b99e6f9 |
| 01d91c490c |
| 6f6e1635fd |
| 3d05a1becf |
| 42cd961d58 |
| d15fcc6444 |
| 22c2471a08 |
| 29a1cc452d |
| b05a94fd66 |
| 15cad92b61 |
| c7afc6188b |
| ca19b7fcc9 |
| 93bd27a0cc |
| 68c462e607 |
| 916aac6c27 |
| eafcd1b35e |
| 1e399c271a |
| 544b579cb0 |
| daa6bec80a |
| 34bd41cc0c |
| 70953b18c0 |
| cfe1a860ff |
| 4dcda432cf |
| 5c2a885647 |
.github/dependabot.yml (vendored, 18 changed lines)

@@ -5,14 +5,20 @@ updates:
schedule:
interval: daily
ignore:
# docker/buildx + docker/cli + docker/docker require coordination to
# ensure compatibility between them
# docker + moby deps require coordination
- dependency-name: "github.com/docker/buildx"
# buildx is still 0.x
update-types: ["version-update:semver-minor"]
- dependency-name: "github.com/moby/buildkit"
# buildkit is still 0.x
update-types: [ "version-update:semver-minor" ]
- dependency-name: "github.com/docker/cli"
# docker/cli uses CalVer rather than SemVer
update-types: ["version-update:semver-major", "version-update:semver-minor"]
update-types: ["version-update:semver-major"]
- dependency-name: "github.com/docker/docker"
# docker/docker uses CalVer rather than SemVer
update-types: ["version-update:semver-major", "version-update:semver-minor"]
update-types: ["version-update:semver-major"]
- dependency-name: "github.com/containerd/containerd"
# containerd major/minor must be kept in sync with moby
update-types: [ "version-update:semver-major", "version-update:semver-minor" ]
- dependency-name: "go.opentelemetry.io/otel/*"
# OTEL is v1.x but has some parts that are not API stable yet
update-types: [ "version-update:semver-major", "version-update:semver-minor"]
.github/workflows/ci.yml (vendored, 82 changed lines)

@@ -7,7 +7,7 @@ concurrency:
on:
push:
branches:
- 'v2'
- 'main'
tags:
- 'v*'
pull_request:
@@ -19,7 +19,6 @@ on:
default: "false"

env:
DESTDIR: "./bin"
DOCKER_CLI_VERSION: "20.10.17"

permissions:
@@ -103,7 +102,7 @@ jobs:
uses: actions/upload-artifact@v3
with:
name: compose
path: ${{ env.DESTDIR }}/*
path: ./bin/release/*
if-no-files-found: error

test:
@@ -124,13 +123,15 @@ jobs:
*.cache-from=type=gha,scope=test
*.cache-to=type=gha,scope=test
-
name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
name: Gather coverage data
uses: actions/upload-artifact@v3
with:
name: coverage-data-unit
path: bin/coverage/unit/
if-no-files-found: error

e2e:
runs-on: ubuntu-latest
env:
DESTDIR: "./bin/build"
strategy:
fail-fast: false
matrix:
@@ -179,11 +180,17 @@ jobs:
name: Test plugin mode
if: ${{ matrix.mode == 'plugin' }}
run: |
rm -rf ./covdatafiles
mkdir ./covdatafiles
make e2e-compose GOCOVERDIR=covdatafiles
go tool covdata textfmt -i=covdatafiles -o=coverage.out

rm -rf ./bin/coverage/e2e
mkdir -p ./bin/coverage/e2e
make e2e-compose GOCOVERDIR=bin/coverage/e2e TEST_FLAGS="-v"
-
name: Gather coverage data
if: ${{ matrix.mode == 'plugin' }}
uses: actions/upload-artifact@v3
with:
name: coverage-data-e2e
path: bin/coverage/e2e/
if-no-files-found: error
-
name: Test standalone mode
if: ${{ matrix.mode == 'standalone' }}
@@ -196,9 +203,44 @@ jobs:
if: ${{ matrix.mode == 'cucumber'}}
run: |
make test-cucumber
-
name: Upload coverage to Codecov

coverage:
runs-on: ubuntu-22.04
needs:
- test
- e2e
steps:
# codecov won't process the report without the source code available
- name: Checkout
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
check-latest: true
- name: Download unit test coverage
uses: actions/download-artifact@v3
with:
name: coverage-data-unit
path: coverage/unit
- name: Download E2E test coverage
uses: actions/download-artifact@v3
with:
name: coverage-data-e2e
path: coverage/e2e
- name: Merge coverage reports
run: |
go tool covdata textfmt -i=./coverage/unit,./coverage/e2e -o ./coverage.txt
- name: Store coverage report in GitHub Actions
uses: actions/upload-artifact@v3
with:
name: go-covdata-txt
path: ./coverage.txt
if-no-files-found: error
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
with:
files: ./coverage.txt

release:
permissions:
@@ -216,10 +258,10 @@ jobs:
uses: actions/download-artifact@v3
with:
name: compose
path: ${{ env.DESTDIR }}
path: bin/release
-
name: Create checksums
working-directory: ${{ env.DESTDIR }}
working-directory: bin/release
run: |
find . -type f -print0 | sort -z | xargs -r0 shasum -a 256 -b | sed 's# \*\./# *#' > $RUNNER_TEMP/checksums.txt
shasum -a 256 -U -c $RUNNER_TEMP/checksums.txt
@@ -227,21 +269,21 @@
cat checksums.txt | while read sum file; do echo "$sum $file" > ${file#\*}.sha256; done
-
name: License
run: cp packaging/* ${{ env.DESTDIR }}/
run: cp packaging/* bin/release/
-
name: List artifacts
run: |
tree -nh ${{ env.DESTDIR }}
tree -nh bin/release
-
name: Check artifacts
run: |
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
find bin/release -type f -exec file -e ascii -- {} +
-
name: GitHub Release
if: startsWith(github.ref, 'refs/tags/v')
uses: ncipollo/release-action@58ae73b360456532aafd58ee170c045abbeaee37 # v1.10.0
with:
artifacts: ${{ env.DESTDIR }}/*
artifacts: bin/release/*
generateReleaseNotes: true
draft: true
token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/merge.yml (vendored, 33 changed lines)

@@ -7,7 +7,7 @@ concurrency:
on:
push:
branches:
- 'v2'
- 'main'
tags:
- 'v*'

@@ -76,6 +76,8 @@ jobs:

bin-image:
runs-on: ubuntu-22.04
outputs:
digest: ${{ fromJSON(steps.bake.outputs.metadata).image-cross['containerimage.digest'] }}
steps:
-
name: Checkout
@@ -107,6 +109,7 @@ jobs:
-
name: Build and push image
uses: docker/bake-action@v2
id: bake
with:
files: |
./docker-bake.hcl
@@ -118,3 +121,31 @@ jobs:
*.cache-to=type=gha,scope=bin-image,mode=max
*.attest=type=sbom
*.attest=type=provenance,mode=max,builder-id=https://github.com/${{ env.GITHUB_REPOSITORY }}/actions/runs/${{ env.GITHUB_RUN_ID }}

desktop-edge-test:
runs-on: ubuntu-latest
needs: bin-image
steps:
-
name: Generate Token
id: generate_token
uses: tibdex/github-app-token@v1
with:
app_id: ${{ vars.DOCKERDESKTOP_APP_ID }}
private_key: ${{ secrets.DOCKERDESKTOP_APP_PRIVATEKEY }}
repository: docker/${{ secrets.DOCKERDESKTOP_REPO }}
-
name: Trigger Docker Desktop e2e with edge version
uses: actions/github-script@v6
with:
github-token: ${{ steps.generate_token.outputs.token }}
script: |
await github.rest.actions.createWorkflowDispatch({
owner: 'docker',
repo: '${{ secrets.DOCKERDESKTOP_REPO }}',
workflow_id: 'compose-edge-integration.yml',
ref: 'main',
inputs: {
"image-tag": "${{ needs.bin-image.outputs.digest }}"
}
})
.github/workflows/scorecards.yml (vendored, 2 changed lines)

@@ -5,7 +5,7 @@ on:
schedule:
- cron: '44 9 * * 4'
push:
branches: [ "v2" ]
branches: [ "main" ]

# Declare default permissions as read only.
permissions: read-all
@@ -31,12 +31,11 @@ linters-settings:
- name: package-comments
disabled: true
depguard:
list-type: denylist
include-go-root: true
packages:
# The io/ioutil package has been deprecated.
# https://go.dev/doc/go1.16#ioutil
- io/ioutil
rules:
all:
deny:
- pkg: io/ioutil
desc: 'io/ioutil package has been deprecated'
gomodguard:
blocked:
versions:
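The replacement `depguard` rule above enforces the same policy as the old `packages` list: no imports of the deprecated io/ioutil package. A minimal sketch of what the rule pushes code toward, assuming only the Go standard library; since Go 1.16 the ioutil helpers are thin wrappers around equivalents in io and os, so the replacements are drop-in.

```go
package example

import (
	"io"
	"os"
)

// readConfig uses os.ReadFile where pre-Go-1.16 code would call ioutil.ReadFile.
func readConfig(path string) ([]byte, error) {
	return os.ReadFile(path)
}

// drain uses io.ReadAll where pre-Go-1.16 code would call ioutil.ReadAll.
func drain(r io.Reader) ([]byte, error) {
	return io.ReadAll(r)
}
```

ioutil.TempDir and ioutil.TempFile map to os.MkdirTemp and os.CreateTemp in the same way.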
Dockerfile (22 changed lines)

@@ -15,9 +15,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

ARG GO_VERSION=1.20.4
ARG GO_VERSION=1.21.0
ARG XX_VERSION=1.2.1
ARG GOLANGCI_LINT_VERSION=v1.52.2
ARG GOLANGCI_LINT_VERSION=v1.53.2
ARG ADDLICENSE_VERSION=v1.0.0

ARG BUILD_TAGS="e2e"
@@ -84,13 +84,14 @@ RUN --mount=type=bind,target=. \
--mount=type=bind,from=osxcross,src=/osxsdk,target=/xx-sdk \
xx-go --wrap && \
if [ "$(xx-info os)" == "darwin" ]; then export CGO_ENABLED=1; fi && \
make build GO_BUILDTAGS="$BUILD_TAGS" DESTDIR=/usr/bin && \
xx-verify --static /usr/bin/docker-compose
make build GO_BUILDTAGS="$BUILD_TAGS" DESTDIR=/out && \
xx-verify --static /out/docker-compose

FROM build-base AS lint
ARG BUILD_TAGS
RUN --mount=type=bind,target=. \
--mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
--mount=from=golangci-lint,source=/usr/bin/golangci-lint,target=/usr/bin/golangci-lint \
golangci-lint run --build-tags "$BUILD_TAGS" ./...

@@ -100,11 +101,13 @@ ARG BUILD_TAGS
RUN --mount=type=bind,target=. \
--mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/go/pkg/mod \
go test -tags "$BUILD_TAGS" -v -coverprofile=/tmp/coverage.txt -covermode=atomic $(go list $(TAGS) ./... | grep -vE 'e2e') && \
go tool cover -func=/tmp/coverage.txt
rm -rf /tmp/coverage && \
mkdir -p /tmp/coverage && \
go test -tags "$BUILD_TAGS" -v -cover -covermode=atomic $(go list $(TAGS) ./... | grep -vE 'e2e') -args -test.gocoverdir="/tmp/coverage" && \
go tool covdata percent -i=/tmp/coverage

FROM scratch AS test-coverage
COPY --from=test /tmp/coverage.txt /coverage.txt
COPY --from=test --link /tmp/coverage /

FROM base AS license-set
ARG LICENSE_FILES
@@ -127,6 +130,7 @@ FROM base AS docsgen
WORKDIR /src
RUN --mount=target=. \
--mount=target=/root/.cache,type=cache \
--mount=type=cache,target=/go/pkg/mod \
go build -o /out/docsgen ./docs/yaml/main/generate.go

FROM --platform=${BUILDPLATFORM} alpine AS docs-build
@@ -162,11 +166,11 @@ RUN --mount=target=/context \
EOT

FROM scratch AS binary-unix
COPY --link --from=build /usr/bin/docker-compose /
COPY --link --from=build /out/docker-compose /
FROM binary-unix AS binary-darwin
FROM binary-unix AS binary-linux
FROM scratch AS binary-windows
COPY --link --from=build /usr/bin/docker-compose /docker-compose.exe
COPY --link --from=build /out/docker-compose /docker-compose.exe
FROM binary-$TARGETOS AS binary
# enable scanning for this stage
ARG BUILDKIT_SBOM_SCAN_STAGE=true
Makefile (35 changed lines)

@@ -17,29 +17,18 @@ VERSION ?= $(shell git describe --match 'v[0-9]*' --dirty='.m' --always --tags)

GO_LDFLAGS ?= -w -X ${PKG}/internal.Version=${VERSION}
GO_BUILDTAGS ?= e2e

DRIVE_PREFIX?=
ifeq ($(OS),Windows_NT)
DETECTED_OS = Windows
DRIVE_PREFIX=C:
else
DETECTED_OS = $(shell uname -s)
endif

ifeq ($(DETECTED_OS),Linux)
MOBY_DOCKER=/usr/bin/docker
endif
ifeq ($(DETECTED_OS),Darwin)
MOBY_DOCKER=/Applications/Docker.app/Contents/Resources/bin/docker
endif
ifeq ($(DETECTED_OS),Windows)
BINARY_EXT=.exe
endif

TEST_COVERAGE_FLAGS = -coverprofile=coverage.out -covermode=atomic
ifneq ($(DETECTED_OS),Windows)
# go race detector requires gcc on Windows so not used by default
# https://github.com/golang/go/issues/27089
TEST_COVERAGE_FLAGS += -race
endif
BUILD_FLAGS?=
TEST_FLAGS?=
E2E_TEST?=
@@ -49,13 +38,23 @@ else
endif

BUILDX_CMD ?= docker buildx
DESTDIR ?= ./bin/build

# DESTDIR overrides the output path for binaries and other artifacts
# this is used by docker/docker-ce-packaging for the apt/rpm builds,
# so it's important that the resulting binary ends up EXACTLY at the
# path $DESTDIR/docker-compose when specified.
#
# See https://github.com/docker/docker-ce-packaging/blob/e43fbd37e48fde49d907b9195f23b13537521b94/rpm/SPECS/docker-compose-plugin.spec#L47
#
# By default, all artifacts go to subdirectories under ./bin/ in the
# repo root, e.g. ./bin/build, ./bin/coverage, ./bin/release.
DESTDIR ?=

all: build

.PHONY: build ## Build the compose cli-plugin
build:
GO111MODULE=on go build $(BUILD_FLAGS) -trimpath -tags "$(GO_BUILDTAGS)" -ldflags "$(GO_LDFLAGS)" -o "$(DESTDIR)/docker-compose$(BINARY_EXT)" ./cmd
GO111MODULE=on go build $(BUILD_FLAGS) -trimpath -tags "$(GO_BUILDTAGS)" -ldflags "$(GO_LDFLAGS)" -o "$(or $(DESTDIR),./bin/build)/docker-compose$(BINARY_EXT)" ./cmd

.PHONY: binary
binary:
@@ -68,7 +67,7 @@ binary-with-coverage:
.PHONY: install
install: binary
mkdir -p ~/.docker/cli-plugins
install bin/build/docker-compose ~/.docker/cli-plugins/docker-compose
install $(or $(DESTDIR),./bin/build)/docker-compose ~/.docker/cli-plugins/docker-compose

.PHONY: e2e-compose
e2e-compose: ## Run end to end local tests in plugin mode. Set E2E_TEST=TestName to run a single test
@@ -122,8 +121,8 @@ docs: ## generate documentation
$(eval $@_TMP_OUT := $(shell mktemp -d -t compose-output.XXXXXXXXXX))
$(BUILDX_CMD) bake --set "*.output=type=local,dest=$($@_TMP_OUT)" docs-update
rm -rf ./docs/internal
cp -R "$($@_TMP_OUT)"/out/* ./docs/
rm -rf "$($@_TMP_OUT)"/*
cp -R "$(DRIVE_PREFIX)$($@_TMP_OUT)"/out/* ./docs/
rm -rf "$(DRIVE_PREFIX)$($@_TMP_OUT)"/*

.PHONY: validate-docs
validate-docs: ## validate the doc does not change
README.md (16 changed lines)

@@ -6,13 +6,14 @@
+ [Linux](#linux)
- [Quick Start](#quick-start)
- [Contributing](#contributing)
- [Legacy](#legacy)
# Docker Compose v2

[](https://github.com/docker/compose/releases/latest)
[](https://pkg.go.dev/github.com/docker/compose/v2)
[](https://github.com/docker/compose/actions?query=workflow%3Aci)
[](https://github.com/docker/compose/actions?query=workflow%3Aci)
[](https://goreportcard.com/report/github.com/docker/compose/v2)
[](https://codecov.io/gh/docker/compose)
[](https://codecov.io/gh/docker/compose)
[](https://api.securityscorecards.dev/projects/github.com/docker/compose)

@@ -23,12 +24,6 @@ your application are configured.
Once you have a Compose file, you can create and start your application with a
single command: `docker compose up`.

# About update and backward compatibility

Docker Compose V2 is a major version bump release of Docker Compose. It has been completely rewritten from scratch in Golang (V1 was in Python). The installation instructions for Compose V2 differ from V1. V2 is no longer a standalone binary, so installation scripts will have to be adjusted, and some commands are different.

For a smooth transition from legacy docker-compose 1.xx, please consider installing [compose-switch](https://github.com/docker/compose-switch) to translate `docker-compose ...` commands into Compose V2's `docker compose ...`. Also check V2's `--compatibility` flag.

# Where to get Docker Compose

### Windows and macOS
@@ -85,3 +80,8 @@ Want to help develop Docker Compose? Check out our

If you find an issue, please report it on the
[issue tracker](https://github.com/docker/compose/issues/new/choose).

Legacy
-------------

The Python version of Compose is available under the `v1` [branch](https://github.com/docker/compose/tree/v1)
cmd/cmdtrace/cmd_span.go (new file, 131 lines)

@@ -0,0 +1,131 @@
/*
   Copyright 2023 Docker Compose CLI authors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package cmdtrace

import (
    "context"
    "errors"
    "fmt"
    "sort"
    "strings"
    "time"

    dockercli "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    commands "github.com/docker/compose/v2/cmd/compose"
    "github.com/docker/compose/v2/internal/tracing"
    "github.com/spf13/cobra"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/codes"
    "go.opentelemetry.io/otel/trace"
)

// Setup should be called as part of the command's PersistentPreRunE
// as soon as possible after initializing the dockerCli.
//
// It initializes the tracer for the CLI using both auto-detection
// from the Docker context metadata as well as standard OTEL_ env
// vars, creates a root span for the command, and wraps the actual
// command invocation to ensure the span is properly finalized and
// exported before exit.
func Setup(cmd *cobra.Command, dockerCli command.Cli) error {
    tracingShutdown, err := tracing.InitTracing(dockerCli)
    if err != nil {
        return fmt.Errorf("initializing tracing: %w", err)
    }

    ctx := cmd.Context()
    ctx, cmdSpan := tracing.Tracer.Start(
        ctx,
        "cli/"+strings.Join(commandName(cmd), "-"),
    )
    cmd.SetContext(ctx)
    wrapRunE(cmd, cmdSpan, tracingShutdown)
    return nil
}

// wrapRunE injects a wrapper function around the command's actual RunE (or Run)
// method. This is necessary to capture the command result for reporting as well
// as flushing any spans before exit.
//
// Unfortunately, PersistentPostRun(E) can't be used for this purpose because it
// only runs if RunE does _not_ return an error, but this should run unconditionally.
func wrapRunE(c *cobra.Command, cmdSpan trace.Span, tracingShutdown tracing.ShutdownFunc) {
    origRunE := c.RunE
    if origRunE == nil {
        origRun := c.Run
        //nolint:unparam // wrapper function for RunE, always returns nil by design
        origRunE = func(cmd *cobra.Command, args []string) error {
            origRun(cmd, args)
            return nil
        }
        c.Run = nil
    }

    c.RunE = func(cmd *cobra.Command, args []string) error {
        cmdErr := origRunE(cmd, args)
        if cmdSpan != nil {
            if cmdErr != nil && !errors.Is(cmdErr, context.Canceled) {
                // default exit code is 1 if a more descriptive error
                // wasn't returned
                exitCode := 1
                var statusErr dockercli.StatusError
                if errors.As(cmdErr, &statusErr) {
                    exitCode = statusErr.StatusCode
                }
                cmdSpan.SetStatus(codes.Error, "CLI command returned error")
                cmdSpan.RecordError(cmdErr, trace.WithAttributes(
                    attribute.Int("exit_code", exitCode),
                ))

            } else {
                cmdSpan.SetStatus(codes.Ok, "")
            }
            cmdSpan.End()
        }
        if tracingShutdown != nil {
            // use background for root context because the cmd's context might have
            // been canceled already
            ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
            defer cancel()
            // TODO(milas): add an env var to enable logging from the
            // OTel components for debugging purposes
            _ = tracingShutdown(ctx)
        }
        return cmdErr
    }
}

// commandName returns the path components for a given command.
//
// The root Compose command and anything before (i.e. "docker")
// are not included.
//
// For example:
//   - docker compose alpha watch -> [alpha, watch]
//   - docker-compose up -> [up]
func commandName(cmd *cobra.Command) []string {
    var name []string
    for c := cmd; c != nil; c = c.Parent() {
        if c.Name() == commands.PluginName {
            break
        }
        name = append(name, c.Name())
    }
    sort.Sort(sort.Reverse(sort.StringSlice(name)))
    return name
}
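The doc comment on Setup above spells out the intended call site: the plugin's PersistentPreRunE, right after the docker CLI has been initialized. A minimal sketch of that wiring, assuming a hypothetical root command; only the cmdtrace.Setup call and its signature come from the file above, the rest is illustrative.

```go
package example

import (
	"github.com/docker/cli/cli/command"
	"github.com/docker/compose/v2/cmd/cmdtrace"
	"github.com/spf13/cobra"
)

// newRootCommand is illustrative only; the real compose root command lives in
// cmd/compose and is wired differently.
func newRootCommand(dockerCli command.Cli) *cobra.Command {
	return &cobra.Command{
		Use: "compose",
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			// dockerCli is assumed to be fully initialized at this point, as
			// the Setup doc comment requires; Setup starts the root span and
			// wraps RunE so the span is ended and exported even on failure.
			return cmdtrace.Setup(cmd, dockerCli)
		},
	}
}
```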
@@ -62,10 +62,6 @@ func Convert(args []string) []string {
|
||||
continue
|
||||
}
|
||||
if len(arg) > 0 && arg[0] != '-' {
|
||||
// not a top-level flag anymore, keep the rest of the command unmodified
|
||||
if arg == compose.PluginName {
|
||||
i++
|
||||
}
|
||||
command = append(command, args[i:]...)
|
||||
break
|
||||
}
|
||||
|
||||
@@ -83,6 +83,11 @@ func Test_convert(t *testing.T) {
|
||||
args: []string{"--project-directory", "", "ps"},
|
||||
want: []string{"compose", "--project-directory", "", "ps"},
|
||||
},
|
||||
{
|
||||
name: "compose as project name",
|
||||
args: []string{"--project-name", "compose", "down", "--remove-orphans"},
|
||||
want: []string{"compose", "--project-name", "compose", "down", "--remove-orphans"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -32,6 +32,7 @@ func alphaCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
cmd.AddCommand(
|
||||
watchCommand(p, backend),
|
||||
vizCommand(p, backend),
|
||||
publishCommand(p, backend),
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -27,8 +27,7 @@ import (
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
buildx "github.com/docker/buildx/util/progress"
|
||||
cliopts "github.com/docker/cli/opts"
|
||||
"github.com/docker/compose/v2/pkg/progress"
|
||||
"github.com/docker/compose/v2/pkg/utils"
|
||||
ui "github.com/docker/compose/v2/pkg/progress"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/docker/compose/v2/pkg/api"
|
||||
@@ -37,14 +36,14 @@ import (
|
||||
type buildOptions struct {
|
||||
*ProjectOptions
|
||||
composeOptions
|
||||
quiet bool
|
||||
pull bool
|
||||
push bool
|
||||
progress string
|
||||
args []string
|
||||
noCache bool
|
||||
memory cliopts.MemBytes
|
||||
ssh string
|
||||
quiet bool
|
||||
pull bool
|
||||
push bool
|
||||
args []string
|
||||
noCache bool
|
||||
memory cliopts.MemBytes
|
||||
ssh string
|
||||
builder string
|
||||
}
|
||||
|
||||
func (opts buildOptions) toAPIBuildOptions(services []string) (api.BuildOptions, error) {
|
||||
@@ -56,27 +55,25 @@ func (opts buildOptions) toAPIBuildOptions(services []string) (api.BuildOptions,
|
||||
return api.BuildOptions{}, err
|
||||
}
|
||||
}
|
||||
builderName := opts.builder
|
||||
if builderName == "" {
|
||||
builderName = os.Getenv("BUILDX_BUILDER")
|
||||
}
|
||||
|
||||
return api.BuildOptions{
|
||||
Pull: opts.pull,
|
||||
Push: opts.push,
|
||||
Progress: opts.progress,
|
||||
Progress: ui.Mode,
|
||||
Args: types.NewMappingWithEquals(opts.args),
|
||||
NoCache: opts.noCache,
|
||||
Quiet: opts.quiet,
|
||||
Services: services,
|
||||
SSHs: SSHKeys,
|
||||
Builder: builderName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var printerModes = []string{
|
||||
buildx.PrinterModeAuto,
|
||||
buildx.PrinterModeTty,
|
||||
buildx.PrinterModePlain,
|
||||
buildx.PrinterModeQuiet,
|
||||
}
|
||||
|
||||
func buildCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
func buildCommand(p *ProjectOptions, progress *string, backend api.Service) *cobra.Command {
|
||||
opts := buildOptions{
|
||||
ProjectOptions: p,
|
||||
}
|
||||
@@ -85,24 +82,21 @@ func buildCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
Short: "Build or rebuild services",
|
||||
PreRunE: Adapt(func(ctx context.Context, args []string) error {
|
||||
if opts.quiet {
|
||||
opts.progress = buildx.PrinterModeQuiet
|
||||
ui.Mode = ui.ModeQuiet
|
||||
devnull, err := os.Open(os.DevNull)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
os.Stdout = devnull
|
||||
}
|
||||
if !utils.StringContains(printerModes, opts.progress) {
|
||||
return fmt.Errorf("unsupported --progress value %q", opts.progress)
|
||||
}
|
||||
return nil
|
||||
}),
|
||||
RunE: AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
|
||||
if cmd.Flags().Changed("ssh") && opts.ssh == "" {
|
||||
opts.ssh = "default"
|
||||
}
|
||||
if progress.Mode == progress.ModePlain && !cmd.Flags().Changed("progress") {
|
||||
opts.progress = buildx.PrinterModePlain
|
||||
if cmd.Flags().Changed("progress") && opts.ssh == "" {
|
||||
fmt.Fprint(os.Stderr, "--progress is a global compose flag, better use `docker compose --progress xx build ...")
|
||||
}
|
||||
return runBuild(ctx, backend, opts, args)
|
||||
}),
|
||||
@@ -111,9 +105,9 @@ func buildCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
cmd.Flags().BoolVar(&opts.push, "push", false, "Push service images.")
|
||||
cmd.Flags().BoolVarP(&opts.quiet, "quiet", "q", false, "Don't print anything to STDOUT")
|
||||
cmd.Flags().BoolVar(&opts.pull, "pull", false, "Always attempt to pull a newer version of the image.")
|
||||
cmd.Flags().StringVar(&opts.progress, "progress", buildx.PrinterModeAuto, fmt.Sprintf(`Set type of progress output (%s)`, strings.Join(printerModes, ", ")))
|
||||
cmd.Flags().StringArrayVar(&opts.args, "build-arg", []string{}, "Set build-time variables for services.")
|
||||
cmd.Flags().StringVar(&opts.ssh, "ssh", "", "Set SSH authentications used when building service images. (use 'default' for using your default SSH Agent)")
|
||||
cmd.Flags().StringVar(&opts.builder, "builder", "", "Set builder to use.")
|
||||
cmd.Flags().Bool("parallel", true, "Build images in parallel. DEPRECATED")
|
||||
cmd.Flags().MarkHidden("parallel") //nolint:errcheck
|
||||
cmd.Flags().Bool("compress", true, "Compress the build context using gzip. DEPRECATED")
|
||||
@@ -124,6 +118,8 @@ func buildCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
cmd.Flags().Bool("no-rm", false, "Do not remove intermediate containers after a successful build. DEPRECATED")
|
||||
cmd.Flags().MarkHidden("no-rm") //nolint:errcheck
|
||||
cmd.Flags().VarP(&opts.memory, "memory", "m", "Set memory limit for the build container. Not supported by BuildKit.")
|
||||
cmd.Flags().StringVar(progress, "progress", buildx.PrinterModeAuto, fmt.Sprintf(`Set type of ui output (%s)`, strings.Join(printerModes, ", ")))
|
||||
cmd.Flags().MarkHidden("progress") //nolint:errcheck
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
package compose
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/compose/v2/pkg/api"
|
||||
@@ -65,3 +66,23 @@ func completeProjectNames(backend api.Service) func(cmd *cobra.Command, args []s
|
||||
return values, cobra.ShellCompDirectiveNoFileComp
|
||||
}
|
||||
}
|
||||
|
||||
func completeProfileNames(p *ProjectOptions) validArgsFn {
|
||||
return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
project, err := p.ToProject(nil)
|
||||
if err != nil {
|
||||
return nil, cobra.ShellCompDirectiveNoFileComp
|
||||
}
|
||||
|
||||
allProfileNames := project.AllServices().GetProfiles()
|
||||
sort.Strings(allProfileNames)
|
||||
|
||||
var values []string
|
||||
for _, profileName := range allProfileNames {
|
||||
if strings.HasPrefix(profileName, toComplete) {
|
||||
values = append(values, profileName)
|
||||
}
|
||||
}
|
||||
return values, cobra.ShellCompDirectiveNoFileComp
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,6 +26,8 @@ import (
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/compose-spec/compose-go/dotenv"
|
||||
buildx "github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/cli/cli/command"
|
||||
|
||||
"github.com/compose-spec/compose-go/cli"
|
||||
@@ -43,7 +45,7 @@ import (
|
||||
"github.com/docker/compose/v2/cmd/formatter"
|
||||
"github.com/docker/compose/v2/pkg/api"
|
||||
"github.com/docker/compose/v2/pkg/compose"
|
||||
"github.com/docker/compose/v2/pkg/progress"
|
||||
ui "github.com/docker/compose/v2/pkg/progress"
|
||||
"github.com/docker/compose/v2/pkg/utils"
|
||||
)
|
||||
|
||||
@@ -175,7 +177,7 @@ func (o *ProjectOptions) toProjectName() (string, error) {
|
||||
return o.ProjectName, nil
|
||||
}
|
||||
|
||||
envProjectName := os.Getenv("ComposeProjectName")
|
||||
envProjectName := os.Getenv(ComposeProjectName)
|
||||
if envProjectName != "" {
|
||||
return envProjectName, nil
|
||||
}
|
||||
@@ -273,6 +275,7 @@ func RootCommand(streams command.Cli, backend api.Service) *cobra.Command { //no
|
||||
version bool
|
||||
parallel int
|
||||
dryRun bool
|
||||
progress string
|
||||
)
|
||||
c := &cobra.Command{
|
||||
Short: "Docker Compose",
|
||||
@@ -326,16 +329,36 @@ func RootCommand(streams command.Cli, backend api.Service) *cobra.Command { //no
|
||||
formatter.SetANSIMode(streams, ansi)
|
||||
|
||||
if noColor, ok := os.LookupEnv("NO_COLOR"); ok && noColor != "" {
|
||||
progress.NoColor()
|
||||
ui.NoColor()
|
||||
formatter.SetANSIMode(streams, formatter.Never)
|
||||
}
|
||||
|
||||
switch ansi {
|
||||
case "never":
|
||||
progress.Mode = progress.ModePlain
|
||||
ui.Mode = ui.ModePlain
|
||||
case "always":
|
||||
progress.Mode = progress.ModeTTY
|
||||
ui.Mode = ui.ModeTTY
|
||||
}
|
||||
|
||||
switch progress {
|
||||
case ui.ModeAuto:
|
||||
ui.Mode = ui.ModeAuto
|
||||
case ui.ModeTTY:
|
||||
if ansi == "never" {
|
||||
return fmt.Errorf("can't use --progress tty while ANSI support is disabled")
|
||||
}
|
||||
ui.Mode = ui.ModeTTY
|
||||
case ui.ModePlain:
|
||||
if ansi == "always" {
|
||||
return fmt.Errorf("can't use --progress plain while ANSI support is forced")
|
||||
}
|
||||
ui.Mode = ui.ModePlain
|
||||
case ui.ModeQuiet, "none":
|
||||
ui.Mode = ui.ModeQuiet
|
||||
default:
|
||||
return fmt.Errorf("unsupported --progress value %q", progress)
|
||||
}
|
||||
|
||||
if opts.WorkDir != "" {
|
||||
if opts.ProjectDir != "" {
|
||||
return errors.New(`cannot specify DEPRECATED "--workdir" and "--project-directory". Please use only "--project-directory" instead`)
|
||||
@@ -404,11 +427,12 @@ func RootCommand(streams command.Cli, backend api.Service) *cobra.Command { //no
|
||||
portCommand(&opts, streams, backend),
|
||||
imagesCommand(&opts, streams, backend),
|
||||
versionCommand(streams),
|
||||
buildCommand(&opts, backend),
|
||||
buildCommand(&opts, &progress, backend),
|
||||
pushCommand(&opts, backend),
|
||||
pullCommand(&opts, backend),
|
||||
createCommand(&opts, backend),
|
||||
copyCommand(&opts, backend),
|
||||
waitCommand(&opts, backend),
|
||||
alphaCommand(&opts, backend),
|
||||
)
|
||||
|
||||
@@ -418,12 +442,24 @@ func RootCommand(streams command.Cli, backend api.Service) *cobra.Command { //no
|
||||
"project-name",
|
||||
completeProjectNames(backend),
|
||||
)
|
||||
c.RegisterFlagCompletionFunc( //nolint:errcheck
|
||||
"project-directory",
|
||||
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return []string{}, cobra.ShellCompDirectiveFilterDirs
|
||||
},
|
||||
)
|
||||
c.RegisterFlagCompletionFunc( //nolint:errcheck
|
||||
"file",
|
||||
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return []string{"yaml", "yml"}, cobra.ShellCompDirectiveFilterFileExt
|
||||
},
|
||||
)
|
||||
c.RegisterFlagCompletionFunc( //nolint:errcheck
|
||||
"profile",
|
||||
completeProfileNames(&opts),
|
||||
)
|
||||
|
||||
c.Flags().StringVar(&progress, "progress", buildx.PrinterModeAuto, fmt.Sprintf(`Set type of progress output (%s)`, strings.Join(printerModes, ", ")))
|
||||
|
||||
c.Flags().StringVar(&ansi, "ansi", "auto", `Control when to print ANSI control characters ("never"|"always"|"auto")`)
|
||||
c.Flags().IntVar(¶llel, "parallel", -1, `Control max parallelism, -1 for unlimited`)
|
||||
@@ -447,7 +483,7 @@ func setEnvWithDotEnv(prjOpts *ProjectOptions) error {
|
||||
return err
|
||||
}
|
||||
|
||||
envFromFile, err := cli.GetEnvFromFile(composegoutils.GetAsEqualsMap(os.Environ()), workingDir, options.EnvFiles)
|
||||
envFromFile, err := dotenv.GetEnvFromFile(composegoutils.GetAsEqualsMap(os.Environ()), workingDir, options.EnvFiles)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -460,3 +496,10 @@ func setEnvWithDotEnv(prjOpts *ProjectOptions) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var printerModes = []string{
|
||||
ui.ModeAuto,
|
||||
ui.ModeTTY,
|
||||
ui.ModePlain,
|
||||
ui.ModeQuiet,
|
||||
}
|
||||
|
||||
@@ -233,11 +233,7 @@ func runConfigImages(streams api.Streams, opts configOptions, services []string)
|
||||
return err
|
||||
}
|
||||
for _, s := range project.Services {
|
||||
if s.Image != "" {
|
||||
fmt.Fprintln(streams.Out(), s.Image)
|
||||
} else {
|
||||
fmt.Fprintf(streams.Out(), "%s%s%s\n", project.Name, api.Separator, s.Name)
|
||||
}
|
||||
fmt.Fprintln(streams.Out(), api.GetImageNameOrDefault(s, project.Name))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -64,10 +64,10 @@ func copyCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
}
|
||||
|
||||
flags := copyCmd.Flags()
|
||||
flags.IntVar(&opts.index, "index", 0, "Index of the container if there are multiple instances of a service .")
|
||||
flags.BoolVar(&opts.all, "all", false, "Copy to all the containers of the service.")
|
||||
flags.IntVar(&opts.index, "index", 0, "index of the container if service has multiple replicas")
|
||||
flags.BoolVar(&opts.all, "all", false, "copy to all the containers of the service.")
|
||||
flags.MarkHidden("all") //nolint:errcheck
|
||||
flags.MarkDeprecated("all", "By default all the containers of the service will get the source file/directory to be copied.") //nolint:errcheck
|
||||
flags.MarkDeprecated("all", "by default all the containers of the service will get the source file/directory to be copied.") //nolint:errcheck
|
||||
flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbol link in SRC_PATH")
|
||||
flags.BoolVarP(&opts.copyUIDGID, "archive", "a", false, "Archive mode (copy all uid/gid information)")
|
||||
|
||||
|
||||
@@ -44,7 +44,7 @@ func downCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
ProjectOptions: p,
|
||||
}
|
||||
downCmd := &cobra.Command{
|
||||
Use: "down [OPTIONS]",
|
||||
Use: "down [OPTIONS] [SERVICES]",
|
||||
Short: "Stop and remove containers, networks",
|
||||
PreRunE: AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
|
||||
opts.timeChanged = cmd.Flags().Changed("timeout")
|
||||
@@ -56,16 +56,15 @@ func downCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
return nil
|
||||
}),
|
||||
RunE: Adapt(func(ctx context.Context, args []string) error {
|
||||
return runDown(ctx, backend, opts)
|
||||
return runDown(ctx, backend, opts, args)
|
||||
}),
|
||||
Args: cobra.NoArgs,
|
||||
ValidArgsFunction: noCompletion(),
|
||||
}
|
||||
flags := downCmd.Flags()
|
||||
removeOrphans := utils.StringToBool(os.Getenv(ComposeRemoveOrphans))
|
||||
flags.BoolVar(&opts.removeOrphans, "remove-orphans", removeOrphans, "Remove containers for services not defined in the Compose file.")
|
||||
flags.IntVarP(&opts.timeout, "timeout", "t", 10, "Specify a shutdown timeout in seconds")
|
||||
flags.BoolVarP(&opts.volumes, "volumes", "v", false, "Remove named volumes declared in the `volumes` section of the Compose file and anonymous volumes attached to containers.")
|
||||
flags.IntVarP(&opts.timeout, "timeout", "t", 0, "Specify a shutdown timeout in seconds")
|
||||
flags.BoolVarP(&opts.volumes, "volumes", "v", false, `Remove named volumes declared in the "volumes" section of the Compose file and anonymous volumes attached to containers.`)
|
||||
flags.StringVar(&opts.images, "rmi", "", `Remove images used by services. "local" remove only images that don't have a custom tag ("local"|"all")`)
|
||||
flags.SetNormalizeFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
|
||||
if name == "volume" {
|
||||
@@ -77,7 +76,7 @@ func downCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
return downCmd
|
||||
}
|
||||
|
||||
func runDown(ctx context.Context, backend api.Service, opts downOptions) error {
|
||||
func runDown(ctx context.Context, backend api.Service, opts downOptions, services []string) error {
|
||||
project, name, err := opts.projectOrName()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -94,5 +93,6 @@ func runDown(ctx context.Context, backend api.Service, opts downOptions) error {
|
||||
Timeout: timeout,
|
||||
Images: opts.images,
|
||||
Volumes: opts.volumes,
|
||||
Services: services,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -65,7 +65,7 @@ func execCommand(p *ProjectOptions, streams api.Streams, backend api.Service) *c
|
||||
|
||||
runCmd.Flags().BoolVarP(&opts.detach, "detach", "d", false, "Detached mode: Run command in the background.")
|
||||
runCmd.Flags().StringArrayVarP(&opts.environment, "env", "e", []string{}, "Set environment variables")
|
||||
runCmd.Flags().IntVar(&opts.index, "index", 1, "index of the container if there are multiple instances of a service [default: 1].")
|
||||
runCmd.Flags().IntVar(&opts.index, "index", 0, "index of the container if service has multiple replicas")
|
||||
runCmd.Flags().BoolVarP(&opts.privileged, "privileged", "", false, "Give extended privileges to the process.")
|
||||
runCmd.Flags().StringVarP(&opts.user, "user", "u", "", "Run the command as this user.")
|
||||
runCmd.Flags().BoolVarP(&opts.noTty, "no-TTY", "T", !streams.Out().IsTerminal(), "Disable pseudo-TTY allocation. By default `docker compose exec` allocates a TTY.")
|
||||
|
||||
@@ -57,7 +57,7 @@ func portCommand(p *ProjectOptions, streams api.Streams, backend api.Service) *c
|
||||
ValidArgsFunction: completeServiceNames(p),
|
||||
}
|
||||
cmd.Flags().StringVar(&opts.protocol, "protocol", "tcp", "tcp or udp")
|
||||
cmd.Flags().IntVar(&opts.index, "index", 1, "index of the container if service has multiple replicas")
|
||||
cmd.Flags().IntVar(&opts.index, "index", 0, "index of the container if service has multiple replicas")
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
||||
49
cmd/compose/publish.go
Normal file
49
cmd/compose/publish.go
Normal file
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
Copyright 2020 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package compose
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/docker/compose/v2/pkg/api"
|
||||
)
|
||||
|
||||
func publishCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
opts := pushOptions{
|
||||
ProjectOptions: p,
|
||||
}
|
||||
publishCmd := &cobra.Command{
|
||||
Use: "publish [OPTIONS] [REPOSITORY]",
|
||||
Short: "Publish compose application",
|
||||
RunE: Adapt(func(ctx context.Context, args []string) error {
|
||||
return runPublish(ctx, backend, opts, args[0])
|
||||
}),
|
||||
Args: cobra.ExactArgs(1),
|
||||
}
|
||||
return publishCmd
|
||||
}
|
||||
|
||||
func runPublish(ctx context.Context, backend api.Service, opts pushOptions, repository string) error {
|
||||
project, err := opts.ToProject(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return backend.Publish(ctx, project, repository)
|
||||
}
|
||||
@@ -28,8 +28,9 @@ import (
|
||||
|
||||
type restartOptions struct {
|
||||
*ProjectOptions
|
||||
timeout int
|
||||
noDeps bool
|
||||
timeChanged bool
|
||||
timeout int
|
||||
noDeps bool
|
||||
}
|
||||
|
||||
func restartCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
@@ -39,13 +40,16 @@ func restartCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
restartCmd := &cobra.Command{
|
||||
Use: "restart [OPTIONS] [SERVICE...]",
|
||||
Short: "Restart service containers",
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
opts.timeChanged = cmd.Flags().Changed("timeout")
|
||||
},
|
||||
RunE: Adapt(func(ctx context.Context, args []string) error {
|
||||
return runRestart(ctx, backend, opts, args)
|
||||
}),
|
||||
ValidArgsFunction: completeServiceNames(p),
|
||||
}
|
||||
flags := restartCmd.Flags()
|
||||
flags.IntVarP(&opts.timeout, "timeout", "t", 10, "Specify a shutdown timeout in seconds")
|
||||
flags.IntVarP(&opts.timeout, "timeout", "t", 0, "Specify a shutdown timeout in seconds")
|
||||
flags.BoolVar(&opts.noDeps, "no-deps", false, "Don't restart dependent services.")
|
||||
|
||||
return restartCmd
|
||||
@@ -57,6 +61,12 @@ func runRestart(ctx context.Context, backend api.Service, opts restartOptions, s
|
||||
return err
|
||||
}
|
||||
|
||||
var timeout *time.Duration
|
||||
if opts.timeChanged {
|
||||
timeoutValue := time.Duration(opts.timeout) * time.Second
|
||||
timeout = &timeoutValue
|
||||
}
|
||||
|
||||
if opts.noDeps {
|
||||
err := project.ForServices(services, types.IgnoreDependencies)
|
||||
if err != nil {
|
||||
@@ -64,9 +74,8 @@ func runRestart(ctx context.Context, backend api.Service, opts restartOptions, s
|
||||
}
|
||||
}
|
||||
|
||||
timeout := time.Duration(opts.timeout) * time.Second
|
||||
return backend.Restart(ctx, name, api.RestartOptions{
|
||||
Timeout: &timeout,
|
||||
Timeout: timeout,
|
||||
Services: services,
|
||||
Project: project,
|
||||
})
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
cgo "github.com/compose-spec/compose-go/cli"
|
||||
"github.com/compose-spec/compose-go/loader"
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/docker/cli/opts"
|
||||
"github.com/mattn/go-shellwords"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
@@ -48,6 +49,8 @@ type runOptions struct {
|
||||
workdir string
|
||||
entrypoint string
|
||||
entrypointCmd []string
|
||||
capAdd opts.ListOpts
|
||||
capDrop opts.ListOpts
|
||||
labels []string
|
||||
volumes []string
|
||||
publish []string
|
||||
@@ -59,20 +62,27 @@ type runOptions struct {
|
||||
quietPull bool
|
||||
}
|
||||
|
||||
func (opts runOptions) apply(project *types.Project) error {
|
||||
target, err := project.GetService(opts.Service)
|
||||
func (options runOptions) apply(project *types.Project) error {
|
||||
if options.noDeps {
|
||||
err := project.ForServices([]string{options.Service}, types.IgnoreDependencies)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
target, err := project.GetService(options.Service)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
target.Tty = !opts.noTty
|
||||
target.StdinOpen = opts.interactive
|
||||
if !opts.servicePorts {
|
||||
target.Tty = !options.noTty
|
||||
target.StdinOpen = options.interactive
|
||||
if !options.servicePorts {
|
||||
target.Ports = []types.ServicePortConfig{}
|
||||
}
|
||||
if len(opts.publish) > 0 {
|
||||
if len(options.publish) > 0 {
|
||||
target.Ports = []types.ServicePortConfig{}
|
||||
for _, p := range opts.publish {
|
||||
for _, p := range options.publish {
|
||||
config, err := types.ParsePortConfig(p)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -80,8 +90,8 @@ func (opts runOptions) apply(project *types.Project) error {
|
||||
target.Ports = append(target.Ports, config...)
|
||||
}
|
||||
}
|
||||
if len(opts.volumes) > 0 {
|
||||
for _, v := range opts.volumes {
|
||||
if len(options.volumes) > 0 {
|
||||
for _, v := range options.volumes {
|
||||
volume, err := loader.ParseVolume(v)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -90,15 +100,8 @@ func (opts runOptions) apply(project *types.Project) error {
|
||||
}
|
||||
}
|
||||
|
||||
if opts.noDeps {
|
||||
err := project.ForServices([]string{opts.Service}, types.IgnoreDependencies)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for i, s := range project.Services {
|
||||
if s.Name == opts.Service {
|
||||
if s.Name == options.Service {
|
||||
project.Services[i] = target
|
||||
break
|
||||
}
|
||||
@@ -107,10 +110,12 @@ func (opts runOptions) apply(project *types.Project) error {
|
||||
}
|
||||
|
||||
func runCommand(p *ProjectOptions, streams api.Streams, backend api.Service) *cobra.Command {
|
||||
opts := runOptions{
|
||||
options := runOptions{
|
||||
composeOptions: &composeOptions{
|
||||
ProjectOptions: p,
|
||||
},
|
||||
capAdd: opts.NewListOpts(nil),
|
||||
capDrop: opts.NewListOpts(nil),
|
||||
}
|
||||
createOpts := createOptions{}
|
||||
cmd := &cobra.Command{
|
||||
@@ -118,61 +123,63 @@ func runCommand(p *ProjectOptions, streams api.Streams, backend api.Service) *co
|
||||
Short: "Run a one-off command on a service.",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
PreRunE: AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
|
||||
opts.Service = args[0]
|
||||
options.Service = args[0]
|
||||
if len(args) > 1 {
|
||||
opts.Command = args[1:]
|
||||
options.Command = args[1:]
|
||||
}
|
||||
if len(opts.publish) > 0 && opts.servicePorts {
|
||||
if len(options.publish) > 0 && options.servicePorts {
|
||||
return fmt.Errorf("--service-ports and --publish are incompatible")
|
||||
}
|
||||
if cmd.Flags().Changed("entrypoint") {
|
||||
command, err := shellwords.Parse(opts.entrypoint)
|
||||
command, err := shellwords.Parse(options.entrypoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts.entrypointCmd = command
|
||||
options.entrypointCmd = command
|
||||
}
|
||||
if cmd.Flags().Changed("tty") {
|
||||
if cmd.Flags().Changed("no-TTY") {
|
||||
return fmt.Errorf("--tty and --no-TTY can't be used together")
|
||||
} else {
|
||||
opts.noTty = !opts.tty
|
||||
options.noTty = !options.tty
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}),
|
||||
RunE: Adapt(func(ctx context.Context, args []string) error {
|
||||
project, err := p.ToProject([]string{opts.Service}, cgo.WithResolvedPaths(true), cgo.WithDiscardEnvFile)
|
||||
project, err := p.ToProject([]string{options.Service}, cgo.WithResolvedPaths(true), cgo.WithDiscardEnvFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts.ignoreOrphans = utils.StringToBool(project.Environment[ComposeIgnoreOrphans])
|
||||
return runRun(ctx, backend, project, opts, createOpts, streams)
|
||||
options.ignoreOrphans = utils.StringToBool(project.Environment[ComposeIgnoreOrphans])
|
||||
return runRun(ctx, backend, project, options, createOpts, streams)
|
||||
}),
|
||||
ValidArgsFunction: completeServiceNames(p),
|
||||
}
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVarP(&opts.Detach, "detach", "d", false, "Run container in background and print container ID")
|
||||
flags.StringArrayVarP(&opts.environment, "env", "e", []string{}, "Set environment variables")
|
||||
flags.StringArrayVarP(&opts.labels, "label", "l", []string{}, "Add or override a label")
|
||||
flags.BoolVar(&opts.Remove, "rm", false, "Automatically remove the container when it exits")
|
||||
flags.BoolVarP(&opts.noTty, "no-TTY", "T", !streams.Out().IsTerminal(), "Disable pseudo-TTY allocation (default: auto-detected).")
|
||||
flags.StringVar(&opts.name, "name", "", "Assign a name to the container")
|
||||
flags.StringVarP(&opts.user, "user", "u", "", "Run as specified username or uid")
|
||||
flags.StringVarP(&opts.workdir, "workdir", "w", "", "Working directory inside the container")
|
||||
flags.StringVar(&opts.entrypoint, "entrypoint", "", "Override the entrypoint of the image")
|
||||
flags.BoolVar(&opts.noDeps, "no-deps", false, "Don't start linked services.")
|
||||
flags.StringArrayVarP(&opts.volumes, "volume", "v", []string{}, "Bind mount a volume.")
|
||||
flags.StringArrayVarP(&opts.publish, "publish", "p", []string{}, "Publish a container's port(s) to the host.")
|
||||
flags.BoolVar(&opts.useAliases, "use-aliases", false, "Use the service's network useAliases in the network(s) the container connects to.")
|
||||
flags.BoolVar(&opts.servicePorts, "service-ports", false, "Run command with the service's ports enabled and mapped to the host.")
|
||||
flags.BoolVar(&opts.quietPull, "quiet-pull", false, "Pull without printing progress information.")
|
||||
flags.BoolVarP(&options.Detach, "detach", "d", false, "Run container in background and print container ID")
|
||||
flags.StringArrayVarP(&options.environment, "env", "e", []string{}, "Set environment variables")
|
||||
flags.StringArrayVarP(&options.labels, "label", "l", []string{}, "Add or override a label")
|
||||
flags.BoolVar(&options.Remove, "rm", false, "Automatically remove the container when it exits")
|
||||
flags.BoolVarP(&options.noTty, "no-TTY", "T", !streams.Out().IsTerminal(), "Disable pseudo-TTY allocation (default: auto-detected).")
|
||||
flags.StringVar(&options.name, "name", "", "Assign a name to the container")
|
||||
flags.StringVarP(&options.user, "user", "u", "", "Run as specified username or uid")
|
||||
flags.StringVarP(&options.workdir, "workdir", "w", "", "Working directory inside the container")
|
||||
flags.StringVar(&options.entrypoint, "entrypoint", "", "Override the entrypoint of the image")
|
||||
flags.Var(&options.capAdd, "cap-add", "Add Linux capabilities")
|
||||
flags.Var(&options.capDrop, "cap-drop", "Drop Linux capabilities")
|
||||
flags.BoolVar(&options.noDeps, "no-deps", false, "Don't start linked services.")
|
||||
flags.StringArrayVarP(&options.volumes, "volume", "v", []string{}, "Bind mount a volume.")
|
||||
flags.StringArrayVarP(&options.publish, "publish", "p", []string{}, "Publish a container's port(s) to the host.")
|
||||
flags.BoolVar(&options.useAliases, "use-aliases", false, "Use the service's network useAliases in the network(s) the container connects to.")
|
||||
flags.BoolVar(&options.servicePorts, "service-ports", false, "Run command with the service's ports enabled and mapped to the host.")
|
||||
flags.BoolVar(&options.quietPull, "quiet-pull", false, "Pull without printing progress information.")
|
||||
flags.BoolVar(&createOpts.Build, "build", false, "Build image before starting container.")
|
||||
flags.BoolVar(&createOpts.removeOrphans, "remove-orphans", false, "Remove containers for services not defined in the Compose file.")
|
||||
|
||||
cmd.Flags().BoolVarP(&opts.interactive, "interactive", "i", true, "Keep STDIN open even if not attached.")
|
||||
cmd.Flags().BoolVarP(&opts.tty, "tty", "t", true, "Allocate a pseudo-TTY.")
|
||||
cmd.Flags().BoolVarP(&options.interactive, "interactive", "i", true, "Keep STDIN open even if not attached.")
|
||||
cmd.Flags().BoolVarP(&options.tty, "tty", "t", true, "Allocate a pseudo-TTY.")
|
||||
cmd.Flags().MarkHidden("tty") //nolint:errcheck
|
||||
|
||||
flags.SetNormalizeFunc(normalizeRunFlags)
|
||||
@@ -190,8 +197,8 @@ func normalizeRunFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
|
||||
return pflag.NormalizedName(name)
|
||||
}
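Since the diff only shows the tail of `normalizeRunFlags`, here is a minimal, purely illustrative sketch of what a pflag normalize function registered via `SetNormalizeFunc` looks like; the alias it maps is hypothetical and is not taken from the compose source.

```go
package sketch

import "github.com/spf13/pflag"

// normalizeFlagsSketch is a minimal sketch of a pflag normalize
// function; the alias below is hypothetical, the real mapping lives in
// normalizeRunFlags.
func normalizeFlagsSketch(f *pflag.FlagSet, name string) pflag.NormalizedName {
	switch name {
	case "volumes": // hypothetical legacy spelling
		name = "volume"
	}
	return pflag.NormalizedName(name)
}
```

It would be wired up exactly like the call above: `flags.SetNormalizeFunc(normalizeFlagsSketch)`.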
func runRun(ctx context.Context, backend api.Service, project *types.Project, opts runOptions, createOpts createOptions, streams api.Streams) error {
|
||||
err := opts.apply(project)
|
||||
func runRun(ctx context.Context, backend api.Service, project *types.Project, options runOptions, createOpts createOptions, streams api.Streams) error {
|
||||
err := options.apply(project)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -202,14 +209,14 @@ func runRun(ctx context.Context, backend api.Service, project *types.Project, op
|
||||
}
err = progress.Run(ctx, func(ctx context.Context) error {
|
||||
return startDependencies(ctx, backend, *project, opts.Service, opts.ignoreOrphans)
|
||||
return startDependencies(ctx, backend, *project, options.Service, options.ignoreOrphans)
|
||||
}, streams.Err())
|
||||
if err != nil {
|
||||
return err
|
||||
}
labels := types.Labels{}
|
||||
for _, s := range opts.labels {
|
||||
for _, s := range options.labels {
|
||||
parts := strings.SplitN(s, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("label must be set as KEY=VALUE")
|
||||
@@ -219,27 +226,29 @@ func runRun(ctx context.Context, backend api.Service, project *types.Project, op
// start container and attach to container streams
|
||||
runOpts := api.RunOptions{
|
||||
Name: opts.name,
|
||||
Service: opts.Service,
|
||||
Command: opts.Command,
|
||||
Detach: opts.Detach,
|
||||
AutoRemove: opts.Remove,
|
||||
Tty: !opts.noTty,
|
||||
Interactive: opts.interactive,
|
||||
WorkingDir: opts.workdir,
|
||||
User: opts.user,
|
||||
Environment: opts.environment,
|
||||
Entrypoint: opts.entrypointCmd,
|
||||
Name: options.name,
|
||||
Service: options.Service,
|
||||
Command: options.Command,
|
||||
Detach: options.Detach,
|
||||
AutoRemove: options.Remove,
|
||||
Tty: !options.noTty,
|
||||
Interactive: options.interactive,
|
||||
WorkingDir: options.workdir,
|
||||
User: options.user,
|
||||
CapAdd: options.capAdd.GetAll(),
|
||||
CapDrop: options.capDrop.GetAll(),
|
||||
Environment: options.environment,
|
||||
Entrypoint: options.entrypointCmd,
|
||||
Labels: labels,
|
||||
UseNetworkAliases: opts.useAliases,
|
||||
NoDeps: opts.noDeps,
|
||||
UseNetworkAliases: options.useAliases,
|
||||
NoDeps: options.noDeps,
|
||||
Index: 0,
|
||||
QuietPull: opts.quietPull,
|
||||
QuietPull: options.quietPull,
|
||||
}
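The new `CapAdd`/`CapDrop` fields on `api.RunOptions` above carry the `--cap-add`/`--cap-drop` values through to container creation. As a rough illustration of where they end up, the Docker API's `HostConfig` exposes matching fields; the helper below is a sketch under that assumption, not the actual compose backend code.

```go
package sketch

import (
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/strslice"
)

// applyCapabilities is a hypothetical helper showing how capability
// lists map onto a Docker HostConfig; the real plumbing happens in the
// compose backend and is not shown in this diff.
func applyCapabilities(hc *container.HostConfig, capAdd, capDrop []string) {
	hc.CapAdd = strslice.StrSlice(capAdd)
	hc.CapDrop = strslice.StrSlice(capDrop)
}
```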
for i, service := range project.Services {
|
||||
if service.Name == opts.Service {
|
||||
service.StdinOpen = opts.interactive
|
||||
if service.Name == options.Service {
|
||||
service.StdinOpen = options.interactive
|
||||
project.Services[i] = service
|
||||
}
|
||||
}
@@ -47,7 +47,7 @@ func stopCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
ValidArgsFunction: completeServiceNames(p),
|
||||
}
|
||||
flags := cmd.Flags()
|
||||
flags.IntVarP(&opts.timeout, "timeout", "t", 10, "Specify a shutdown timeout in seconds")
|
||||
flags.IntVarP(&opts.timeout, "timeout", "t", 0, "Specify a shutdown timeout in seconds")
return cmd
|
||||
}
@@ -21,9 +21,8 @@ import (
|
||||
"fmt"
|
||||
"time"
"github.com/docker/compose/v2/cmd/formatter"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/docker/compose/v2/cmd/formatter"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/docker/compose/v2/pkg/api"
|
||||
@@ -78,7 +77,7 @@ func upCommand(p *ProjectOptions, streams api.Streams, backend api.Service) *cob
|
||||
Short: "Create and start containers",
|
||||
PreRunE: AdaptCmd(func(ctx context.Context, cmd *cobra.Command, args []string) error {
|
||||
create.pullChanged = cmd.Flags().Changed("pull")
|
||||
create.timeChanged = cmd.Flags().Changed("waitTimeout")
|
||||
create.timeChanged = cmd.Flags().Changed("timeout")
|
||||
return validateFlags(&up, &create)
|
||||
}),
|
||||
RunE: p.WithServices(func(ctx context.Context, project *types.Project, services []string) error {
|
||||
@@ -104,7 +103,7 @@ func upCommand(p *ProjectOptions, streams api.Streams, backend api.Service) *cob
|
||||
flags.BoolVar(&up.noStart, "no-start", false, "Don't start the services after creating them.")
|
||||
flags.BoolVar(&up.cascadeStop, "abort-on-container-exit", false, "Stops all containers if any container was stopped. Incompatible with -d")
|
||||
flags.StringVar(&up.exitCodeFrom, "exit-code-from", "", "Return the exit code of the selected service container. Implies --abort-on-container-exit")
|
||||
flags.IntVarP(&create.timeout, "timeout", "t", 10, "Use this timeout in seconds for container shutdown when attached or when containers are already running.")
|
||||
flags.IntVarP(&create.timeout, "timeout", "t", 0, "Use this timeout in seconds for container shutdown when attached or when containers are already running.")
|
||||
flags.BoolVar(&up.timestamp, "timestamps", false, "Show timestamps.")
|
||||
flags.BoolVar(&up.noDeps, "no-deps", false, "Don't start linked services.")
|
||||
flags.BoolVar(&create.recreateDeps, "always-recreate-deps", false, "Recreate dependent containers. Incompatible with --no-recreate.")
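The `-t/--timeout` default moving from `10` to `0`, combined with the `cmd.Flags().Changed("timeout")` check in the PreRunE above, lets the command tell "flag not passed" apart from "flag explicitly set"; pflag alone cannot make that distinction. A minimal, self-contained sketch of the pattern (command and flag names are illustrative):

```go
package sketch

import (
	"fmt"

	"github.com/spf13/cobra"
)

// newSketchCmd shows the Changed("timeout") pattern: only honour the
// CLI value when the user actually passed -t, otherwise defer to
// whatever default applies (e.g. a per-service stop grace period).
func newSketchCmd() *cobra.Command {
	var timeout int
	cmd := &cobra.Command{
		Use: "sketch",
		RunE: func(cmd *cobra.Command, args []string) error {
			if cmd.Flags().Changed("timeout") {
				fmt.Printf("explicit shutdown timeout: %ds\n", timeout)
			} else {
				fmt.Println("no -t given; using the default behaviour")
			}
			return nil
		},
	}
	cmd.Flags().IntVarP(&timeout, "timeout", "t", 0, "shutdown timeout in seconds")
	return cmd
}
```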
@@ -164,17 +163,31 @@ func runUp(ctx context.Context, streams api.Streams, backend api.Service, create
|
||||
consumer = formatter.NewLogConsumer(ctx, streams.Out(), streams.Err(), !upOptions.noColor, !upOptions.noPrefix, upOptions.timestamp)
|
||||
}
|
||||
|
||||
attachTo := services
|
||||
attachTo := utils.Set[string]{}
|
||||
if len(upOptions.attach) > 0 {
|
||||
attachTo = upOptions.attach
|
||||
attachTo.AddAll(upOptions.attach...)
|
||||
}
|
||||
if upOptions.attachDependencies {
|
||||
attachTo = project.ServiceNames()
|
||||
if err := project.WithServices(attachTo.Elements(), func(s types.ServiceConfig) error {
|
||||
if s.Attach == nil || *s.Attach {
|
||||
attachTo.Add(s.Name)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(attachTo) == 0 {
|
||||
attachTo = project.ServiceNames()
|
||||
if err := project.WithServices(services, func(s types.ServiceConfig) error {
|
||||
if s.Attach == nil || *s.Attach {
|
||||
attachTo.Add(s.Name)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
attachTo = utils.Remove(attachTo, upOptions.noAttach...)
|
||||
attachTo.RemoveAll(upOptions.noAttach...)
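The attach logic above switches from plain slices to `utils.Set[string]`, so that `--attach`, `--no-attach` and per-service `attach:` settings combine without duplicates. The real type lives in `github.com/docker/compose/v2/pkg/utils`; the sketch below only mirrors the methods used here (`Add`, `AddAll`, `RemoveAll`, `Elements`) and is an assumption, not the actual implementation.

```go
package sketch

// Set is a minimal generic set sketch matching the calls above.
type Set[T comparable] map[T]struct{}

func (s Set[T]) Add(v T) { s[v] = struct{}{} }

func (s Set[T]) AddAll(vs ...T) {
	for _, v := range vs {
		s.Add(v)
	}
}

func (s Set[T]) RemoveAll(vs ...T) {
	for _, v := range vs {
		delete(s, v)
	}
}

// Elements returns the members in unspecified order.
func (s Set[T]) Elements() []T {
	out := make([]T, 0, len(s))
	for v := range s {
		out = append(out, v)
	}
	return out
}
```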
create := api.CreateOptions{
|
||||
Services: services,
|
||||
@@ -198,7 +211,7 @@ func runUp(ctx context.Context, streams api.Streams, backend api.Service, create
|
||||
Start: api.StartOptions{
|
||||
Project: project,
|
||||
Attach: consumer,
|
||||
AttachTo: attachTo,
|
||||
AttachTo: attachTo.Elements(),
|
||||
ExitCodeFrom: upOptions.exitCodeFrom,
|
||||
CascadeStop: upOptions.cascadeStop,
|
||||
Wait: upOptions.wait,
|
||||
|
||||
72  cmd/compose/wait.go  Normal file
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
Copyright 2023 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package compose
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/compose/v2/pkg/api"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type waitOptions struct {
|
||||
*ProjectOptions
|
||||
|
||||
services []string
|
||||
|
||||
downProject bool
|
||||
}
|
||||
|
||||
func waitCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
|
||||
opts := waitOptions{
|
||||
ProjectOptions: p,
|
||||
}
|
||||
|
||||
var statusCode int64
|
||||
var err error
|
||||
cmd := &cobra.Command{
|
||||
Use: "wait SERVICE [SERVICE...] [OPTIONS]",
|
||||
Short: "Block until the first service container stops",
|
||||
Args: cli.RequiresMinArgs(1),
|
||||
RunE: Adapt(func(ctx context.Context, services []string) error {
|
||||
opts.services = services
|
||||
statusCode, err = runWait(ctx, backend, &opts)
|
||||
return err
|
||||
}),
|
||||
PostRun: func(cmd *cobra.Command, args []string) {
|
||||
os.Exit(int(statusCode))
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().BoolVar(&opts.downProject, "down-project", false, "Drops project when the first container stops")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runWait(ctx context.Context, backend api.Service, opts *waitOptions) (int64, error) {
|
||||
_, name, err := opts.projectOrName()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return backend.Wait(ctx, name, api.WaitOptions{
|
||||
Services: opts.services,
|
||||
DownProjectOnContainerExit: opts.downProject,
|
||||
})
|
||||
}
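`waitCommand` propagates the container's status through `os.Exit` in `PostRun`, which cobra only invokes after `RunE` returned without error. A stripped-down sketch of that pattern (names are illustrative):

```go
package sketch

import (
	"os"

	"github.com/spf13/cobra"
)

// newWaitLikeCmd stores the status code in RunE and turns it into the
// process exit code in PostRun, mirroring the wait command above.
func newWaitLikeCmd(run func() (int64, error)) *cobra.Command {
	var statusCode int64
	return &cobra.Command{
		Use: "wait-like",
		RunE: func(cmd *cobra.Command, args []string) error {
			var err error
			statusCode, err = run()
			return err
		},
		PostRun: func(cmd *cobra.Command, args []string) {
			// Only reached when RunE succeeded, so even a zero status
			// is reported through the process exit code.
			os.Exit(int(statusCode))
		},
	}
}
```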
@@ -23,6 +23,7 @@ import (
|
||||
"github.com/docker/cli/cli-plugins/manager"
|
||||
"github.com/docker/cli/cli-plugins/plugin"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/compose/v2/cmd/cmdtrace"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/docker/compose/v2/cmd/compatibility"
|
||||
@@ -38,14 +39,20 @@ func pluginMain() {
|
||||
cmd := commands.RootCommand(dockerCli, serviceProxy)
|
||||
originalPreRun := cmd.PersistentPreRunE
|
||||
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
|
||||
// initialize the dockerCli instance
|
||||
if err := plugin.PersistentPreRunE(cmd, args); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO(milas): add an env var to enable logging from the
|
||||
// OTel components for debugging purposes
|
||||
_ = cmdtrace.Setup(cmd, dockerCli)
|
||||
|
||||
if originalPreRun != nil {
|
||||
return originalPreRun(cmd, args)
|
||||
}
|
||||
return nil
|
||||
}
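The block above wraps the existing `PersistentPreRunE` so OTel tracing is set up before the original hook runs. The same chaining pattern, extracted into a hypothetical helper for clarity:

```go
package sketch

import "github.com/spf13/cobra"

// chainPreRun runs setup first, then whatever PersistentPreRunE the
// command already had; it is an illustrative helper, not compose code.
func chainPreRun(cmd *cobra.Command, setup func(*cobra.Command, []string) error) {
	original := cmd.PersistentPreRunE
	cmd.PersistentPreRunE = func(c *cobra.Command, args []string) error {
		if err := setup(c, args); err != nil {
			return err
		}
		if original != nil {
			return original(c, args)
		}
		return nil
	}
}
```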
cmd.SetFlagErrorFunc(func(c *cobra.Command, err error) error {
|
||||
return dockercli.StatusError{
|
||||
StatusCode: compose.CommandSyntaxFailure.ExitCode,
|
||||
|
||||
21  codecov.yml  Normal file
@@ -0,0 +1,21 @@
|
||||
coverage:
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
informational: true
|
||||
target: auto
|
||||
threshold: 2%
|
||||
patch:
|
||||
default:
|
||||
informational: true
|
||||
|
||||
comment:
|
||||
require_changes: true
|
||||
|
||||
ignore:
|
||||
- "packaging"
|
||||
- "docs"
|
||||
- "bin"
|
||||
- "e2e"
|
||||
- "pkg/e2e"
|
||||
- "**/*_test.go"
|
||||
@@ -25,13 +25,16 @@ variable "DOCS_FORMATS" {
|
||||
default = "md,yaml"
|
||||
}
|
||||
|
||||
# Defines the output folder
|
||||
# Defines the output folder to override the default behavior.
|
||||
# See Makefile for details, this is generally only useful for
|
||||
# the packaging scripts and care should be taken to not break
|
||||
# them.
|
||||
variable "DESTDIR" {
|
||||
default = ""
|
||||
}
|
||||
function "bindir" {
|
||||
function "outdir" {
|
||||
params = [defaultdir]
|
||||
result = DESTDIR != "" ? DESTDIR : "./bin/${defaultdir}"
|
||||
result = DESTDIR != "" ? DESTDIR : "${defaultdir}"
|
||||
}
|
||||
|
||||
# Special target: https://github.com/docker/metadata-action#bake-definition
|
||||
@@ -84,23 +87,23 @@ target "vendor-update" {
|
||||
target "test" {
|
||||
inherits = ["_common"]
|
||||
target = "test-coverage"
|
||||
output = [bindir("coverage")]
|
||||
output = [outdir("./bin/coverage/unit")]
|
||||
}
|
||||
|
||||
target "binary-with-coverage" {
|
||||
inherits = ["_common"]
|
||||
target = "binary"
|
||||
args = {
|
||||
BUILD_FLAGS = "-cover"
|
||||
BUILD_FLAGS = "-cover -covermode=atomic"
|
||||
}
|
||||
output = [bindir("build")]
|
||||
output = [outdir("./bin/build")]
|
||||
platforms = ["local"]
|
||||
}
|
||||
|
||||
target "binary" {
|
||||
inherits = ["_common"]
|
||||
target = "binary"
|
||||
output = [bindir("build")]
|
||||
output = [outdir("./bin/build")]
|
||||
platforms = ["local"]
|
||||
}
|
||||
|
||||
@@ -124,7 +127,7 @@ target "binary-cross" {
|
||||
target "release" {
|
||||
inherits = ["binary-cross"]
|
||||
target = "release"
|
||||
output = [bindir("release")]
|
||||
output = [outdir("./bin/release")]
|
||||
}
|
||||
|
||||
target "docs-validate" {
|
||||
|
||||
@@ -7,7 +7,6 @@ Define and run multi-container applications with Docker.
|
||||
|
||||
| Name | Description |
|
||||
|:--------------------------------|:------------------------------------------------------------------------|
|
||||
| [`alpha`](compose_alpha.md) | Experimental commands |
|
||||
| [`build`](compose_build.md) | Build or rebuild services |
|
||||
| [`config`](compose_config.md) | Parse, resolve and render compose file in canonical format |
|
||||
| [`cp`](compose_cp.md) | Copy files/folders between a service container and the local filesystem |
|
||||
@@ -33,6 +32,7 @@ Define and run multi-container applications with Docker.
|
||||
| [`unpause`](compose_unpause.md) | Unpause services |
|
||||
| [`up`](compose_up.md) | Create and start containers |
|
||||
| [`version`](compose_version.md) | Show the Docker Compose version information |
|
||||
| [`wait`](compose_wait.md) | Block until the first service container stops |
|
||||
|
||||
|
||||
### Options
|
||||
@@ -46,6 +46,7 @@ Define and run multi-container applications with Docker.
|
||||
| `-f`, `--file` | `stringArray` | | Compose configuration files |
|
||||
| `--parallel` | `int` | `-1` | Control max parallelism, -1 for unlimited |
|
||||
| `--profile` | `stringArray` | | Specify a profile to enable |
|
||||
| `--progress` | `string` | `auto` | Set type of progress output (auto, tty, plain, quiet) |
|
||||
| `--project-directory` | `string` | | Specify an alternate working directory<br>(default: the path of the, first specified, Compose file) |
|
||||
| `-p`, `--project-name` | `string` | | Project name |
|
||||
|
||||
@@ -171,7 +172,6 @@ If flags are explicitly set on the command line, the associated environment vari
|
||||
Setting the `COMPOSE_IGNORE_ORPHANS` environment variable to `true` will stop docker compose from detecting orphaned
|
||||
containers for the project.
|
||||
|
||||
|
||||
### Use Dry Run mode to test your command
|
||||
|
||||
Use `--dry-run` flag to test a command without changing your application stack state.
|
||||
@@ -195,24 +195,4 @@ $ docker compose --dry-run up --build -d
|
||||
From the example above, you can see that the first step is to pull the image defined by `db` service, then build the `backend` service.
|
||||
Next, the containers are created. The `db` service is started, and the `backend` and `proxy` wait until the `db` service is healthy before starting.
|
||||
|
||||
Dry Run mode does not currently work with all commands. In particular, you cannot use Dry Run mode with a command that doesn't change the state of a Compose stack
|
||||
such as `ps`, `ls`, `logs` for example.
|
||||
|
||||
Here the list of commands supporting `--dry-run` flag:
|
||||
* build
|
||||
* cp
|
||||
* create
|
||||
* down
|
||||
* exec
|
||||
* kill
|
||||
* pause
|
||||
* pull
|
||||
* push
|
||||
* remove
|
||||
* restart
|
||||
* run
|
||||
* start
|
||||
* stop
|
||||
* unpause
|
||||
* up
Dry Run mode works with almost all commands. You cannot use Dry Run mode with a command that doesn't change the state of a Compose stack, such as `ps`, `ls`, or `logs`.
14  docs/reference/compose_alpha_publish.md  Normal file
@@ -0,0 +1,14 @@
|
||||
# docker compose alpha publish
|
||||
|
||||
<!---MARKER_GEN_START-->
|
||||
Publish compose application
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:------------|:-----|:--------|:--------------------------------|
|
||||
| `--dry-run` | | | Execute command in dry run mode |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
||||
@@ -8,10 +8,10 @@ Build or rebuild services
|
||||
| Name | Type | Default | Description |
|
||||
|:-----------------|:--------------|:--------|:------------------------------------------------------------------------------------------------------------|
|
||||
| `--build-arg` | `stringArray` | | Set build-time variables for services. |
|
||||
| `--builder` | `string` | | Set builder to use. |
|
||||
| `--dry-run` | | | Execute command in dry run mode |
|
||||
| `-m`, `--memory` | `bytes` | `0` | Set memory limit for the build container. Not supported by BuildKit. |
|
||||
| `--no-cache` | | | Do not use cache when building the image |
|
||||
| `--progress` | `string` | `auto` | Set type of progress output (auto, tty, plain, quiet) |
|
||||
| `--pull` | | | Always attempt to pull a newer version of the image. |
|
||||
| `--push` | | | Push service images. |
|
||||
| `-q`, `--quiet` | | | Don't print anything to STDOUT |
|
||||
|
||||
@@ -5,12 +5,12 @@ Copy files/folders between a service container and the local filesystem
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------------|:------|:--------|:----------------------------------------------------------------------|
|
||||
| `-a`, `--archive` | | | Archive mode (copy all uid/gid information) |
|
||||
| `--dry-run` | | | Execute command in dry run mode |
|
||||
| `-L`, `--follow-link` | | | Always follow symbol link in SRC_PATH |
|
||||
| `--index` | `int` | `0` | Index of the container if there are multiple instances of a service . |
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------------|:------|:--------|:--------------------------------------------------------|
|
||||
| `-a`, `--archive` | | | Archive mode (copy all uid/gid information) |
|
||||
| `--dry-run` | | | Execute command in dry run mode |
|
||||
| `-L`, `--follow-link` | | | Always follow symbol link in SRC_PATH |
|
||||
| `--index` | `int` | `0` | index of the container if service has multiple replicas |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
||||
@@ -10,8 +10,8 @@ Stop and remove containers, networks
|
||||
| `--dry-run` | | | Execute command in dry run mode |
|
||||
| `--remove-orphans` | | | Remove containers for services not defined in the Compose file. |
|
||||
| `--rmi` | `string` | | Remove images used by services. "local" remove only images that don't have a custom tag ("local"\|"all") |
|
||||
| `-t`, `--timeout` | `int` | `10` | Specify a shutdown timeout in seconds |
|
||||
| `-v`, `--volumes` | | | Remove named volumes declared in the `volumes` section of the Compose file and anonymous volumes attached to containers. |
|
||||
| `-t`, `--timeout` | `int` | `0` | Specify a shutdown timeout in seconds |
|
||||
| `-v`, `--volumes` | | | Remove named volumes declared in the "volumes" section of the Compose file and anonymous volumes attached to containers. |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
||||
@@ -5,16 +5,16 @@ Execute a command in a running container.
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------|:--------------|:--------|:----------------------------------------------------------------------------------|
|
||||
| `-d`, `--detach` | | | Detached mode: Run command in the background. |
|
||||
| `--dry-run` | | | Execute command in dry run mode |
|
||||
| `-e`, `--env` | `stringArray` | | Set environment variables |
|
||||
| `--index` | `int` | `1` | index of the container if there are multiple instances of a service [default: 1]. |
|
||||
| `-T`, `--no-TTY` | | | Disable pseudo-TTY allocation. By default `docker compose exec` allocates a TTY. |
|
||||
| `--privileged` | | | Give extended privileges to the process. |
|
||||
| `-u`, `--user` | `string` | | Run the command as this user. |
|
||||
| `-w`, `--workdir` | `string` | | Path to workdir directory for this command. |
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------|:--------------|:--------|:---------------------------------------------------------------------------------|
|
||||
| `-d`, `--detach` | | | Detached mode: Run command in the background. |
|
||||
| `--dry-run` | | | Execute command in dry run mode |
|
||||
| `-e`, `--env` | `stringArray` | | Set environment variables |
|
||||
| `--index` | `int` | `0` | index of the container if service has multiple replicas |
|
||||
| `-T`, `--no-TTY` | | | Disable pseudo-TTY allocation. By default `docker compose exec` allocates a TTY. |
|
||||
| `--privileged` | | | Give extended privileges to the process. |
|
||||
| `-u`, `--user` | `string` | | Run the command as this user. |
|
||||
| `-w`, `--workdir` | `string` | | Path to workdir directory for this command. |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
||||
@@ -8,7 +8,7 @@ Print the public port for a port binding.
|
||||
| Name | Type | Default | Description |
|
||||
|:-------------|:---------|:--------|:--------------------------------------------------------|
|
||||
| `--dry-run` | | | Execute command in dry run mode |
|
||||
| `--index` | `int` | `1` | index of the container if service has multiple replicas |
|
||||
| `--index` | `int` | `0` | index of the container if service has multiple replicas |
|
||||
| `--protocol` | `string` | `tcp` | tcp or udp |
|
||||
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ Restart service containers
|
||||
|:------------------|:------|:--------|:--------------------------------------|
|
||||
| `--dry-run` | | | Execute command in dry run mode |
|
||||
| `--no-deps` | | | Don't restart dependent services. |
|
||||
| `-t`, `--timeout` | `int` | `10` | Specify a shutdown timeout in seconds |
|
||||
| `-t`, `--timeout` | `int` | `0` | Specify a shutdown timeout in seconds |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
||||
@@ -8,6 +8,8 @@ Run a one-off command on a service.
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------------|:--------------|:--------|:----------------------------------------------------------------------------------|
|
||||
| `--build` | | | Build image before starting container. |
|
||||
| `--cap-add` | `list` | | Add Linux capabilities |
|
||||
| `--cap-drop` | `list` | | Drop Linux capabilities |
|
||||
| `-d`, `--detach` | | | Run container in background and print container ID |
|
||||
| `--dry-run` | | | Execute command in dry run mode |
|
||||
| `--entrypoint` | `string` | | Override the entrypoint of the image |
|
||||
@@ -34,7 +36,7 @@ Run a one-off command on a service.
|
||||
|
||||
Runs a one-time command against a service.
|
||||
|
||||
the following command starts the `web` service and runs `bash` as its command:
|
||||
The following command starts the `web` service and runs `bash` as its command:
|
||||
|
||||
```console
|
||||
$ docker compose run web bash
|
||||
|
||||
@@ -8,7 +8,7 @@ Stop services
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------|:------|:--------|:--------------------------------------|
|
||||
| `--dry-run` | | | Execute command in dry run mode |
|
||||
| `-t`, `--timeout` | `int` | `10` | Specify a shutdown timeout in seconds |
|
||||
| `-t`, `--timeout` | `int` | `0` | Specify a shutdown timeout in seconds |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
||||
@@ -28,7 +28,7 @@ Create and start containers
|
||||
| `--remove-orphans` | | | Remove containers for services not defined in the Compose file. |
|
||||
| `-V`, `--renew-anon-volumes` | | | Recreate anonymous volumes instead of retrieving data from the previous containers. |
|
||||
| `--scale` | `stringArray` | | Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present. |
|
||||
| `-t`, `--timeout` | `int` | `10` | Use this timeout in seconds for container shutdown when attached or when containers are already running. |
|
||||
| `-t`, `--timeout` | `int` | `0` | Use this timeout in seconds for container shutdown when attached or when containers are already running. |
|
||||
| `--timestamps` | | | Show timestamps. |
|
||||
| `--wait` | | | Wait for services to be running\|healthy. Implies detached mode. |
|
||||
| `--wait-timeout` | `int` | `0` | timeout waiting for application to be running\|healthy. |
|
||||
|
||||
15  docs/reference/compose_wait.md  Normal file
@@ -0,0 +1,15 @@
|
||||
# docker compose wait
|
||||
|
||||
<!---MARKER_GEN_START-->
|
||||
Block until the first service container stops
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:-----------------|:-----|:--------|:---------------------------------------------|
|
||||
| `--down-project` | | | Drops project when the first container stops |
|
||||
| `--dry-run` | | | Execute command in dry run mode |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
||||
@@ -118,7 +118,6 @@ long: |-
|
||||
Setting the `COMPOSE_IGNORE_ORPHANS` environment variable to `true` will stop docker compose from detecting orphaned
|
||||
containers for the project.
|
||||
|
||||
|
||||
### Use Dry Run mode to test your command
|
||||
|
||||
Use `--dry-run` flag to test a command without changing your application stack state.
|
||||
@@ -142,26 +141,7 @@ long: |-
|
||||
From the example above, you can see that the first step is to pull the image defined by `db` service, then build the `backend` service.
|
||||
Next, the containers are created. The `db` service is started, and the `backend` and `proxy` wait until the `db` service is healthy before starting.
|
||||
|
||||
Dry Run mode does not currently work with all commands. In particular, you cannot use Dry Run mode with a command that doesn't change the state of a Compose stack
|
||||
such as `ps`, `ls`, `logs` for example.
|
||||
|
||||
Here the list of commands supporting `--dry-run` flag:
|
||||
* build
|
||||
* cp
|
||||
* create
|
||||
* down
|
||||
* exec
|
||||
* kill
|
||||
* pause
|
||||
* pull
|
||||
* push
|
||||
* remove
|
||||
* restart
|
||||
* run
|
||||
* start
|
||||
* stop
|
||||
* unpause
|
||||
* up
|
||||
Dry Run mode works with almost all commands. You cannot use Dry Run mode with a command that doesn't change the state of a Compose stack, such as `ps`, `ls`, or `logs`.
|
||||
usage: docker compose
|
||||
pname: docker
|
||||
plink: docker.yaml
|
||||
@@ -191,6 +171,7 @@ cname:
|
||||
- docker compose unpause
|
||||
- docker compose up
|
||||
- docker compose version
|
||||
- docker compose wait
|
||||
clink:
|
||||
- docker_compose_build.yaml
|
||||
- docker_compose_config.yaml
|
||||
@@ -217,6 +198,7 @@ clink:
|
||||
- docker_compose_unpause.yaml
|
||||
- docker_compose_up.yaml
|
||||
- docker_compose_version.yaml
|
||||
- docker_compose_wait.yaml
|
||||
options:
|
||||
- option: ansi
|
||||
value_type: string
|
||||
@@ -300,6 +282,16 @@ options:
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: progress
|
||||
value_type: string
|
||||
default_value: auto
|
||||
description: Set type of progress output (auto, tty, plain, quiet)
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: project-directory
|
||||
value_type: string
|
||||
description: |-
|
||||
@@ -355,6 +347,7 @@ options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -4,9 +4,11 @@ long: Experimental commands
|
||||
pname: docker compose
|
||||
plink: docker_compose.yaml
|
||||
cname:
|
||||
- docker compose alpha publish
|
||||
- docker compose alpha viz
|
||||
- docker compose alpha watch
|
||||
clink:
|
||||
- docker_compose_alpha_publish.yaml
|
||||
- docker_compose_alpha_viz.yaml
|
||||
- docker_compose_alpha_watch.yaml
|
||||
inherited_options:
|
||||
@@ -21,6 +23,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: true
|
||||
experimental: false
|
||||
experimentalcli: true
|
||||
kubernetes: false
|
||||
|
||||
24  docs/reference/docker_compose_alpha_publish.yaml  Normal file
@@ -0,0 +1,24 @@
|
||||
command: docker compose alpha publish
|
||||
short: Publish compose application
|
||||
long: Publish compose application
|
||||
usage: docker compose alpha publish [OPTIONS] [REPOSITORY]
|
||||
pname: docker compose alpha
|
||||
plink: docker_compose_alpha.yaml
|
||||
inherited_options:
|
||||
- option: dry-run
|
||||
value_type: bool
|
||||
default_value: "false"
|
||||
description: Execute command in dry run mode
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: true
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
|
||||
@@ -69,6 +69,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: true
|
||||
kubernetes: false
|
||||
|
||||
@@ -29,6 +29,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: true
|
||||
kubernetes: false
|
||||
|
||||
@@ -24,6 +24,15 @@ options:
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: builder
|
||||
value_type: string
|
||||
description: Set builder to use.
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: compress
|
||||
value_type: bool
|
||||
default_value: "true"
|
||||
@@ -90,9 +99,9 @@ options:
|
||||
- option: progress
|
||||
value_type: string
|
||||
default_value: auto
|
||||
description: Set type of progress output (auto, tty, plain, quiet)
|
||||
description: Set type of ui output (auto, tty, plain, quiet)
|
||||
deprecated: false
|
||||
hidden: false
|
||||
hidden: true
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
@@ -150,6 +159,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -152,6 +152,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -10,7 +10,7 @@ options:
|
||||
- option: all
|
||||
value_type: bool
|
||||
default_value: "false"
|
||||
description: Copy to all the containers of the service.
|
||||
description: copy to all the containers of the service.
|
||||
deprecated: true
|
||||
hidden: true
|
||||
experimental: false
|
||||
@@ -42,8 +42,7 @@ options:
|
||||
- option: index
|
||||
value_type: int
|
||||
default_value: "0"
|
||||
description: |
|
||||
Index of the container if there are multiple instances of a service .
|
||||
description: index of the container if service has multiple replicas
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
@@ -62,6 +61,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -90,6 +90,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -14,7 +14,7 @@ long: |-
|
||||
Anonymous volumes are not removed by default. However, as they don’t have a stable name, they will not be automatically
|
||||
mounted by a subsequent `up`. For data that needs to persist between updates, use explicit paths as bind mounts or
|
||||
named volumes.
|
||||
usage: docker compose down [OPTIONS]
|
||||
usage: docker compose down [OPTIONS] [SERVICES]
|
||||
pname: docker compose
|
||||
plink: docker_compose.yaml
|
||||
options:
|
||||
@@ -41,7 +41,7 @@ options:
|
||||
- option: timeout
|
||||
shorthand: t
|
||||
value_type: int
|
||||
default_value: "10"
|
||||
default_value: "0"
|
||||
description: Specify a shutdown timeout in seconds
|
||||
deprecated: false
|
||||
hidden: false
|
||||
@@ -54,7 +54,7 @@ options:
|
||||
value_type: bool
|
||||
default_value: "false"
|
||||
description: |
|
||||
Remove named volumes declared in the `volumes` section of the Compose file and anonymous volumes attached to containers.
|
||||
Remove named volumes declared in the "volumes" section of the Compose file and anonymous volumes attached to containers.
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
@@ -73,6 +73,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -46,6 +46,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -33,9 +33,8 @@ options:
|
||||
swarm: false
|
||||
- option: index
|
||||
value_type: int
|
||||
default_value: "1"
|
||||
description: |
|
||||
index of the container if there are multiple instances of a service [default: 1].
|
||||
default_value: "0"
|
||||
description: index of the container if service has multiple replicas
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
@@ -118,6 +117,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -38,6 +38,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -43,6 +43,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -91,6 +91,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -58,6 +58,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -17,6 +17,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -7,7 +7,7 @@ plink: docker_compose.yaml
|
||||
options:
|
||||
- option: index
|
||||
value_type: int
|
||||
default_value: "1"
|
||||
default_value: "0"
|
||||
description: index of the container if service has multiple replicas
|
||||
deprecated: false
|
||||
hidden: false
|
||||
@@ -37,6 +37,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -180,6 +180,7 @@ examples: |-
|
||||
The `docker compose ps` command currently only supports the `--filter status=<status>`
|
||||
option, but additional filter options may be added in the future.
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -124,6 +124,7 @@ examples: |-
|
||||
`docker compose pull` will try to pull the image for services with a build section. If the pull fails, it will let
the user know this service image MUST be built. You can skip this by setting the `--ignore-buildable` flag.
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -66,6 +66,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -28,7 +28,7 @@ options:
|
||||
- option: timeout
|
||||
shorthand: t
|
||||
value_type: int
|
||||
default_value: "10"
|
||||
default_value: "0"
|
||||
description: Specify a shutdown timeout in seconds
|
||||
deprecated: false
|
||||
hidden: false
|
||||
@@ -48,6 +48,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -76,6 +76,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -3,7 +3,7 @@ short: Run a one-off command on a service.
|
||||
long: |-
|
||||
Runs a one-time command against a service.
|
||||
|
||||
the following command starts the `web` service and runs `bash` as its command:
|
||||
The following command starts the `web` service and runs `bash` as its command:
|
||||
|
||||
```console
|
||||
$ docker compose run web bash
|
||||
@@ -68,6 +68,24 @@ options:
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: cap-add
|
||||
value_type: list
|
||||
description: Add Linux capabilities
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: cap-drop
|
||||
value_type: list
|
||||
description: Drop Linux capabilities
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
- option: detach
|
||||
shorthand: d
|
||||
value_type: bool
|
||||
@@ -268,6 +286,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -16,6 +16,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -9,7 +9,7 @@ options:
|
||||
- option: timeout
|
||||
shorthand: t
|
||||
value_type: int
|
||||
default_value: "10"
|
||||
default_value: "0"
|
||||
description: Specify a shutdown timeout in seconds
|
||||
deprecated: false
|
||||
hidden: false
|
||||
@@ -29,6 +29,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -23,6 +23,7 @@ examples: |-
|
||||
root 142353 142331 2 15:33 ? 00:00:00 ping localhost -c 5
|
||||
```
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -16,6 +16,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -234,7 +234,7 @@ options:
|
||||
- option: timeout
|
||||
shorthand: t
|
||||
value_type: int
|
||||
default_value: "10"
|
||||
default_value: "0"
|
||||
description: |
|
||||
Use this timeout in seconds for container shutdown when attached or when containers are already running.
|
||||
deprecated: false
|
||||
@@ -285,6 +285,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
@@ -37,6 +37,7 @@ inherited_options:
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
|
||||
35  docs/reference/docker_compose_wait.yaml  Normal file
@@ -0,0 +1,35 @@
|
||||
command: docker compose wait
|
||||
short: Block until the first service container stops
|
||||
long: Block until the first service container stops
|
||||
usage: docker compose wait SERVICE [SERVICE...] [OPTIONS]
|
||||
pname: docker compose
|
||||
plink: docker_compose.yaml
|
||||
options:
|
||||
- option: down-project
|
||||
value_type: bool
|
||||
default_value: "false"
|
||||
description: Drops project when the first container stops
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
inherited_options:
|
||||
- option: dry-run
|
||||
value_type: bool
|
||||
default_value: "false"
|
||||
description: Execute command in dry run mode
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
deprecated: false
|
||||
hidden: false
|
||||
experimental: false
|
||||
experimentalcli: false
|
||||
kubernetes: false
|
||||
swarm: false
|
||||
|
||||
28  e2e/cucumber-features/port-conflict.feature  Normal file
@@ -0,0 +1,28 @@
|
||||
Feature: Report port conflicts
|
||||
|
||||
Background:
|
||||
Given a compose file
|
||||
"""
|
||||
services:
|
||||
web:
|
||||
image: nginx
|
||||
ports:
|
||||
- 31415:80
|
||||
"""
|
||||
And I run "docker rm -f nginx-pi-31415"
|
||||
|
||||
Scenario: Reports a port allocation conflict with another container
|
||||
Given I run "docker run -d -p 31415:80 --name nginx-pi-31415 nginx"
|
||||
When I run "compose up -d"
|
||||
Then the output contains "port is already allocated"
|
||||
And the exit code is 1
|
||||
|
||||
Scenario: Reports a port conflict with some other process
|
||||
Given a process listening on port 31415
|
||||
When I run "compose up -d"
|
||||
Then the output contains "address already in use"
|
||||
And the exit code is 1
|
||||
|
||||
Scenario: Cleanup
|
||||
Given I run "docker rm -f nginx-pi-31415"
@@ -16,6 +16,7 @@ Background:
|
||||
"""
|
||||
FROM golang:1.19-alpine
|
||||
"""
|
||||
And I run "docker rm -f external-test"
|
||||
|
||||
Scenario: external container from compose image exists
|
||||
When I run "compose build"
|
||||
@@ -24,4 +25,5 @@ Scenario: external container from compose image exists
|
||||
Then the exit code is 0
|
||||
And I run "compose ps -a"
|
||||
Then the output does not contain "external-test"
|
||||
And I run "docker rm -f external-test"
@@ -6,7 +6,7 @@ Background:
|
||||
services:
|
||||
should_fail:
|
||||
image: alpine
|
||||
command: ls /does_not_exist
|
||||
command: ['sh', '-c', 'exit 123']
|
||||
sleep: # will be killed
|
||||
image: alpine
|
||||
command: ping localhost
|
||||
@@ -15,15 +15,22 @@ Background:
Scenario: Cascade stop
|
||||
When I run "compose up --abort-on-container-exit"
|
||||
Then the output contains "should_fail-1 exited with code 1"
|
||||
Then the output contains "should_fail-1 exited with code 123"
|
||||
And the output contains "Aborting on container exit..."
|
||||
And the exit code is 1
|
||||
And the exit code is 123
|
||||
|
||||
Scenario: Exit code from
|
||||
When I run "compose up --exit-code-from sleep"
|
||||
Then the output contains "should_fail-1 exited with code 1"
|
||||
When I run "compose up --exit-code-from should_fail"
|
||||
Then the output contains "should_fail-1 exited with code 123"
|
||||
And the output contains "Aborting on container exit..."
|
||||
And the exit code is 143
|
||||
And the exit code is 123
|
||||
|
||||
# TODO: propagating the exit code this way currently does not work properly
|
||||
#Scenario: Exit code from (cascade stop)
|
||||
# When I run "compose up --exit-code-from sleep"
|
||||
# Then the output contains "should_fail-1 exited with code 123"
|
||||
# And the output contains "Aborting on container exit..."
|
||||
# And the exit code is 143
|
||||
|
||||
Scenario: Exit code from unknown service
|
||||
When I run "compose up --exit-code-from unknown"
|
||||
|
||||
@@ -19,6 +19,7 @@ package cucumber
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
@@ -87,6 +88,7 @@ func setup(s *godog.ScenarioContext) {
|
||||
s.Step(`output contains "(.*)"$`, th.outputContains(true))
|
||||
s.Step(`output does not contain "(.*)"$`, th.outputContains(false))
|
||||
s.Step(`exit code is (\d+)$`, th.exitCodeIs)
|
||||
s.Step(`a process listening on port (\d+)$`, th.listenerOnPort)
|
||||
}
|
||||
|
||||
type testHelper struct {
|
||||
@@ -174,3 +176,16 @@ func (th *testHelper) setDockerfile(dockerfileString string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (th *testHelper) listenerOnPort(port int) error {
|
||||
l, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
th.T.Cleanup(func() {
|
||||
_ = l.Close()
|
||||
})
|
||||
|
||||
return nil
|
||||
}
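`listenerOnPort` backs the new `a process listening on port <port>` step in the port-conflict feature: it simply holds a TCP listener open for the scenario's lifetime. A self-contained sketch of the behaviour the scenario then asserts on, namely that a second bind on the same port fails (the exact error text, e.g. "address already in use", is platform-dependent):

```go
package sketch

import (
	"fmt"
	"net"
)

// demoPortConflict binds a port, then shows that a second bind on the
// same port fails; port 31415 matches the fixture used in the feature.
func demoPortConflict() error {
	l, err := net.Listen("tcp", ":31415")
	if err != nil {
		return err
	}
	defer l.Close()

	if _, err := net.Listen("tcp", ":31415"); err != nil {
		fmt.Println("second bind failed:", err)
	}
	return nil
}
```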
195  go.mod
@@ -1,105 +1,121 @@
|
||||
module github.com/docker/compose/v2
|
||||
|
||||
go 1.20
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/AlecAivazis/survey/v2 v2.3.6
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7
|
||||
github.com/Microsoft/go-winio v0.6.1
|
||||
github.com/buger/goterm v1.0.4
|
||||
github.com/compose-spec/compose-go v1.13.5
|
||||
github.com/compose-spec/compose-go v1.18.1
|
||||
github.com/containerd/console v1.0.3
|
||||
github.com/containerd/containerd v1.6.21
|
||||
github.com/containerd/containerd v1.7.3
|
||||
github.com/cucumber/godog v0.0.0-00010101000000-000000000000 // replaced; see replace for the actual version used
|
||||
github.com/distribution/distribution/v3 v3.0.0-20230327091844-0c958010ace2
|
||||
github.com/docker/buildx v0.10.4
|
||||
github.com/docker/cli v23.0.6+incompatible
|
||||
github.com/docker/cli-docs-tool v0.5.1
|
||||
github.com/docker/docker v23.0.6+incompatible
|
||||
github.com/distribution/distribution/v3 v3.0.0-20230601133803-97b1d649c493
|
||||
github.com/docker/buildx v0.11.2
|
||||
github.com/docker/cli v24.0.5+incompatible
|
||||
github.com/docker/cli-docs-tool v0.6.0
|
||||
github.com/docker/docker v24.0.5+incompatible // v24.0.5-dev
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/docker/go-units v0.5.0
|
||||
github.com/fsnotify/fsevents v0.1.1
|
||||
github.com/golang/mock v1.6.0
|
||||
github.com/hashicorp/go-multierror v1.1.1
|
||||
github.com/hashicorp/go-version v1.6.0
|
||||
github.com/jonboulle/clockwork v0.4.0
|
||||
github.com/mattn/go-shellwords v1.0.12
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/moby/buildkit v0.11.6
|
||||
github.com/moby/buildkit v0.12.1 // v0.12 release branch
|
||||
github.com/moby/patternmatcher v0.5.0
|
||||
github.com/moby/term v0.5.0
|
||||
github.com/morikuni/aec v1.0.0
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b
|
||||
github.com/opencontainers/image-spec v1.1.0-rc4
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.7.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.8.2
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/theupdateframework/notary v0.7.0
|
||||
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375
|
||||
go.opentelemetry.io/otel v1.15.1
|
||||
go.opentelemetry.io/otel v1.14.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0
|
||||
go.opentelemetry.io/otel/sdk v1.14.0
|
||||
go.opentelemetry.io/otel/trace v1.14.0
|
||||
go.uber.org/goleak v1.2.1
|
||||
golang.org/x/sync v0.2.0
|
||||
golang.org/x/sync v0.3.0
|
||||
google.golang.org/grpc v1.57.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gotest.tools/v3 v3.4.0
|
||||
gotest.tools/v3 v3.5.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.15.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 // indirect
|
||||
github.com/aws/smithy-go v1.11.2 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.2.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.16 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.6 // indirect
|
||||
github.com/aws/smithy-go v1.13.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bugsnag/bugsnag-go v1.5.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cloudflare/cfssl v1.4.1
|
||||
github.com/containerd/continuity v0.3.0 // indirect
|
||||
github.com/containerd/ttrpc v1.1.1 // indirect
|
||||
github.com/containerd/typeurl v1.0.2 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cloudflare/cfssl v1.6.4 // indirect
|
||||
github.com/containerd/continuity v0.4.1 // indirect
|
||||
github.com/containerd/typeurl/v2 v2.1.1 // indirect
|
||||
github.com/cucumber/gherkin-go/v19 v19.0.3 // indirect
|
||||
github.com/cucumber/messages-go/v16 v16.0.1 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.2+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.2 // indirect
|
||||
github.com/fsnotify/fsevents v0.1.1
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.3 // indirect
|
||||
github.com/fvbommel/sortorder v1.0.2 // indirect
|
||||
github.com/go-logr/logr v1.2.4 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/gofrs/flock v0.8.1 // indirect
|
||||
github.com/gofrs/uuid v4.2.0+incompatible // indirect
|
||||
github.com/gogo/googleapis v1.4.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||
github.com/hashicorp/go-memdb v1.3.2 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/imdario/mergo v0.3.15 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/in-toto/in-toto-golang v0.5.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jinzhu/gorm v1.9.11 // indirect
|
||||
github.com/jonboulle/clockwork v0.4.0
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/klauspost/compress v1.15.12 // indirect
|
||||
github.com/klauspost/compress v1.16.5 // indirect
|
||||
github.com/kr/pretty v0.3.0 // indirect
|
||||
github.com/magiconair/properties v1.8.6 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.17 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
@@ -107,7 +123,6 @@ require (
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
|
||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/patternmatcher v0.5.0
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/sys/mountinfo v0.6.2 // indirect
|
||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||
@@ -115,75 +130,65 @@ require (
|
||||
github.com/moby/sys/symlink v0.2.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/opencontainers/runc v1.1.5 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/opencontainers/runc v1.1.7 // indirect
|
||||
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.14.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/prometheus/common v0.42.0 // indirect
|
||||
github.com/prometheus/procfs v0.9.0 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
|
||||
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
|
||||
github.com/shibumi/go-pathspec v1.3.0 // indirect
|
||||
github.com/spf13/afero v1.9.2 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/subosito/gotenv v1.4.1 // indirect
|
||||
github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa // indirect
|
||||
github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb // indirect
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
|
||||
github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f // indirect
|
||||
github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/zmap/zcrypto v0.0.0-20220605182715-4dfcec6e9a8c // indirect
|
||||
github.com/zmap/zlint v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 // indirect
|
||||
go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.27.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.4.1 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.15.1 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.12.0 // indirect
|
||||
golang.org/x/crypto v0.2.0 // indirect
|
||||
golang.org/x/net v0.7.0 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
golang.org/x/term v0.5.0 // indirect
|
||||
golang.org/x/text v0.7.0 // indirect
|
||||
golang.org/x/time v0.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.40.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.37.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
|
||||
golang.org/x/crypto v0.7.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
|
||||
golang.org/x/mod v0.11.0 // indirect
|
||||
golang.org/x/net v0.9.0 // indirect
|
||||
golang.org/x/oauth2 v0.7.0 // indirect
|
||||
golang.org/x/sys v0.7.0 // indirect
|
||||
golang.org/x/term v0.7.0 // indirect
|
||||
golang.org/x/text v0.9.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.7.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect
|
||||
google.golang.org/grpc v1.50.1 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/api v0.24.1 // indirect; replaced; see replace for the actual version used
|
||||
k8s.io/apimachinery v0.24.1 // indirect; replaced; see replace for the actual version used
|
||||
k8s.io/client-go v0.24.1 // indirect; replaced; see replace for the actual version used
|
||||
k8s.io/klog/v2 v2.60.1 // indirect
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
sigs.k8s.io/yaml v1.2.0 // indirect
|
||||
k8s.io/api v0.26.2 // indirect
|
||||
k8s.io/apimachinery v0.26.2 // indirect
|
||||
k8s.io/client-go v0.26.2 // indirect
|
||||
k8s.io/klog/v2 v2.90.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
|
||||
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
replace (
|
||||
// Override for e2e tests
|
||||
github.com/cucumber/godog => github.com/laurazard/godog v0.0.0-20220922095256-4c4b17abdae7
|
||||
|
||||
golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783
|
||||
|
||||
// For k8s dependencies, we use a replace directive, to prevent them being
|
||||
// upgraded to the version specified in containerd, which is not relevant to the
|
||||
// version needed.
|
||||
// See https://github.com/docker/buildx/pull/948 for details.
|
||||
// https://github.com/docker/buildx/blob/v0.9.1/go.mod#L62-L64
|
||||
k8s.io/api => k8s.io/api v0.22.4
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.22.4
|
||||
k8s.io/client-go => k8s.io/client-go v0.22.4
|
||||
)
|
||||
// Override for e2e tests
|
||||
replace github.com/cucumber/godog => github.com/laurazard/godog v0.0.0-20220922095256-4c4b17abdae7
|
||||
|
||||
107
internal/sync/docker_cp.go
Normal file
@@ -0,0 +1,107 @@
|
||||
/*
|
||||
Copyright 2023 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/docker/compose/v2/pkg/api"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type ComposeClient interface {
|
||||
Exec(ctx context.Context, projectName string, options api.RunOptions) (int, error)
|
||||
|
||||
Copy(ctx context.Context, projectName string, options api.CopyOptions) error
|
||||
}
|
||||
|
||||
type DockerCopy struct {
|
||||
client ComposeClient
|
||||
|
||||
projectName string
|
||||
|
||||
infoWriter io.Writer
|
||||
}
|
||||
|
||||
var _ Syncer = &DockerCopy{}
|
||||
|
||||
func NewDockerCopy(projectName string, client ComposeClient, infoWriter io.Writer) *DockerCopy {
|
||||
return &DockerCopy{
|
||||
projectName: projectName,
|
||||
client: client,
|
||||
infoWriter: infoWriter,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DockerCopy) Sync(ctx context.Context, service types.ServiceConfig, paths []PathMapping) error {
|
||||
var errs []error
|
||||
for i := range paths {
|
||||
if err := d.sync(ctx, service, paths[i]); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
func (d *DockerCopy) sync(ctx context.Context, service types.ServiceConfig, pathMapping PathMapping) error {
|
||||
scale := 1
|
||||
if service.Deploy != nil && service.Deploy.Replicas != nil {
|
||||
scale = int(*service.Deploy.Replicas)
|
||||
}
|
||||
|
||||
if fi, statErr := os.Stat(pathMapping.HostPath); statErr == nil {
|
||||
if fi.IsDir() {
|
||||
for i := 1; i <= scale; i++ {
|
||||
_, err := d.client.Exec(ctx, d.projectName, api.RunOptions{
|
||||
Service: service.Name,
|
||||
Command: []string{"mkdir", "-p", pathMapping.ContainerPath},
|
||||
Index: i,
|
||||
})
|
||||
if err != nil {
|
||||
logrus.Warnf("failed to create %q from %s: %v", pathMapping.ContainerPath, service.Name, err)
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(d.infoWriter, "%s created\n", pathMapping.ContainerPath)
|
||||
} else {
|
||||
err := d.client.Copy(ctx, d.projectName, api.CopyOptions{
|
||||
Source: pathMapping.HostPath,
|
||||
Destination: fmt.Sprintf("%s:%s", service.Name, pathMapping.ContainerPath),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(d.infoWriter, "%s updated\n", pathMapping.ContainerPath)
|
||||
}
|
||||
} else if errors.Is(statErr, fs.ErrNotExist) {
|
||||
for i := 1; i <= scale; i++ {
|
||||
_, err := d.client.Exec(ctx, d.projectName, api.RunOptions{
|
||||
Service: service.Name,
|
||||
Command: []string{"rm", "-rf", pathMapping.ContainerPath},
|
||||
Index: i,
|
||||
})
|
||||
if err != nil {
|
||||
logrus.Warnf("failed to delete %q from %s: %v", pathMapping.ContainerPath, service.Name, err)
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(d.infoWriter, "%s deleted from service\n", pathMapping.ContainerPath)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
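The DockerCopy syncer above drives the Compose Exec and Copy APIs through the ComposeClient interface. The following is an illustrative sketch only, not part of the change: it assumes it would live in the same internal/sync package, and composeService stands for any value implementing ComposeClient.

package sync

import (
	"context"
	"os"

	"github.com/compose-spec/compose-go/types"
)

// exampleDockerCopySync is a hypothetical caller: it pushes one changed file
// into the "web" service using the DockerCopy syncer above.
func exampleDockerCopySync(ctx context.Context, composeService ComposeClient) error {
	syncer := NewDockerCopy("myproject", composeService, os.Stdout)
	service := types.ServiceConfig{Name: "web"}
	paths := []PathMapping{
		{HostPath: "/Users/moby/app/main.go", ContainerPath: "/workdir/main.go"},
	}
	// For an existing regular file this ends up calling Copy; a path deleted on
	// the host would instead trigger an exec of "rm -rf" in each replica.
	return syncer.Sync(ctx, service, paths)
}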
42
internal/sync/shared.go
Normal file
@@ -0,0 +1,42 @@
|
||||
/*
|
||||
Copyright 2023 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
)
|
||||
|
||||
// PathMapping contains the Compose service and modified host system path.
|
||||
type PathMapping struct {
|
||||
// HostPath that was created/modified/deleted outside the container.
|
||||
//
|
||||
// This is the path as seen from the user's perspective, e.g.
|
||||
// - C:\Users\moby\Documents\hello-world\main.go (file on Windows)
|
||||
// - /Users/moby/Documents/hello-world (directory on macOS)
|
||||
HostPath string
|
||||
// ContainerPath for the target file inside the container (only populated
|
||||
// for sync events, not rebuild).
|
||||
//
|
||||
// This is the path as used in Docker CLI commands, e.g.
|
||||
// - /workdir/main.go
|
||||
// - /workdir/subdir
|
||||
ContainerPath string
|
||||
}
|
||||
|
||||
type Syncer interface {
|
||||
Sync(ctx context.Context, service types.ServiceConfig, paths []PathMapping) error
|
||||
}
|
||||
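PathMapping and Syncer define the contract shared by both sync implementations in this change. As a rough sketch (not part of the diff, and again assuming the same package), a trivial Syncer that only logs the mappings could look like this; it can serve as a stand-in during testing:

package sync

import (
	"context"
	"fmt"
	"io"

	"github.com/compose-spec/compose-go/types"
)

// loggingSyncer is a hypothetical Syncer that records what would be synced
// instead of copying anything.
type loggingSyncer struct {
	out io.Writer
}

var _ Syncer = loggingSyncer{}

func (l loggingSyncer) Sync(_ context.Context, service types.ServiceConfig, paths []PathMapping) error {
	for _, p := range paths {
		// HostPath is the path as the user sees it; ContainerPath is where the
		// file lives inside the service's containers.
		fmt.Fprintf(l.out, "%s: %s -> %s\n", service.Name, p.HostPath, p.ContainerPath)
	}
	return nil
}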
354
internal/sync/tar.go
Normal file
@@ -0,0 +1,354 @@
|
||||
/*
|
||||
Copyright 2018 The Tilt Dev Authors
|
||||
Copyright 2023 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
moby "github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
)
|
||||
|
||||
type archiveEntry struct {
|
||||
path string
|
||||
info os.FileInfo
|
||||
header *tar.Header
|
||||
}
|
||||
|
||||
type LowLevelClient interface {
|
||||
ContainersForService(ctx context.Context, projectName string, serviceName string) ([]moby.Container, error)
|
||||
|
||||
Exec(ctx context.Context, containerID string, cmd []string, in io.Reader) error
|
||||
}
|
||||
|
||||
type Tar struct {
|
||||
client LowLevelClient
|
||||
|
||||
projectName string
|
||||
}
|
||||
|
||||
var _ Syncer = &Tar{}
|
||||
|
||||
func NewTar(projectName string, client LowLevelClient) *Tar {
|
||||
return &Tar{
|
||||
projectName: projectName,
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tar) Sync(ctx context.Context, service types.ServiceConfig, paths []PathMapping) error {
|
||||
containers, err := t.client.ContainersForService(ctx, t.projectName, service.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var pathsToCopy []PathMapping
|
||||
var pathsToDelete []string
|
||||
for _, p := range paths {
|
||||
if _, err := os.Stat(p.HostPath); err != nil && errors.Is(err, fs.ErrNotExist) {
|
||||
pathsToDelete = append(pathsToDelete, p.ContainerPath)
|
||||
} else {
|
||||
pathsToCopy = append(pathsToCopy, p)
|
||||
}
|
||||
}
|
||||
|
||||
var deleteCmd []string
|
||||
if len(pathsToDelete) != 0 {
|
||||
deleteCmd = append([]string{"rm", "-rf"}, pathsToDelete...)
|
||||
}
|
||||
copyCmd := []string{"tar", "-v", "-C", "/", "-x", "-f", "-"}
|
||||
|
||||
var eg multierror.Group
|
||||
writers := make([]*io.PipeWriter, len(containers))
|
||||
for i := range containers {
|
||||
containerID := containers[i].ID
|
||||
r, w := io.Pipe()
|
||||
writers[i] = w
|
||||
eg.Go(func() error {
|
||||
if len(deleteCmd) != 0 {
|
||||
if err := t.client.Exec(ctx, containerID, deleteCmd, nil); err != nil {
|
||||
return fmt.Errorf("deleting paths in %s: %w", containerID, err)
|
||||
}
|
||||
}
|
||||
if err := t.client.Exec(ctx, containerID, copyCmd, r); err != nil {
|
||||
return fmt.Errorf("copying files to %s: %w", containerID, err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
multiWriter := newLossyMultiWriter(writers...)
|
||||
tarReader := tarArchive(pathsToCopy)
|
||||
defer func() {
|
||||
_ = tarReader.Close()
|
||||
multiWriter.Close()
|
||||
}()
|
||||
_, err = io.Copy(multiWriter, tarReader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
multiWriter.Close()
|
||||
|
||||
return eg.Wait().ErrorOrNil()
|
||||
}
|
||||
|
||||
type ArchiveBuilder struct {
|
||||
tw *tar.Writer
|
||||
// A shared I/O buffer to help with file copying.
|
||||
copyBuf *bytes.Buffer
|
||||
}
|
||||
|
||||
func NewArchiveBuilder(writer io.Writer) *ArchiveBuilder {
|
||||
tw := tar.NewWriter(writer)
|
||||
return &ArchiveBuilder{
|
||||
tw: tw,
|
||||
copyBuf: &bytes.Buffer{},
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ArchiveBuilder) Close() error {
|
||||
return a.tw.Close()
|
||||
}
|
||||
|
||||
// ArchivePathsIfExist creates a tar archive of all local files in `paths`. It quietly skips any paths that don't exist.
|
||||
func (a *ArchiveBuilder) ArchivePathsIfExist(paths []PathMapping) error {
|
||||
// In order to handle overlapping syncs, we
|
||||
// 1) collect all the entries,
|
||||
// 2) de-dupe them, with last-one-wins semantics
|
||||
// 3) write all the entries
|
||||
//
|
||||
// It's not obvious that this is the correct behavior. A better approach
|
||||
// (that's more in-line with how syncs work) might ignore files in earlier
|
||||
// path mappings when we know they're going to be "synced" over.
|
||||
// There's a bunch of subtle product decisions about how overlapping path
|
||||
// mappings work that we're not sure about.
|
||||
var entries []archiveEntry
|
||||
for _, p := range paths {
|
||||
newEntries, err := a.entriesForPath(p.HostPath, p.ContainerPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("inspecting %q: %w", p.HostPath, err)
|
||||
}
|
||||
|
||||
entries = append(entries, newEntries...)
|
||||
}
|
||||
|
||||
entries = dedupeEntries(entries)
|
||||
for _, entry := range entries {
|
||||
err := a.writeEntry(entry)
|
||||
if err != nil {
|
||||
return fmt.Errorf("archiving %q: %w", entry.path, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ArchiveBuilder) writeEntry(entry archiveEntry) error {
|
||||
pathInTar := entry.path
|
||||
header := entry.header
|
||||
|
||||
if header.Typeflag != tar.TypeReg {
|
||||
// anything other than a regular file (e.g. dir, symlink) just needs the header
|
||||
if err := a.tw.WriteHeader(header); err != nil {
|
||||
return fmt.Errorf("writing %q header: %w", pathInTar, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
file, err := os.Open(pathInTar)
|
||||
if err != nil {
|
||||
// In case the file has been deleted since we last looked at it.
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = file.Close()
|
||||
}()
|
||||
|
||||
// The size header must match the number of contents bytes.
|
||||
//
|
||||
// There is room for a race condition here if something writes to the file
|
||||
// after we've read the file size.
|
||||
//
|
||||
// For small files, we avoid this by first copying the file into a buffer,
|
||||
// and using the size of the buffer to populate the header.
|
||||
//
|
||||
// For larger files, we don't want to copy the whole thing into a buffer,
|
||||
// because that would blow up heap size. There is some danger that this
|
||||
// will lead to a spurious error when the tar writer validates the sizes.
|
||||
// That error will be disruptive but will be handled as best as we
|
||||
// can downstream.
|
||||
useBuf := header.Size < 5000000
|
||||
if useBuf {
|
||||
a.copyBuf.Reset()
|
||||
_, err = io.Copy(a.copyBuf, file)
|
||||
if err != nil && err != io.EOF {
|
||||
return fmt.Errorf("copying %q: %w", pathInTar, err)
|
||||
}
|
||||
header.Size = int64(len(a.copyBuf.Bytes()))
|
||||
}
|
||||
|
||||
// wait to write the header until _after_ the file is successfully opened
|
||||
// to avoid generating an invalid tar entry that has a header but no contents
|
||||
// in the case the file has been deleted
|
||||
err = a.tw.WriteHeader(header)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing %q header: %w", pathInTar, err)
|
||||
}
|
||||
|
||||
if useBuf {
|
||||
_, err = io.Copy(a.tw, a.copyBuf)
|
||||
} else {
|
||||
_, err = io.Copy(a.tw, file)
|
||||
}
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
return fmt.Errorf("copying %q: %w", pathInTar, err)
|
||||
}
|
||||
|
||||
// explicitly flush so that if the entry is invalid we will detect it now and
|
||||
// provide a more meaningful error
|
||||
if err := a.tw.Flush(); err != nil {
|
||||
return fmt.Errorf("finalizing %q: %w", pathInTar, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// entriesForPath collects the archive entries for the given source path at the given dest (recursively for directories).
// e.g. archiving my_dir --> dest d: d/file_a, d/file_b
// If the source path does not exist, it quietly skips it and returns no error.
|
||||
func (a *ArchiveBuilder) entriesForPath(localPath, containerPath string) ([]archiveEntry, error) {
|
||||
localInfo, err := os.Stat(localPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localPathIsDir := localInfo.IsDir()
|
||||
if localPathIsDir {
|
||||
// Make sure we can trim this off filenames to get valid relative filepaths
|
||||
if !strings.HasSuffix(localPath, string(filepath.Separator)) {
|
||||
localPath += string(filepath.Separator)
|
||||
}
|
||||
}
|
||||
|
||||
containerPath = strings.TrimPrefix(containerPath, "/")
|
||||
|
||||
result := make([]archiveEntry, 0)
|
||||
err = filepath.Walk(localPath, func(curLocalPath string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("walking %q: %w", curLocalPath, err)
|
||||
}
|
||||
|
||||
linkname := ""
|
||||
if info.Mode()&os.ModeSymlink != 0 {
|
||||
var err error
|
||||
linkname, err = os.Readlink(curLocalPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var name string
|
||||
//nolint:gocritic
|
||||
if localPathIsDir {
|
||||
// Name of file in tar should be relative to source directory...
|
||||
tmp, err := filepath.Rel(localPath, curLocalPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("making %q relative to %q: %w", curLocalPath, localPath, err)
|
||||
}
|
||||
// ...and live inside `dest`
|
||||
name = path.Join(containerPath, filepath.ToSlash(tmp))
|
||||
} else if strings.HasSuffix(containerPath, "/") {
|
||||
name = containerPath + filepath.Base(curLocalPath)
|
||||
} else {
|
||||
name = containerPath
|
||||
}
|
||||
|
||||
header, err := archive.FileInfoHeader(name, info, linkname)
|
||||
if err != nil {
|
||||
// Not all types of files are allowed in a tarball. That's OK.
|
||||
// Mimic the Docker behavior and just skip the file.
|
||||
return nil
|
||||
}
|
||||
|
||||
result = append(result, archiveEntry{
|
||||
path: curLocalPath,
|
||||
info: info,
|
||||
header: header,
|
||||
})
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func tarArchive(ops []PathMapping) io.ReadCloser {
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
ab := NewArchiveBuilder(pw)
|
||||
err := ab.ArchivePathsIfExist(ops)
|
||||
if err != nil {
|
||||
_ = pw.CloseWithError(fmt.Errorf("adding files to tar: %w", err))
|
||||
} else {
|
||||
// propagate errors from the TarWriter::Close() because it performs a final
|
||||
// Flush() and any errors mean the tar is invalid
|
||||
if err := ab.Close(); err != nil {
|
||||
_ = pw.CloseWithError(fmt.Errorf("closing tar: %w", err))
|
||||
} else {
|
||||
_ = pw.Close()
|
||||
}
|
||||
}
|
||||
}()
|
||||
return pr
|
||||
}
|
||||
|
||||
// Dedupe the entries with last-entry-wins semantics.
|
||||
func dedupeEntries(entries []archiveEntry) []archiveEntry {
|
||||
seenIndex := make(map[string]int, len(entries))
|
||||
result := make([]archiveEntry, 0, len(entries))
|
||||
for i, entry := range entries {
|
||||
seenIndex[entry.header.Name] = i
|
||||
}
|
||||
for i, entry := range entries {
|
||||
if seenIndex[entry.header.Name] == i {
|
||||
result = append(result, entry)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
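Tar.Sync above streams a single tar archive, built by ArchiveBuilder, into `tar -x` running in every container of the service. A minimal sketch (not part of the diff; same-package assumption, placeholder host paths) of inspecting that stream locally:

package sync

import (
	"archive/tar"
	"fmt"
	"io"
)

// exampleListArchive builds the same pipe-backed tar stream that Tar.Sync pipes
// into the containers and prints the entry names it contains. Host paths that
// do not exist are quietly skipped by ArchivePathsIfExist.
func exampleListArchive() error {
	paths := []PathMapping{
		{HostPath: "/Users/moby/app/main.go", ContainerPath: "/workdir/main.go"},
		{HostPath: "/Users/moby/app/static", ContainerPath: "/workdir/static"},
	}
	r := tarArchive(paths)
	defer r.Close()

	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		// Entry names are container paths with the leading "/" trimmed, as
		// produced by entriesForPath.
		fmt.Println(hdr.Name)
	}
}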
91
internal/sync/writer.go
Normal file
@@ -0,0 +1,91 @@
|
||||
/*
|
||||
Copyright 2023 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// lossyMultiWriter attempts to tee all writes to the provided io.PipeWriter
|
||||
// instances.
|
||||
//
|
||||
// If a writer fails during a Write call, the write-side of the pipe is then
|
||||
// closed with the error and no subsequent attempts are made to write to the
|
||||
// pipe.
|
||||
//
|
||||
// If all writers fail during a write, an error is returned.
|
||||
//
|
||||
// On Close, any remaining writers are closed.
|
||||
type lossyMultiWriter struct {
|
||||
writers []*io.PipeWriter
|
||||
}
|
||||
|
||||
// newLossyMultiWriter creates a new writer that *attempts* to tee all data written to it to the provided io.PipeWriter
|
||||
// instances. Rather than failing a write operation if any writer fails, writes only fail if there are no more valid
|
||||
// writers. Otherwise, errors for specific writers are propagated via CloseWithError.
|
||||
func newLossyMultiWriter(writers ...*io.PipeWriter) *lossyMultiWriter {
|
||||
// reverse the writers because during the write we iterate
|
||||
// backwards, so this way we'll end up writing in the same
|
||||
// order as the writers were passed to us
|
||||
writers = append([]*io.PipeWriter(nil), writers...)
|
||||
for i, j := 0, len(writers)-1; i < j; i, j = i+1, j-1 {
|
||||
writers[i], writers[j] = writers[j], writers[i]
|
||||
}
|
||||
|
||||
return &lossyMultiWriter{
|
||||
writers: writers,
|
||||
}
|
||||
}
|
||||
|
||||
// Write writes to each writer that is still active (i.e. has not failed/encountered an error on write).
|
||||
//
|
||||
// If a writer encounters an error during the write, the write side of the pipe is closed with the error
|
||||
// and no subsequent attempts will be made to write to that writer.
|
||||
//
|
||||
// An error is only returned from this function if ALL writers have failed.
|
||||
func (l *lossyMultiWriter) Write(p []byte) (int, error) {
|
||||
// NOTE: this function iterates backwards so that it can
|
||||
// safely remove elements during the loop
|
||||
for i := len(l.writers) - 1; i >= 0; i-- {
|
||||
written, err := l.writers[i].Write(p)
|
||||
if err == nil && written != len(p) {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
if err != nil {
|
||||
// pipe writer close cannot fail
|
||||
_ = l.writers[i].CloseWithError(err)
|
||||
l.writers = append(l.writers[:i], l.writers[i+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
if len(l.writers) == 0 {
|
||||
return 0, errors.New("no writers remaining")
|
||||
}
|
||||
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Close closes any still open (non-failed) writers.
|
||||
//
|
||||
// Failed writers have already been closed with an error.
|
||||
func (l *lossyMultiWriter) Close() {
|
||||
for i := range l.writers {
|
||||
// pipe writer close cannot fail
|
||||
_ = l.writers[i].Close()
|
||||
}
|
||||
}
|
||||
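The lossy tee behaviour is easiest to see in isolation. A small sketch (not part of the diff; same-package assumption): one receiving pipe is broken up front, yet Write keeps succeeding because at least one healthy writer remains.

package sync

import (
	"errors"
	"fmt"
	"io"
)

func exampleLossyWrite() {
	r1, w1 := io.Pipe()
	r2, w2 := io.Pipe()

	// Drain the healthy pipe so writes don't block, and break the other one.
	go func() { _, _ = io.Copy(io.Discard, r1) }()
	_ = r2.CloseWithError(errors.New("receiver went away"))

	w := newLossyMultiWriter(w1, w2)
	defer w.Close()

	_, err := w.Write([]byte("hello"))
	// err is nil: the failed writer was closed with its error and dropped,
	// while w1 still received the data. Write only errors once no writers remain.
	fmt.Println(err)
}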
152
internal/sync/writer_test.go
Normal file
@@ -0,0 +1,152 @@
|
||||
/*
|
||||
Copyright 2023 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLossyMultiWriter(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
|
||||
const count = 5
|
||||
readers := make([]*bufReader, count)
|
||||
writers := make([]*io.PipeWriter, count)
|
||||
for i := 0; i < count; i++ {
|
||||
r, w := io.Pipe()
|
||||
readers[i] = newBufReader(ctx, r)
|
||||
writers[i] = w
|
||||
}
|
||||
|
||||
w := newLossyMultiWriter(writers...)
|
||||
t.Cleanup(w.Close)
|
||||
n, err := w.Write([]byte("hello world"))
|
||||
require.Equal(t, 11, n)
|
||||
require.NoError(t, err)
|
||||
for i := range readers {
|
||||
readers[i].waitForWrite(t)
|
||||
require.Equal(t, "hello world", string(readers[i].contents()))
|
||||
readers[i].reset()
|
||||
}
|
||||
|
||||
// even if a writer fails (in this case simulated by closing the receiving end of the pipe),
|
||||
// write operations should continue to return nil error but the writer should be closed
|
||||
// with an error
|
||||
const failIndex = 3
|
||||
require.NoError(t, readers[failIndex].r.CloseWithError(errors.New("oh no")))
|
||||
n, err = w.Write([]byte("hello"))
|
||||
require.Equal(t, 5, n)
|
||||
require.NoError(t, err)
|
||||
for i := range readers {
|
||||
readers[i].waitForWrite(t)
|
||||
if i == failIndex {
|
||||
err := readers[i].error()
|
||||
require.EqualError(t, err, "io: read/write on closed pipe")
|
||||
require.Empty(t, readers[i].contents())
|
||||
} else {
|
||||
require.Equal(t, "hello", string(readers[i].contents()))
|
||||
}
|
||||
}
|
||||
|
||||
// perform another write, verify there's still no errors
|
||||
n, err = w.Write([]byte(" world"))
|
||||
require.Equal(t, 6, n)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
type bufReader struct {
|
||||
ctx context.Context
|
||||
r *io.PipeReader
|
||||
mu sync.Mutex
|
||||
err error
|
||||
data []byte
|
||||
writeSync chan struct{}
|
||||
}
|
||||
|
||||
func newBufReader(ctx context.Context, r *io.PipeReader) *bufReader {
|
||||
b := &bufReader{
|
||||
ctx: ctx,
|
||||
r: r,
|
||||
writeSync: make(chan struct{}),
|
||||
}
|
||||
go b.consume()
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *bufReader) waitForWrite(t testing.TB) {
|
||||
t.Helper()
|
||||
select {
|
||||
case <-b.writeSync:
|
||||
return
|
||||
case <-time.After(50 * time.Millisecond):
|
||||
t.Fatal("timed out waiting for write")
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bufReader) consume() {
|
||||
defer close(b.writeSync)
|
||||
for {
|
||||
buf := make([]byte, 512)
|
||||
n, err := b.r.Read(buf)
|
||||
if n != 0 {
|
||||
b.mu.Lock()
|
||||
b.data = append(b.data, buf[:n]...)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
if err == io.EOF {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
b.mu.Lock()
|
||||
b.err = err
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
// prevent goroutine leak, tie lifetime to the test
|
||||
select {
|
||||
case b.writeSync <- struct{}{}:
|
||||
case <-b.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bufReader) contents() []byte {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.data
|
||||
}
|
||||
|
||||
func (b *bufReader) reset() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
b.data = nil
|
||||
}
|
||||
|
||||
func (b *bufReader) error() error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.err
|
||||
}
|
||||
152
internal/tracing/attributes.go
Normal file
@@ -0,0 +1,152 @@
|
||||
/*
|
||||
Copyright 2020 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package tracing
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
moby "github.com/docker/docker/api/types"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// SpanOptions is a small helper type to make it easy to share the options helpers between
|
||||
// downstream functions that accept slices of trace.SpanStartOption and trace.EventOption.
|
||||
type SpanOptions []trace.SpanStartEventOption
|
||||
|
||||
func (s SpanOptions) SpanStartOptions() []trace.SpanStartOption {
|
||||
out := make([]trace.SpanStartOption, len(s))
|
||||
for i := range s {
|
||||
out[i] = s[i]
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (s SpanOptions) EventOptions() []trace.EventOption {
|
||||
out := make([]trace.EventOption, len(s))
|
||||
for i := range s {
|
||||
out[i] = s[i]
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// ProjectOptions returns common attributes from a Compose project.
|
||||
//
|
||||
// For convenience, it's returned as a SpanOptions object to allow it to be
|
||||
// passed directly to the wrapping helper methods in this package such as
|
||||
// SpanWrapFunc.
|
||||
func ProjectOptions(proj *types.Project) SpanOptions {
|
||||
if proj == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
disabledServiceNames := make([]string, len(proj.DisabledServices))
|
||||
for i := range proj.DisabledServices {
|
||||
disabledServiceNames[i] = proj.DisabledServices[i].Name
|
||||
}
|
||||
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("project.name", proj.Name),
|
||||
attribute.String("project.dir", proj.WorkingDir),
|
||||
attribute.StringSlice("project.compose_files", proj.ComposeFiles),
|
||||
attribute.StringSlice("project.services.active", proj.ServiceNames()),
|
||||
attribute.StringSlice("project.services.disabled", disabledServiceNames),
|
||||
attribute.StringSlice("project.profiles", proj.Profiles),
|
||||
attribute.StringSlice("project.volumes", proj.VolumeNames()),
|
||||
attribute.StringSlice("project.networks", proj.NetworkNames()),
|
||||
attribute.StringSlice("project.secrets", proj.SecretNames()),
|
||||
attribute.StringSlice("project.configs", proj.ConfigNames()),
|
||||
attribute.StringSlice("project.extensions", keys(proj.Extensions)),
|
||||
}
|
||||
return []trace.SpanStartEventOption{
|
||||
trace.WithAttributes(attrs...),
|
||||
}
|
||||
}
|
||||
|
||||
// ServiceOptions returns common attributes from a Compose service.
|
||||
//
|
||||
// For convenience, it's returned as a SpanOptions object to allow it to be
|
||||
// passed directly to the wrapping helper methods in this package such as
|
||||
// SpanWrapFunc.
|
||||
func ServiceOptions(service types.ServiceConfig) SpanOptions {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("service.name", service.Name),
|
||||
attribute.String("service.image", service.Image),
|
||||
attribute.StringSlice("service.networks", keys(service.Networks)),
|
||||
}
|
||||
|
||||
configNames := make([]string, len(service.Configs))
|
||||
for i := range service.Configs {
|
||||
configNames[i] = service.Configs[i].Source
|
||||
}
|
||||
attrs = append(attrs, attribute.StringSlice("service.configs", configNames))
|
||||
|
||||
secretNames := make([]string, len(service.Secrets))
|
||||
for i := range service.Secrets {
|
||||
secretNames[i] = service.Secrets[i].Source
|
||||
}
|
||||
attrs = append(attrs, attribute.StringSlice("service.secrets", secretNames))
|
||||
|
||||
volNames := make([]string, len(service.Volumes))
|
||||
for i := range service.Volumes {
|
||||
volNames[i] = service.Volumes[i].Source
|
||||
}
|
||||
attrs = append(attrs, attribute.StringSlice("service.volumes", volNames))
|
||||
|
||||
return []trace.SpanStartEventOption{
|
||||
trace.WithAttributes(attrs...),
|
||||
}
|
||||
}
|
||||
|
||||
// ContainerOptions returns common attributes from a Moby container.
|
||||
//
|
||||
// For convenience, it's returned as a SpanOptions object to allow it to be
|
||||
// passed directly to the wrapping helper methods in this package such as
|
||||
// SpanWrapFunc.
|
||||
func ContainerOptions(container moby.Container) SpanOptions {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("container.id", container.ID),
|
||||
attribute.String("container.image", container.Image),
|
||||
unixTimeAttr("container.created_at", container.Created),
|
||||
}
|
||||
|
||||
if len(container.Names) != 0 {
|
||||
attrs = append(attrs, attribute.String("container.name", strings.TrimPrefix(container.Names[0], "/")))
|
||||
}
|
||||
|
||||
return []trace.SpanStartEventOption{
|
||||
trace.WithAttributes(attrs...),
|
||||
}
|
||||
}
|
||||
|
||||
func keys[T any](m map[string]T) []string {
|
||||
out := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
out = append(out, k)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func timeAttr(key string, value time.Time) attribute.KeyValue {
|
||||
return attribute.String(key, value.Format(time.RFC3339))
|
||||
}
|
||||
|
||||
func unixTimeAttr(key string, value int64) attribute.KeyValue {
|
||||
return timeAttr(key, time.Unix(value, 0).UTC())
|
||||
}
|
||||
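SpanOptions lets the same attribute set be reused both when starting spans and when recording events. A sketch of attaching the project attributes to a manually started span (not part of the diff; Tracer is the package-level tracer declared in tracing.go further down):

package tracing

import (
	"context"

	"github.com/compose-spec/compose-go/types"
)

func exampleProjectSpan(ctx context.Context, proj *types.Project) {
	opts := ProjectOptions(proj) // project.* attributes as SpanOptions

	_, span := Tracer.Start(ctx, "up", opts.SpanStartOptions()...)
	defer span.End()

	// The same options can be replayed as event options on the span.
	span.AddEvent("project loaded", opts.EventOptions()...)
}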
44
internal/tracing/conn_unix.go
Normal file
@@ -0,0 +1,44 @@
|
||||
//go:build !windows
|
||||
|
||||
/*
|
||||
Copyright 2023 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package tracing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
|
||||
|
||||
func DialInMemory(ctx context.Context, addr string) (net.Conn, error) {
|
||||
if !strings.HasPrefix(addr, "unix://") {
|
||||
return nil, fmt.Errorf("not a Unix socket address: %s", addr)
|
||||
}
|
||||
addr = strings.TrimPrefix(addr, "unix://")
|
||||
|
||||
if len(addr) > maxUnixSocketPathSize {
|
||||
//goland:noinspection GoErrorStringFormat
|
||||
return nil, fmt.Errorf("Unix socket address is too long: %s", addr)
|
||||
}
|
||||
|
||||
var d net.Dialer
|
||||
return d.DialContext(ctx, "unix", addr)
|
||||
}
|
||||
35
internal/tracing/conn_windows.go
Normal file
@@ -0,0 +1,35 @@
|
||||
/*
|
||||
Copyright 2023 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package tracing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
)
|
||||
|
||||
func DialInMemory(ctx context.Context, addr string) (net.Conn, error) {
|
||||
if !strings.HasPrefix(addr, "npipe://") {
|
||||
return nil, fmt.Errorf("not a named pipe address: %s", addr)
|
||||
}
|
||||
addr = strings.TrimPrefix(addr, "npipe://")
|
||||
|
||||
return winio.DialPipeContext(ctx, addr)
|
||||
}
|
||||
125
internal/tracing/docker_context.go
Normal file
@@ -0,0 +1,125 @@
|
||||
/*
|
||||
Copyright 2023 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package tracing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
const otelConfigFieldName = "otel"
|
||||
|
||||
// traceClientFromDockerContext creates a gRPC OTLP client based on metadata
|
||||
// from the active Docker CLI context.
|
||||
func traceClientFromDockerContext(dockerCli command.Cli, otelEnv envMap) (otlptrace.Client, error) {
|
||||
// attempt to extract an OTEL config from the Docker context to enable
|
||||
// automatic integration with Docker Desktop;
|
||||
cfg, err := ConfigFromDockerContext(dockerCli.ContextStore(), dockerCli.CurrentContext())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading otel config from docker context metadata: %v", err)
|
||||
}
|
||||
|
||||
if cfg.Endpoint == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// HACK: unfortunately _all_ public OTEL initialization functions
|
||||
// implicitly read from the OS env, so temporarily unset them all and
|
||||
// restore afterwards
|
||||
defer func() {
|
||||
for k, v := range otelEnv {
|
||||
if err := os.Setenv(k, v); err != nil {
|
||||
panic(fmt.Errorf("restoring env for %q: %v", k, err))
|
||||
}
|
||||
}
|
||||
}()
|
||||
for k := range otelEnv {
|
||||
if err := os.Unsetenv(k); err != nil {
|
||||
return nil, fmt.Errorf("stashing env for %q: %v", k, err)
|
||||
}
|
||||
}
|
||||
|
||||
dialCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
defer cancel()
|
||||
conn, err := grpc.DialContext(
|
||||
dialCtx,
|
||||
cfg.Endpoint,
|
||||
grpc.WithContextDialer(DialInMemory),
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initializing otel connection from docker context metadata: %v", err)
|
||||
}
|
||||
|
||||
client := otlptracegrpc.NewClient(otlptracegrpc.WithGRPCConn(conn))
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// ConfigFromDockerContext inspects extra metadata included as part of the
|
||||
// specified Docker context to try and extract a valid OTLP client configuration.
|
||||
func ConfigFromDockerContext(st store.Store, name string) (OTLPConfig, error) {
|
||||
meta, err := st.GetMetadata(name)
|
||||
if err != nil {
|
||||
return OTLPConfig{}, err
|
||||
}
|
||||
|
||||
var otelCfg interface{}
|
||||
switch m := meta.Metadata.(type) {
|
||||
case command.DockerContext:
|
||||
otelCfg = m.AdditionalFields[otelConfigFieldName]
|
||||
case map[string]interface{}:
|
||||
otelCfg = m[otelConfigFieldName]
|
||||
}
|
||||
if otelCfg == nil {
|
||||
return OTLPConfig{}, nil
|
||||
}
|
||||
|
||||
otelMap, ok := otelCfg.(map[string]interface{})
|
||||
if !ok {
|
||||
return OTLPConfig{}, fmt.Errorf(
|
||||
"unexpected type for field %q: %T (expected: %T)",
|
||||
otelConfigFieldName,
|
||||
otelCfg,
|
||||
otelMap,
|
||||
)
|
||||
}
|
||||
|
||||
// keys from https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/
|
||||
cfg := OTLPConfig{
|
||||
Endpoint: valueOrDefault[string](otelMap, "OTEL_EXPORTER_OTLP_ENDPOINT"),
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// valueOrDefault returns the type-cast value at the specified key in the map
|
||||
// if present and the correct type; otherwise, it returns the default value for
|
||||
// T.
|
||||
func valueOrDefault[T any](m map[string]interface{}, key string) T {
|
||||
if v, ok := m[key].(T); ok {
|
||||
return v
|
||||
}
|
||||
return *new(T)
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2020 Docker Compose CLI authors
|
||||
Copyright 2023 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -14,22 +14,16 @@
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package compose
|
||||
package tracing
|
||||
|
||||
import (
|
||||
"github.com/moby/buildkit/util/tracing/detect"
|
||||
"go.opentelemetry.io/otel"
|
||||
|
||||
_ "github.com/moby/buildkit/util/tracing/detect/delegated" //nolint:blank-imports
|
||||
_ "github.com/moby/buildkit/util/tracing/env" //nolint:blank-imports
|
||||
)
|
||||
|
||||
func init() {
|
||||
detect.ServiceName = "compose"
|
||||
// do not log tracing errors to stdio
|
||||
otel.SetErrorHandler(skipErrors{})
|
||||
}
|
||||
|
||||
// skipErrors is a no-op otel.ErrorHandler.
|
||||
type skipErrors struct{}
|
||||
|
||||
func (skipErrors) Handle(err error) {}
|
||||
// Handle does nothing, ignoring any errors passed to it.
|
||||
func (skipErrors) Handle(_ error) {}
|
||||
|
||||
var _ otel.ErrorHandler = skipErrors{}
|
||||
50
internal/tracing/mux.go
Normal file
@@ -0,0 +1,50 @@
|
||||
/*
|
||||
Copyright 2023 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package tracing
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||
)
|
||||
|
||||
type MuxExporter struct {
|
||||
exporters []sdktrace.SpanExporter
|
||||
}
|
||||
|
||||
func (m MuxExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
|
||||
var eg multierror.Group
|
||||
for i := range m.exporters {
|
||||
exporter := m.exporters[i]
|
||||
eg.Go(func() error {
|
||||
return exporter.ExportSpans(ctx, spans)
|
||||
})
|
||||
}
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
func (m MuxExporter) Shutdown(ctx context.Context) error {
|
||||
var eg multierror.Group
|
||||
for i := range m.exporters {
|
||||
exporter := m.exporters[i]
|
||||
eg.Go(func() error {
|
||||
return exporter.Shutdown(ctx)
|
||||
})
|
||||
}
|
||||
return eg.Wait()
|
||||
}
|
||||
156
internal/tracing/tracing.go
Normal file
@@ -0,0 +1,156 @@
|
||||
/*
|
||||
Copyright 2020 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package tracing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/compose/v2/internal"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/moby/buildkit/util/tracing/detect"
|
||||
_ "github.com/moby/buildkit/util/tracing/detect/delegated" //nolint:blank-imports
|
||||
_ "github.com/moby/buildkit/util/tracing/env" //nolint:blank-imports
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||
"go.opentelemetry.io/otel/propagation"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
|
||||
)
|
||||
|
||||
func init() {
|
||||
detect.ServiceName = "compose"
|
||||
// do not log tracing errors to stdio
|
||||
otel.SetErrorHandler(skipErrors{})
|
||||
}
|
||||
|
||||
var Tracer = otel.Tracer("compose")
|
||||
|
||||
// OTLPConfig contains the necessary values to initialize an OTLP client
|
||||
// manually.
|
||||
//
|
||||
// This supports a minimal set of options based on what is necessary for
|
||||
// automatic OTEL configuration from Docker context metadata.
|
||||
type OTLPConfig struct {
|
||||
Endpoint string
|
||||
}
|
||||
|
||||
// ShutdownFunc flushes and stops an OTEL exporter.
|
||||
type ShutdownFunc func(ctx context.Context) error
|
||||
|
||||
// envMap is a convenience type for OS environment variables.
|
||||
type envMap map[string]string
|
||||
|
||||
func InitTracing(dockerCli command.Cli) (ShutdownFunc, error) {
|
||||
// set global propagator to tracecontext (the default is no-op).
|
||||
otel.SetTextMapPropagator(propagation.TraceContext{})
|
||||
|
||||
if v, _ := strconv.ParseBool(os.Getenv("COMPOSE_EXPERIMENTAL_OTEL")); !v {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return InitProvider(dockerCli)
|
||||
}
|
||||
|
||||
func InitProvider(dockerCli command.Cli) (ShutdownFunc, error) {
|
||||
ctx := context.Background()
|
||||
|
||||
var errs []error
|
||||
var exporters []sdktrace.SpanExporter
|
||||
|
||||
envClient, otelEnv := traceClientFromEnv()
|
||||
if envClient != nil {
|
||||
if envExporter, err := otlptrace.New(ctx, envClient); err != nil {
|
||||
errs = append(errs, err)
|
||||
} else if envExporter != nil {
|
||||
exporters = append(exporters, envExporter)
|
||||
}
|
||||
}
|
||||
|
||||
if dcClient, err := traceClientFromDockerContext(dockerCli, otelEnv); err != nil {
|
||||
errs = append(errs, err)
|
||||
} else if dcClient != nil {
|
||||
if dcExporter, err := otlptrace.New(ctx, dcClient); err != nil {
|
||||
errs = append(errs, err)
|
||||
} else if dcExporter != nil {
|
||||
exporters = append(exporters, dcExporter)
|
||||
}
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
return nil, errors.Join(errs...)
|
||||
}
|
||||
|
||||
res, err := resource.New(
|
||||
ctx,
|
||||
resource.WithAttributes(
|
||||
semconv.ServiceName("compose"),
|
||||
semconv.ServiceVersion(internal.Version),
|
||||
attribute.String("docker.context", dockerCli.CurrentContext()),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create resource: %v", err)
|
||||
}
|
||||
|
||||
muxExporter := MuxExporter{exporters: exporters}
|
||||
sp := sdktrace.NewSimpleSpanProcessor(muxExporter)
|
||||
tracerProvider := sdktrace.NewTracerProvider(
|
||||
sdktrace.WithSampler(sdktrace.AlwaysSample()),
|
||||
sdktrace.WithResource(res),
|
||||
sdktrace.WithSpanProcessor(sp),
|
||||
)
|
||||
otel.SetTracerProvider(tracerProvider)
|
||||
|
||||
// Shutdown will flush any remaining spans and shut down the exporter.
|
||||
return tracerProvider.Shutdown, nil
|
||||
}
|
||||
|
||||
// traceClientFromEnv creates a GRPC OTLP client based on OS environment
|
||||
// variables.
|
||||
//
|
||||
// https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/
|
||||
func traceClientFromEnv() (otlptrace.Client, envMap) {
|
||||
hasOtelEndpointInEnv := false
|
||||
otelEnv := make(map[string]string)
|
||||
for _, kv := range os.Environ() {
|
||||
k, v, ok := strings.Cut(kv, "=")
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(k, "OTEL_") {
|
||||
otelEnv[k] = v
|
||||
if strings.HasSuffix(k, "ENDPOINT") {
|
||||
hasOtelEndpointInEnv = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !hasOtelEndpointInEnv {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
client := otlptracegrpc.NewClient()
|
||||
return client, otelEnv
|
||||
}
|
||||
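InitTracing is a no-op unless COMPOSE_EXPERIMENTAL_OTEL is set and may return a nil ShutdownFunc, so callers need to guard both. A rough wiring sketch (not part of the diff; dockerCli is assumed to be an already-initialised command.Cli):

package tracing

import (
	"context"

	"github.com/docker/cli/cli/command"
	"github.com/sirupsen/logrus"
)

func exampleInit(ctx context.Context, dockerCli command.Cli) {
	shutdown, err := InitTracing(dockerCli)
	if err != nil {
		logrus.Debugf("tracing disabled: %v", err)
		return
	}
	if shutdown != nil {
		// Flush any buffered spans before returning.
		defer func() { _ = shutdown(ctx) }()
	}
	// ... run the actual command here ...
}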
60
internal/tracing/tracing_test.go
Normal file
@@ -0,0 +1,60 @@
|
||||
/*
|
||||
Copyright 2023 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package tracing_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/docker/compose/v2/internal/tracing"
|
||||
)
|
||||
|
||||
var testStoreCfg = store.NewConfig(
|
||||
func() interface{} {
|
||||
return &map[string]interface{}{}
|
||||
},
|
||||
)
|
||||
|
||||
func TestExtractOtelFromContext(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Requires filesystem access")
|
||||
}
|
||||
|
||||
dir := t.TempDir()
|
||||
|
||||
st := store.New(dir, testStoreCfg)
|
||||
err := st.CreateOrUpdate(store.Metadata{
|
||||
Name: "test",
|
||||
Metadata: command.DockerContext{
|
||||
Description: t.Name(),
|
||||
AdditionalFields: map[string]interface{}{
|
||||
"otel": map[string]interface{}{
|
||||
"OTEL_EXPORTER_OTLP_ENDPOINT": "localhost:1234",
|
||||
},
|
||||
},
|
||||
},
|
||||
Endpoints: make(map[string]interface{}),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg, err := tracing.ConfigFromDockerContext(st, "test")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "localhost:1234", cfg.Endpoint)
|
||||
}
|
||||
91
internal/tracing/wrap.go
Normal file
@@ -0,0 +1,91 @@
/*
Copyright 2020 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package tracing

import (
"context"

"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
"go.opentelemetry.io/otel/trace"
)

// SpanWrapFunc wraps a function that takes a context with a trace.Span, marking the status as codes.Error if the
// wrapped function returns an error.
//
// The context passed to the function is created from the span to ensure correct propagation.
//
// NOTE: This function is nearly identical to SpanWrapFuncForErrGroup, except the latter is designed specially for
// convenience with errgroup.Group due to its prevalence throughout the codebase. The code is duplicated to avoid
// adding even more levels of function wrapping/indirection.
func SpanWrapFunc(spanName string, opts SpanOptions, fn func(ctx context.Context) error) func(context.Context) error {
return func(ctx context.Context) error {
ctx, span := Tracer.Start(ctx, spanName, opts.SpanStartOptions()...)
defer span.End()

if err := fn(ctx); err != nil {
span.SetStatus(codes.Error, err.Error())
return err
}

span.SetStatus(codes.Ok, "")
return nil
}
}

// SpanWrapFuncForErrGroup wraps a function that takes a context with a trace.Span, marking the status as codes.Error
// if the wrapped function returns an error.
//
// The context passed to the function is created from the span to ensure correct propagation.
//
// NOTE: This function is nearly identical to SpanWrapFunc, except this function is designed specially for
// convenience with errgroup.Group due to its prevalence throughout the codebase. The code is duplicated to avoid
// adding even more levels of function wrapping/indirection.
func SpanWrapFuncForErrGroup(ctx context.Context, spanName string, opts SpanOptions, fn func(ctx context.Context) error) func() error {
return func() error {
ctx, span := Tracer.Start(ctx, spanName, opts.SpanStartOptions()...)
defer span.End()

if err := fn(ctx); err != nil {
span.SetStatus(codes.Error, err.Error())
return err
}

span.SetStatus(codes.Ok, "")
return nil
}
}

// EventWrapFuncForErrGroup invokes a function and records an event, optionally including the returned
// error as the "exception message" on the event.
//
// This is intended for lightweight usage to wrap errgroup.Group calls where a full span is not desired.
func EventWrapFuncForErrGroup(ctx context.Context, eventName string, opts SpanOptions, fn func(ctx context.Context) error) func() error {
return func() error {
span := trace.SpanFromContext(ctx)
eventOpts := opts.EventOptions()

err := fn(ctx)

if err != nil {
eventOpts = append(eventOpts, trace.WithAttributes(semconv.ExceptionMessage(err.Error())))
}
span.AddEvent(eventName, eventOpts...)

return err
}
}
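A hedged usage sketch for the errgroup-oriented wrapper above (not part of this change): it assumes an empty tracing.SpanOptions literal is acceptable, that the package-level Tracer is already initialised, and that the caller lives inside the compose module, since internal/tracing cannot be imported from outside it.

package example

import (
	"context"

	"golang.org/x/sync/errgroup"

	"github.com/docker/compose/v2/internal/tracing"
)

// startAll is illustrative only: one goroutine per service, each wrapped so a
// span named after the service is opened and marked as an error on failure.
func startAll(ctx context.Context, services []string, start func(context.Context, string) error) error {
	eg, ctx := errgroup.WithContext(ctx)
	for _, name := range services {
		name := name // capture the loop variable for the closure
		eg.Go(tracing.SpanWrapFuncForErrGroup(ctx, "service/start/"+name, tracing.SpanOptions{},
			func(ctx context.Context) error {
				return start(ctx, name)
			}))
	}
	return eg.Wait()
}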
@@ -1,5 +1,5 @@
/*
Copyright 2020 Docker Compose CLI authors
Copyright 2023 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -52,7 +52,7 @@ type Service interface {
Ps(ctx context.Context, projectName string, options PsOptions) ([]ContainerSummary, error)
// List executes the equivalent to a `docker stack ls`
List(ctx context.Context, options ListOptions) ([]Stack, error)
// Convert translate compose model into backend's native format
// Config executes the equivalent to a `compose config`
Config(ctx context.Context, project *types.Project, options ConfigOptions) ([]byte, error)
// Kill executes the equivalent to a `compose kill`
Kill(ctx context.Context, projectName string, options KillOptions) error
@@ -74,6 +74,8 @@ type Service interface {
Events(ctx context.Context, projectName string, options EventsOptions) error
// Port executes the equivalent to a `compose port`
Port(ctx context.Context, projectName string, service string, port uint16, options PortOptions) (string, int, error)
// Publish executes the equivalent to a `compose publish`
Publish(ctx context.Context, project *types.Project, repository string) error
// Images executes the equivalent of a `compose images`
Images(ctx context.Context, projectName string, options ImagesOptions) ([]ImageSummary, error)
// MaxConcurrency defines upper limit for concurrent operations against engine API
@@ -84,6 +86,15 @@ type Service interface {
Watch(ctx context.Context, project *types.Project, services []string, options WatchOptions) error
// Viz generates a graphviz graph of the project services
Viz(ctx context.Context, project *types.Project, options VizOptions) (string, error)
// Wait blocks until at least one of the services' container exits
Wait(ctx context.Context, projectName string, options WaitOptions) (int64, error)
}

type WaitOptions struct {
// Services passed in the command line to be waited
Services []string
// Executes a down when a container exits
DownProjectOnContainerExit bool
}

type VizOptions struct {
@@ -121,6 +132,8 @@ type BuildOptions struct {
SSHs []types.SSHKey
// Memory limit for the build container
Memory int64
// Builder name passed in the command line
Builder string
}

// Apply mutates project according to build options
@@ -134,7 +147,6 @@ func (o BuildOptions) Apply(project *types.Project) error {
if service.Build == nil {
continue
}
service.Image = GetImageNameOrDefault(service, project.Name)
if platform != "" {
if len(service.Build.Platforms) > 0 && !utils.StringContains(service.Build.Platforms, platform) {
return fmt.Errorf("service %q build.platforms does not support value set by DOCKER_DEFAULT_PLATFORM: %s", service.Name, platform)
@@ -232,6 +244,8 @@ type DownOptions struct {
Images string
// Volumes remove volumes, both declared in the `volumes` section and anonymous ones
Volumes bool
// Services passed in the command line to be stopped
Services []string
}

// ConfigOptions group options of the Config API
@@ -303,6 +317,8 @@ type RunOptions struct {
WorkingDir string
User string
Environment []string
CapAdd []string
CapDrop []string
Labels types.Labels
Privileged bool
UseNetworkAliases bool

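Taken together, the new Wait method and WaitOptions describe a blocking call that returns the exit code of the first container to stop, optionally tearing the project down afterwards. A hedged caller sketch follows; `backend`, the project name and the service names are invented for illustration.

package example

import (
	"context"
	"fmt"

	"github.com/docker/compose/v2/pkg/api"
)

// Illustrative only: backend is assumed to be any api.Service implementation.
func waitAndReport(ctx context.Context, backend api.Service) error {
	exitCode, err := backend.Wait(ctx, "my-project", api.WaitOptions{
		Services:                   []string{"db", "web"},
		DownProjectOnContainerExit: true, // also run the equivalent of `compose down` once a container exits
	})
	if err != nil {
		return err
	}
	fmt.Printf("first container exited with code %d\n", exitCode)
	return nil
}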
@@ -70,7 +70,7 @@ type execDetails struct {
}

// NewDryRunClient produces a DryRunClient
func NewDryRunClient(apiClient client.APIClient, cli *command.DockerCli) (*DryRunClient, error) {
func NewDryRunClient(apiClient client.APIClient, cli command.Cli) (*DryRunClient, error) {
b, err := builder.New(cli, builder.WithSkippedValidation())
if err != nil {
return nil, err
@@ -344,7 +344,7 @@ func (d *DryRunClient) ContainerCommit(ctx context.Context, container string, op
return d.apiClient.ContainerCommit(ctx, container, options)
}

func (d *DryRunClient) ContainerDiff(ctx context.Context, container string) ([]containerType.ContainerChangeResponseItem, error) {
func (d *DryRunClient) ContainerDiff(ctx context.Context, container string) ([]containerType.FilesystemChange, error) {
return d.apiClient.ContainerDiff(ctx, container)
}

@@ -616,7 +616,7 @@ func (d *DryRunClient) Info(ctx context.Context) (moby.Info, error) {
return d.apiClient.Info(ctx)
}

func (d *DryRunClient) RegistryLogin(ctx context.Context, auth moby.AuthConfig) (registry.AuthenticateOKBody, error) {
func (d *DryRunClient) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) {
return d.apiClient.RegistryLogin(ctx, auth)
}

@@ -636,8 +636,8 @@ func (d *DryRunClient) VolumeInspectWithRaw(ctx context.Context, volumeID string
return d.apiClient.VolumeInspectWithRaw(ctx, volumeID)
}

func (d *DryRunClient) VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) {
return d.apiClient.VolumeList(ctx, filter)
func (d *DryRunClient) VolumeList(ctx context.Context, opts volume.ListOptions) (volume.ListResponse, error) {
return d.apiClient.VolumeList(ctx, opts)
}

func (d *DryRunClient) VolumesPrune(ctx context.Context, pruneFilter filters.Args) (moby.VolumesPruneReport, error) {

@@ -54,6 +54,8 @@ type ServiceProxy struct {
MaxConcurrencyFn func(parallel int)
DryRunModeFn func(ctx context.Context, dryRun bool) (context.Context, error)
VizFn func(ctx context.Context, project *types.Project, options VizOptions) (string, error)
WaitFn func(ctx context.Context, projectName string, options WaitOptions) (int64, error)
PublishFn func(ctx context.Context, project *types.Project, repository string) error
interceptors []Interceptor
}

@@ -90,11 +92,13 @@ func (s *ServiceProxy) WithService(service Service) *ServiceProxy {
s.TopFn = service.Top
s.EventsFn = service.Events
s.PortFn = service.Port
s.PublishFn = service.Publish
s.ImagesFn = service.Images
s.WatchFn = service.Watch
s.MaxConcurrencyFn = service.MaxConcurrency
s.DryRunModeFn = service.DryRunMode
s.VizFn = service.Viz
s.WaitFn = service.Wait
return s
}

@@ -215,7 +219,7 @@ func (s *ServiceProxy) List(ctx context.Context, options ListOptions) ([]Stack,
return s.ListFn(ctx, options)
}

// Convert implements Service interface
// Config implements Service interface
func (s *ServiceProxy) Config(ctx context.Context, project *types.Project, options ConfigOptions) ([]byte, error) {
if s.ConfigFn == nil {
return nil, ErrNotImplemented
@@ -309,6 +313,10 @@ func (s *ServiceProxy) Port(ctx context.Context, projectName string, service str
return s.PortFn(ctx, projectName, service, port, options)
}

func (s *ServiceProxy) Publish(ctx context.Context, project *types.Project, repository string) error {
return s.PublishFn(ctx, project, repository)
}

// Images implements Service interface
func (s *ServiceProxy) Images(ctx context.Context, project string, options ImagesOptions) ([]ImageSummary, error) {
if s.ImagesFn == nil {
@@ -325,7 +333,7 @@ func (s *ServiceProxy) Watch(ctx context.Context, project *types.Project, servic
return s.WatchFn(ctx, project, services, options)
}

// Viz implements Viz interface
// Viz implements Service interface
func (s *ServiceProxy) Viz(ctx context.Context, project *types.Project, options VizOptions) (string, error) {
if s.VizFn == nil {
return "", ErrNotImplemented
@@ -333,6 +341,14 @@ func (s *ServiceProxy) Viz(ctx context.Context, project *types.Project, options
return s.VizFn(ctx, project, options)
}

// Wait implements Service interface
func (s *ServiceProxy) Wait(ctx context.Context, projectName string, options WaitOptions) (int64, error) {
if s.WaitFn == nil {
return 0, ErrNotImplemented
}
return s.WaitFn(ctx, projectName, options)
}

func (s *ServiceProxy) MaxConcurrency(i int) {
s.MaxConcurrencyFn(i)
}

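A hedged sketch of how the proxy above might be wired and consumed; NewServiceProxy, WithService, Wait and ErrNotImplemented are the pkg/api symbols shown or referenced in this diff, everything else is invented for illustration.

package example

import (
	"context"
	"errors"

	"github.com/docker/compose/v2/pkg/api"
)

// Illustrative only: route calls through the proxy; a *Fn field left nil
// (for example on a hand-built partial proxy) surfaces as ErrNotImplemented.
func waitThroughProxy(ctx context.Context, backend api.Service) (int64, error) {
	proxy := api.NewServiceProxy().WithService(backend)
	code, err := proxy.Wait(ctx, "my-project", api.WaitOptions{Services: []string{"db"}})
	if errors.Is(err, api.ErrNotImplemented) {
		// the selected backend does not support `compose wait`
		return 0, err
	}
	return code, err
}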
@@ -25,10 +25,16 @@ import (
"github.com/compose-spec/compose-go/types"
"github.com/containerd/containerd/platforms"
"github.com/docker/buildx/build"
_ "github.com/docker/buildx/driver/docker" // required to get default driver registered
"github.com/docker/buildx/builder"
"github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/buildflags"
xprogress "github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/docker/compose/v2/internal/tracing"
"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/progress"
"github.com/docker/compose/v2/pkg/utils"
"github.com/docker/docker/builder/remotecontext/urlutil"
bclient "github.com/moby/buildkit/client"
"github.com/moby/buildkit/session"
@@ -38,10 +44,10 @@ import (
"github.com/moby/buildkit/util/entitlements"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"

"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/progress"
"github.com/docker/compose/v2/pkg/utils"
// required to get default driver registered
_ "github.com/docker/buildx/driver/docker"
)

func (s *composeService) Build(ctx context.Context, project *types.Project, options api.BuildOptions) error {
@@ -55,25 +61,52 @@ func (s *composeService) build(ctx context.Context, project *types.Project, opti
}, s.stdinfo(), "Building")
}

func (s *composeService) build(ctx context.Context, project *types.Project, options api.BuildOptions) (map[string]string, error) { //nolint:gocyclo
args := options.Args.Resolve(envResolver(project.Environment))

//nolint:gocyclo
func (s *composeService) build(ctx context.Context, project *types.Project, options api.BuildOptions) (map[string]string, error) {
buildkitEnabled, err := s.dockerCli.BuildKitEnabled()
if err != nil {
return nil, err
}

// Progress needs its own context that lives longer than the
// build one otherwise it won't read all the messages from
// build and will lock
progressCtx, cancel := context.WithCancel(context.Background())
defer cancel()
w, err := xprogress.NewPrinter(progressCtx, s.stdout(), os.Stdout, options.Progress)
if err != nil {
return nil, err
// Initialize buildkit nodes
var (
b *builder.Builder
nodes []builder.Node
w *xprogress.Printer
)
if buildkitEnabled {
builderName := options.Builder
if builderName == "" {
builderName = os.Getenv("BUILDX_BUILDER")
}
b, err = builder.New(s.dockerCli, builder.WithName(builderName))
if err != nil {
return nil, err
}

nodes, err = b.LoadNodes(ctx, false)
if err != nil {
return nil, err
}

// Progress needs its own context that lives longer than the
// build one otherwise it won't read all the messages from
// build and will lock
progressCtx, cancel := context.WithCancel(context.Background())
defer cancel()

w, err = xprogress.NewPrinter(progressCtx, s.stdout(), os.Stdout, options.Progress,
xprogress.WithDesc(
fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver),
fmt.Sprintf("%s:%s", b.Driver, b.Name),
))

if err != nil {
return nil, err
}
}

builtIDs := make([]string, len(project.Services))
builtDigests := make([]string, len(project.Services))
err = InDependencyOrder(ctx, project, func(ctx context.Context, name string) error {
if len(options.Services) > 0 && !utils.Contains(options.Services, name) {
return nil
@@ -85,16 +118,11 @@ func (s *composeService) build(ctx context.Context, project *types.Project, opti
}

if !buildkitEnabled {
if service.Build.Args == nil {
service.Build.Args = args
} else {
service.Build.Args = service.Build.Args.OverrideBy(args)
}
id, err := s.doBuildClassic(ctx, service, options)
id, err := s.doBuildClassic(ctx, project, service, options)
if err != nil {
return err
}
builtIDs[idx] = id
builtDigests[idx] = id

if options.Push {
return s.push(ctx, project, api.PushOptions{})
@@ -110,13 +138,12 @@ func (s *composeService) build(ctx context.Context, project *types.Project, opti
if err != nil {
return err
}
buildOptions.BuildArgs = mergeArgs(buildOptions.BuildArgs, flatten(args))

ids, err := s.doBuildBuildkit(ctx, service.Name, buildOptions, w)
digest, err := s.doBuildBuildkit(ctx, service.Name, buildOptions, w, nodes)
if err != nil {
return err
}
builtIDs[idx] = ids[service.Name]
builtDigests[idx] = digest

return nil
}, func(traversal *graphTraversal) {
@@ -124,8 +151,10 @@ func (s *composeService) build(ctx context.Context, project *types.Project, opti
})

// enforce all build event get consumed
if errw := w.Wait(); errw != nil {
return nil, errw
if buildkitEnabled {
if errw := w.Wait(); errw != nil {
return nil, errw
}
}

if err != nil {
@@ -133,9 +162,10 @@ func (s *composeService) build(ctx context.Context, project *types.Project, opti
}

imageIDs := map[string]string{}
for i, d := range builtIDs {
if d != "" {
imageIDs[project.Services[i].Image] = d
for i, imageDigest := range builtDigests {
if imageDigest != "" {
imageRef := api.GetImageNameOrDefault(project.Services[i], project.Name)
imageIDs[imageRef] = imageDigest
}
}
return imageIDs, err
@@ -165,7 +195,11 @@ func (s *composeService) ensureImagesExists(ctx context.Context, project *types.
return err
}

err = s.pullRequiredImages(ctx, project, images, quietPull)
err = tracing.SpanWrapFunc("project/pull", tracing.ProjectOptions(project),
func(ctx context.Context) error {
return s.pullRequiredImages(ctx, project, images, quietPull)
},
)(ctx)
if err != nil {
return err
}
@@ -175,20 +209,32 @@ func (s *composeService) ensureImagesExists(ctx context.Context, project *types.
mode = xprogress.PrinterModeQuiet
}

err = s.prepareProjectForBuild(project, images)
if err != nil {
return err
}
builtImages, err := s.build(ctx, project, api.BuildOptions{
Progress: mode,
})
buildRequired, err := s.prepareProjectForBuild(project, images)
if err != nil {
return err
}

for name, digest := range builtImages {
images[name] = digest
if buildRequired {
err = tracing.SpanWrapFunc("project/build", tracing.ProjectOptions(project),
func(ctx context.Context) error {
builtImages, err := s.build(ctx, project, api.BuildOptions{
Progress: mode,
})
if err != nil {
return err
}

for name, digest := range builtImages {
images[name] = digest
}
return nil
},
)(ctx)
if err != nil {
return err
}
}

// set digest as com.docker.compose.image label so we can detect outdated containers
for i, service := range project.Services {
image := api.GetImageNameOrDefault(service, project.Name)
@@ -203,17 +249,19 @@ func (s *composeService) ensureImagesExists(ctx context.Context, project *types.
return nil
}

func (s *composeService) prepareProjectForBuild(project *types.Project, images map[string]string) error {
func (s *composeService) prepareProjectForBuild(project *types.Project, images map[string]string) (bool, error) {
buildRequired := false
err := api.BuildOptions{}.Apply(project)
if err != nil {
return err
return false, err
}
for i, service := range project.Services {
if service.Build == nil {
continue
}

_, localImagePresent := images[service.Image]
image := api.GetImageNameOrDefault(service, project.Name)
_, localImagePresent := images[image]
if localImagePresent && service.PullPolicy != types.PullPolicyBuild {
service.Build = nil
project.Services[i] = service
@@ -227,8 +275,9 @@ func (s *composeService) prepareProjectForBuild(project *types.Project, images m
service.Build.Platforms = []string{service.Platform}
}
project.Services[i] = service
buildRequired = true
}
return nil
return buildRequired, nil
}

func (s *composeService) getLocalImagesDigests(ctx context.Context, project *types.Project) (map[string]string, error) {
@@ -281,17 +330,38 @@ func (s *composeService) getLocalImagesDigests(ctx context.Context, project *typ
return images, nil
}

func (s *composeService) toBuildOptions(project *types.Project, service types.ServiceConfig, options api.BuildOptions) (build.Options, error) {
tags := []string{service.Image}
// resolveAndMergeBuildArgs returns the final set of build arguments to use for the service image build.
//
// First, args directly defined via `build.args` in YAML are considered.
// Then, any explicitly passed args in opts (e.g. via `--build-arg` on the CLI) are merged, overwriting any
// keys that already exist.
// Next, any keys without a value are resolved using the project environment.
//
// Finally, standard proxy variables based on the Docker client configuration are added, but will not overwrite
// any values if already present.
func resolveAndMergeBuildArgs(
dockerCli command.Cli,
project *types.Project,
service types.ServiceConfig,
opts api.BuildOptions,
) types.MappingWithEquals {
result := make(types.MappingWithEquals).
OverrideBy(service.Build.Args).
OverrideBy(opts.Args).
Resolve(envResolver(project.Environment))

buildArgs := flatten(service.Build.Args.Resolve(envResolver(project.Environment)))

for k, v := range storeutil.GetProxyConfig(s.dockerCli) {
if _, ok := buildArgs[k]; !ok {
buildArgs[k] = v
// proxy arguments do NOT override and should NOT have env resolution applied,
// so they're handled last
for k, v := range storeutil.GetProxyConfig(dockerCli) {
if _, ok := result[k]; !ok {
v := v
result[k] = &v
}
}
return result
}
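To make the precedence documented on resolveAndMergeBuildArgs concrete, a hedged example follows; it assumes it sits next to the code above in pkg/compose (so the imports shown in this diff apply), that service.Build is non-nil, and every key and value is invented for illustration.

// Illustrative only: shows the layering applied by resolveAndMergeBuildArgs.
func exampleBuildArgPrecedence(dockerCli command.Cli, project *types.Project, service types.ServiceConfig, opts api.BuildOptions) types.MappingWithEquals {
	yamlVersion, cliVersion := "1.0", "2.0"
	// compose.yaml build.args: VERSION=1.0 plus a valueless TOKEN key
	service.Build.Args = types.MappingWithEquals{"VERSION": &yamlVersion, "TOKEN": nil}
	// explicit CLI args, e.g. --build-arg VERSION=2.0
	opts.Args = types.MappingWithEquals{"VERSION": &cliVersion}
	// project environment, used to fill valueless keys
	project.Environment = types.Mapping{"TOKEN": "secret"}

	args := resolveAndMergeBuildArgs(dockerCli, project, service, opts)
	// Expected: VERSION resolves to "2.0" (explicit opts override YAML), TOKEN resolves
	// to "secret" (filled from the environment), and proxy variables from the Docker
	// client configuration are only added for keys that are not already present.
	return args
}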

func (s *composeService) toBuildOptions(project *types.Project, service types.ServiceConfig, options api.BuildOptions) (build.Options, error) {
plats, err := addPlatforms(project, service)
if err != nil {
return build.Options{}, err
@@ -325,6 +395,7 @@ func (s *composeService) toBuildOptions(project *types.Project, service types.Se
sessionConfig = append(sessionConfig, secretsProvider)
}

tags := []string{api.GetImageNameOrDefault(service, project.Name)}
if len(service.Build.Tags) > 0 {
tags = append(tags, service.Build.Tags...)
}
@@ -335,18 +406,19 @@ func (s *composeService) toBuildOptions(project *types.Project, service types.Se

imageLabels := getImageBuildLabels(project, service)

push := options.Push && service.Image != ""
exports := []bclient.ExportEntry{{
Type: "docker",
Attrs: map[string]string{
"load": "true",
"push": fmt.Sprint(options.Push),
"push": fmt.Sprint(push),
},
}}
if len(service.Build.Platforms) > 1 {
exports = []bclient.ExportEntry{{
Type: "image",
Attrs: map[string]string{
"push": fmt.Sprint(options.Push),
"push": fmt.Sprint(push),
},
}}
}
@@ -358,11 +430,11 @@ func (s *composeService) toBuildOptions(project *types.Project, service types.Se
DockerfilePath: dockerFilePath(service.Build.Context, service.Build.Dockerfile),
NamedContexts: toBuildContexts(service.Build.AdditionalContexts),
},
CacheFrom: cacheFrom,
CacheTo: cacheTo,
CacheFrom: pb.CreateCaches(cacheFrom),
CacheTo: pb.CreateCaches(cacheTo),
NoCache: service.Build.NoCache,
Pull: service.Build.Pull,
BuildArgs: buildArgs,
BuildArgs: flatten(resolveAndMergeBuildArgs(s.dockerCli, project, service, options)),
Tags: tags,
Target: service.Build.Target,
Exports: exports,
@@ -389,16 +461,6 @@ func flatten(in types.MappingWithEquals) types.Mapping {
return out
}

func mergeArgs(m ...types.Mapping) types.Mapping {
merged := types.Mapping{}
for _, mapping := range m {
for key, val := range mapping {
merged[key] = val
}
}
return merged
}

func dockerFilePath(ctxName string, dockerfile string) string {
if dockerfile == "" {
return ""
@@ -442,6 +504,9 @@ func addSecretsConfig(project *types.Project, service types.ServiceConfig) (sess
default:
return nil, fmt.Errorf("build.secrets only supports environment or file-based secrets: %q", secret.Source)
}
if secret.UID != "" || secret.GID != "" || secret.Mode != nil {
logrus.Warn("secrets `uid`, `gid` and `mode` are not supported by BuildKit, they will be ignored")
}
}
store, err := secretsprovider.NewStore(sources)
if err != nil {

@@ -20,44 +20,39 @@ import (
"context"
"crypto/sha1"
"fmt"
"path/filepath"

"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
_ "github.com/docker/buildx/driver/docker" //nolint:blank-imports
_ "github.com/docker/buildx/driver/docker-container" //nolint:blank-imports
_ "github.com/docker/buildx/driver/kubernetes" //nolint:blank-imports
_ "github.com/docker/buildx/driver/remote" //nolint:blank-imports
buildx "github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"

"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/dockerutil"
buildx "github.com/docker/buildx/util/progress"
"github.com/docker/compose/v2/pkg/progress"
"github.com/moby/buildkit/client"
)

func (s *composeService) doBuildBuildkit(ctx context.Context, service string, opts build.Options, p *buildx.Printer) (map[string]string, error) {
b, err := builder.New(s.dockerCli)
if err != nil {
return nil, err
}

nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return nil, err
}

var response map[string]*client.SolveResponse
func (s *composeService) doBuildBuildkit(ctx context.Context, service string, opts build.Options, p *buildx.Printer, nodes []builder.Node) (string, error) {
var (
response map[string]*client.SolveResponse
err error
)
if s.dryRun {
response = s.dryRunBuildResponse(ctx, service, opts)
} else {
response, err = build.Build(ctx, nodes, map[string]build.Options{service: opts}, dockerutil.NewClient(s.dockerCli), filepath.Dir(s.configFile().Filename), buildx.WithPrefix(p, service, true))
response, err = build.Build(ctx, nodes,
map[string]build.Options{service: opts},
dockerutil.NewClient(s.dockerCli),
confutil.ConfigDir(s.dockerCli),
buildx.WithPrefix(p, service, true))
if err != nil {
return nil, WrapCategorisedComposeError(err, BuildFailure)
return "", WrapCategorisedComposeError(err, BuildFailure)
}
}

imagesBuilt := map[string]string{}
for name, img := range response {
for _, img := range response {
if img == nil || len(img.ExporterResponse) == 0 {
continue
}
@@ -65,10 +60,10 @@ func (s *composeService) doBuildBuildkit(ctx context.Context, service string, op
if !ok {
continue
}
imagesBuilt[name] = digest
return digest, nil
}

return imagesBuilt, err
return "", fmt.Errorf("buildkit response is missing expected result for %s", service)
}

func (s composeService) dryRunBuildResponse(ctx context.Context, name string, options build.Options) map[string]*client.SolveResponse {

@@ -26,6 +26,10 @@ import (
"runtime"
"strings"

"github.com/docker/cli/cli/command"

"github.com/docker/docker/api/types/registry"

"github.com/compose-spec/compose-go/types"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command/image/build"
@@ -43,7 +47,7 @@ import (
)

//nolint:gocyclo
func (s *composeService) doBuildClassic(ctx context.Context, service types.ServiceConfig, options api.BuildOptions) (string, error) {
func (s *composeService) doBuildClassic(ctx context.Context, project *types.Project, service types.ServiceConfig, options api.BuildOptions) (string, error) {
var (
buildCtx io.ReadCloser
dockerfileCtx io.ReadCloser
@@ -153,12 +157,13 @@ func (s *composeService) doBuildClassic(ctx context.Context, service types.Servi
if err != nil {
return "", err
}
authConfigs := make(map[string]dockertypes.AuthConfig, len(creds))
authConfigs := make(map[string]registry.AuthConfig, len(creds))
for k, auth := range creds {
authConfigs[k] = dockertypes.AuthConfig(auth)
authConfigs[k] = registry.AuthConfig(auth)
}
buildOptions := imageBuildOptions(service.Build)
buildOptions.Tags = append(buildOptions.Tags, service.Image)
buildOptions := imageBuildOptions(s.dockerCli, project, service, options)
imageName := api.GetImageNameOrDefault(service, project.Name)
buildOptions.Tags = append(buildOptions.Tags, imageName)
buildOptions.Dockerfile = relDockerfile
buildOptions.AuthConfigs = authConfigs
buildOptions.Memory = options.Memory
@@ -212,14 +217,15 @@ func isLocalDir(c string) bool {
return err == nil
}

func imageBuildOptions(config *types.BuildConfig) dockertypes.ImageBuildOptions {
func imageBuildOptions(dockerCli command.Cli, project *types.Project, service types.ServiceConfig, options api.BuildOptions) dockertypes.ImageBuildOptions {
config := service.Build
return dockertypes.ImageBuildOptions{
Version: dockertypes.BuilderV1,
Tags: config.Tags,
NoCache: config.NoCache,
Remove: true,
PullParent: config.Pull,
BuildArgs: config.Args,
BuildArgs: resolveAndMergeBuildArgs(dockerCli, project, service, options),
Labels: config.Labels,
NetworkMode: config.Network,
ExtraHosts: config.ExtraHosts.AsList(),

@@ -44,7 +44,7 @@ func TestPrepareProjectForBuild(t *testing.T) {
}

s := &composeService{}
err := s.prepareProjectForBuild(&project, nil)
_, err := s.prepareProjectForBuild(&project, nil)
assert.NilError(t, err)
assert.DeepEqual(t, project.Services[0].Build.Platforms, types.StringList{"alice/32"})
})
@@ -70,7 +70,7 @@ func TestPrepareProjectForBuild(t *testing.T) {
}

s := &composeService{}
err := s.prepareProjectForBuild(&project, nil)
_, err := s.prepareProjectForBuild(&project, nil)
assert.NilError(t, err)
assert.DeepEqual(t, project.Services[0].Build.Platforms, types.StringList{"linux/amd64"})
})
@@ -89,7 +89,7 @@ func TestPrepareProjectForBuild(t *testing.T) {
}

s := &composeService{}
err := s.prepareProjectForBuild(&project, map[string]string{"foo": "exists"})
_, err := s.prepareProjectForBuild(&project, map[string]string{"foo": "exists"})
assert.NilError(t, err)
assert.Check(t, project.Services[0].Build == nil)
})
@@ -115,7 +115,7 @@ func TestPrepareProjectForBuild(t *testing.T) {
}

s := &composeService{}
err := s.prepareProjectForBuild(&project, nil)
_, err := s.prepareProjectForBuild(&project, nil)
assert.Check(t, err != nil)
})
}

Some files were not shown because too many files have changed in this diff.