Mirror of https://github.com/docker/compose.git, synced 2026-02-09 18:19:26 +08:00
Compare commits
19 Commits
| SHA1 |
|---|
| 7abaa06617 |
| 3b0e8f538e |
| af376603c3 |
| 7f8814f4c5 |
| af0029afe1 |
| b76feb66e1 |
| 9dc7f1e70c |
| 03205124fe |
| 8b769bad6b |
| 671507a8b3 |
| 56ab28aef3 |
| e7d870a106 |
| d5bb3387ca |
| d91fc63813 |
| c51b1fea29 |
| fa7549a851 |
| a061c17736 |
| c5e7d9158c |
| 3783b8ada3 |
.github/workflows/ci.yml (vendored, 153 lines changed)
@@ -22,24 +22,6 @@ permissions:
contents: read # to fetch code (actions/checkout)

jobs:
prepare:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.platforms.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Create matrix
id: platforms
run: |
echo matrix=$(docker buildx bake binary-cross --print | jq -cr '.target."binary-cross".platforms') >> $GITHUB_OUTPUT
-
name: Show matrix
run: |
echo ${{ steps.platforms.outputs.matrix }}

validate:
runs-on: ubuntu-latest
strategy:
@@ -63,63 +45,88 @@ jobs:
make ${{ matrix.target }}

binary:
uses: docker/github-builder/.github/workflows/bake.yml@v1
permissions:
contents: read # same as global permission
id-token: write # for signing attestation(s) with GitHub OIDC Token
with:
runner: amd64
artifact-name: compose
artifact-upload: true
cache: true
cache-scope: binary
target: release
output: local
sbom: true
sign: ${{ github.event_name != 'pull_request' }}

binary-finalize:
runs-on: ubuntu-latest
needs:
- prepare
strategy:
fail-fast: false
matrix:
platform: ${{ fromJson(needs.prepare.outputs.matrix) }}
- binary
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Prepare
run: |
platform=${MATRIX_PLATFORM}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
env:
MATRIX_PLATFORM: ${{ matrix.platform }}
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
name: Build
uses: docker/bake-action@v6
name: Download artifacts
uses: actions/download-artifact@v7
with:
source: .
targets: release
provenance: mode=max
sbom: true
set: |
*.platform=${{ matrix.platform }}
*.cache-from=type=gha,scope=binary-${{ env.PLATFORM_PAIR }}
*.cache-to=type=gha,scope=binary-${{ env.PLATFORM_PAIR }},mode=max
path: /tmp/compose-output
name: ${{ needs.binary.outputs.artifact-name }}
-
name: Rename provenance and sbom
run: |
for pdir in /tmp/compose-output/*/; do
(
cd "$pdir"
binname=$(find . -name 'docker-compose-*')
filename=$(basename "${binname%.exe}")
mv "provenance.json" "${filename}.provenance.json"
mv "sbom-binary.spdx.json" "${filename}.sbom.json"
find . -name 'sbom*.json' -exec rm {} \;
if [ -f "provenance.sigstore.json" ]; then
mv "provenance.sigstore.json" "${filename}.sigstore.json"
fi
)
done
mkdir -p "./bin/release"
mv /tmp/compose-output/**/* "./bin/release/"
-
name: Create checksum file
working-directory: ./bin/release
run: |
binname=$(find . -name 'docker-compose-*')
filename=$(basename "$binname" | sed -E 's/\.exe$//')
mv "provenance.json" "${filename}.provenance.json"
mv "sbom-binary.spdx.json" "${filename}.sbom.json"
find . -name 'sbom*.json' -exec rm {} \;
-
name: List artifacts
run: |
tree -nh ./bin/release
find . -type f -print0 | sort -z | xargs -r0 shasum -a 256 -b | sed 's# \*\./# *#' > $RUNNER_TEMP/checksums.txt
shasum -a 256 -U -c $RUNNER_TEMP/checksums.txt
mv $RUNNER_TEMP/checksums.txt .
cat checksums.txt | while read sum file; do
if [[ "${file#\*}" == docker-compose-* && "${file#\*}" != *.provenance.json && "${file#\*}" != *.sbom.json && "${file#\*}" != *.sigstore.json ]]; then
echo "$sum $file" > ${file#\*}.sha256
fi
done
-
name: Upload artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v6
with:
name: compose-${{ env.PLATFORM_PAIR }}
path: ./bin/release
name: release
path: ./bin/release/*
if-no-files-found: error

bin-image-test:
if: github.event_name == 'pull_request'
uses: docker/github-builder/.github/workflows/bake.yml@v1
with:
runner: amd64
target: image-cross
cache: true
cache-scope: bin-image-test
output: image
push: false
sbom: true
set-meta-labels: true
meta-images: |
compose-bin
meta-tags: |
type=ref,event=pr
meta-bake-target: meta-helper

test:
runs-on: ubuntu-latest
steps:
@@ -147,6 +154,7 @@ jobs:
with:
paths: bin/coverage/unit/report.xml
if: always()

e2e:
runs-on: ubuntu-latest
name: e2e (${{ matrix.mode }}, ${{ matrix.channel }})
@@ -254,6 +262,7 @@ jobs:
with:
paths: /tmp/report/report.xml
if: always()

coverage:
runs-on: ubuntu-latest
needs:
@@ -290,40 +299,26 @@ jobs:
path: ./coverage.txt
if-no-files-found: error
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
uses: codecov/codecov-action@v5
with:
files: ./coverage.txt

release:
permissions:
contents: write # to create a release (ncipollo/release-action)

runs-on: ubuntu-latest
needs:
- binary
- binary-finalize
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Download artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v7
with:
pattern: compose-*
path: ./bin/release
merge-multiple: true
-
name: Create checksums
working-directory: ./bin/release
run: |
find . -type f -print0 | sort -z | xargs -r0 shasum -a 256 -b | sed 's# \*\./# *#' > $RUNNER_TEMP/checksums.txt
shasum -a 256 -U -c $RUNNER_TEMP/checksums.txt
mv $RUNNER_TEMP/checksums.txt .
cat checksums.txt | while read sum file; do
if [[ "${file#\*}" == docker-compose-* && "${file#\*}" != *.provenance.json && "${file#\*}" != *.sbom.json ]]; then
echo "$sum $file" > ${file#\*}.sha256
fi
done
name: release
-
name: List artifacts
run: |
.github/workflows/merge.yml (vendored, 86 lines changed)
@@ -74,63 +74,41 @@ jobs:
run: |
make e2e-compose-standalone

bin-image:
runs-on: ubuntu-22.04
bin-image-prepare:
runs-on: ubuntu-24.04
outputs:
digest: ${{ fromJSON(steps.bake.outputs.metadata).image-cross['containerimage.digest'] }}
repo-slug: ${{ env.REPO_SLUG }}
steps:
-
name: Free disk space
uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
with:
android: true
dotnet: true
haskell: true
large-packages: true
swap-storage: true
-
name: Checkout
uses: actions/checkout@v4
-
name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
# FIXME: can't use env object in reusable workflow inputs: https://github.com/orgs/community/discussions/26671
- run: echo "Exposing env vars for reusable workflow"

bin-image:
uses: docker/github-builder/.github/workflows/bake.yml@v1
needs:
- bin-image-prepare
permissions:
contents: read # same as global permission
id-token: write # for signing attestation(s) with GitHub OIDC Token
with:
runner: amd64
target: image-cross
cache: true
cache-scope: bin-image
output: image
push: ${{ github.event_name != 'pull_request' }}
sbom: true
set-meta-labels: true
meta-images: |
${{ needs.bin-image-prepare.outputs.repo-slug }}
meta-tags: |
type=ref,event=tag
type=edge
meta-bake-target: meta-helper
secrets:
registry-auths: |
- registry: docker.io
username: ${{ secrets.DOCKERPUBLICBOT_USERNAME }}
password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }}
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
${{ env.REPO_SLUG }}
tags: |
type=ref,event=tag
type=edge
bake-target: meta-helper
-
name: Build and push image
uses: docker/bake-action@v6
id: bake
with:
source: .
files: |
./docker-bake.hcl
${{ steps.meta.outputs.bake-file }}
targets: image-cross
push: ${{ github.event_name != 'pull_request' }}
sbom: true
provenance: mode=max
set: |
*.cache-from=type=gha,scope=bin-image
*.cache-to=type=gha,scope=bin-image,mode=max

desktop-edge-test:
runs-on: ubuntu-latest
@@ -158,6 +136,6 @@ jobs:
workflow_id: 'compose-edge-integration.yml',
ref: 'main',
inputs: {
"image-tag": "${{ needs.bin-image.outputs.digest }}"
"image-tag": "${{ env.REPO_SLUG }}:edge"
}
})

@@ -1 +1 @@
1.24.11
1.25.7

@@ -15,9 +15,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

ARG GO_VERSION=1.24.11
ARG GO_VERSION=1.25.7
ARG XX_VERSION=1.9.0
ARG GOLANGCI_LINT_VERSION=v2.6.2
ARG GOLANGCI_LINT_VERSION=v2.8.0
ARG ADDLICENSE_VERSION=v1.0.0

ARG BUILD_TAGS="e2e"
Makefile (4 lines changed)
@@ -29,10 +29,6 @@ ifeq ($(DETECTED_OS),Windows)
BINARY_EXT=.exe
endif

ifeq ($(DETECTED_OS),Darwin)
GO_BUILDTAGS += fsnotify
endif

BUILD_FLAGS?=
TEST_FLAGS?=
E2E_TEST?=
@@ -477,7 +477,7 @@ func RootCommand(dockerCli command.Cli, backendOptions *BackendOptions) *cobra.C
logrus.SetLevel(logrus.TraceLevel)
}

err := setEnvWithDotEnv(opts)
err := setEnvWithDotEnv(opts, dockerCli)
if err != nil {
return err
}
@@ -505,6 +505,7 @@ func RootCommand(dockerCli command.Cli, backendOptions *BackendOptions) *cobra.C
display.Mode = display.ModeTTY
}

detached, _ := cmd.Flags().GetBool("detach")
var ep api.EventProcessor
switch opts.Progress {
case "", display.ModeAuto:
@@ -513,7 +514,7 @@ func RootCommand(dockerCli command.Cli, backendOptions *BackendOptions) *cobra.C
display.Mode = display.ModePlain
ep = display.Plain(dockerCli.Err())
case dockerCli.Out().IsTerminal():
ep = display.Full(dockerCli.Err(), stdinfo(dockerCli))
ep = display.Full(dockerCli.Err(), stdinfo(dockerCli), detached)
default:
ep = display.Plain(dockerCli.Err())
}
@@ -522,7 +523,7 @@ func RootCommand(dockerCli command.Cli, backendOptions *BackendOptions) *cobra.C
return fmt.Errorf("can't use --progress tty while ANSI support is disabled")
}
display.Mode = display.ModeTTY
ep = display.Full(dockerCli.Err(), stdinfo(dockerCli))
ep = display.Full(dockerCli.Err(), stdinfo(dockerCli), detached)

case display.ModePlain:
if ansi == "always" {
@@ -676,7 +677,21 @@ func stdinfo(dockerCli command.Cli) io.Writer {
return dockerCli.Err()
}

func setEnvWithDotEnv(opts ProjectOptions) error {
func setEnvWithDotEnv(opts ProjectOptions, dockerCli command.Cli) error {
// Check if we're using a remote config (OCI or Git)
// If so, skip env loading as remote loaders haven't been initialized yet
// and trying to process the path would fail
remoteLoaders := opts.remoteLoaders(dockerCli)
for _, path := range opts.ConfigPaths {
for _, loader := range remoteLoaders {
if loader.Accept(path) {
// Remote config - skip env loading for now
// It will be loaded later when the project is fully initialized
return nil
}
}
}

options, err := cli.NewProjectOptions(opts.ConfigPaths,
cli.WithWorkingDirectory(opts.ProjectDir),
cli.WithOsEnv,
cmd/compose/compose_oci_test.go (new file, 76 lines)
@@ -0,0 +1,76 @@
/*
Copyright 2020 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package compose

import (
"testing"

"go.uber.org/mock/gomock"
"gotest.tools/v3/assert"

"github.com/docker/compose/v5/pkg/mocks"
)

func TestSetEnvWithDotEnv_WithOCIArtifact(t *testing.T) {
// Test that setEnvWithDotEnv doesn't fail when using OCI artifacts
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cli := mocks.NewMockCli(ctrl)

opts := ProjectOptions{
ConfigPaths: []string{"oci://docker.io/dockersamples/welcome-to-docker"},
ProjectDir: "",
EnvFiles: []string{},
}

err := setEnvWithDotEnv(opts, cli)
assert.NilError(t, err, "setEnvWithDotEnv should not fail with OCI artifact path")
}

func TestSetEnvWithDotEnv_WithGitRemote(t *testing.T) {
// Test that setEnvWithDotEnv doesn't fail when using Git remotes
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cli := mocks.NewMockCli(ctrl)

opts := ProjectOptions{
ConfigPaths: []string{"https://github.com/docker/compose.git"},
ProjectDir: "",
EnvFiles: []string{},
}

err := setEnvWithDotEnv(opts, cli)
assert.NilError(t, err, "setEnvWithDotEnv should not fail with Git remote path")
}

func TestSetEnvWithDotEnv_WithLocalPath(t *testing.T) {
// Test that setEnvWithDotEnv still works with local paths
// This will fail if the file doesn't exist, but it should not panic
// or produce invalid paths
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cli := mocks.NewMockCli(ctrl)

opts := ProjectOptions{
ConfigPaths: []string{"compose.yaml"},
ProjectDir: "",
EnvFiles: []string{},
}

// This may error if files don't exist, but should not panic
_ = setEnvWithDotEnv(opts, cli)
}
@@ -198,12 +198,11 @@ func (opts createOptions) Apply(project *types.Project) error {

func applyScaleOpts(project *types.Project, opts []string) error {
for _, scale := range opts {
split := strings.Split(scale, "=")
if len(split) != 2 {
name, val, ok := strings.Cut(scale, "=")
if !ok || val == "" {
return fmt.Errorf("invalid --scale option %q. Should be SERVICE=NUM", scale)
}
name := split[0]
replicas, err := strconv.Atoi(split[1])
replicas, err := strconv.Atoi(val)
if err != nil {
return err
}

@@ -213,9 +213,9 @@ func extractEnvCLIDefined(cmdEnvs []string) map[string]string {
// Parse command-line environment variables
cmdEnvMap := make(map[string]string)
for _, env := range cmdEnvs {
parts := strings.SplitN(env, "=", 2)
if len(parts) == 2 {
cmdEnvMap[parts[0]] = parts[1]
key, val, ok := strings.Cut(env, "=")
if ok {
cmdEnvMap[key] = val
}
}
return cmdEnvMap

@@ -50,19 +50,19 @@ func (p *psOptions) parseFilter() error {
if p.Filter == "" {
return nil
}
parts := strings.SplitN(p.Filter, "=", 2)
if len(parts) != 2 {
key, val, ok := strings.Cut(p.Filter, "=")
if !ok {
return errors.New("arguments to --filter should be in form KEY=VAL")
}
switch parts[0] {
switch key {
case "status":
p.Status = append(p.Status, parts[1])
p.Status = append(p.Status, val)
return nil
case "source":
return api.ErrNotImplemented
default:
return fmt.Errorf("unknown filter %s", parts[0])
return fmt.Errorf("unknown filter %s", key)
}
return nil
}

func psCommand(p *ProjectOptions, dockerCli command.Cli, backendOptions *BackendOptions) *cobra.Command {

@@ -284,11 +284,11 @@ func runRun(ctx context.Context, backend api.Compose, project *types.Project, op

labels := types.Labels{}
for _, s := range options.labels {
parts := strings.SplitN(s, "=", 2)
if len(parts) != 2 {
key, val, ok := strings.Cut(s, "=")
if !ok {
return fmt.Errorf("label must be set as KEY=VALUE")
}
labels[parts[0]] = parts[1]
labels[key] = val
}

var buildForRun *api.BuildOptions
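The hunks above (and several later ones touching the cp, tmpfs and restart-policy parsing) all swap strings.Split/strings.SplitN for strings.Cut. A minimal standalone sketch of that pattern, not taken from the Compose sources, showing why the three-value form avoids the slice and the length check for KEY=VALUE parsing:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Old style: SplitN allocates a slice and needs an explicit length check.
	parts := strings.SplitN("FOO=bar=baz", "=", 2)
	if len(parts) == 2 {
		fmt.Println(parts[0], parts[1]) // FOO bar=baz
	}

	// New style: Cut splits around the first separator and reports whether it was found.
	key, val, ok := strings.Cut("FOO=bar=baz", "=")
	if ok {
		fmt.Println(key, val) // FOO bar=baz
	}
}
```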
@@ -37,13 +37,14 @@ import (

// Full creates an EventProcessor that render advanced UI within a terminal.
// On Start, TUI lists task with a progress timer
func Full(out io.Writer, info io.Writer) api.EventProcessor {
func Full(out io.Writer, info io.Writer, detached bool) api.EventProcessor {
return &ttyWriter{
out: out,
info: info,
tasks: map[string]*task{},
done: make(chan bool),
mtx: &sync.Mutex{},
out: out,
info: info,
tasks: map[string]*task{},
done: make(chan bool),
mtx: &sync.Mutex{},
detached: detached,
}
}

@@ -60,6 +61,7 @@ type ttyWriter struct {
ticker *time.Ticker
suspended bool
info io.Writer
detached bool
}

type task struct {
@@ -190,7 +192,7 @@ func (w *ttyWriter) On(events ...api.Resource) {
continue
}

if w.operation != "start" && (e.Text == api.StatusStarted || e.Text == api.StatusStarting) {
if w.operation != "start" && (e.Text == api.StatusStarted || e.Text == api.StatusStarting) && !w.detached {
// skip those events to avoid mix with container logs
continue
}
go.mod (4 lines changed)
@@ -52,9 +52,9 @@ require (
go.opentelemetry.io/otel/trace v1.38.0
go.uber.org/goleak v1.3.0
go.uber.org/mock v0.6.0
go.yaml.in/yaml/v4 v4.0.0-rc.3
go.yaml.in/yaml/v4 v4.0.0-rc.4
golang.org/x/sync v0.19.0
golang.org/x/sys v0.40.0
golang.org/x/sys v0.41.0
google.golang.org/grpc v1.78.0
gotest.tools/v3 v3.5.2
tags.cncf.io/container-device-interface v1.1.0
go.sum (8 lines changed)
@@ -448,8 +448,8 @@ go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
go.yaml.in/yaml/v4 v4.0.0-rc.3 h1:3h1fjsh1CTAPjW7q/EMe+C8shx5d8ctzZTrLcs/j8Go=
go.yaml.in/yaml/v4 v4.0.0-rc.3/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0=
go.yaml.in/yaml/v4 v4.0.0-rc.4 h1:UP4+v6fFrBIb1l934bDl//mmnoIZEDK0idg1+AIvX5U=
go.yaml.in/yaml/v4 v4.0.0-rc.4/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -504,8 +504,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -158,11 +158,37 @@ func (s *composeService) ensureImagesExists(ctx context.Context, project *types.
if ok {
service.CustomLabels.Add(api.ImageDigestLabel, img.ID)
}

resolveImageVolumes(&service, images, project.Name)

project.Services[name] = service
}
return nil
}

func resolveImageVolumes(service *types.ServiceConfig, images map[string]api.ImageSummary, projectName string) {
for i, vol := range service.Volumes {
if vol.Type == types.VolumeTypeImage {
imgName := vol.Source
if _, ok := images[vol.Source]; !ok {
// check if source is another service in the project
imgName = api.GetImageNameOrDefault(types.ServiceConfig{Name: vol.Source}, projectName)
// If we still can't find it, it might be an external image that wasn't pulled yet or doesn't exist
if _, ok := images[imgName]; !ok {
continue
}
}
if img, ok := images[imgName]; ok {
// Use Image ID directly as source.
// Using name@digest format (via reference.WithDigest) fails for local-only images
// that don't have RepoDigests (e.g. built locally in CI).
// Image ID (sha256:...) is always valid and ensures ServiceHash changes on rebuild.
service.Volumes[i].Source = img.ID
}
}
}
}

func (s *composeService) getLocalImagesDigests(ctx context.Context, project *types.Project) (map[string]api.ImageSummary, error) {
imageNames := utils.Set[string]{}
for _, s := range project.Services {
@@ -774,11 +774,10 @@ func (s *composeService) getLinks(ctx context.Context, projectName string, servi
}

for _, rawLink := range service.Links {
linkSplit := strings.Split(rawLink, ":")
linkServiceName := linkSplit[0]
linkName := linkServiceName
if len(linkSplit) == 2 {
linkName = linkSplit[1] // linkName if informed like in: "serviceName:linkName"
// linkName if informed like in: "serviceName[:linkName]"
linkServiceName, linkName, ok := strings.Cut(rawLink, ":")
if !ok {
linkName = linkServiceName
}
cnts, err := getServiceContainers(linkServiceName)
if err != nil {
@@ -810,11 +809,9 @@ func (s *composeService) getLinks(ctx context.Context, projectName string, servi
}

for _, rawExtLink := range service.ExternalLinks {
extLinkSplit := strings.Split(rawExtLink, ":")
externalLink := extLinkSplit[0]
linkName := externalLink
if len(extLinkSplit) == 2 {
linkName = extLinkSplit[1]
externalLink, linkName, ok := strings.Cut(rawExtLink, ":")
if !ok {
linkName = externalLink
}
links = append(links, format(externalLink, linkName))
}
@@ -317,15 +317,15 @@ func splitCpArg(arg string) (ctr, path string) {
return "", arg
}

parts := strings.SplitN(arg, ":", 2)
ctr, path, ok := strings.Cut(arg, ":")

if len(parts) == 1 || strings.HasPrefix(parts[0], ".") {
if !ok || strings.HasPrefix(ctr, ".") {
// Either there's no `:` in the arg
// OR it's an explicit local relative path like `./file:name.txt`.
return "", arg
}

return parts[0], parts[1]
return ctr, path
}

func resolveLocalPath(localPath string) (absPath string, err error) {
@@ -241,11 +241,8 @@ func (s *composeService) getCreateConfigs(ctx context.Context,
} // VOLUMES/MOUNTS/FILESYSTEMS
tmpfs := map[string]string{}
for _, t := range service.Tmpfs {
if arr := strings.SplitN(t, ":", 2); len(arr) > 1 {
tmpfs[arr[0]] = arr[1]
} else {
tmpfs[arr[0]] = ""
}
k, v, _ := strings.Cut(t, ":")
tmpfs[k] = v
}
binds, mounts, err := s.buildContainerVolumes(ctx, *p, service, inherit)
if err != nil {
@@ -563,13 +560,13 @@ func defaultNetworkSettings(project *types.Project,
func getRestartPolicy(service types.ServiceConfig) container.RestartPolicy {
var restart container.RestartPolicy
if service.Restart != "" {
split := strings.Split(service.Restart, ":")
name, num, ok := strings.Cut(service.Restart, ":")
var attempts int
if len(split) > 1 {
attempts, _ = strconv.Atoi(split[1])
if ok {
attempts, _ = strconv.Atoi(num)
}
restart = container.RestartPolicy{
Name: mapRestartPolicyCondition(split[0]),
Name: mapRestartPolicyCondition(name),
MaximumRetryCount: attempts,
}
}
@@ -147,7 +147,7 @@ func (c *monitor) Start(ctx context.Context) error {
return err
}

if inspect.State != nil && inspect.State.Restarting || inspect.State.Running {
if inspect.State != nil && (inspect.State.Restarting || inspect.State.Running) {
// State.Restarting is set by engine when container is configured to restart on exit
// on ContainerRestart it doesn't (see https://github.com/moby/moby/issues/45538)
// container state still is reported as "running"
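The one-line change above is a precedence fix: in Go, && binds tighter than ||, so the old condition parsed as (State != nil && State.Restarting) || State.Running and could dereference a nil State. A small self-contained illustration of the difference, not Compose code:

```go
package main

import "fmt"

type state struct{ Restarting, Running bool }

func main() {
	var s *state // nil, e.g. no state reported for the container

	// Parenthesized form: the nil check guards both field accesses.
	fmt.Println(s != nil && (s.Restarting || s.Running)) // false, no panic

	// Without parentheses this would parse as
	//   (s != nil && s.Restarting) || s.Running
	// and s.Running would still be evaluated (and panic) when s is nil:
	// fmt.Println(s != nil && s.Restarting || s.Running)
}
```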
@@ -29,8 +29,8 @@ import (

"github.com/compose-spec/compose-go/v2/types"
"github.com/distribution/reference"
"github.com/docker/buildx/driver"
"github.com/docker/cli/cli/config/configfile"
clitypes "github.com/docker/cli/cli/config/types"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/jsonmessage"
@@ -260,7 +260,11 @@ func ImageDigestResolver(ctx context.Context, file *configfile.ConfigFile, apiCl
}
}

func encodedAuth(ref reference.Named, configFile driver.Auth) (string, error) {
type authProvider interface {
GetAuthConfig(registryHostname string) (clitypes.AuthConfig, error)
}

func encodedAuth(ref reference.Named, configFile authProvider) (string, error) {
authConfig, err := configFile.GetAuthConfig(registry.GetAuthConfigKey(reference.Domain(ref)))
if err != nil {
return "", err
pkg/e2e/fixtures/image-volume-recreate/Dockerfile (new file, 21 lines)
@@ -0,0 +1,21 @@
# syntax=docker/dockerfile:1
#
# Copyright 2020 Docker Compose CLI authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

FROM alpine
WORKDIR /app
ARG CONTENT=initial
RUN echo "$CONTENT" > /app/content.txt
pkg/e2e/fixtures/image-volume-recreate/compose.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
services:
source:
build:
context: .
dockerfile: Dockerfile
image: image-volume-source

consumer:
image: alpine
depends_on:
- source
command: ["cat", "/data/content.txt"]
volumes:
- type: image
source: image-volume-source
target: /data
image:
subpath: app
@@ -212,11 +212,9 @@ func TestNetworkRecreate(t *testing.T) {
res := c.RunDockerComposeCmd(t, "-f", "./fixtures/network-recreate/compose.yaml", "--project-name", projectName, "--progress=plain", "up", "-d")
err := res.Stderr()
fmt.Println(err)
res.Assert(t, icmd.Expected{Err: `
Container network_recreate-web-1 Stopped
Network network_recreate_test Removed
Network network_recreate_test Creating
Network network_recreate_test Created
Container network_recreate-web-1 Starting
Container network_recreate-web-1 Started`})
hasStopped := strings.Contains(err, "Stopped")
hasResumed := strings.Contains(err, "Started") || strings.Contains(err, "Recreated")
if !hasStopped || !hasResumed {
t.Fatalf("unexpected output, missing expected events, stderr: %s", err)
}
}
@@ -190,3 +190,47 @@ func TestImageVolume(t *testing.T) {
out := res.Combined()
assert.Check(t, strings.Contains(out, "index.html"))
}

func TestImageVolumeRecreateOnRebuild(t *testing.T) {
c := NewCLI(t)
const projectName = "compose-e2e-image-volume-recreate"
t.Cleanup(func() {
c.cleanupWithDown(t, projectName)
c.RunDockerOrExitError(t, "rmi", "-f", "image-volume-source")
})

version := c.RunDockerCmd(t, "version", "-f", "{{.Server.Version}}")
major, _, found := strings.Cut(version.Combined(), ".")
assert.Assert(t, found)
if major == "26" || major == "27" {
t.Skip("Skipping test due to docker version < 28")
}

// First build and run with initial content
c.RunDockerComposeCmd(t, "-f", "./fixtures/image-volume-recreate/compose.yaml",
"--project-name", projectName, "build", "--build-arg", "CONTENT=foo")
res := c.RunDockerComposeCmd(t, "-f", "./fixtures/image-volume-recreate/compose.yaml",
"--project-name", projectName, "up", "-d")
assert.Check(t, !strings.Contains(res.Combined(), "error"))

// Check initial content
res = c.RunDockerComposeCmd(t, "-f", "./fixtures/image-volume-recreate/compose.yaml",
"--project-name", projectName, "logs", "consumer")
assert.Check(t, strings.Contains(res.Combined(), "foo"), "Expected 'foo' in output, got: %s", res.Combined())

// Rebuild source image with different content
c.RunDockerComposeCmd(t, "-f", "./fixtures/image-volume-recreate/compose.yaml",
"--project-name", projectName, "build", "--build-arg", "CONTENT=bar")

// Run up again - consumer should be recreated because source image changed
res = c.RunDockerComposeCmd(t, "-f", "./fixtures/image-volume-recreate/compose.yaml",
"--project-name", projectName, "up", "-d")
// The consumer container should be recreated
assert.Check(t, strings.Contains(res.Combined(), "Recreate") || strings.Contains(res.Combined(), "Created"),
"Expected container to be recreated, got: %s", res.Combined())

// Check updated content
res = c.RunDockerComposeCmd(t, "-f", "./fixtures/image-volume-recreate/compose.yaml",
"--project-name", projectName, "logs", "consumer")
assert.Check(t, strings.Contains(res.Combined(), "bar"), "Expected 'bar' in output after rebuild, got: %s", res.Combined())
}
@@ -22,6 +22,7 @@ import (
"fmt"
"os"
"path/filepath"
"sync"
"time"

"github.com/fsnotify/fsevents"
@@ -38,6 +39,7 @@ type fseventNotify struct {
stop chan struct{}

pathsWereWatching map[string]any
closeOnce sync.Once
}

func (d *fseventNotify) loop() {
@@ -81,6 +83,8 @@ func (d *fseventNotify) Start() error {
return nil
}

d.closeOnce = sync.Once{}

numberOfWatches.Add(int64(len(d.stream.Paths)))

err := d.stream.Start()
@@ -92,11 +96,13 @@ func (d *fseventNotify) Start() error {
}

func (d *fseventNotify) Close() error {
numberOfWatches.Add(int64(-len(d.stream.Paths)))
d.closeOnce.Do(func() {
numberOfWatches.Add(int64(-len(d.stream.Paths)))

d.stream.Stop()
close(d.errors)
close(d.stop)
d.stream.Stop()
close(d.errors)
close(d.stop)
})

return nil
}
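The watcher change above wraps Close in a sync.Once so the watch counter is only decremented once and the channels are never closed twice. A generic sketch of that idempotent-Close pattern, under illustrative names (closer and done are not the Compose types):

```go
package main

import (
	"fmt"
	"sync"
)

// closer owns a channel that must be closed exactly once,
// no matter how many times Close is called.
type closer struct {
	done      chan struct{}
	closeOnce sync.Once
}

func (c *closer) Close() error {
	c.closeOnce.Do(func() {
		close(c.done) // a second close() here would panic without the Once
		fmt.Println("resources released")
	})
	return nil
}

func main() {
	c := &closer{done: make(chan struct{})}
	_ = c.Close()
	_ = c.Close() // safe: the function passed to Do runs only once
}
```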
pkg/watch/watcher_darwin_test.go (new file, 48 lines)
@@ -0,0 +1,48 @@
//go:build fsnotify

/*
Copyright 2020 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package watch

import (
"testing"

"gotest.tools/v3/assert"
)

func TestFseventNotifyCloseIdempotent(t *testing.T) {
// Create a watcher with a temporary directory
tmpDir := t.TempDir()
watcher, err := newWatcher([]string{tmpDir})
assert.NilError(t, err)

// Start the watcher
err = watcher.Start()
assert.NilError(t, err)

// Close should work the first time
err = watcher.Close()
assert.NilError(t, err)

// Close should be idempotent - calling it again should not panic
err = watcher.Close()
assert.NilError(t, err)

// Even a third time should be safe
err = watcher.Close()
assert.NilError(t, err)
}