Mirror of https://github.com/docker/compose.git (synced 2026-02-10 02:29:25 +08:00)

Compare commits: 64 commits
| SHA1 |
|---|
| 00bd108aec |
| 792afb8d13 |
| 150449bbd2 |
| 8d0df18762 |
| 5b53f8e47f |
| c5fef61383 |
| ce3cb2b00c |
| d9e73db8e6 |
| d6b4d1c755 |
| 0baf24a269 |
| 0511b0c2b8 |
| 5bbdf3d84a |
| 52103cce74 |
| 020b57ca31 |
| bfa54081d4 |
| 0be8e4a676 |
| fd8ab2f7ac |
| b406b393bf |
| 0a9d1277c5 |
| c350f80d4b |
| 8a4095b507 |
| 0345461412 |
| 80856eacaf |
| d7b1972d5e |
| 7c42776770 |
| 3b0742fd57 |
| efd44de1b7 |
| bdb3f91eb4 |
| f94cb49062 |
| e7ed070690 |
| 8a1bf5d28b |
| 7ef392004f |
| f34f5b4d26 |
| b0484700da |
| f65fd02383 |
| cf8dc46560 |
| 2cfbe63533 |
| 8318f66330 |
| cb17c3c8a6 |
| 9174a99d27 |
| 4eb43c53fa |
| 150b88ab5d |
| 5159058c7e |
| 1ae191a936 |
| 3b2f3cdce3 |
| 47778f8b77 |
| 7d88edaf24 |
| 636c13f818 |
| 5a072b1ad5 |
| ddceb1ac9d |
| d48f28c72c |
| 2d16a05afa |
| bb94ea034e |
| 0938c7e96f |
| f429ee958a |
| e9ded2c518 |
| 54e6e0bd8f |
| 3bc871e64b |
| 6ff15d9472 |
| 49bc0603e3 |
| ce8a09b53f |
| 3dc8734897 |
| 852e192820 |
| d9e7859664 |
.github/workflows/ci.yml (vendored): 2 changes

@@ -7,7 +7,7 @@ concurrency:
on:
  push:
    branches:
      - 'v2'
      - 'main'
    tags:
      - 'v*'
  pull_request:
.github/workflows/merge.yml (vendored): 2 changes

@@ -7,7 +7,7 @@ concurrency:
on:
  push:
    branches:
      - 'v2'
      - 'main'
    tags:
      - 'v*'
.github/workflows/scorecards.yml (vendored): 2 changes

@@ -5,7 +5,7 @@ on:
  schedule:
    - cron: '44 9 * * 4'
  push:
    branches: [ "v2" ]
    branches: [ "main" ]

# Declare default permissions as read only.
permissions: read-all
@@ -15,7 +15,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

ARG GO_VERSION=1.20.5
ARG GO_VERSION=1.21.0
ARG XX_VERSION=1.2.1
ARG GOLANGCI_LINT_VERSION=v1.53.2
ARG ADDLICENSE_VERSION=v1.0.0
README.md: 16 changes

@@ -6,13 +6,14 @@
+ [Linux](#linux)
- [Quick Start](#quick-start)
- [Contributing](#contributing)
- [Legacy](#legacy)

# Docker Compose v2

[](https://github.com/docker/compose/releases/latest)
[](https://pkg.go.dev/github.com/docker/compose/v2)
[](https://github.com/docker/compose/actions?query=workflow%3Aci)
[](https://github.com/docker/compose/actions?query=workflow%3Aci)
[](https://goreportcard.com/report/github.com/docker/compose/v2)
[](https://codecov.io/gh/docker/compose)
[](https://codecov.io/gh/docker/compose)
[](https://api.securityscorecards.dev/projects/github.com/docker/compose)

@@ -23,12 +24,6 @@ your application are configured.
Once you have a Compose file, you can create and start your application with a
single command: `docker compose up`.

# About update and backward compatibility

Docker Compose V2 is a major version bump release of Docker Compose. It has been completely rewritten from scratch in Golang (V1 was in Python). The installation instructions for Compose V2 differ from V1. V2 is not a standalone binary anymore, and installation scripts will have to be adjusted. Some commands are different.

For a smooth transition from legacy docker-compose 1.xx, please consider installing [compose-switch](https://github.com/docker/compose-switch) to translate `docker-compose ...` commands into Compose V2's `docker compose ...`. Also check V2's `--compatibility` flag.

# Where to get Docker Compose

### Windows and macOS

@@ -85,3 +80,8 @@ Want to help develop Docker Compose? Check out our

If you find an issue, please report it on the
[issue tracker](https://github.com/docker/compose/issues/new/choose).

Legacy
-------------

The Python version of Compose is available under the `v1` [branch](https://github.com/docker/compose/tree/v1)
@@ -32,6 +32,7 @@ func alphaCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
	cmd.AddCommand(
		watchCommand(p, backend),
		vizCommand(p, backend),
		publishCommand(p, backend),
	)
	return cmd
}
@@ -17,6 +17,7 @@
package compose

import (
	"sort"
	"strings"

	"github.com/docker/compose/v2/pkg/api"

@@ -65,3 +66,23 @@ func completeProjectNames(backend api.Service) func(cmd *cobra.Command, args []s
		return values, cobra.ShellCompDirectiveNoFileComp
	}
}

func completeProfileNames(p *ProjectOptions) validArgsFn {
	return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		project, err := p.ToProject(nil)
		if err != nil {
			return nil, cobra.ShellCompDirectiveNoFileComp
		}

		allProfileNames := project.AllServices().GetProfiles()
		sort.Strings(allProfileNames)

		var values []string
		for _, profileName := range allProfileNames {
			if strings.HasPrefix(profileName, toComplete) {
				values = append(values, profileName)
			}
		}
		return values, cobra.ShellCompDirectiveNoFileComp
	}
}
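The new completeProfileNames helper returns a validArgsFn, a type whose definition is not part of this compare view. As a hedged sketch, it is presumably just a convenience alias for cobra's dynamic-completion callback signature, along these lines:

```go
package compose

import "github.com/spf13/cobra"

// validArgsFn is assumed here to be an alias for cobra's completion callback
// signature; the actual definition is not shown in this change set and may differ.
type validArgsFn func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective)
```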
@@ -442,12 +442,22 @@ func RootCommand(streams command.Cli, backend api.Service) *cobra.Command { //no
		"project-name",
		completeProjectNames(backend),
	)
	c.RegisterFlagCompletionFunc( //nolint:errcheck
		"project-directory",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{}, cobra.ShellCompDirectiveFilterDirs
		},
	)
	c.RegisterFlagCompletionFunc( //nolint:errcheck
		"file",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"yaml", "yml"}, cobra.ShellCompDirectiveFilterFileExt
		},
	)
	c.RegisterFlagCompletionFunc( //nolint:errcheck
		"profile",
		completeProfileNames(&opts),
	)

	c.Flags().StringVar(&progress, "progress", buildx.PrinterModeAuto, fmt.Sprintf(`Set type of progress output (%s)`, strings.Join(printerModes, ", ")))
@@ -233,11 +233,7 @@ func runConfigImages(streams api.Streams, opts configOptions, services []string)
		return err
	}
	for _, s := range project.Services {
		if s.Image != "" {
			fmt.Fprintln(streams.Out(), s.Image)
		} else {
			fmt.Fprintf(streams.Out(), "%s%s%s\n", project.Name, api.Separator, s.Name)
		}
		fmt.Fprintln(streams.Out(), api.GetImageNameOrDefault(s, project.Name))
	}
	return nil
}
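In the hunk above, the inline if/else that built a default image name is replaced by a single call to api.GetImageNameOrDefault. A minimal sketch of such a helper, assuming it simply centralizes the removed fallback logic; the real implementation in pkg/api may differ, and the separator value here is an assumption:

```go
package api

import (
	"fmt"

	"github.com/compose-spec/compose-go/types"
)

// Separator between project and service name; assumed to be "-" for this sketch.
const Separator = "-"

// GetImageNameOrDefault returns the image declared on the service, or the
// "<project><Separator><service>" fallback that runConfigImages used to build inline.
func GetImageNameOrDefault(service types.ServiceConfig, projectName string) string {
	if service.Image != "" {
		return service.Image
	}
	return fmt.Sprintf("%s%s%s", projectName, Separator, service.Name)
}
```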
cmd/compose/publish.go (new file): 49 lines

@@ -0,0 +1,49 @@
/*
   Copyright 2020 Docker Compose CLI authors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package compose

import (
	"context"

	"github.com/spf13/cobra"

	"github.com/docker/compose/v2/pkg/api"
)

func publishCommand(p *ProjectOptions, backend api.Service) *cobra.Command {
	opts := pushOptions{
		ProjectOptions: p,
	}
	publishCmd := &cobra.Command{
		Use:   "publish [OPTIONS] [REPOSITORY]",
		Short: "Publish compose application",
		RunE: Adapt(func(ctx context.Context, args []string) error {
			return runPublish(ctx, backend, opts, args[0])
		}),
		Args: cobra.ExactArgs(1),
	}
	return publishCmd
}

func runPublish(ctx context.Context, backend api.Service, opts pushOptions, repository string) error {
	project, err := opts.ToProject(nil)
	if err != nil {
		return err
	}

	return backend.Publish(ctx, project, repository)
}
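runPublish delegates to backend.Publish, so this change set presumably also extends the api.Service backend interface; that interface change is not visible in this compare view. A hedged sketch of the assumed method, shown in isolation:

```go
package api

import (
	"context"

	"github.com/compose-spec/compose-go/types"
)

// Service is shown here with only the method the new publish command relies on;
// the real pkg/api interface declares many more operations, and the exact
// Publish signature is an assumption based on the call in runPublish.
type Service interface {
	Publish(ctx context.Context, project *types.Project, repository string) error
}
```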
docs/reference/compose_alpha_publish.md (new file): 14 lines

@@ -0,0 +1,14 @@
# docker compose alpha publish

<!---MARKER_GEN_START-->
Publish compose application

### Options

| Name        | Type | Default | Description                     |
|:------------|:-----|:--------|:--------------------------------|
| `--dry-run` |      |         | Execute command in dry run mode |

<!---MARKER_GEN_END-->
@@ -4,9 +4,11 @@ long: Experimental commands
pname: docker compose
plink: docker_compose.yaml
cname:
    - docker compose alpha publish
    - docker compose alpha viz
    - docker compose alpha watch
clink:
    - docker_compose_alpha_publish.yaml
    - docker_compose_alpha_viz.yaml
    - docker_compose_alpha_watch.yaml
inherited_options:
docs/reference/docker_compose_alpha_publish.yaml (new file): 24 lines

@@ -0,0 +1,24 @@
command: docker compose alpha publish
short: Publish compose application
long: Publish compose application
usage: docker compose alpha publish [OPTIONS] [REPOSITORY]
pname: docker compose alpha
plink: docker_compose_alpha.yaml
inherited_options:
    - option: dry-run
      value_type: bool
      default_value: "false"
      description: Execute command in dry run mode
      deprecated: false
      hidden: false
      experimental: false
      experimentalcli: false
      kubernetes: false
      swarm: false
deprecated: false
hidden: false
experimental: false
experimentalcli: true
kubernetes: false
swarm: false
||||
@@ -6,7 +6,7 @@ Background:
|
||||
services:
|
||||
should_fail:
|
||||
image: alpine
|
||||
command: ls /does_not_exist
|
||||
command: ['sh', '-c', 'exit 123']
|
||||
sleep: # will be killed
|
||||
image: alpine
|
||||
command: ping localhost
|
||||
@@ -15,15 +15,22 @@ Background:
|
||||
|
||||
Scenario: Cascade stop
|
||||
When I run "compose up --abort-on-container-exit"
|
||||
Then the output contains "should_fail-1 exited with code 1"
|
||||
Then the output contains "should_fail-1 exited with code 123"
|
||||
And the output contains "Aborting on container exit..."
|
||||
And the exit code is 1
|
||||
And the exit code is 123
|
||||
|
||||
Scenario: Exit code from
|
||||
When I run "compose up --exit-code-from sleep"
|
||||
Then the output contains "should_fail-1 exited with code 1"
|
||||
When I run "compose up --exit-code-from should_fail"
|
||||
Then the output contains "should_fail-1 exited with code 123"
|
||||
And the output contains "Aborting on container exit..."
|
||||
And the exit code is 143
|
||||
And the exit code is 123
|
||||
|
||||
# TODO: this is currently not working propagating the exit code properly
|
||||
#Scenario: Exit code from (cascade stop)
|
||||
# When I run "compose up --exit-code-from sleep"
|
||||
# Then the output contains "should_fail-1 exited with code 123"
|
||||
# And the output contains "Aborting on container exit..."
|
||||
# And the exit code is 143
|
||||
|
||||
Scenario: Exit code from unknown service
|
||||
When I run "compose up --exit-code-from unknown"
|
||||
|
||||
go.mod: 26 changes

@@ -1,20 +1,20 @@
module github.com/docker/compose/v2

go 1.20
go 1.21

require (
	github.com/AlecAivazis/survey/v2 v2.3.7
	github.com/Microsoft/go-winio v0.6.1
	github.com/buger/goterm v1.0.4
	github.com/compose-spec/compose-go v1.16.0
	github.com/compose-spec/compose-go v1.18.1
	github.com/containerd/console v1.0.3
	github.com/containerd/containerd v1.7.2
	github.com/containerd/containerd v1.7.3
	github.com/cucumber/godog v0.0.0-00010101000000-000000000000 // replaced; see replace for the actual version used
	github.com/distribution/distribution/v3 v3.0.0-20230601133803-97b1d649c493
	github.com/docker/buildx v0.11.1
	github.com/docker/cli v24.0.2+incompatible
	github.com/docker/buildx v0.11.2
	github.com/docker/cli v24.0.5+incompatible
	github.com/docker/cli-docs-tool v0.6.0
	github.com/docker/docker v24.0.4+incompatible
	github.com/docker/docker v24.0.5+incompatible // v24.0.5-dev
	github.com/docker/go-connections v0.4.0
	github.com/docker/go-units v0.5.0
	github.com/fsnotify/fsevents v0.1.1

@@ -24,7 +24,7 @@ require (
	github.com/jonboulle/clockwork v0.4.0
	github.com/mattn/go-shellwords v1.0.12
	github.com/mitchellh/mapstructure v1.5.0
	github.com/moby/buildkit v0.11.0-rc3.0.20230609092854-67a08623b95a
	github.com/moby/buildkit v0.12.1 // v0.12 release branch
	github.com/moby/patternmatcher v0.5.0
	github.com/moby/term v0.5.0
	github.com/morikuni/aec v1.0.0

@@ -44,7 +44,7 @@ require (
	go.opentelemetry.io/otel/trace v1.14.0
	go.uber.org/goleak v1.2.1
	golang.org/x/sync v0.3.0
	google.golang.org/grpc v1.56.2
	google.golang.org/grpc v1.57.0
	gopkg.in/yaml.v2 v2.4.0
	gotest.tools/v3 v3.5.0
)

@@ -71,7 +71,6 @@ require (
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/cloudflare/cfssl v1.6.4 // indirect
	github.com/containerd/continuity v0.4.1 // indirect
	github.com/containerd/ttrpc v1.2.2 // indirect
	github.com/containerd/typeurl/v2 v2.1.1 // indirect
	github.com/cucumber/gherkin-go/v19 v19.0.3 // indirect
	github.com/cucumber/messages-go/v16 v16.0.1 // indirect

@@ -148,7 +147,7 @@ require (
	github.com/spf13/cast v1.5.0 // indirect
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
	github.com/subosito/gotenv v1.4.1 // indirect
	github.com/tonistiigi/fsutil v0.0.0-20230407161946-9e7a6df48576 // indirect
	github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb // indirect
	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
	github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect
	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect

@@ -163,7 +162,8 @@ require (
	go.opentelemetry.io/otel/metric v0.37.0 // indirect
	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
	golang.org/x/crypto v0.7.0 // indirect
	golang.org/x/mod v0.9.0 // indirect
	golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
	golang.org/x/mod v0.11.0 // indirect
	golang.org/x/net v0.9.0 // indirect
	golang.org/x/oauth2 v0.7.0 // indirect
	golang.org/x/sys v0.7.0 // indirect

@@ -172,7 +172,9 @@ require (
	golang.org/x/time v0.3.0 // indirect
	golang.org/x/tools v0.7.0 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
	google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
	google.golang.org/protobuf v1.30.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
go.sum: 84 changes
@@ -26,7 +26,9 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY=
|
||||
cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
@@ -43,6 +45,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652 h1:+vTEFqeoeur6XSq06bs+roX3YiT49gUniJK7Zky7Xjg=
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
@@ -54,19 +57,23 @@ github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYr
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.8/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM=
|
||||
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
|
||||
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc=
|
||||
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.6 h1:Y773UK7OBqhzi5VDXMi1zVGsoj+CVHs2eaC2bDsLwi0=
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.16 h1:4r7gsCu8Ekwl5iJGE/GmspA2UifqySCCkyyyPFeWs3w=
|
||||
@@ -107,6 +114,7 @@ github.com/bugsnag/bugsnag-go v1.5.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqR
|
||||
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
|
||||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
|
||||
github.com/bugsnag/panicwrap v1.2.0 h1:OzrKrRvXis8qEvOkfcxNcYbOd2O7xXS2nnKMEMABFQA=
|
||||
github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
|
||||
github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
|
||||
github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
@@ -130,20 +138,26 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
|
||||
github.com/compose-spec/compose-go v1.16.0 h1:HYk4uYWXgArHh6NG+WE4yGYayOXw+hjqJ+eJxpjWWjk=
|
||||
github.com/compose-spec/compose-go v1.16.0/go.mod h1:3yngGBGfls6FHGQsg4B1z6gz8ej9SOvmAJtxCwgbcnc=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
|
||||
github.com/compose-spec/compose-go v1.18.1 h1:YVYYkV8fAHW/eCOgtqSe1tHrlaDVvwS8zgs6F5ukm/Y=
|
||||
github.com/compose-spec/compose-go v1.18.1/go.mod h1:zR2tP1+kZHi5vJz7PjpW6oMoDji/Js3GHjP+hfjf70Q=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
|
||||
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
||||
github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo=
|
||||
github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI=
|
||||
github.com/containerd/containerd v1.7.3 h1:cKwYKkP1eTj54bP3wCdXXBymmKRQMrWjkLSWZZJDa8o=
|
||||
github.com/containerd/containerd v1.7.3/go.mod h1:32FOM4/O0RkNg7AjQj3hDzN9cUGtu+HMvaKUNiqCZB8=
|
||||
github.com/containerd/continuity v0.4.1 h1:wQnVrjIyQ8vhU2sgOiL5T07jo+ouqc2bnKsv5/EqGhU=
|
||||
github.com/containerd/continuity v0.4.1/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
|
||||
github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
|
||||
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
|
||||
github.com/containerd/nydus-snapshotter v0.8.2 h1:7SOrMU2YmLzfbsr5J7liMZJlNi5WT6vtIOxLGv+iz7E=
|
||||
github.com/containerd/nydus-snapshotter v0.8.2/go.mod h1:UJILTN5LVBRY+dt8BGJbp72Xy729hUZsOugObEI3/O8=
|
||||
github.com/containerd/stargz-snapshotter v0.14.3 h1:OTUVZoPSPs8mGgmQUE1dqw3WX/3nrsmsurW7UPLWl1U=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
|
||||
github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs=
|
||||
github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak=
|
||||
github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
|
||||
@@ -153,6 +167,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/cucumber/gherkin-go/v19 v19.0.3 h1:mMSKu1077ffLbTJULUfM5HPokgeBcIGboyeNUof1MdE=
|
||||
github.com/cucumber/gherkin-go/v19 v19.0.3/go.mod h1:jY/NP6jUtRSArQQJ5h1FXOUgk5fZK24qtE7vKi776Vw=
|
||||
github.com/cucumber/messages-go/v16 v16.0.0/go.mod h1:EJcyR5Mm5ZuDsKJnT2N9KRnBK30BGjtYotDKpwQ0v6g=
|
||||
@@ -167,17 +182,17 @@ github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zA
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
|
||||
github.com/distribution/distribution/v3 v3.0.0-20230601133803-97b1d649c493 h1:fm5DpBD+A7o0+x9Nf+o9/4/qPGbfxLpr9qIPVuV8vQc=
|
||||
github.com/distribution/distribution/v3 v3.0.0-20230601133803-97b1d649c493/go.mod h1:+fqBJ4vPYo4Uu1ZE4d+bUtTLRXfdSL3NvCZIZ9GHv58=
|
||||
github.com/docker/buildx v0.11.1 h1:xfmrAkOJrN+NLRcwhZn1iBgJVAK1dEBEv8lWu1Wxg14=
|
||||
github.com/docker/buildx v0.11.1/go.mod h1:qAxs3bsJEfVo7DOc9riES/f9Z187CeGM5nLPmadk8AA=
|
||||
github.com/docker/cli v24.0.2+incompatible h1:QdqR7znue1mtkXIJ+ruQMGQhpw2JzMJLRXp6zpzF6tM=
|
||||
github.com/docker/cli v24.0.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/buildx v0.11.2 h1:R3p9F0gnI4FwvQ0p40UwdX1T4ugap4UWxY3TFHoP4Ws=
|
||||
github.com/docker/buildx v0.11.2/go.mod h1:CWAABt10iIuGpleypA3103mplDfcGu0A2AvT03xfpTc=
|
||||
github.com/docker/cli v24.0.5+incompatible h1:WeBimjvS0eKdH4Ygx+ihVq1Q++xg36M/rMi4aXAvodc=
|
||||
github.com/docker/cli v24.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli-docs-tool v0.6.0 h1:Z9x10SaZgFaB6jHgz3OWooynhSa40CsWkpe5hEnG/qA=
|
||||
github.com/docker/cli-docs-tool v0.6.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
|
||||
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v24.0.4+incompatible h1:s/LVDftw9hjblvqIeTiGYXBCD95nOEEl7qRsRrIOuQI=
|
||||
github.com/docker/docker v24.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY=
|
||||
github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
|
||||
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
|
||||
@@ -185,6 +200,7 @@ github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
|
||||
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
|
||||
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
|
||||
@@ -198,6 +214,7 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 h1:pEtiCjIXx3RvGjlUJuCNxNOw0MNblyR9Wi+vJGBFh+8=
|
||||
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
|
||||
github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
@@ -209,6 +226,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
|
||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
|
||||
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
|
||||
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
@@ -216,6 +234,7 @@ github.com/fsnotify/fsevents v0.1.1 h1:/125uxJvvoSDDBPen6yUZbil8J9ydKZnnl3TWWmvn
|
||||
github.com/fsnotify/fsevents v0.1.1/go.mod h1:+d+hS27T6k5J8CRaPLKFgwKYcpS7GwW3Ule9+SC2ZRc=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo=
|
||||
github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
@@ -243,6 +262,7 @@ github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/
|
||||
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
|
||||
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
@@ -260,10 +280,12 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2V
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
|
||||
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
@@ -296,6 +318,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
|
||||
github.com/google/certificate-transparency-go v1.1.4 h1:hCyXHDbtqlr/lMXU0D4WgbalXL0Zk4dSWWMbPV8VrqY=
|
||||
github.com/google/certificate-transparency-go v1.1.4/go.mod h1:D6lvbfwckhNrbM9WVl1EVeMOyzC19mpIjMOI4nxBHtQ=
|
||||
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
|
||||
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@@ -333,6 +356,7 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
@@ -396,6 +420,7 @@ github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jmoiron/sqlx v1.3.3 h1:j82X0bf7oQ27XeqxicSZsTU5suPwKElg3oyxNn43iTk=
|
||||
github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
|
||||
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
@@ -411,6 +436,7 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
|
||||
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
@@ -465,8 +491,8 @@ github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WT
|
||||
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/buildkit v0.11.0-rc3.0.20230609092854-67a08623b95a h1:1k3bAXwxC2N1FncWijq/43sLj2OVIZ11FT0APIXWhMg=
|
||||
github.com/moby/buildkit v0.11.0-rc3.0.20230609092854-67a08623b95a/go.mod h1:4sM7BBBqXOQ+vV6LrVAOAMhZI9cVNYV5RhZCl906a64=
|
||||
github.com/moby/buildkit v0.12.1 h1:vvMG7EZYCiQZpTtXQkvyeyj7HzT1JHhDWj+/aiGIzLM=
|
||||
github.com/moby/buildkit v0.12.1/go.mod h1:adB4y0SxxX8trnrY+oEulb48ODLqPO6pKMF0ppGcCoI=
|
||||
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
|
||||
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
|
||||
github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
|
||||
@@ -502,10 +528,12 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
|
||||
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
|
||||
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
|
||||
github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
|
||||
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
|
||||
github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
|
||||
github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
@@ -516,7 +544,9 @@ github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVn
|
||||
github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk=
|
||||
github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
|
||||
github.com/opencontainers/runtime-spec v1.1.0-rc.2 h1:ucBtEms2tamYYW/SvGpvq9yUN0NEVL6oyLEwDcTSrk8=
|
||||
github.com/opencontainers/runtime-spec v1.1.0-rc.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
|
||||
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
@@ -557,7 +587,6 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
@@ -579,7 +608,6 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
|
||||
github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
@@ -588,6 +616,7 @@ github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIK
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spdx/tools-golang v0.5.1 h1:fJg3SVOGG+eIva9ZUBm/hvyA7PIPVFjRxUKe6fdAgwE=
|
||||
github.com/spdx/tools-golang v0.5.1/go.mod h1:/DRDQuBfB37HctM29YtrX1v+bXiVmT2OpQDalRmX9aU=
|
||||
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
|
||||
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
|
||||
github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
|
||||
@@ -605,6 +634,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
|
||||
github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=
|
||||
github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
@@ -626,13 +656,14 @@ github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4D
|
||||
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
|
||||
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA=
|
||||
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375/go.mod h1:xRroudyp5iVtxKqZCrA6n2TLFRBf8bmnjr1UD4x+z7g=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20230407161946-9e7a6df48576 h1:fZXPQDVh5fm2x7pA0CH1TtH80tiZ0L7i834kZqZN8Pw=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20230407161946-9e7a6df48576/go.mod h1:q1CxMSzcAbjUkVGHoZeQUcCaALnaE4XdWk+zJcgMYFw=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb h1:uUe8rNyVXM8moActoBol6Xf6xX2GMr7SosR2EywMvGg=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb/go.mod h1:SxX/oNQ/ag6Vaoli547ipFK9J7BZn5JqJG0JE8lf8bA=
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
|
||||
github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 h1:Y/M5lygoNPKwVNLMPXgVfsRT40CSFKXCxuU8LoHySjs=
|
||||
github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc=
|
||||
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
|
||||
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
|
||||
github.com/weppos/publicsuffix-go v0.15.1-0.20220329081811-9a40b608a236 h1:vMJBP3PQViZsF6cOINtvyMC8ptpLsyJ4EwyFnzuWNxc=
|
||||
github.com/weppos/publicsuffix-go v0.15.1-0.20220329081811-9a40b608a236/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
@@ -653,6 +684,7 @@ github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54t
|
||||
github.com/zmap/zcrypto v0.0.0-20220605182715-4dfcec6e9a8c h1:ufDm/IlBYZYLuiqvQuhpTKwrcAS2OlXEzWbDvTVGbSQ=
|
||||
github.com/zmap/zcrypto v0.0.0-20220605182715-4dfcec6e9a8c/go.mod h1:egdRkzUylATvPkWMpebZbXhv0FMEMJGX/ur0D3Csk2s=
|
||||
github.com/zmap/zlint/v3 v3.1.0 h1:WjVytZo79m/L1+/Mlphl09WBob6YTGljN5IGWZFpAv0=
|
||||
github.com/zmap/zlint/v3 v3.1.0/go.mod h1:L7t8s3sEKkb0A2BxGy1IWrxt1ZATa1R4QfJZaQOD3zU=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
@@ -661,6 +693,7 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 h1:5jD3teb4Qh7mx/nfzq4jO2WFFpvXD0vYWFDrdvNWmXk=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0/go.mod h1:UMklln0+MRhZC4e3PwmN3pCtq4DyIadWw4yikh6bNrw=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.40.0 h1:ZjF6qLnAVNq6xUh0sK2mCEqwnRrpgr0mLALQXJL34NI=
|
||||
@@ -716,6 +749,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
|
||||
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
@@ -741,8 +776,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
|
||||
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
|
||||
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -863,7 +898,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
@@ -1015,8 +1049,12 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D
|
||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
|
||||
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M=
|
||||
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
|
||||
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
@@ -1039,8 +1077,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
|
||||
google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
|
||||
google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
|
||||
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
||||
107 internal/sync/docker_cp.go Normal file
@@ -0,0 +1,107 @@
/*
Copyright 2023 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sync

import (
    "context"
    "errors"
    "fmt"
    "io"
    "io/fs"
    "os"

    "github.com/compose-spec/compose-go/types"
    "github.com/docker/compose/v2/pkg/api"
    "github.com/sirupsen/logrus"
)

type ComposeClient interface {
    Exec(ctx context.Context, projectName string, options api.RunOptions) (int, error)

    Copy(ctx context.Context, projectName string, options api.CopyOptions) error
}

type DockerCopy struct {
    client ComposeClient

    projectName string

    infoWriter io.Writer
}

var _ Syncer = &DockerCopy{}

func NewDockerCopy(projectName string, client ComposeClient, infoWriter io.Writer) *DockerCopy {
    return &DockerCopy{
        projectName: projectName,
        client:      client,
        infoWriter:  infoWriter,
    }
}

func (d *DockerCopy) Sync(ctx context.Context, service types.ServiceConfig, paths []PathMapping) error {
    var errs []error
    for i := range paths {
        if err := d.sync(ctx, service, paths[i]); err != nil {
            errs = append(errs, err)
        }
    }
    return errors.Join(errs...)
}

func (d *DockerCopy) sync(ctx context.Context, service types.ServiceConfig, pathMapping PathMapping) error {
    scale := 1
    if service.Deploy != nil && service.Deploy.Replicas != nil {
        scale = int(*service.Deploy.Replicas)
    }

    if fi, statErr := os.Stat(pathMapping.HostPath); statErr == nil {
        if fi.IsDir() {
            for i := 1; i <= scale; i++ {
                _, err := d.client.Exec(ctx, d.projectName, api.RunOptions{
                    Service: service.Name,
                    Command: []string{"mkdir", "-p", pathMapping.ContainerPath},
                    Index:   i,
                })
                if err != nil {
                    logrus.Warnf("failed to create %q from %s: %v", pathMapping.ContainerPath, service.Name, err)
                }
            }
            fmt.Fprintf(d.infoWriter, "%s created\n", pathMapping.ContainerPath)
        } else {
            err := d.client.Copy(ctx, d.projectName, api.CopyOptions{
                Source:      pathMapping.HostPath,
                Destination: fmt.Sprintf("%s:%s", service.Name, pathMapping.ContainerPath),
            })
            if err != nil {
                return err
            }
            fmt.Fprintf(d.infoWriter, "%s updated\n", pathMapping.ContainerPath)
        }
    } else if errors.Is(statErr, fs.ErrNotExist) {
        for i := 1; i <= scale; i++ {
            _, err := d.client.Exec(ctx, d.projectName, api.RunOptions{
                Service: service.Name,
                Command: []string{"rm", "-rf", pathMapping.ContainerPath},
                Index:   i,
            })
            if err != nil {
                logrus.Warnf("failed to delete %q from %s: %v", pathMapping.ContainerPath, service.Name, err)
            }
        }
        fmt.Fprintf(d.infoWriter, "%s deleted from service\n", pathMapping.ContainerPath)
    }
    return nil
}
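Illustrative aside (not part of the change set): a minimal sketch of how the copy-based syncer above could be driven, assuming a stub ComposeClient living in the same package (and its existing imports); the stub, the project name and the service name are hypothetical.

// stubClient is a hypothetical ComposeClient used only for this sketch.
type stubClient struct{}

func (stubClient) Exec(ctx context.Context, projectName string, options api.RunOptions) (int, error) {
    fmt.Println("exec in", options.Service, ":", options.Command)
    return 0, nil
}

func (stubClient) Copy(ctx context.Context, projectName string, options api.CopyOptions) error {
    fmt.Println("copy", options.Source, "->", options.Destination)
    return nil
}

func exampleDockerCopySync() {
    syncer := NewDockerCopy("myproject", stubClient{}, os.Stdout)
    service := types.ServiceConfig{Name: "web"}
    _ = syncer.Sync(context.Background(), service, []PathMapping{
        {HostPath: "./main.go", ContainerPath: "/workdir/main.go"},
    })
}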
42 internal/sync/shared.go Normal file
@@ -0,0 +1,42 @@
/*
Copyright 2023 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sync

import (
    "context"

    "github.com/compose-spec/compose-go/types"
)

// PathMapping contains the Compose service and modified host system path.
type PathMapping struct {
    // HostPath that was created/modified/deleted outside the container.
    //
    // This is the path as seen from the user's perspective, e.g.
    // - C:\Users\moby\Documents\hello-world\main.go (file on Windows)
    // - /Users/moby/Documents/hello-world (directory on macOS)
    HostPath string
    // ContainerPath for the target file inside the container (only populated
    // for sync events, not rebuild).
    //
    // This is the path as used in Docker CLI commands, e.g.
    // - /workdir/main.go
    // - /workdir/subdir
    ContainerPath string
}

type Syncer interface {
    Sync(ctx context.Context, service types.ServiceConfig, paths []PathMapping) error
}
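Illustrative aside (not part of the change set): the Syncer contract above is small enough that a hypothetical dry-run implementation, one that only reports what it would transfer, fits in a few lines (same package assumed, plus the io and fmt imports used elsewhere in it).

// logSyncer is a hypothetical Syncer that only reports what would be synced.
type logSyncer struct {
    out io.Writer
}

var _ Syncer = logSyncer{}

func (l logSyncer) Sync(ctx context.Context, service types.ServiceConfig, paths []PathMapping) error {
    for _, p := range paths {
        fmt.Fprintf(l.out, "would sync %s -> %s:%s\n", p.HostPath, service.Name, p.ContainerPath)
    }
    return nil
}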
354 internal/sync/tar.go Normal file
@@ -0,0 +1,354 @@
/*
Copyright 2018 The Tilt Dev Authors
Copyright 2023 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sync

import (
    "archive/tar"
    "bytes"
    "context"
    "fmt"
    "io"
    "io/fs"
    "os"
    "path"
    "path/filepath"
    "strings"

    "github.com/hashicorp/go-multierror"
    "github.com/pkg/errors"

    "github.com/compose-spec/compose-go/types"
    moby "github.com/docker/docker/api/types"
    "github.com/docker/docker/pkg/archive"
)

type archiveEntry struct {
    path   string
    info   os.FileInfo
    header *tar.Header
}

type LowLevelClient interface {
    ContainersForService(ctx context.Context, projectName string, serviceName string) ([]moby.Container, error)

    Exec(ctx context.Context, containerID string, cmd []string, in io.Reader) error
}

type Tar struct {
    client LowLevelClient

    projectName string
}

var _ Syncer = &Tar{}

func NewTar(projectName string, client LowLevelClient) *Tar {
    return &Tar{
        projectName: projectName,
        client:      client,
    }
}

func (t *Tar) Sync(ctx context.Context, service types.ServiceConfig, paths []PathMapping) error {
    containers, err := t.client.ContainersForService(ctx, t.projectName, service.Name)
    if err != nil {
        return err
    }

    var pathsToCopy []PathMapping
    var pathsToDelete []string
    for _, p := range paths {
        if _, err := os.Stat(p.HostPath); err != nil && errors.Is(err, fs.ErrNotExist) {
            pathsToDelete = append(pathsToDelete, p.ContainerPath)
        } else {
            pathsToCopy = append(pathsToCopy, p)
        }
    }

    var deleteCmd []string
    if len(pathsToDelete) != 0 {
        deleteCmd = append([]string{"rm", "-rf"}, pathsToDelete...)
    }
    copyCmd := []string{"tar", "-v", "-C", "/", "-x", "-f", "-"}

    var eg multierror.Group
    writers := make([]*io.PipeWriter, len(containers))
    for i := range containers {
        containerID := containers[i].ID
        r, w := io.Pipe()
        writers[i] = w
        eg.Go(func() error {
            if len(deleteCmd) != 0 {
                if err := t.client.Exec(ctx, containerID, deleteCmd, nil); err != nil {
                    return fmt.Errorf("deleting paths in %s: %w", containerID, err)
                }
            }
            if err := t.client.Exec(ctx, containerID, copyCmd, r); err != nil {
                return fmt.Errorf("copying files to %s: %w", containerID, err)
            }
            return nil
        })
    }

    multiWriter := newLossyMultiWriter(writers...)
    tarReader := tarArchive(pathsToCopy)
    defer func() {
        _ = tarReader.Close()
        multiWriter.Close()
    }()
    _, err = io.Copy(multiWriter, tarReader)
    if err != nil {
        return err
    }
    multiWriter.Close()

    return eg.Wait().ErrorOrNil()
}

type ArchiveBuilder struct {
    tw *tar.Writer
    // A shared I/O buffer to help with file copying.
    copyBuf *bytes.Buffer
}

func NewArchiveBuilder(writer io.Writer) *ArchiveBuilder {
    tw := tar.NewWriter(writer)
    return &ArchiveBuilder{
        tw:      tw,
        copyBuf: &bytes.Buffer{},
    }
}

func (a *ArchiveBuilder) Close() error {
    return a.tw.Close()
}

// ArchivePathsIfExist creates a tar archive of all local files in `paths`. It quietly skips any paths that don't exist.
func (a *ArchiveBuilder) ArchivePathsIfExist(paths []PathMapping) error {
    // In order to handle overlapping syncs, we
    // 1) collect all the entries,
    // 2) de-dupe them, with last-one-wins semantics
    // 3) write all the entries
    //
    // It's not obvious that this is the correct behavior. A better approach
    // (that's more in-line with how syncs work) might ignore files in earlier
    // path mappings when we know they're going to be "synced" over.
    // There's a bunch of subtle product decisions about how overlapping path
    // mappings work that we're not sure about.
    var entries []archiveEntry
    for _, p := range paths {
        newEntries, err := a.entriesForPath(p.HostPath, p.ContainerPath)
        if err != nil {
            return fmt.Errorf("inspecting %q: %w", p.HostPath, err)
        }

        entries = append(entries, newEntries...)
    }

    entries = dedupeEntries(entries)
    for _, entry := range entries {
        err := a.writeEntry(entry)
        if err != nil {
            return fmt.Errorf("archiving %q: %w", entry.path, err)
        }
    }
    return nil
}

func (a *ArchiveBuilder) writeEntry(entry archiveEntry) error {
    pathInTar := entry.path
    header := entry.header

    if header.Typeflag != tar.TypeReg {
        // anything other than a regular file (e.g. dir, symlink) just needs the header
        if err := a.tw.WriteHeader(header); err != nil {
            return fmt.Errorf("writing %q header: %w", pathInTar, err)
        }
        return nil
    }

    file, err := os.Open(pathInTar)
    if err != nil {
        // In case the file has been deleted since we last looked at it.
        if os.IsNotExist(err) {
            return nil
        }
        return err
    }

    defer func() {
        _ = file.Close()
    }()

    // The size header must match the number of contents bytes.
    //
    // There is room for a race condition here if something writes to the file
    // after we've read the file size.
    //
    // For small files, we avoid this by first copying the file into a buffer,
    // and using the size of the buffer to populate the header.
    //
    // For larger files, we don't want to copy the whole thing into a buffer,
    // because that would blow up heap size. There is some danger that this
    // will lead to a spurious error when the tar writer validates the sizes.
    // That error will be disruptive but will be handled as best as we
    // can downstream.
    useBuf := header.Size < 5000000
    if useBuf {
        a.copyBuf.Reset()
        _, err = io.Copy(a.copyBuf, file)
        if err != nil && err != io.EOF {
            return fmt.Errorf("copying %q: %w", pathInTar, err)
        }
        header.Size = int64(len(a.copyBuf.Bytes()))
    }

    // wait to write the header until _after_ the file is successfully opened
    // to avoid generating an invalid tar entry that has a header but no contents
    // in the case the file has been deleted
    err = a.tw.WriteHeader(header)
    if err != nil {
        return fmt.Errorf("writing %q header: %w", pathInTar, err)
    }

    if useBuf {
        _, err = io.Copy(a.tw, a.copyBuf)
    } else {
        _, err = io.Copy(a.tw, file)
    }

    if err != nil && err != io.EOF {
        return fmt.Errorf("copying %q: %w", pathInTar, err)
    }

    // explicitly flush so that if the entry is invalid we will detect it now and
    // provide a more meaningful error
    if err := a.tw.Flush(); err != nil {
        return fmt.Errorf("finalizing %q: %w", pathInTar, err)
    }
    return nil
}

// tarPath writes the given source path into tarWriter at the given dest (recursively for directories).
// e.g. tarring my_dir --> dest d: d/file_a, d/file_b
// If source path does not exist, quietly skips it and returns no err
func (a *ArchiveBuilder) entriesForPath(localPath, containerPath string) ([]archiveEntry, error) {
    localInfo, err := os.Stat(localPath)
    if err != nil {
        if os.IsNotExist(err) {
            return nil, nil
        }
        return nil, err
    }

    localPathIsDir := localInfo.IsDir()
    if localPathIsDir {
        // Make sure we can trim this off filenames to get valid relative filepaths
        if !strings.HasSuffix(localPath, string(filepath.Separator)) {
            localPath += string(filepath.Separator)
        }
    }

    containerPath = strings.TrimPrefix(containerPath, "/")

    result := make([]archiveEntry, 0)
    err = filepath.Walk(localPath, func(curLocalPath string, info os.FileInfo, err error) error {
        if err != nil {
            return fmt.Errorf("walking %q: %w", curLocalPath, err)
        }

        linkname := ""
        if info.Mode()&os.ModeSymlink != 0 {
            var err error
            linkname, err = os.Readlink(curLocalPath)
            if err != nil {
                return err
            }
        }

        var name string
        //nolint:gocritic
        if localPathIsDir {
            // Name of file in tar should be relative to source directory...
            tmp, err := filepath.Rel(localPath, curLocalPath)
            if err != nil {
                return fmt.Errorf("making %q relative to %q: %w", curLocalPath, localPath, err)
            }
            // ...and live inside `dest`
            name = path.Join(containerPath, filepath.ToSlash(tmp))
        } else if strings.HasSuffix(containerPath, "/") {
            name = containerPath + filepath.Base(curLocalPath)
        } else {
            name = containerPath
        }

        header, err := archive.FileInfoHeader(name, info, linkname)
        if err != nil {
            // Not all types of files are allowed in a tarball. That's OK.
            // Mimic the Docker behavior and just skip the file.
            return nil
        }

        result = append(result, archiveEntry{
            path:   curLocalPath,
            info:   info,
            header: header,
        })

        return nil
    })
    if err != nil {
        return nil, err
    }
    return result, nil
}

func tarArchive(ops []PathMapping) io.ReadCloser {
    pr, pw := io.Pipe()
    go func() {
        ab := NewArchiveBuilder(pw)
        err := ab.ArchivePathsIfExist(ops)
        if err != nil {
            _ = pw.CloseWithError(fmt.Errorf("adding files to tar: %w", err))
        } else {
            // propagate errors from the TarWriter::Close() because it performs a final
            // Flush() and any errors mean the tar is invalid
            if err := ab.Close(); err != nil {
                _ = pw.CloseWithError(fmt.Errorf("closing tar: %w", err))
            } else {
                _ = pw.Close()
            }
        }
    }()
    return pr
}

// Dedupe the entries with last-entry-wins semantics.
func dedupeEntries(entries []archiveEntry) []archiveEntry {
    seenIndex := make(map[string]int, len(entries))
    result := make([]archiveEntry, 0, len(entries))
    for i, entry := range entries {
        seenIndex[entry.header.Name] = i
    }
    for i, entry := range entries {
        if seenIndex[entry.header.Name] == i {
            result = append(result, entry)
        }
    }
    return result
}
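Illustrative aside (not part of the change set): the stream produced by tarArchive above is what Tar.Sync pipes into `tar -x` inside each container. A hypothetical in-package sketch that lists the entries of such a stream, assuming ./app and ./app/main.go exist on the host (it reuses the archive/tar, fmt and io imports already present in tar.go):

func exampleTarArchive() {
    r := tarArchive([]PathMapping{
        {HostPath: "./app", ContainerPath: "/workdir"},
        {HostPath: "./app/main.go", ContainerPath: "/workdir/main.go"}, // overlaps; dedupeEntries keeps the last entry
    })
    defer r.Close()

    tr := tar.NewReader(r)
    for {
        hdr, err := tr.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            fmt.Println("read error:", err)
            return
        }
        fmt.Println(hdr.Name)
    }
}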
91 internal/sync/writer.go Normal file
@@ -0,0 +1,91 @@
/*
Copyright 2023 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sync

import (
    "errors"
    "io"
)

// lossyMultiWriter attempts to tee all writes to the provided io.PipeWriter
// instances.
//
// If a writer fails during a Write call, the write-side of the pipe is then
// closed with the error and no subsequent attempts are made to write to the
// pipe.
//
// If all writers fail during a write, an error is returned.
//
// On Close, any remaining writers are closed.
type lossyMultiWriter struct {
    writers []*io.PipeWriter
}

// newLossyMultiWriter creates a new writer that *attempts* to tee all data written to it to the provided io.PipeWriter
// instances. Rather than failing a write operation if any writer fails, writes only fail if there are no more valid
// writers. Otherwise, errors for specific writers are propagated via CloseWithError.
func newLossyMultiWriter(writers ...*io.PipeWriter) *lossyMultiWriter {
    // reverse the writers because during the write we iterate
    // backwards, so this way we'll end up writing in the same
    // order as the writers were passed to us
    writers = append([]*io.PipeWriter(nil), writers...)
    for i, j := 0, len(writers)-1; i < j; i, j = i+1, j-1 {
        writers[i], writers[j] = writers[j], writers[i]
    }

    return &lossyMultiWriter{
        writers: writers,
    }
}

// Write writes to each writer that is still active (i.e. has not failed/encountered an error on write).
//
// If a writer encounters an error during the write, the write side of the pipe is closed with the error
// and no subsequent attempts will be made to write to that writer.
//
// An error is only returned from this function if ALL writers have failed.
func (l *lossyMultiWriter) Write(p []byte) (int, error) {
    // NOTE: this function iterates backwards so that it can
    // safely remove elements during the loop
    for i := len(l.writers) - 1; i >= 0; i-- {
        written, err := l.writers[i].Write(p)
        if err == nil && written != len(p) {
            err = io.ErrShortWrite
        }
        if err != nil {
            // pipe writer close cannot fail
            _ = l.writers[i].CloseWithError(err)
            l.writers = append(l.writers[:i], l.writers[i+1:]...)
        }
    }

    if len(l.writers) == 0 {
        return 0, errors.New("no writers remaining")
    }

    return len(p), nil
}

// Close closes any still open (non-failed) writers.
//
// Failed writers have already been closed with an error.
func (l *lossyMultiWriter) Close() {
    for i := range l.writers {
        // pipe writer close cannot fail
        _ = l.writers[i].Close()
    }
}
152 internal/sync/writer_test.go Normal file
@@ -0,0 +1,152 @@
/*
Copyright 2023 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sync

import (
    "context"
    "io"
    "sync"
    "testing"
    "time"

    "github.com/pkg/errors"
    "github.com/stretchr/testify/require"
)

func TestLossyMultiWriter(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    t.Cleanup(cancel)

    const count = 5
    readers := make([]*bufReader, count)
    writers := make([]*io.PipeWriter, count)
    for i := 0; i < count; i++ {
        r, w := io.Pipe()
        readers[i] = newBufReader(ctx, r)
        writers[i] = w
    }

    w := newLossyMultiWriter(writers...)
    t.Cleanup(w.Close)
    n, err := w.Write([]byte("hello world"))
    require.Equal(t, 11, n)
    require.NoError(t, err)
    for i := range readers {
        readers[i].waitForWrite(t)
        require.Equal(t, "hello world", string(readers[i].contents()))
        readers[i].reset()
    }

    // even if a writer fails (in this case simulated by closing the receiving end of the pipe),
    // write operations should continue to return nil error but the writer should be closed
    // with an error
    const failIndex = 3
    require.NoError(t, readers[failIndex].r.CloseWithError(errors.New("oh no")))
    n, err = w.Write([]byte("hello"))
    require.Equal(t, 5, n)
    require.NoError(t, err)
    for i := range readers {
        readers[i].waitForWrite(t)
        if i == failIndex {
            err := readers[i].error()
            require.EqualError(t, err, "io: read/write on closed pipe")
            require.Empty(t, readers[i].contents())
        } else {
            require.Equal(t, "hello", string(readers[i].contents()))
        }
    }

    // perform another write, verify there's still no errors
    n, err = w.Write([]byte(" world"))
    require.Equal(t, 6, n)
    require.NoError(t, err)
}

type bufReader struct {
    ctx context.Context
    r *io.PipeReader
    mu sync.Mutex
    err error
    data []byte
    writeSync chan struct{}
}

func newBufReader(ctx context.Context, r *io.PipeReader) *bufReader {
    b := &bufReader{
        ctx: ctx,
        r: r,
        writeSync: make(chan struct{}),
    }
    go b.consume()
    return b
}

func (b *bufReader) waitForWrite(t testing.TB) {
    t.Helper()
    select {
    case <-b.writeSync:
        return
    case <-time.After(50 * time.Millisecond):
        t.Fatal("timed out waiting for write")
    }
}

func (b *bufReader) consume() {
    defer close(b.writeSync)
    for {
        buf := make([]byte, 512)
        n, err := b.r.Read(buf)
        if n != 0 {
            b.mu.Lock()
            b.data = append(b.data, buf[:n]...)
            b.mu.Unlock()
        }
        if err == io.EOF {
            return
        }
        if err != nil {
            b.mu.Lock()
            b.err = err
            b.mu.Unlock()
            return
        }
        // prevent goroutine leak, tie lifetime to the test
        select {
        case b.writeSync <- struct{}{}:
        case <-b.ctx.Done():
            return
        }
    }
}

func (b *bufReader) contents() []byte {
    b.mu.Lock()
    defer b.mu.Unlock()
    return b.data
}

func (b *bufReader) reset() {
    b.mu.Lock()
    defer b.mu.Unlock()
    b.data = nil
}

func (b *bufReader) error() error {
    b.mu.Lock()
    defer b.mu.Unlock()
    return b.err
}
152 internal/tracing/attributes.go Normal file
@@ -0,0 +1,152 @@
/*
Copyright 2020 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package tracing

import (
    "strings"
    "time"

    "github.com/compose-spec/compose-go/types"
    moby "github.com/docker/docker/api/types"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/trace"
)

// SpanOptions is a small helper type to make it easy to share the options helpers between
// downstream functions that accept slices of trace.SpanStartOption and trace.EventOption.
type SpanOptions []trace.SpanStartEventOption

func (s SpanOptions) SpanStartOptions() []trace.SpanStartOption {
    out := make([]trace.SpanStartOption, len(s))
    for i := range s {
        out[i] = s[i]
    }
    return out
}

func (s SpanOptions) EventOptions() []trace.EventOption {
    out := make([]trace.EventOption, len(s))
    for i := range s {
        out[i] = s[i]
    }
    return out
}

// ProjectOptions returns common attributes from a Compose project.
//
// For convenience, it's returned as a SpanOptions object to allow it to be
// passed directly to the wrapping helper methods in this package such as
// SpanWrapFunc.
func ProjectOptions(proj *types.Project) SpanOptions {
    if proj == nil {
        return nil
    }

    disabledServiceNames := make([]string, len(proj.DisabledServices))
    for i := range proj.DisabledServices {
        disabledServiceNames[i] = proj.DisabledServices[i].Name
    }

    attrs := []attribute.KeyValue{
        attribute.String("project.name", proj.Name),
        attribute.String("project.dir", proj.WorkingDir),
        attribute.StringSlice("project.compose_files", proj.ComposeFiles),
        attribute.StringSlice("project.services.active", proj.ServiceNames()),
        attribute.StringSlice("project.services.disabled", disabledServiceNames),
        attribute.StringSlice("project.profiles", proj.Profiles),
        attribute.StringSlice("project.volumes", proj.VolumeNames()),
        attribute.StringSlice("project.networks", proj.NetworkNames()),
        attribute.StringSlice("project.secrets", proj.SecretNames()),
        attribute.StringSlice("project.configs", proj.ConfigNames()),
        attribute.StringSlice("project.extensions", keys(proj.Extensions)),
    }
    return []trace.SpanStartEventOption{
        trace.WithAttributes(attrs...),
    }
}

// ServiceOptions returns common attributes from a Compose service.
//
// For convenience, it's returned as a SpanOptions object to allow it to be
// passed directly to the wrapping helper methods in this package such as
// SpanWrapFunc.
func ServiceOptions(service types.ServiceConfig) SpanOptions {
    attrs := []attribute.KeyValue{
        attribute.String("service.name", service.Name),
        attribute.String("service.image", service.Image),
        attribute.StringSlice("service.networks", keys(service.Networks)),
    }

    configNames := make([]string, len(service.Configs))
    for i := range service.Configs {
        configNames[i] = service.Configs[i].Source
    }
    attrs = append(attrs, attribute.StringSlice("service.configs", configNames))

    secretNames := make([]string, len(service.Secrets))
    for i := range service.Secrets {
        secretNames[i] = service.Secrets[i].Source
    }
    attrs = append(attrs, attribute.StringSlice("service.secrets", secretNames))

    volNames := make([]string, len(service.Volumes))
    for i := range service.Volumes {
        volNames[i] = service.Volumes[i].Source
    }
    attrs = append(attrs, attribute.StringSlice("service.volumes", volNames))

    return []trace.SpanStartEventOption{
        trace.WithAttributes(attrs...),
    }
}

// ContainerOptions returns common attributes from a Moby container.
//
// For convenience, it's returned as a SpanOptions object to allow it to be
// passed directly to the wrapping helper methods in this package such as
// SpanWrapFunc.
func ContainerOptions(container moby.Container) SpanOptions {
    attrs := []attribute.KeyValue{
        attribute.String("container.id", container.ID),
        attribute.String("container.image", container.Image),
        unixTimeAttr("container.created_at", container.Created),
    }

    if len(container.Names) != 0 {
        attrs = append(attrs, attribute.String("container.name", strings.TrimPrefix(container.Names[0], "/")))
    }

    return []trace.SpanStartEventOption{
        trace.WithAttributes(attrs...),
    }
}

func keys[T any](m map[string]T) []string {
    out := make([]string, 0, len(m))
    for k := range m {
        out = append(out, k)
    }
    return out
}

func timeAttr(key string, value time.Time) attribute.KeyValue {
    return attribute.String(key, value.Format(time.RFC3339))
}

func unixTimeAttr(key string, value int64) attribute.KeyValue {
    return timeAttr(key, time.Unix(value, 0).UTC())
}
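Illustrative aside (not part of the change set): the point of the shared SpanOptions type above is that one value can feed both a span start and a span event. A hypothetical fragment, assuming a context import and the package-level Tracer used by the wrappers below:

func annotateProject(ctx context.Context, proj *types.Project) {
    opts := ProjectOptions(proj)
    ctx, span := Tracer.Start(ctx, "project/example", opts.SpanStartOptions()...)
    defer span.End()
    span.AddEvent("project/example/event", opts.EventOptions()...)
    _ = ctx
}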
@@ -69,7 +69,6 @@ func traceClientFromDockerContext(dockerCli command.Cli, otelEnv envMap) (otlptr
cfg.Endpoint,
grpc.WithContextDialer(DialInMemory),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(),
)
if err != nil {
return nil, fmt.Errorf("initializing otel connection from docker context metadata: %v", err)
91 internal/tracing/wrap.go Normal file
@@ -0,0 +1,91 @@
/*
Copyright 2020 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package tracing

import (
    "context"

    "go.opentelemetry.io/otel/codes"
    semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
    "go.opentelemetry.io/otel/trace"
)

// SpanWrapFunc wraps a function that takes a context with a trace.Span, marking the status as codes.Error if the
// wrapped function returns an error.
//
// The context passed to the function is created from the span to ensure correct propagation.
//
// NOTE: This function is nearly identical to SpanWrapFuncForErrGroup, except the latter is designed specially for
// convenience with errgroup.Group due to its prevalence throughout the codebase. The code is duplicated to avoid
// adding even more levels of function wrapping/indirection.
func SpanWrapFunc(spanName string, opts SpanOptions, fn func(ctx context.Context) error) func(context.Context) error {
    return func(ctx context.Context) error {
        ctx, span := Tracer.Start(ctx, spanName, opts.SpanStartOptions()...)
        defer span.End()

        if err := fn(ctx); err != nil {
            span.SetStatus(codes.Error, err.Error())
            return err
        }

        span.SetStatus(codes.Ok, "")
        return nil
    }
}

// SpanWrapFuncForErrGroup wraps a function that takes a context with a trace.Span, marking the status as codes.Error
// if the wrapped function returns an error.
//
// The context passed to the function is created from the span to ensure correct propagation.
//
// NOTE: This function is nearly identical to SpanWrapFunc, except this function is designed specially for
// convenience with errgroup.Group due to its prevalence throughout the codebase. The code is duplicated to avoid
// adding even more levels of function wrapping/indirection.
func SpanWrapFuncForErrGroup(ctx context.Context, spanName string, opts SpanOptions, fn func(ctx context.Context) error) func() error {
    return func() error {
        ctx, span := Tracer.Start(ctx, spanName, opts.SpanStartOptions()...)
        defer span.End()

        if err := fn(ctx); err != nil {
            span.SetStatus(codes.Error, err.Error())
            return err
        }

        span.SetStatus(codes.Ok, "")
        return nil
    }
}

// EventWrapFuncForErrGroup invokes a function and records an event, optionally including the returned
// error as the "exception message" on the event.
//
// This is intended for lightweight usage to wrap errgroup.Group calls where a full span is not desired.
func EventWrapFuncForErrGroup(ctx context.Context, eventName string, opts SpanOptions, fn func(ctx context.Context) error) func() error {
    return func() error {
        span := trace.SpanFromContext(ctx)
        eventOpts := opts.EventOptions()

        err := fn(ctx)

        if err != nil {
            eventOpts = append(eventOpts, trace.WithAttributes(semconv.ExceptionMessage(err.Error())))
        }
        span.AddEvent(eventName, eventOpts...)

        return err
    }
}
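Illustrative aside (not part of the change set): a minimal sketch of how these wrappers combine with errgroup.Group, in the style of the convergence changes further below; the startAll helper, its span name, and the compose-go types / golang.org/x/sync/errgroup imports it assumes are hypothetical additions for illustration only.

func startAll(ctx context.Context, project *types.Project, start func(context.Context, types.ServiceConfig) error) error {
    eg, ctx := errgroup.WithContext(ctx)
    for _, service := range project.Services {
        service := service
        // each goroutine gets its own span; errors are recorded on the span and returned
        eg.Go(SpanWrapFuncForErrGroup(ctx, "service/start", ServiceOptions(service), func(ctx context.Context) error {
            return start(ctx, service)
        }))
    }
    return eg.Wait()
}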
@@ -52,7 +52,7 @@ type Service interface {
Ps(ctx context.Context, projectName string, options PsOptions) ([]ContainerSummary, error)
// List executes the equivalent to a `docker stack ls`
List(ctx context.Context, options ListOptions) ([]Stack, error)
// Convert translate compose model into backend's native format
// Config executes the equivalent to a `compose config`
Config(ctx context.Context, project *types.Project, options ConfigOptions) ([]byte, error)
// Kill executes the equivalent to a `compose kill`
Kill(ctx context.Context, projectName string, options KillOptions) error
@@ -74,6 +74,8 @@ type Service interface {
Events(ctx context.Context, projectName string, options EventsOptions) error
// Port executes the equivalent to a `compose port`
Port(ctx context.Context, projectName string, service string, port uint16, options PortOptions) (string, int, error)
// Publish executes the equivalent to a `compose publish`
Publish(ctx context.Context, project *types.Project, repository string) error
// Images executes the equivalent of a `compose images`
Images(ctx context.Context, projectName string, options ImagesOptions) ([]ImageSummary, error)
// MaxConcurrency defines upper limit for concurrent operations against engine API
@@ -145,7 +147,6 @@ func (o BuildOptions) Apply(project *types.Project) error {
if service.Build == nil {
continue
}
service.Image = GetImageNameOrDefault(service, project.Name)
if platform != "" {
if len(service.Build.Platforms) > 0 && !utils.StringContains(service.Build.Platforms, platform) {
return fmt.Errorf("service %q build.platforms does not support value set by DOCKER_DEFAULT_PLATFORM: %s", service.Name, platform)
@@ -55,6 +55,7 @@ type ServiceProxy struct {
DryRunModeFn func(ctx context.Context, dryRun bool) (context.Context, error)
VizFn func(ctx context.Context, project *types.Project, options VizOptions) (string, error)
WaitFn func(ctx context.Context, projectName string, options WaitOptions) (int64, error)
PublishFn func(ctx context.Context, project *types.Project, repository string) error
interceptors []Interceptor
}

@@ -91,6 +92,7 @@ func (s *ServiceProxy) WithService(service Service) *ServiceProxy {
s.TopFn = service.Top
s.EventsFn = service.Events
s.PortFn = service.Port
s.PublishFn = service.Publish
s.ImagesFn = service.Images
s.WatchFn = service.Watch
s.MaxConcurrencyFn = service.MaxConcurrency
@@ -311,6 +313,10 @@ func (s *ServiceProxy) Port(ctx context.Context, projectName string, service str
return s.PortFn(ctx, projectName, service, port, options)
}

func (s *ServiceProxy) Publish(ctx context.Context, project *types.Project, repository string) error {
return s.PublishFn(ctx, project, repository)
}

// Images implements Service interface
func (s *ServiceProxy) Images(ctx context.Context, project string, options ImagesOptions) ([]ImageSummary, error) {
if s.ImagesFn == nil {
@@ -22,15 +22,19 @@ import (
"os"
"path/filepath"

"github.com/docker/buildx/controller/pb"

"github.com/compose-spec/compose-go/types"
"github.com/containerd/containerd/platforms"
"github.com/docker/buildx/build"
_ "github.com/docker/buildx/driver/docker" // required to get default driver registered
"github.com/docker/buildx/builder"
"github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/buildflags"
xprogress "github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/docker/compose/v2/internal/tracing"
"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/progress"
"github.com/docker/compose/v2/pkg/utils"
"github.com/docker/docker/builder/remotecontext/urlutil"
bclient "github.com/moby/buildkit/client"
"github.com/moby/buildkit/session"
@@ -42,9 +46,8 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"

"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/progress"
"github.com/docker/compose/v2/pkg/utils"
// required to get default driver registered
_ "github.com/docker/buildx/driver/docker"
)

func (s *composeService) Build(ctx context.Context, project *types.Project, options api.BuildOptions) error {
@@ -58,26 +61,52 @@ func (s *composeService) Build(ctx context.Context, project *types.Project, opti
}, s.stdinfo(), "Building")
}

func (s *composeService) build(ctx context.Context, project *types.Project, options api.BuildOptions) (map[string]string, error) { //nolint:gocyclo
args := options.Args.Resolve(envResolver(project.Environment))

//nolint:gocyclo
func (s *composeService) build(ctx context.Context, project *types.Project, options api.BuildOptions) (map[string]string, error) {
buildkitEnabled, err := s.dockerCli.BuildKitEnabled()
if err != nil {
return nil, err
}

// Progress needs its own context that lives longer than the
// build one otherwise it won't read all the messages from
// build and will lock
progressCtx, cancel := context.WithCancel(context.Background())
defer cancel()
// Initialize buildkit nodes
var (
b *builder.Builder
nodes []builder.Node
w *xprogress.Printer
)
if buildkitEnabled {
builderName := options.Builder
if builderName == "" {
builderName = os.Getenv("BUILDX_BUILDER")
}
b, err = builder.New(s.dockerCli, builder.WithName(builderName))
if err != nil {
return nil, err
}

w, err := xprogress.NewPrinter(progressCtx, s.stdout(), os.Stdout, options.Progress)
if err != nil {
return nil, err
nodes, err = b.LoadNodes(ctx, false)
if err != nil {
return nil, err
}

// Progress needs its own context that lives longer than the
// build one otherwise it won't read all the messages from
// build and will lock
progressCtx, cancel := context.WithCancel(context.Background())
defer cancel()

w, err = xprogress.NewPrinter(progressCtx, s.stdout(), os.Stdout, options.Progress,
xprogress.WithDesc(
fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver),
fmt.Sprintf("%s:%s", b.Driver, b.Name),
))

if err != nil {
return nil, err
}
}

builtIDs := make([]string, len(project.Services))
builtDigests := make([]string, len(project.Services))
err = InDependencyOrder(ctx, project, func(ctx context.Context, name string) error {
if len(options.Services) > 0 && !utils.Contains(options.Services, name) {
return nil
@@ -89,16 +118,11 @@ func (s *composeService) build(ctx context.Context, project *types.Project, opti
}

if !buildkitEnabled {
if service.Build.Args == nil {
service.Build.Args = args
} else {
service.Build.Args = service.Build.Args.OverrideBy(args)
}
id, err := s.doBuildClassic(ctx, service, options)
id, err := s.doBuildClassic(ctx, project, service, options)
if err != nil {
return err
}
builtIDs[idx] = id
builtDigests[idx] = id

if options.Push {
return s.push(ctx, project, api.PushOptions{})
@@ -114,13 +138,12 @@ func (s *composeService) build(ctx context.Context, project *types.Project, opti
if err != nil {
return err
}
buildOptions.BuildArgs = mergeArgs(buildOptions.BuildArgs, flatten(args))

digest, err := s.doBuildBuildkit(ctx, service.Name, buildOptions, w, options.Builder)
digest, err := s.doBuildBuildkit(ctx, service.Name, buildOptions, w, nodes)
if err != nil {
return err
}
builtIDs[idx] = digest
builtDigests[idx] = digest

return nil
}, func(traversal *graphTraversal) {
@@ -128,8 +151,10 @@ func (s *composeService) build(ctx context.Context, project *types.Project, opti
})

// enforce all build event get consumed
if errw := w.Wait(); errw != nil {
return nil, errw
if buildkitEnabled {
if errw := w.Wait(); errw != nil {
return nil, errw
}
}

if err != nil {
@@ -137,9 +162,10 @@ func (s *composeService) build(ctx context.Context, project *types.Project, opti
}

imageIDs := map[string]string{}
for i, d := range builtIDs {
if d != "" {
imageIDs[project.Services[i].Image] = d
for i, imageDigest := range builtDigests {
if imageDigest != "" {
imageRef := api.GetImageNameOrDefault(project.Services[i], project.Name)
imageIDs[imageRef] = imageDigest
}
}
return imageIDs, err
@@ -169,7 +195,11 @@ func (s *composeService) ensureImagesExists(ctx context.Context, project *types.
return err
}

err = s.pullRequiredImages(ctx, project, images, quietPull)
err = tracing.SpanWrapFunc("project/pull", tracing.ProjectOptions(project),
func(ctx context.Context) error {
return s.pullRequiredImages(ctx, project, images, quietPull)
},
)(ctx)
if err != nil {
return err
}
@@ -185,16 +215,24 @@ func (s *composeService) ensureImagesExists(ctx context.Context, project *types.
}

if buildRequired {
builtImages, err := s.build(ctx, project, api.BuildOptions{
Progress: mode,
})
err = tracing.SpanWrapFunc("project/build", tracing.ProjectOptions(project),
func(ctx context.Context) error {
builtImages, err := s.build(ctx, project, api.BuildOptions{
Progress: mode,
})
if err != nil {
return err
}

for name, digest := range builtImages {
images[name] = digest
}
return nil
},
)(ctx)
if err != nil {
return err
}

for name, digest := range builtImages {
images[name] = digest
}
}

// set digest as com.docker.compose.image label so we can detect outdated containers
@@ -222,7 +260,8 @@ func (s *composeService) prepareProjectForBuild(project *types.Project, images m
continue
}

_, localImagePresent := images[service.Image]
image := api.GetImageNameOrDefault(service, project.Name)
_, localImagePresent := images[image]
if localImagePresent && service.PullPolicy != types.PullPolicyBuild {
service.Build = nil
project.Services[i] = service
@@ -291,17 +330,38 @@ func (s *composeService) getLocalImagesDigests(ctx context.Context, project *typ
return images, nil
}

func (s *composeService) toBuildOptions(project *types.Project, service types.ServiceConfig, options api.BuildOptions) (build.Options, error) {
tags := []string{service.Image}
// resolveAndMergeBuildArgs returns the final set of build arguments to use for the service image build.
//
// First, args directly defined via `build.args` in YAML are considered.
// Then, any explicitly passed args in opts (e.g. via `--build-arg` on the CLI) are merged, overwriting any
// keys that already exist.
// Next, any keys without a value are resolved using the project environment.
//
// Finally, standard proxy variables based on the Docker client configuration are added, but will not overwrite
// any values if already present.
func resolveAndMergeBuildArgs(
dockerCli command.Cli,
project *types.Project,
service types.ServiceConfig,
opts api.BuildOptions,
) types.MappingWithEquals {
result := make(types.MappingWithEquals).
OverrideBy(service.Build.Args).
OverrideBy(opts.Args).
Resolve(envResolver(project.Environment))

buildArgs := flatten(service.Build.Args.Resolve(envResolver(project.Environment)))

for k, v := range storeutil.GetProxyConfig(s.dockerCli) {
if _, ok := buildArgs[k]; !ok {
buildArgs[k] = v
// proxy arguments do NOT override and should NOT have env resolution applied,
// so they're handled last
for k, v := range storeutil.GetProxyConfig(dockerCli) {
if _, ok := result[k]; !ok {
v := v
result[k] = &v
}
}
return result
}

func (s *composeService) toBuildOptions(project *types.Project, service types.ServiceConfig, options api.BuildOptions) (build.Options, error) {
plats, err := addPlatforms(project, service)
if err != nil {
return build.Options{}, err
@@ -335,6 +395,7 @@ func (s *composeService) toBuildOptions(project *types.Project, service types.Se
sessionConfig = append(sessionConfig, secretsProvider)
}

tags := []string{api.GetImageNameOrDefault(service, project.Name)}
if len(service.Build.Tags) > 0 {
tags = append(tags, service.Build.Tags...)
}
@@ -345,18 +406,19 @@ func (s *composeService) toBuildOptions(project *types.Project, service types.Se

imageLabels := getImageBuildLabels(project, service)

push := options.Push && service.Image != ""
exports := []bclient.ExportEntry{{
Type: "docker",
Attrs: map[string]string{
"load": "true",
"push": fmt.Sprint(options.Push),
"push": fmt.Sprint(push),
},
}}
if len(service.Build.Platforms) > 1 {
exports = []bclient.ExportEntry{{
Type: "image",
Attrs: map[string]string{
"push": fmt.Sprint(options.Push),
"push": fmt.Sprint(push),
},
}}
}
@@ -372,7 +434,7 @@ func (s *composeService) toBuildOptions(project *types.Project, service types.Se
CacheTo: pb.CreateCaches(cacheTo),
NoCache: service.Build.NoCache,
Pull: service.Build.Pull,
BuildArgs: buildArgs,
BuildArgs: flatten(resolveAndMergeBuildArgs(s.dockerCli, project, service, options)),
Tags: tags,
Target: service.Build.Target,
Exports: exports,
@@ -399,16 +461,6 @@ func flatten(in types.MappingWithEquals) types.Mapping {
return out
}

func mergeArgs(m ...types.Mapping) types.Mapping {
merged := types.Mapping{}
for _, mapping := range m {
for key, val := range mapping {
merged[key] = val
}
}
return merged
}

func dockerFilePath(ctxName string, dockerfile string) string {
if dockerfile == "" {
return ""
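Illustrative aside (not part of the change set): the precedence that resolveAndMergeBuildArgs implements — CLI --build-arg values override YAML build.args, while keys left without a value wait for environment resolution and the proxy pass — can be seen with a tiny hypothetical snippet built on the same compose-go types (the helper name and the argument values are made up for illustration):

func examplePrecedence() {
    v1, v2 := "1.0", "2.0"
    yamlArgs := types.MappingWithEquals{"VERSION": &v1, "GIT_SHA": nil}
    cliArgs := types.MappingWithEquals{"VERSION": &v2}

    merged := make(types.MappingWithEquals).
        OverrideBy(yamlArgs).
        OverrideBy(cliArgs)

    fmt.Println(*merged["VERSION"]) // "2.0": the CLI value wins
    fmt.Println(merged["GIT_SHA"])  // nil: still unresolved at this stage
}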
@@ -34,18 +34,11 @@ import (
"github.com/moby/buildkit/client"
)

func (s *composeService) doBuildBuildkit(ctx context.Context, service string, opts build.Options, p *buildx.Printer, builderName string) (string, error) {
b, err := builder.New(s.dockerCli, builder.WithName(builderName))
if err != nil {
return "", err
}

nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return "", err
}

var response map[string]*client.SolveResponse
func (s *composeService) doBuildBuildkit(ctx context.Context, service string, opts build.Options, p *buildx.Printer, nodes []builder.Node) (string, error) {
var (
response map[string]*client.SolveResponse
err error
)
if s.dryRun {
response = s.dryRunBuildResponse(ctx, service, opts)
} else {
@@ -26,6 +26,8 @@ import (
"runtime"
"strings"

"github.com/docker/cli/cli/command"

"github.com/docker/docker/api/types/registry"

"github.com/compose-spec/compose-go/types"
@@ -45,7 +47,7 @@ import (
)

//nolint:gocyclo
func (s *composeService) doBuildClassic(ctx context.Context, service types.ServiceConfig, options api.BuildOptions) (string, error) {
func (s *composeService) doBuildClassic(ctx context.Context, project *types.Project, service types.ServiceConfig, options api.BuildOptions) (string, error) {
var (
buildCtx io.ReadCloser
dockerfileCtx io.ReadCloser
@@ -159,8 +161,9 @@ func (s *composeService) doBuildClassic(ctx context.Context, service types.Servi
for k, auth := range creds {
authConfigs[k] = registry.AuthConfig(auth)
}
buildOptions := imageBuildOptions(service.Build)
buildOptions.Tags = append(buildOptions.Tags, service.Image)
buildOptions := imageBuildOptions(s.dockerCli, project, service, options)
imageName := api.GetImageNameOrDefault(service, project.Name)
buildOptions.Tags = append(buildOptions.Tags, imageName)
buildOptions.Dockerfile = relDockerfile
buildOptions.AuthConfigs = authConfigs
buildOptions.Memory = options.Memory
@@ -214,14 +217,15 @@ func isLocalDir(c string) bool {
return err == nil
}

func imageBuildOptions(config *types.BuildConfig) dockertypes.ImageBuildOptions {
func imageBuildOptions(dockerCli command.Cli, project *types.Project, service types.ServiceConfig, options api.BuildOptions) dockertypes.ImageBuildOptions {
config := service.Build
return dockertypes.ImageBuildOptions{
Version: dockertypes.BuilderV1,
Tags: config.Tags,
NoCache: config.NoCache,
Remove: true,
PullParent: config.Pull,
BuildArgs: config.Args,
BuildArgs: resolveAndMergeBuildArgs(dockerCli, project, service, options),
Labels: config.Labels,
NetworkMode: config.Network,
ExtraHosts: config.ExtraHosts.AsList(),
@@ -26,6 +26,8 @@ import (
"strings"
"sync"

"github.com/jonboulle/clockwork"

"github.com/docker/docker/api/types/volume"

"github.com/compose-spec/compose-go/types"
@@ -58,6 +60,7 @@ func init() {
func NewComposeService(dockerCli command.Cli) api.Service {
return &composeService{
dockerCli: dockerCli,
clock: clockwork.NewRealClock(),
maxConcurrency: -1,
dryRun: false,
}
@@ -65,6 +68,7 @@ func NewComposeService(dockerCli command.Cli) api.Service {

type composeService struct {
dockerCli command.Cli
clock clockwork.Clock
maxConcurrency int
dryRun bool
}
@@ -203,6 +207,7 @@ func (s *composeService) projectFromName(containers Containers, projectName stri
condition := ServiceConditionRunningOrHealthy
// Let's restart the dependency by default if we don't have the info stored in the label
restart := true
required := true
dependency := dcArr[0]

// backward compatibility
@@ -212,7 +217,7 @@ func (s *composeService) projectFromName(containers Containers, projectName stri
restart, _ = strconv.ParseBool(dcArr[2])
}
}
service.DependsOn[dependency] = types.ServiceDependency{Condition: condition, Restart: restart}
service.DependsOn[dependency] = types.ServiceDependency{Condition: condition, Restart: restart, Required: required}
}
}
project.Services = append(project.Services, *service)
@@ -25,8 +25,12 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/compose/v2/internal/tracing"
|
||||
moby "github.com/docker/docker/api/types"
|
||||
containerType "github.com/docker/docker/api/types/container"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@@ -93,17 +97,19 @@ func (c *convergence) apply(ctx context.Context, project *types.Project, options
|
||||
return err
|
||||
}
|
||||
|
||||
strategy := options.RecreateDependencies
|
||||
if utils.StringContains(options.Services, name) {
|
||||
strategy = options.Recreate
|
||||
}
|
||||
err = c.ensureService(ctx, project, service, strategy, options.Inherit, options.Timeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return tracing.SpanWrapFunc("service/apply", tracing.ServiceOptions(service), func(ctx context.Context) error {
|
||||
strategy := options.RecreateDependencies
|
||||
if utils.StringContains(options.Services, name) {
|
||||
strategy = options.Recreate
|
||||
}
|
||||
err = c.ensureService(ctx, project, service, strategy, options.Inherit, options.Timeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.updateProject(project, name)
|
||||
return nil
|
||||
c.updateProject(project, name)
|
||||
return nil
|
||||
})(ctx)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -179,7 +185,8 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
|
||||
if i >= expected {
|
||||
// Scale Down
|
||||
container := container
|
||||
eg.Go(func() error {
|
||||
traceOpts := append(tracing.ServiceOptions(service), tracing.ContainerOptions(container)...)
|
||||
eg.Go(tracing.SpanWrapFuncForErrGroup(ctx, "service/scale/down", traceOpts, func(ctx context.Context) error {
|
||||
timeoutInSecond := utils.DurationSecondToInt(timeout)
|
||||
err := c.service.apiClient().ContainerStop(ctx, container.ID, containerType.StopOptions{
|
||||
Timeout: timeoutInSecond,
|
||||
@@ -188,7 +195,7 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
|
||||
return err
|
||||
}
|
||||
return c.service.apiClient().ContainerRemove(ctx, container.ID, moby.ContainerRemoveOptions{})
|
||||
})
|
||||
}))
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -198,11 +205,11 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
|
||||
}
|
||||
if mustRecreate {
|
||||
i, container := i, container
|
||||
eg.Go(func() error {
|
||||
eg.Go(tracing.SpanWrapFuncForErrGroup(ctx, "container/recreate", tracing.ContainerOptions(container), func(ctx context.Context) error {
|
||||
recreated, err := c.service.recreateContainer(ctx, project, service, container, inherit, timeout)
|
||||
updated[i] = recreated
|
||||
return err
|
||||
})
|
||||
}))
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -218,9 +225,9 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
|
||||
w.Event(progress.CreatedEvent(name))
|
||||
default:
|
||||
container := container
|
||||
eg.Go(func() error {
|
||||
eg.Go(tracing.EventWrapFuncForErrGroup(ctx, "service/start", tracing.ContainerOptions(container), func(ctx context.Context) error {
|
||||
return c.service.startContainer(ctx, container)
|
||||
})
|
||||
}))
|
||||
}
|
||||
updated[i] = container
|
||||
}
|
||||
@@ -231,7 +238,8 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
|
||||
number := next + i
|
||||
name := getContainerName(project.Name, service, number)
|
||||
i := i
|
||||
eg.Go(func() error {
|
||||
eventOpts := tracing.SpanOptions{trace.WithAttributes(attribute.String("container.name", name))}
|
||||
eg.Go(tracing.EventWrapFuncForErrGroup(ctx, "service/scale/up", eventOpts, func(ctx context.Context) error {
|
||||
opts := createOptions{
|
||||
AutoRemove: false,
|
||||
AttachStdin: false,
|
||||
@@ -241,7 +249,7 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
|
||||
container, err := c.service.createContainer(ctx, project, service, name, number, opts)
|
||||
updated[actual+i] = container
|
||||
return err
|
||||
})
|
||||
}))
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -286,9 +294,18 @@ func containerEvents(containers Containers, eventFunc func(string) progress.Even
return events
}

func containerReasonEvents(containers Containers, eventFunc func(string, string) progress.Event, reason string) []progress.Event {
events := []progress.Event{}
for _, container := range containers {
events = append(events, eventFunc(getContainerProgressName(container), reason))
}
return events
}

// ServiceConditionRunningOrHealthy is a service condition on status running or healthy
const ServiceConditionRunningOrHealthy = "running_or_healthy"

//nolint:gocyclo
func (s *composeService) waitDependencies(ctx context.Context, project *types.Project, dependencies types.DependsOnConfig, containers Containers) error {
eg, _ := errgroup.WithContext(ctx)
w := progress.ContextWriter(ctx)
@@ -307,11 +324,20 @@ func (s *composeService) waitDependencies(ctx context.Context, project *types.Pr
|
||||
ticker := time.NewTicker(500 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
<-ticker.C
|
||||
select {
|
||||
case <-ticker.C:
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
switch config.Condition {
|
||||
case ServiceConditionRunningOrHealthy:
|
||||
healthy, err := s.isServiceHealthy(ctx, waitingFor, true)
|
||||
if err != nil {
|
||||
if !config.Required {
|
||||
w.Events(containerReasonEvents(waitingFor, progress.SkippedEvent, fmt.Sprintf("optional dependency %q is not running or is unhealthy", dep)))
|
||||
logrus.Warnf("optional dependency %q is not running or is unhealthy: %s", dep, err.Error())
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
if healthy {
|
||||
@@ -321,6 +347,11 @@ func (s *composeService) waitDependencies(ctx context.Context, project *types.Pr
|
||||
case types.ServiceConditionHealthy:
|
||||
healthy, err := s.isServiceHealthy(ctx, waitingFor, false)
|
||||
if err != nil {
|
||||
if !config.Required {
|
||||
w.Events(containerReasonEvents(waitingFor, progress.SkippedEvent, fmt.Sprintf("optional dependency %q failed to start", dep)))
|
||||
logrus.Warnf("optional dependency %q failed to start: %s", dep, err.Error())
|
||||
return nil
|
||||
}
|
||||
w.Events(containerEvents(waitingFor, progress.ErrorEvent))
|
||||
return errors.Wrap(err, "dependency failed to start")
|
||||
}
|
||||
@@ -334,11 +365,22 @@ func (s *composeService) waitDependencies(ctx context.Context, project *types.Pr
return err
}
if exited {
w.Events(containerEvents(waitingFor, progress.Exited))
if code != 0 {
return fmt.Errorf("service %q didn't complete successfully: exit %d", dep, code)
if code == 0 {
w.Events(containerEvents(waitingFor, progress.Exited))
return nil
}
return nil

messageSuffix := fmt.Sprintf("%q didn't complete successfully: exit %d", dep, code)
if !config.Required {
// optional -> mark as skipped & don't propagate error
w.Events(containerReasonEvents(waitingFor, progress.SkippedEvent, fmt.Sprintf("optional dependency %s", messageSuffix)))
logrus.Warnf("optional dependency %s", messageSuffix)
return nil
}

msg := fmt.Sprintf("service %s", messageSuffix)
w.Events(containerReasonEvents(waitingFor, progress.ErrorMessageEvent, msg))
return errors.New(msg)
}
default:
logrus.Warnf("unsupported depends_on condition: %s", config.Condition)

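To make the new required/optional branching above easier to follow, here is a hedged, self-contained sketch of the decision it implements for `service_completed_successfully` dependencies: a non-zero exit only aborts startup when the dependency is required. The type and function names below are simplified stand-ins, not the real compose internals.

```go
package main

import (
	"fmt"
	"log"
)

// dependencyResult is a simplified stand-in for the state inspected in waitDependencies.
type dependencyResult struct {
	name     string
	required bool
	exitCode int
}

// checkCompleted mirrors the branching above: exit 0 is success, a non-zero exit on an
// optional dependency is only logged, and a non-zero exit on a required one is an error.
func checkCompleted(d dependencyResult) error {
	if d.exitCode == 0 {
		return nil
	}
	msg := fmt.Sprintf("%q didn't complete successfully: exit %d", d.name, d.exitCode)
	if !d.required {
		log.Printf("optional dependency %s", msg)
		return nil
	}
	return fmt.Errorf("service %s", msg)
}

func main() {
	fmt.Println(checkCompleted(dependencyResult{name: "migrations", required: false, exitCode: 1})) // <nil>
	fmt.Println(checkCompleted(dependencyResult{name: "db-init", required: true, exitCode: 1}))     // error
}
```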
@@ -236,8 +236,8 @@ func TestWaitDependencies(t *testing.T) {
|
||||
redisService := types.ServiceConfig{Name: "redis", Scale: 1}
|
||||
project := types.Project{Name: strings.ToLower(testProject), Services: []types.ServiceConfig{dbService, redisService}}
|
||||
dependencies := types.DependsOnConfig{
|
||||
"db": {Condition: types.ServiceConditionStarted},
|
||||
"redis": {Condition: types.ServiceConditionStarted},
|
||||
"db": {Condition: types.ServiceConditionStarted, Required: true},
|
||||
"redis": {Condition: types.ServiceConditionStarted, Required: true},
|
||||
}
|
||||
assert.NilError(t, tested.waitDependencies(context.Background(), &project, dependencies, nil))
|
||||
})
|
||||
|
||||
@@ -139,6 +139,7 @@ func prepareVolumes(p *types.Project) error {
|
||||
p.Services[i].DependsOn[service.Name].Condition == "" {
|
||||
p.Services[i].DependsOn[service.Name] = types.ServiceDependency{
|
||||
Condition: types.ServiceConditionStarted,
|
||||
Required: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -862,10 +863,14 @@ func buildContainerConfigMounts(p types.Project, s types.ServiceConfig) ([]mount
|
||||
target := config.Target
|
||||
if config.Target == "" {
|
||||
target = configsBaseDir + config.Source
|
||||
} else if !isUnixAbs(config.Target) {
|
||||
} else if !isAbsTarget(config.Target) {
|
||||
target = configsBaseDir + config.Target
|
||||
}
|
||||
|
||||
if config.UID != "" || config.GID != "" || config.Mode != nil {
|
||||
logrus.Warn("config `uid`, `gid` and `mode` are not supported, they will be ignored")
|
||||
}
|
||||
|
||||
definedConfig := p.Configs[config.Source]
|
||||
if definedConfig.External.External {
|
||||
return nil, fmt.Errorf("unsupported external config %s", definedConfig.Name)
|
||||
@@ -897,10 +902,14 @@ func buildContainerSecretMounts(p types.Project, s types.ServiceConfig) ([]mount
|
||||
target := secret.Target
|
||||
if secret.Target == "" {
|
||||
target = secretsDir + secret.Source
|
||||
} else if !isUnixAbs(secret.Target) {
|
||||
} else if !isAbsTarget(secret.Target) {
|
||||
target = secretsDir + secret.Target
|
||||
}
|
||||
|
||||
if secret.UID != "" || secret.GID != "" || secret.Mode != nil {
|
||||
logrus.Warn("secrets `uid`, `gid` and `mode` are not supported, they will be ignored")
|
||||
}
|
||||
|
||||
definedSecret := p.Secrets[secret.Source]
|
||||
if definedSecret.External.External {
|
||||
return nil, fmt.Errorf("unsupported external secret %s", definedSecret.Name)
|
||||
@@ -928,10 +937,24 @@ func buildContainerSecretMounts(p types.Project, s types.ServiceConfig) ([]mount
return values, nil
}

func isAbsTarget(p string) bool {
return isUnixAbs(p) || isWindowsAbs(p)
}

func isUnixAbs(p string) bool {
return strings.HasPrefix(p, "/")
}

func isWindowsAbs(p string) bool {
if strings.HasPrefix(p, "\\\\") {
return true
}
if len(p) > 2 && p[1] == ':' {
return p[2] == '\\'
}
return false
}

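As a quick illustration of the helpers introduced above: absolute targets (Unix or Windows style) are left alone, anything else gets prefixed with the secrets/configs base directory by the caller. The sketch below is self-contained, so it repeats the helper bodies from the hunk; the sample targets and the `main` wrapper are purely illustrative.

```go
package main

import (
	"fmt"
	"strings"
)

// Local copies of the helpers from the hunk above, so the sketch compiles on its own.
func isAbsTarget(p string) bool { return isUnixAbs(p) || isWindowsAbs(p) }

func isUnixAbs(p string) bool { return strings.HasPrefix(p, "/") }

func isWindowsAbs(p string) bool {
	if strings.HasPrefix(p, `\\`) {
		return true // UNC path, e.g. \\host\share
	}
	if len(p) > 2 && p[1] == ':' {
		return p[2] == '\\' // drive-letter path, e.g. C:\secrets\token
	}
	return false
}

func main() {
	for _, target := range []string{"/run/secrets/token", `C:\secrets\token`, "token"} {
		// relative targets are the ones the caller prefixes with the secrets/configs base dir
		fmt.Printf("%-20q absolute=%v\n", target, isAbsTarget(target))
	}
}
```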
func buildMount(project types.Project, volume types.ServiceVolumeConfig) (mount.Mount, error) {
|
||||
source := volume.Source
|
||||
// on windows, filepath.IsAbs(source) is false for unix style abs path like /var/run/docker.sock.
|
||||
|
||||
@@ -124,7 +124,7 @@ func TestPrepareVolumes(t *testing.T) {
|
||||
Name: "aService",
|
||||
VolumesFrom: []string{"anotherService"},
|
||||
DependsOn: map[string]composetypes.ServiceDependency{
|
||||
"anotherService": {Condition: composetypes.ServiceConditionHealthy},
|
||||
"anotherService": {Condition: composetypes.ServiceConditionHealthy, Required: true},
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -268,10 +268,15 @@ func NewGraph(project *types.Project, initialStatus ServiceStatus) (*Graph, erro
|
||||
graph.AddVertex(s.Name, s.Name, initialStatus)
|
||||
}
|
||||
|
||||
for _, s := range project.Services {
|
||||
for index, s := range project.Services {
|
||||
for _, name := range s.GetDependencies() {
|
||||
err := graph.AddEdge(s.Name, name)
|
||||
if err != nil {
|
||||
if !s.DependsOn[name].Required {
|
||||
delete(s.DependsOn, name)
|
||||
project.Services[index] = s
|
||||
continue
|
||||
}
|
||||
if api.IsNotFoundError(err) {
|
||||
ds, err := project.GetDisabledService(name)
|
||||
if err == nil {
|
||||
|
||||
pkg/compose/publish.go (new file, 41 lines)
@@ -0,0 +1,41 @@
/*
Copyright 2020 Docker Compose CLI authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package compose

import (
"context"

"github.com/compose-spec/compose-go/types"
"github.com/distribution/distribution/v3/reference"
"github.com/docker/compose/v2/pkg/api"
)

func (s *composeService) Publish(ctx context.Context, project *types.Project, repository string) error {
err := s.Push(ctx, project, api.PushOptions{})
if err != nil {
return err
}

_, err = reference.ParseDockerRef(repository)
if err != nil {
return err
}

// TODO publish project.ComposeFiles

return api.ErrNotImplemented
}
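The new Publish entrypoint above still returns ErrNotImplemented; for now it only pushes the project images and validates the repository string. For context, a small sketch of the kind of validation `reference.ParseDockerRef` performs, using the same import the file declares; the sample repository strings are arbitrary examples, not anything from the diff.

```go
package main

import (
	"fmt"

	"github.com/distribution/distribution/v3/reference"
)

func main() {
	for _, repo := range []string{"docker.io/library/nginx:1.25", "registry.example.com/team/app", "UPPER/Invalid"} {
		ref, err := reference.ParseDockerRef(repo)
		if err != nil {
			// malformed references (e.g. upper-case repository names) are rejected here
			fmt.Printf("%-35s rejected: %v\n", repo, err)
			continue
		}
		// ParseDockerRef also normalizes the reference (default registry, default tag)
		fmt.Printf("%-35s normalized to %s\n", repo, ref.String())
	}
}
```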
@@ -313,8 +313,15 @@ func isServiceImageToBuild(service types.ServiceConfig, services []types.Service
|
||||
return true
|
||||
}
|
||||
|
||||
for _, depService := range services {
|
||||
if depService.Image == service.Image && depService.Build != nil {
|
||||
if service.Image == "" {
|
||||
// N.B. this should be impossible as service must have either `build` or `image` (or both)
|
||||
return false
|
||||
}
|
||||
|
||||
// look through the other services to see if another has a build definition for the same
|
||||
// image name
|
||||
for _, svc := range services {
|
||||
if svc.Image == service.Image && svc.Build != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -48,7 +48,7 @@ func (s *composeService) restart(ctx context.Context, projectName string, option
|
||||
}
|
||||
}
|
||||
|
||||
// ignore depends_on relations which are not impacted by restarting service
|
||||
// ignore depends_on relations which are not impacted by restarting service or not required
|
||||
for i, service := range project.Services {
|
||||
for name, r := range service.DependsOn {
|
||||
if !r.Restart {
|
||||
|
||||
@@ -18,6 +18,7 @@ package compose
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
@@ -36,11 +37,6 @@ func (s *composeService) RunOneOffContainer(ctx context.Context, project *types.
|
||||
return 0, err
|
||||
}
|
||||
|
||||
start := cmd.NewStartOptions()
|
||||
start.OpenStdin = !opts.Detach && opts.Interactive
|
||||
start.Attach = !opts.Detach
|
||||
start.Containers = []string{containerID}
|
||||
|
||||
// remove cancellable context signal handler so we can forward signals to container without compose to exit
|
||||
signal.Reset()
|
||||
|
||||
@@ -49,9 +45,14 @@ func (s *composeService) RunOneOffContainer(ctx context.Context, project *types.
go cmd.ForwardAllSignals(ctx, s.dockerCli, containerID, sigc)
defer signal.Stop(sigc)

err = cmd.RunStart(s.dockerCli, &start)
if sterr, ok := err.(cli.StatusError); ok {
return sterr.StatusCode, nil
err = cmd.RunStart(s.dockerCli, &cmd.StartOptions{
OpenStdin: !opts.Detach && opts.Interactive,
Attach: !opts.Detach,
Containers: []string{containerID},
})
var stErr cli.StatusError
if errors.As(err, &stErr) {
return stErr.StatusCode, nil
}
return 0, err
}

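The switch from a type assertion to `errors.As` above means a wrapped error still surfaces its exit status. A minimal sketch of the pattern with a stand-in status error type (the real code uses `cli.StatusError` from docker/cli):

```go
package main

import (
	"errors"
	"fmt"
)

// statusError is a stand-in for cli.StatusError: an error that carries an exit code.
type statusError struct {
	StatusCode int
}

func (e statusError) Error() string {
	return fmt.Sprintf("exit status %d", e.StatusCode)
}

func exitCode(err error) int {
	// errors.As walks the wrap chain, so fmt.Errorf("...: %w", err) still matches,
	// which a plain type assertion err.(statusError) would miss.
	var stErr statusError
	if errors.As(err, &stErr) {
		return stErr.StatusCode
	}
	return 0
}

func main() {
	wrapped := fmt.Errorf("container run failed: %w", statusError{StatusCode: 42})
	fmt.Println(exitCode(wrapped)) // 42
}
```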
@@ -66,7 +66,7 @@ func createTar(env string, config types.ServiceSecretConfig) (bytes.Buffer, erro
|
||||
target := config.Target
|
||||
if config.Target == "" {
|
||||
target = "/run/secrets/" + config.Source
|
||||
} else if !isUnixAbs(config.Target) {
|
||||
} else if !isAbsTarget(config.Target) {
|
||||
target = "/run/secrets/" + config.Target
|
||||
}
|
||||
|
||||
|
||||
@@ -108,6 +108,7 @@ func (s *composeService) start(ctx context.Context, projectName string, options
|
||||
for _, s := range project.Services {
|
||||
depends[s.Name] = types.ServiceDependency{
|
||||
Condition: getDependencyCondition(s, project),
|
||||
Required: true,
|
||||
}
|
||||
}
|
||||
if options.WaitTimeout > 0 {
|
||||
|
||||
@@ -23,6 +23,8 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/compose/v2/internal/tracing"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/compose/v2/pkg/api"
|
||||
@@ -31,7 +33,7 @@ import (
|
||||
)
|
||||
|
||||
func (s *composeService) Up(ctx context.Context, project *types.Project, options api.UpOptions) error {
|
||||
err := progress.Run(ctx, func(ctx context.Context) error {
|
||||
err := progress.Run(ctx, tracing.SpanWrapFunc("project/up", tracing.ProjectOptions(project), func(ctx context.Context) error {
|
||||
err := s.create(ctx, project, options.Create)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -40,7 +42,7 @@ func (s *composeService) Up(ctx context.Context, project *types.Project, options
|
||||
return s.start(ctx, project.Name, options.Start, nil)
|
||||
}
|
||||
return nil
|
||||
}, s.stdinfo())
|
||||
}), s.stdinfo())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -52,7 +52,7 @@ func (s *composeService) Viz(_ context.Context, project *types.Project, opts api
|
||||
// dot is the perfect layout for this use case since graph is directed and hierarchical
|
||||
graphBuilder.WriteString(opts.Indentation + "layout=dot;\n")
|
||||
|
||||
addNodes(&graphBuilder, graph, &opts)
|
||||
addNodes(&graphBuilder, graph, project.Name, &opts)
|
||||
graphBuilder.WriteByte('\n')
|
||||
|
||||
addEdges(&graphBuilder, graph, &opts)
|
||||
@@ -63,7 +63,7 @@ func (s *composeService) Viz(_ context.Context, project *types.Project, opts api
|
||||
|
||||
// addNodes adds the corresponding graphviz representation of all the nodes in the given graph to the graphBuilder
|
||||
// returns the same graphBuilder
|
||||
func addNodes(graphBuilder *strings.Builder, graph vizGraph, opts *api.VizOptions) *strings.Builder {
|
||||
func addNodes(graphBuilder *strings.Builder, graph vizGraph, projectName string, opts *api.VizOptions) *strings.Builder {
|
||||
for serviceNode := range graph {
|
||||
// write:
|
||||
// "service name" [style="filled" label<<font point-size="15">service name</font>
|
||||
@@ -107,7 +107,7 @@ func addNodes(graphBuilder *strings.Builder, graph vizGraph, opts *api.VizOption
|
||||
if opts.IncludeImageName {
|
||||
graphBuilder.WriteString("<font point-size=\"10\">")
|
||||
graphBuilder.WriteString("<br/><br/><b>Image:</b><br/>")
|
||||
graphBuilder.WriteString(serviceNode.Image)
|
||||
graphBuilder.WriteString(api.GetImageNameOrDefault(*serviceNode, projectName))
|
||||
graphBuilder.WriteString("</font>")
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
|
||||
Copyright 2020 Docker Compose CLI authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
@@ -17,13 +17,19 @@ package compose
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
moby "github.com/docker/docker/api/types"
|
||||
|
||||
"github.com/docker/compose/v2/internal/sync"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/jonboulle/clockwork"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
@@ -32,7 +38,6 @@ import (
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/docker/compose/v2/pkg/api"
|
||||
"github.com/docker/compose/v2/pkg/utils"
|
||||
"github.com/docker/compose/v2/pkg/watch"
|
||||
)
|
||||
|
||||
@@ -40,9 +45,11 @@ type DevelopmentConfig struct {
|
||||
Watch []Trigger `json:"watch,omitempty"`
|
||||
}
|
||||
|
||||
type WatchAction string
|
||||
|
||||
const (
|
||||
WatchActionSync = "sync"
|
||||
WatchActionRebuild = "rebuild"
|
||||
WatchActionSync WatchAction = "sync"
|
||||
WatchActionRebuild WatchAction = "rebuild"
|
||||
)
|
||||
|
||||
type Trigger struct {
|
||||
@@ -52,53 +59,44 @@ type Trigger struct {
|
||||
Ignore []string `json:"ignore,omitempty"`
|
||||
}
|
||||
|
||||
const quietPeriod = 2 * time.Second
|
||||
const quietPeriod = 500 * time.Millisecond
|
||||
|
||||
// fileMapping contains the Compose service and modified host system path.
|
||||
//
|
||||
// For file sync, the container path is also included.
|
||||
// For rebuild, there is no container path, so it is always empty.
|
||||
type fileMapping struct {
|
||||
// Service that the file event is for.
|
||||
Service string
|
||||
// HostPath that was created/modified/deleted outside the container.
|
||||
//
|
||||
// This is the path as seen from the user's perspective, e.g.
|
||||
// - C:\Users\moby\Documents\hello-world\main.go
|
||||
// - /Users/moby/Documents/hello-world/main.go
|
||||
HostPath string
|
||||
// ContainerPath for the target file inside the container (only populated
|
||||
// for sync events, not rebuild).
|
||||
//
|
||||
// This is the path as used in Docker CLI commands, e.g.
|
||||
// - /workdir/main.go
|
||||
ContainerPath string
|
||||
// fileEvent contains the Compose service and modified host system path.
|
||||
type fileEvent struct {
|
||||
sync.PathMapping
|
||||
Action WatchAction
|
||||
}
|
||||
|
||||
// getSyncImplementation returns the tar-based syncer unless it has been explicitly
|
||||
// disabled with `COMPOSE_EXPERIMENTAL_WATCH_TAR=0`. Note that the absence of the env
|
||||
// var means enabled.
|
||||
func (s *composeService) getSyncImplementation(project *types.Project) sync.Syncer {
|
||||
var useTar bool
|
||||
if useTarEnv, ok := os.LookupEnv("COMPOSE_EXPERIMENTAL_WATCH_TAR"); ok {
|
||||
useTar, _ = strconv.ParseBool(useTarEnv)
|
||||
} else {
|
||||
useTar = true
|
||||
}
|
||||
if useTar {
|
||||
return sync.NewTar(project.Name, tarDockerClient{s: s})
|
||||
}
|
||||
|
||||
return sync.NewDockerCopy(project.Name, s, s.stdinfo())
|
||||
}
|
||||
|
||||
func (s *composeService) Watch(ctx context.Context, project *types.Project, services []string, _ api.WatchOptions) error { //nolint: gocyclo
|
||||
needRebuild := make(chan fileMapping)
|
||||
needSync := make(chan fileMapping)
|
||||
|
||||
_, err := s.prepareProjectForBuild(project, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
eg.Go(func() error {
|
||||
clock := clockwork.NewRealClock()
|
||||
debounce(ctx, clock, quietPeriod, needRebuild, s.makeRebuildFn(ctx, project))
|
||||
return nil
|
||||
})
|
||||
|
||||
eg.Go(s.makeSyncFn(ctx, project, needSync))
|
||||
|
||||
ss, err := project.GetServices(services...)
|
||||
if err != nil {
|
||||
if err := project.ForServices(services); err != nil {
|
||||
return err
|
||||
}
|
||||
syncer := s.getSyncImplementation(project)
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
watching := false
|
||||
for _, service := range ss {
|
||||
for i := range project.Services {
|
||||
service := project.Services[i]
|
||||
config, err := loadDevelopmentConfig(service, project)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -122,7 +120,10 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
|
||||
continue
|
||||
}
|
||||
|
||||
name := service.Name
|
||||
// set the service to always be built - watch triggers `Up()` when it receives a rebuild event
|
||||
service.PullPolicy = types.PullPolicyBuild
|
||||
project.Services[i] = service
|
||||
|
||||
dockerIgnores, err := watch.LoadDockerIgnore(service.Build.Context)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -164,7 +165,7 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
|
||||
|
||||
eg.Go(func() error {
|
||||
defer watcher.Close() //nolint:errcheck
|
||||
return s.watch(ctx, name, watcher, config.Watch, needSync, needRebuild)
|
||||
return s.watch(ctx, project, service.Name, watcher, syncer, config.Watch)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -175,7 +176,17 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
func (s *composeService) watch(ctx context.Context, name string, watcher watch.Notify, triggers []Trigger, needSync chan fileMapping, needRebuild chan fileMapping) error {
|
||||
func (s *composeService) watch(
|
||||
ctx context.Context,
|
||||
project *types.Project,
|
||||
name string,
|
||||
watcher watch.Notify,
|
||||
syncer sync.Syncer,
|
||||
triggers []Trigger,
|
||||
) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
ignores := make([]watch.PathMatcher, len(triggers))
|
||||
for i, trigger := range triggers {
|
||||
ignore, err := watch.NewDockerPatternMatcher(trigger.Path, trigger.Ignore)
|
||||
@@ -185,60 +196,82 @@ func (s *composeService) watch(ctx context.Context, name string, watcher watch.N
|
||||
ignores[i] = ignore
|
||||
}
|
||||
|
||||
WATCH:
|
||||
events := make(chan fileEvent)
|
||||
batchEvents := batchDebounceEvents(ctx, s.clock, quietPeriod, events)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case batch := <-batchEvents:
|
||||
start := time.Now()
|
||||
logrus.Debugf("batch start: service[%s] count[%d]", name, len(batch))
|
||||
if err := s.handleWatchBatch(ctx, project, name, batch, syncer); err != nil {
|
||||
logrus.Warnf("Error handling changed files for service %s: %v", name, err)
|
||||
}
|
||||
logrus.Debugf("batch complete: service[%s] duration[%s] count[%d]",
|
||||
name, time.Since(start), len(batch))
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case event := <-watcher.Events():
|
||||
hostPath := event.Path()
|
||||
|
||||
for i, trigger := range triggers {
|
||||
logrus.Debugf("change detected on %s - comparing with %s", hostPath, trigger.Path)
|
||||
if watch.IsChild(trigger.Path, hostPath) {
|
||||
|
||||
match, err := ignores[i].Matches(hostPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if match {
|
||||
logrus.Debugf("%s is matching ignore pattern", hostPath)
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Fprintf(s.stdinfo(), "change detected on %s\n", hostPath)
|
||||
|
||||
f := fileMapping{
|
||||
HostPath: hostPath,
|
||||
Service: name,
|
||||
}
|
||||
|
||||
switch trigger.Action {
|
||||
case WatchActionSync:
|
||||
logrus.Debugf("modified file %s triggered sync", hostPath)
|
||||
rel, err := filepath.Rel(trigger.Path, hostPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// always use Unix-style paths for inside the container
|
||||
f.ContainerPath = path.Join(trigger.Target, rel)
|
||||
needSync <- f
|
||||
case WatchActionRebuild:
|
||||
logrus.Debugf("modified file %s requires image to be rebuilt", hostPath)
|
||||
needRebuild <- f
|
||||
default:
|
||||
return fmt.Errorf("watch action %q is not supported", trigger)
|
||||
}
|
||||
continue WATCH
|
||||
}
|
||||
}
|
||||
case err := <-watcher.Errors():
|
||||
return err
|
||||
case event := <-watcher.Events():
|
||||
hostPath := event.Path()
|
||||
for i, trigger := range triggers {
|
||||
logrus.Debugf("change for %s - comparing with %s", hostPath, trigger.Path)
|
||||
if fileEvent := maybeFileEvent(trigger, hostPath, ignores[i]); fileEvent != nil {
|
||||
events <- *fileEvent
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// maybeFileEvent returns a file event object if hostPath is valid for the provided trigger and ignore
|
||||
// rules.
|
||||
//
|
||||
// Any errors are logged as warnings and nil (no file event) is returned.
|
||||
func maybeFileEvent(trigger Trigger, hostPath string, ignore watch.PathMatcher) *fileEvent {
|
||||
if !watch.IsChild(trigger.Path, hostPath) {
|
||||
return nil
|
||||
}
|
||||
isIgnored, err := ignore.Matches(hostPath)
|
||||
if err != nil {
|
||||
logrus.Warnf("error ignore matching %q: %v", hostPath, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
if isIgnored {
|
||||
logrus.Debugf("%s is matching ignore pattern", hostPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
var containerPath string
|
||||
if trigger.Target != "" {
|
||||
rel, err := filepath.Rel(trigger.Path, hostPath)
|
||||
if err != nil {
|
||||
logrus.Warnf("error making %s relative to %s: %v", hostPath, trigger.Path, err)
|
||||
return nil
|
||||
}
|
||||
// always use Unix-style paths for inside the container
|
||||
containerPath = path.Join(trigger.Target, rel)
|
||||
}
|
||||
|
||||
return &fileEvent{
|
||||
Action: WatchAction(trigger.Action),
|
||||
PathMapping: sync.PathMapping{
|
||||
HostPath: hostPath,
|
||||
ContainerPath: containerPath,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
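The host-to-container path translation in maybeFileEvent above boils down to `filepath.Rel` against the trigger path followed by a Unix-style `path.Join` onto the target. A small illustration with made-up paths (the printed result assumes a Unix host):

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// Hypothetical trigger: watch ./web on the host, sync into /app in the container.
	triggerPath := "/home/moby/project/web"
	target := "/app"

	hostPath := "/home/moby/project/web/static/index.html"

	rel, err := filepath.Rel(triggerPath, hostPath)
	if err != nil {
		panic(err)
	}
	// path.Join (not filepath.Join), mirroring the "always use Unix-style paths
	// for inside the container" comment in the hunk above.
	containerPath := path.Join(target, rel)
	fmt.Println(containerPath) // /app/static/index.html
}
```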
func loadDevelopmentConfig(service types.ServiceConfig, project *types.Project) (*DevelopmentConfig, error) {
|
||||
var config DevelopmentConfig
|
||||
y, ok := service.Extensions["x-develop"]
|
||||
@@ -249,16 +282,25 @@ func loadDevelopmentConfig(service types.ServiceConfig, project *types.Project)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
baseDir, err := filepath.EvalSymlinks(project.WorkingDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("resolving symlink for %q: %w", project.WorkingDir, err)
|
||||
}
|
||||
|
||||
for i, trigger := range config.Watch {
|
||||
if !filepath.IsAbs(trigger.Path) {
|
||||
trigger.Path = filepath.Join(project.WorkingDir, trigger.Path)
|
||||
trigger.Path = filepath.Join(baseDir, trigger.Path)
|
||||
}
|
||||
if p, err := filepath.EvalSymlinks(trigger.Path); err == nil {
|
||||
// this might fail because the path doesn't exist, etc.
|
||||
trigger.Path = p
|
||||
}
|
||||
trigger.Path = filepath.Clean(trigger.Path)
|
||||
if trigger.Path == "" {
|
||||
return nil, errors.New("watch rules MUST define a path")
|
||||
}
|
||||
|
||||
if trigger.Action == WatchActionRebuild && service.Build == nil {
|
||||
if trigger.Action == string(WatchActionRebuild) && service.Build == nil {
|
||||
return nil, fmt.Errorf("service %s doesn't have a build section, can't apply 'rebuild' on watch", service.Name)
|
||||
}
|
||||
|
||||
@@ -267,125 +309,54 @@ func loadDevelopmentConfig(service types.ServiceConfig, project *types.Project)
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
func (s *composeService) makeRebuildFn(ctx context.Context, project *types.Project) func(services rebuildServices) {
|
||||
for i, service := range project.Services {
|
||||
service.PullPolicy = types.PullPolicyBuild
|
||||
project.Services[i] = service
|
||||
}
|
||||
return func(services rebuildServices) {
|
||||
serviceNames := make([]string, 0, len(services))
|
||||
allPaths := make(utils.Set[string])
|
||||
for serviceName, paths := range services {
|
||||
serviceNames = append(serviceNames, serviceName)
|
||||
for p := range paths {
|
||||
allPaths.Add(p)
|
||||
// batchDebounceEvents groups identical file events within a sliding time window and writes the results to the returned
|
||||
// channel.
|
||||
//
|
||||
// The returned channel is closed when the debouncer is stopped via context cancellation or by closing the input channel.
|
||||
func batchDebounceEvents(ctx context.Context, clock clockwork.Clock, delay time.Duration, input <-chan fileEvent) <-chan []fileEvent {
|
||||
out := make(chan []fileEvent)
|
||||
go func() {
|
||||
defer close(out)
|
||||
seen := make(map[fileEvent]time.Time)
|
||||
flushEvents := func() {
|
||||
if len(seen) == 0 {
|
||||
return
|
||||
}
|
||||
events := make([]fileEvent, 0, len(seen))
|
||||
for e := range seen {
|
||||
events = append(events, e)
|
||||
}
|
||||
// sort batch by oldest -> newest
|
||||
// (if an event is seen > 1 per batch, it gets the latest timestamp)
|
||||
sort.SliceStable(events, func(i, j int) bool {
|
||||
x := events[i]
|
||||
y := events[j]
|
||||
return seen[x].Before(seen[y])
|
||||
})
|
||||
out <- events
|
||||
seen = make(map[fileEvent]time.Time)
|
||||
}
|
||||
|
||||
fmt.Fprintf(
|
||||
s.stdinfo(),
|
||||
"Rebuilding %s after changes were detected:%s\n",
|
||||
strings.Join(serviceNames, ", "),
|
||||
strings.Join(append([]string{""}, allPaths.Elements()...), "\n - "),
|
||||
)
|
||||
err := s.Up(ctx, project, api.UpOptions{
|
||||
Create: api.CreateOptions{
|
||||
Services: serviceNames,
|
||||
Inherit: true,
|
||||
},
|
||||
Start: api.StartOptions{
|
||||
Services: serviceNames,
|
||||
Project: project,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Fprintf(s.stderr(), "Application failed to start after update\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *composeService) makeSyncFn(ctx context.Context, project *types.Project, needSync <-chan fileMapping) func() error {
|
||||
return func() error {
|
||||
t := clock.NewTicker(delay)
|
||||
defer t.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case opt := <-needSync:
|
||||
service, err := project.GetService(opt.Service)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
scale := 1
|
||||
if service.Deploy != nil && service.Deploy.Replicas != nil {
|
||||
scale = int(*service.Deploy.Replicas)
|
||||
}
|
||||
|
||||
if fi, statErr := os.Stat(opt.HostPath); statErr == nil {
|
||||
if fi.IsDir() {
|
||||
for i := 1; i <= scale; i++ {
|
||||
_, err := s.Exec(ctx, project.Name, api.RunOptions{
|
||||
Service: opt.Service,
|
||||
Command: []string{"mkdir", "-p", opt.ContainerPath},
|
||||
Index: i,
|
||||
})
|
||||
if err != nil {
|
||||
logrus.Warnf("failed to create %q from %s: %v", opt.ContainerPath, opt.Service, err)
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(s.stdinfo(), "%s created\n", opt.ContainerPath)
|
||||
} else {
|
||||
err := s.Copy(ctx, project.Name, api.CopyOptions{
|
||||
Source: opt.HostPath,
|
||||
Destination: fmt.Sprintf("%s:%s", opt.Service, opt.ContainerPath),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(s.stdinfo(), "%s updated\n", opt.ContainerPath)
|
||||
}
|
||||
} else if errors.Is(statErr, fs.ErrNotExist) {
|
||||
for i := 1; i <= scale; i++ {
|
||||
_, err := s.Exec(ctx, project.Name, api.RunOptions{
|
||||
Service: opt.Service,
|
||||
Command: []string{"rm", "-rf", opt.ContainerPath},
|
||||
Index: i,
|
||||
})
|
||||
if err != nil {
|
||||
logrus.Warnf("failed to delete %q from %s: %v", opt.ContainerPath, opt.Service, err)
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(s.stdinfo(), "%s deleted from service\n", opt.ContainerPath)
|
||||
return
|
||||
case <-t.Chan():
|
||||
flushEvents()
|
||||
case e, ok := <-input:
|
||||
if !ok {
|
||||
// input channel was closed
|
||||
flushEvents()
|
||||
return
|
||||
}
|
||||
seen[e] = time.Now()
|
||||
t.Reset(delay)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type rebuildServices map[string]utils.Set[string]
|
||||
|
||||
func debounce(ctx context.Context, clock clockwork.Clock, delay time.Duration, input <-chan fileMapping, fn func(services rebuildServices)) {
|
||||
services := make(rebuildServices)
|
||||
t := clock.NewTimer(delay)
|
||||
defer t.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-t.Chan():
|
||||
if len(services) > 0 {
|
||||
go fn(services)
|
||||
services = make(rebuildServices)
|
||||
}
|
||||
case e := <-input:
|
||||
t.Reset(delay)
|
||||
svc, ok := services[e.Service]
|
||||
if !ok {
|
||||
svc = make(utils.Set[string])
|
||||
services[e.Service] = svc
|
||||
}
|
||||
svc.Add(e.HostPath)
|
||||
}
|
||||
}
|
||||
}()
|
||||
return out
|
||||
}
|
||||
|
||||
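batchDebounceEvents above is essentially a resettable timer over a set of pending events. Here is a hedged, generic sketch of the same idea using a plain time.Timer instead of the clockwork clock; unlike the map-based implementation above it keeps arrival order and does not deduplicate, and all names are illustrative.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// batchDebounce collects values from in and emits them as one batch once no new
// value has arrived for quietPeriod, or when the input channel is closed.
func batchDebounce(ctx context.Context, quietPeriod time.Duration, in <-chan string) <-chan []string {
	out := make(chan []string)
	go func() {
		defer close(out)
		var pending []string
		t := time.NewTimer(quietPeriod)
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				if len(pending) > 0 {
					out <- pending
					pending = nil
				}
			case v, ok := <-in:
				if !ok {
					if len(pending) > 0 {
						out <- pending
					}
					return
				}
				pending = append(pending, v)
				// drain a possibly-fired timer before resetting it
				if !t.Stop() {
					select {
					case <-t.C:
					default:
					}
				}
				t.Reset(quietPeriod)
			}
		}
	}()
	return out
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	in := make(chan string)
	batches := batchDebounce(ctx, 100*time.Millisecond, in)

	go func() {
		in <- "main.go"
		in <- "util.go"
		close(in)
	}()

	fmt.Println(<-batches) // typically: [main.go util.go]
}
```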
func checkIfPathAlreadyBindMounted(watchPath string, volumes []types.ServiceVolumeConfig) bool {
|
||||
@@ -396,3 +367,149 @@ func checkIfPathAlreadyBindMounted(watchPath string, volumes []types.ServiceVolu
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type tarDockerClient struct {
|
||||
s *composeService
|
||||
}
|
||||
|
||||
func (t tarDockerClient) ContainersForService(ctx context.Context, projectName string, serviceName string) ([]moby.Container, error) {
|
||||
containers, err := t.s.getContainers(ctx, projectName, oneOffExclude, true, serviceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return containers, nil
|
||||
}
|
||||
|
||||
func (t tarDockerClient) Exec(ctx context.Context, containerID string, cmd []string, in io.Reader) error {
|
||||
execCfg := moby.ExecConfig{
|
||||
Cmd: cmd,
|
||||
AttachStdout: false,
|
||||
AttachStderr: true,
|
||||
AttachStdin: in != nil,
|
||||
Tty: false,
|
||||
}
|
||||
execCreateResp, err := t.s.apiClient().ContainerExecCreate(ctx, containerID, execCfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
startCheck := moby.ExecStartCheck{Tty: false, Detach: false}
|
||||
conn, err := t.s.apiClient().ContainerExecAttach(ctx, execCreateResp.ID, startCheck)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
var eg errgroup.Group
|
||||
if in != nil {
|
||||
eg.Go(func() error {
|
||||
defer func() {
|
||||
_ = conn.CloseWrite()
|
||||
}()
|
||||
_, err := io.Copy(conn.Conn, in)
|
||||
return err
|
||||
})
|
||||
}
|
||||
eg.Go(func() error {
|
||||
_, err := io.Copy(t.s.stdinfo(), conn.Reader)
|
||||
return err
|
||||
})
|
||||
|
||||
err = t.s.apiClient().ContainerExecStart(ctx, execCreateResp.ID, startCheck)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// although the errgroup is not tied directly to the context, the operations
|
||||
// in it are reading/writing to the connection, which is tied to the context,
|
||||
// so they won't block indefinitely
|
||||
if err := eg.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
execResult, err := t.s.apiClient().ContainerExecInspect(ctx, execCreateResp.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if execResult.Running {
|
||||
return errors.New("process still running")
|
||||
}
|
||||
if execResult.ExitCode != 0 {
|
||||
return fmt.Errorf("exit code %d", execResult.ExitCode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *composeService) handleWatchBatch(
|
||||
ctx context.Context,
|
||||
project *types.Project,
|
||||
serviceName string,
|
||||
batch []fileEvent,
|
||||
syncer sync.Syncer,
|
||||
) error {
|
||||
pathMappings := make([]sync.PathMapping, len(batch))
|
||||
for i := range batch {
|
||||
if batch[i].Action == WatchActionRebuild {
|
||||
fmt.Fprintf(
|
||||
s.stdinfo(),
|
||||
"Rebuilding %s after changes were detected:%s\n",
|
||||
serviceName,
|
||||
strings.Join(append([]string{""}, batch[i].HostPath), "\n - "),
|
||||
)
|
||||
err := s.Up(ctx, project, api.UpOptions{
|
||||
Create: api.CreateOptions{
|
||||
Services: []string{serviceName},
|
||||
Inherit: true,
|
||||
},
|
||||
Start: api.StartOptions{
|
||||
Services: []string{serviceName},
|
||||
Project: project,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Fprintf(s.stderr(), "Application failed to start after update\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
pathMappings[i] = batch[i].PathMapping
|
||||
}
|
||||
|
||||
writeWatchSyncMessage(s.stdinfo(), serviceName, pathMappings)
|
||||
|
||||
service, err := project.GetService(serviceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := syncer.Sync(ctx, service, pathMappings); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeWatchSyncMessage prints out a message about the sync for the changed paths.
|
||||
func writeWatchSyncMessage(w io.Writer, serviceName string, pathMappings []sync.PathMapping) {
|
||||
const maxPathsToShow = 10
|
||||
if len(pathMappings) <= maxPathsToShow || logrus.IsLevelEnabled(logrus.DebugLevel) {
|
||||
hostPathsToSync := make([]string, len(pathMappings))
|
||||
for i := range pathMappings {
|
||||
hostPathsToSync[i] = pathMappings[i].HostPath
|
||||
}
|
||||
fmt.Fprintf(
|
||||
w,
|
||||
"Syncing %s after changes were detected:%s\n",
|
||||
serviceName,
|
||||
strings.Join(append([]string{""}, hostPathsToSync...), "\n - "),
|
||||
)
|
||||
} else {
|
||||
hostPathsToSync := make([]string, len(pathMappings))
|
||||
for i := range pathMappings {
|
||||
hostPathsToSync[i] = pathMappings[i].HostPath
|
||||
}
|
||||
fmt.Fprintf(
|
||||
w,
|
||||
"Syncing %s after %d changes were detected\n",
|
||||
serviceName,
|
||||
len(pathMappings),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,45 +16,60 @@ package compose
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/compose/v2/pkg/watch"
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/docker/compose/v2/pkg/mocks"
|
||||
moby "github.com/docker/docker/api/types"
|
||||
"github.com/golang/mock/gomock"
|
||||
|
||||
"github.com/jonboulle/clockwork"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/docker/compose/v2/internal/sync"
|
||||
|
||||
"github.com/docker/compose/v2/pkg/watch"
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
func Test_debounce(t *testing.T) {
|
||||
ch := make(chan fileMapping)
|
||||
var (
|
||||
ran int
|
||||
got []string
|
||||
)
|
||||
func TestDebounceBatching(t *testing.T) {
|
||||
ch := make(chan fileEvent)
|
||||
clock := clockwork.NewFakeClock()
|
||||
ctx, stop := context.WithCancel(context.Background())
|
||||
t.Cleanup(stop)
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
eg.Go(func() error {
|
||||
debounce(ctx, clock, quietPeriod, ch, func(services rebuildServices) {
|
||||
for svc := range services {
|
||||
got = append(got, svc)
|
||||
}
|
||||
ran++
|
||||
stop()
|
||||
})
|
||||
return nil
|
||||
})
|
||||
|
||||
eventBatchCh := batchDebounceEvents(ctx, clock, quietPeriod, ch)
|
||||
for i := 0; i < 100; i++ {
|
||||
ch <- fileMapping{Service: "test"}
|
||||
var action WatchAction = "a"
|
||||
if i%2 == 0 {
|
||||
action = "b"
|
||||
}
|
||||
ch <- fileEvent{Action: action}
|
||||
}
|
||||
assert.Equal(t, ran, 0)
|
||||
// we sent 100 events + the debouncer
|
||||
clock.BlockUntil(101)
|
||||
clock.Advance(quietPeriod)
|
||||
err := eg.Wait()
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, ran, 1)
|
||||
assert.DeepEqual(t, got, []string{"test"})
|
||||
select {
|
||||
case batch := <-eventBatchCh:
|
||||
require.ElementsMatch(t, batch, []fileEvent{
|
||||
{Action: "a"},
|
||||
{Action: "b"},
|
||||
})
|
||||
case <-time.After(50 * time.Millisecond):
|
||||
t.Fatal("timed out waiting for events")
|
||||
}
|
||||
clock.BlockUntil(1)
|
||||
clock.Advance(quietPeriod)
|
||||
|
||||
// there should only be a single batch
|
||||
select {
|
||||
case batch := <-eventBatchCh:
|
||||
t.Fatalf("unexpected events: %v", batch)
|
||||
case <-time.After(50 * time.Millisecond):
|
||||
// channel is empty
|
||||
}
|
||||
}
|
||||
|
||||
type testWatcher struct {
|
||||
@@ -78,73 +93,106 @@ func (t testWatcher) Errors() chan error {
|
||||
return t.errors
|
||||
}
|
||||
|
||||
func Test_sync(t *testing.T) {
|
||||
needSync := make(chan fileMapping)
|
||||
needRebuild := make(chan fileMapping)
|
||||
ctx, cancelFunc := context.WithCancel(context.TODO())
|
||||
defer cancelFunc()
|
||||
func TestWatch_Sync(t *testing.T) {
|
||||
mockCtrl := gomock.NewController(t)
|
||||
cli := mocks.NewMockCli(mockCtrl)
|
||||
cli.EXPECT().Err().Return(os.Stderr).AnyTimes()
|
||||
apiClient := mocks.NewMockAPIClient(mockCtrl)
|
||||
apiClient.EXPECT().ContainerList(gomock.Any(), gomock.Any()).Return([]moby.Container{
|
||||
testContainer("test", "123", false),
|
||||
}, nil).AnyTimes()
|
||||
cli.EXPECT().Client().Return(apiClient).AnyTimes()
|
||||
|
||||
run := func() watch.Notify {
|
||||
watcher := testWatcher{
|
||||
events: make(chan watch.FileEvent, 1),
|
||||
errors: make(chan error),
|
||||
}
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancelFunc)
|
||||
|
||||
go func() {
|
||||
cli, err := command.NewDockerCli()
|
||||
assert.NilError(t, err)
|
||||
|
||||
service := composeService{
|
||||
dockerCli: cli,
|
||||
}
|
||||
err = service.watch(ctx, "test", watcher, []Trigger{
|
||||
{
|
||||
Path: "/src",
|
||||
Action: "sync",
|
||||
Target: "/work",
|
||||
Ignore: []string{"ignore"},
|
||||
},
|
||||
{
|
||||
Path: "/",
|
||||
Action: "rebuild",
|
||||
},
|
||||
}, needSync, needRebuild)
|
||||
assert.NilError(t, err)
|
||||
}()
|
||||
return watcher
|
||||
proj := types.Project{
|
||||
Services: []types.ServiceConfig{
|
||||
{
|
||||
Name: "test",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
t.Run("synchronize file", func(t *testing.T) {
|
||||
watcher := run()
|
||||
watcher.Events() <- watch.NewFileEvent("/src/changed")
|
||||
select {
|
||||
case actual := <-needSync:
|
||||
assert.DeepEqual(t, fileMapping{Service: "test", HostPath: "/src/changed", ContainerPath: "/work/changed"}, actual)
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Error("timeout")
|
||||
}
|
||||
})
|
||||
watcher := testWatcher{
|
||||
events: make(chan watch.FileEvent),
|
||||
errors: make(chan error),
|
||||
}
|
||||
|
||||
t.Run("ignore", func(t *testing.T) {
|
||||
watcher := run()
|
||||
watcher.Events() <- watch.NewFileEvent("/src/ignore")
|
||||
select {
|
||||
case <-needSync:
|
||||
t.Error("file event should have been ignored")
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
// expected
|
||||
syncer := newFakeSyncer()
|
||||
clock := clockwork.NewFakeClock()
|
||||
go func() {
|
||||
service := composeService{
|
||||
dockerCli: cli,
|
||||
clock: clock,
|
||||
}
|
||||
})
|
||||
err := service.watch(ctx, &proj, "test", watcher, syncer, []Trigger{
|
||||
{
|
||||
Path: "/sync",
|
||||
Action: "sync",
|
||||
Target: "/work",
|
||||
Ignore: []string{"ignore"},
|
||||
},
|
||||
{
|
||||
Path: "/rebuild",
|
||||
Action: "rebuild",
|
||||
},
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
}()
|
||||
|
||||
t.Run("rebuild", func(t *testing.T) {
|
||||
watcher := run()
|
||||
watcher.Events() <- watch.NewFileEvent("/dependencies.yaml")
|
||||
select {
|
||||
case event := <-needRebuild:
|
||||
assert.Equal(t, "test", event.Service)
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Error("timeout")
|
||||
}
|
||||
})
|
||||
watcher.Events() <- watch.NewFileEvent("/sync/changed")
|
||||
watcher.Events() <- watch.NewFileEvent("/sync/changed/sub")
|
||||
clock.BlockUntil(3)
|
||||
clock.Advance(quietPeriod)
|
||||
select {
|
||||
case actual := <-syncer.synced:
|
||||
require.ElementsMatch(t, []sync.PathMapping{
|
||||
{HostPath: "/sync/changed", ContainerPath: "/work/changed"},
|
||||
{HostPath: "/sync/changed/sub", ContainerPath: "/work/changed/sub"},
|
||||
}, actual)
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Error("timeout")
|
||||
}
|
||||
|
||||
watcher.Events() <- watch.NewFileEvent("/sync/ignore")
|
||||
watcher.Events() <- watch.NewFileEvent("/sync/ignore/sub")
|
||||
watcher.Events() <- watch.NewFileEvent("/sync/changed")
|
||||
clock.BlockUntil(4)
|
||||
clock.Advance(quietPeriod)
|
||||
select {
|
||||
case actual := <-syncer.synced:
|
||||
require.ElementsMatch(t, []sync.PathMapping{
|
||||
{HostPath: "/sync/changed", ContainerPath: "/work/changed"},
|
||||
}, actual)
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Error("timed out waiting for events")
|
||||
}
|
||||
|
||||
watcher.Events() <- watch.NewFileEvent("/rebuild")
|
||||
watcher.Events() <- watch.NewFileEvent("/sync/changed")
|
||||
clock.BlockUntil(4)
|
||||
clock.Advance(quietPeriod)
|
||||
select {
|
||||
case batch := <-syncer.synced:
|
||||
t.Fatalf("received unexpected events: %v", batch)
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
// expected
|
||||
}
|
||||
// TODO: there's not a great way to assert that the rebuild attempt happened
|
||||
}
|
||||
|
||||
type fakeSyncer struct {
|
||||
synced chan []sync.PathMapping
|
||||
}
|
||||
|
||||
func newFakeSyncer() *fakeSyncer {
|
||||
return &fakeSyncer{
|
||||
synced: make(chan []sync.PathMapping),
|
||||
}
|
||||
}
|
||||
|
||||
func (f *fakeSyncer) Sync(_ context.Context, _ types.ServiceConfig, paths []sync.PathMapping) error {
|
||||
f.synced <- paths
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -19,7 +19,9 @@ package e2e
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -36,8 +38,8 @@ func TestLocalComposeBuild(t *testing.T) {
|
||||
|
||||
t.Run(env+" build named and unnamed images", func(t *testing.T) {
|
||||
// ensure local test run does not reuse a previously built image
|
||||
c.RunDockerOrExitError(t, "rmi", "build-test-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "custom-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "-f", "build-test-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "-f", "custom-nginx")
|
||||
|
||||
res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "build")
|
||||
|
||||
@@ -48,8 +50,8 @@ func TestLocalComposeBuild(t *testing.T) {
|
||||
|
||||
t.Run(env+" build with build-arg", func(t *testing.T) {
|
||||
// ensure local test run does not reuse previously build image
|
||||
c.RunDockerOrExitError(t, "rmi", "build-test-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "custom-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "-f", "build-test-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "-f", "custom-nginx")
|
||||
|
||||
c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "build", "--build-arg", "FOO=BAR")
|
||||
|
||||
@@ -59,8 +61,8 @@ func TestLocalComposeBuild(t *testing.T) {
|
||||
|
||||
t.Run(env+" build with build-arg set by env", func(t *testing.T) {
|
||||
// ensure local test run does not reuse previously build image
|
||||
c.RunDockerOrExitError(t, "rmi", "build-test-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "custom-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "-f", "build-test-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "-f", "custom-nginx")
|
||||
|
||||
icmd.RunCmd(c.NewDockerComposeCmd(t,
|
||||
"--project-directory",
|
||||
@@ -70,7 +72,7 @@ func TestLocalComposeBuild(t *testing.T) {
|
||||
"FOO"),
|
||||
func(cmd *icmd.Cmd) {
|
||||
cmd.Env = append(cmd.Env, "FOO=BAR")
|
||||
})
|
||||
}).Assert(t, icmd.Success)
|
||||
|
||||
res := c.RunDockerCmd(t, "image", "inspect", "build-test-nginx")
|
||||
res.Assert(t, icmd.Expected{Out: `"FOO": "BAR"`})
|
||||
@@ -90,8 +92,9 @@ func TestLocalComposeBuild(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run(env+" build as part of up", func(t *testing.T) {
|
||||
c.RunDockerOrExitError(t, "rmi", "build-test-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "custom-nginx")
|
||||
// ensure local test run does not reuse previously build image
|
||||
c.RunDockerOrExitError(t, "rmi", "-f", "build-test-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "-f", "custom-nginx")
|
||||
|
||||
res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "up", "-d")
|
||||
t.Cleanup(func() {
|
||||
@@ -111,7 +114,7 @@ func TestLocalComposeBuild(t *testing.T) {
|
||||
t.Run(env+" no rebuild when up again", func(t *testing.T) {
|
||||
res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "up", "-d")
|
||||
|
||||
assert.Assert(t, !strings.Contains(res.Stdout(), "COPY static"), res.Stdout())
|
||||
assert.Assert(t, !strings.Contains(res.Stdout(), "COPY static"))
|
||||
})
|
||||
|
||||
t.Run(env+" rebuild when up --build", func(t *testing.T) {
|
||||
@@ -121,10 +124,15 @@ func TestLocalComposeBuild(t *testing.T) {
|
||||
res.Assert(t, icmd.Expected{Out: "COPY static2 /usr/share/nginx/html"})
|
||||
})
|
||||
|
||||
t.Run(env+" build --push ignored for unnamed images", func(t *testing.T) {
|
||||
res := c.RunDockerComposeCmd(t, "--workdir", "fixtures/build-test", "build", "--push", "nginx")
|
||||
assert.Assert(t, !strings.Contains(res.Stdout(), "failed to push"), res.Stdout())
|
||||
})
|
||||
|
||||
t.Run(env+" cleanup build project", func(t *testing.T) {
|
||||
c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test", "down")
|
||||
c.RunDockerCmd(t, "rmi", "build-test-nginx")
|
||||
c.RunDockerCmd(t, "rmi", "custom-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "-f", "build-test-nginx")
|
||||
c.RunDockerOrExitError(t, "rmi", "-f", "custom-nginx")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -361,10 +369,21 @@ func TestBuildPrivileged(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("use build privileged mode to run insecure build command", func(t *testing.T) {
|
||||
res := c.RunDockerComposeCmdNoCheck(t, "--project-directory", "fixtures/build-test/privileged", "build")
|
||||
assert.NilError(t, res.Error, res.Stderr())
|
||||
res.Assert(t, icmd.Expected{Out: "CapEff:\t0000003fffffffff"})
|
||||
res := c.RunDockerComposeCmd(t, "--project-directory", "fixtures/build-test/privileged", "build")
|
||||
capEffRe := regexp.MustCompile("CapEff:\t([0-9a-f]+)")
|
||||
matches := capEffRe.FindStringSubmatch(res.Stdout())
|
||||
assert.Equal(t, 2, len(matches), "Did not match CapEff in output, matches: %v", matches)
|
||||
|
||||
capEff, err := strconv.ParseUint(matches[1], 16, 64)
|
||||
assert.NilError(t, err, "Parsing CapEff: %s", matches[1])
|
||||
|
||||
// NOTE: can't use constant from x/sys/unix or tests won't compile on macOS/Windows
// #define CAP_SYS_ADMIN 21
// https://github.com/torvalds/linux/blob/v6.1/include/uapi/linux/capability.h#L278
const capSysAdmin = 0x1 << 21 // CapEff is a bitmask, so test bit 21 rather than the literal value 21
if capEff&capSysAdmin != capSysAdmin {
t.Fatalf("CapEff %s is missing CAP_SYS_ADMIN", matches[1])
}
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -35,6 +35,12 @@ func TestLocalComposeExec(t *testing.T) {
|
||||
return ret
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
c.RunDockerComposeCmd(t, cmdArgs("down", "--timeout=0")...)
|
||||
}
|
||||
cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
c.RunDockerComposeCmd(t, cmdArgs("up", "-d")...)
|
||||
|
||||
t.Run("exec true", func(t *testing.T) {
|
||||
|
||||
@@ -144,4 +144,12 @@ func TestLocalComposeRun(t *testing.T) {
|
||||
|
||||
c.RunDockerComposeCmd(t, "-f", "./fixtures/run-test/deps.yaml", "down", "--remove-orphans")
|
||||
})
|
||||
|
||||
t.Run("run with not required dependency", func(t *testing.T) {
|
||||
res := c.RunDockerComposeCmd(t, "-f", "./fixtures/dependencies/deps-not-required.yaml", "run", "foo")
|
||||
assert.Assert(t, strings.Contains(res.Combined(), "foo"), res.Combined())
|
||||
assert.Assert(t, !strings.Contains(res.Combined(), "bar"), res.Combined())
|
||||
|
||||
c.RunDockerComposeCmd(t, "-f", "./fixtures/dependencies/deps-not-required.yaml", "down", "--remove-orphans")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -281,8 +281,12 @@ func TestStopWithDependenciesAttached(t *testing.T) {
|
||||
const projectName = "compose-e2e-stop-with-deps"
|
||||
c := NewParallelCLI(t, WithEnv("COMMAND=echo hello"))
|
||||
|
||||
t.Run("up", func(t *testing.T) {
|
||||
res := c.RunDockerComposeCmd(t, "-f", "./fixtures/dependencies/compose.yaml", "-p", projectName, "up", "--attach-dependencies", "foo")
|
||||
res.Assert(t, icmd.Expected{Out: "exited with code 0"})
|
||||
})
|
||||
cleanup := func() {
|
||||
c.RunDockerComposeCmd(t, "-p", projectName, "down", "--remove-orphans", "--timeout=0")
|
||||
}
|
||||
cleanup()
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
res := c.RunDockerComposeCmd(t, "-f", "./fixtures/dependencies/compose.yaml", "-p", projectName, "up", "--attach-dependencies", "foo")
|
||||
res.Assert(t, icmd.Expected{Out: "exited with code 0"})
|
||||
}
|
||||
|
||||
@@ -51,8 +51,18 @@ func TestUpExitCodeFrom(t *testing.T) {
|
||||
c := NewParallelCLI(t)
|
||||
const projectName = "e2e-exit-code-from"
|
||||
|
||||
res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/start-fail/start-depends_on-long-lived.yaml", "--project-name", projectName, "up", "--exit-code-from=failure", "failure")
|
||||
res.Assert(t, icmd.Expected{ExitCode: 42})
|
||||
|
||||
c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--remove-orphans")
|
||||
}
|
||||
|
||||
func TestUpExitCodeFromContainerKilled(t *testing.T) {
|
||||
c := NewParallelCLI(t)
|
||||
const projectName = "e2e-exit-code-from-kill"
|
||||
|
||||
res := c.RunDockerComposeCmdNoCheck(t, "-f", "fixtures/start-fail/start-depends_on-long-lived.yaml", "--project-name", projectName, "up", "--exit-code-from=test")
|
||||
res.Assert(t, icmd.Expected{ExitCode: 137})
|
||||
res.Assert(t, icmd.Expected{ExitCode: 143})
|
||||
|
||||
c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--remove-orphans")
|
||||
}
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
services:
|
||||
base:
|
||||
image: base
|
||||
init: true
|
||||
build:
|
||||
context: .
|
||||
dockerfile: base.dockerfile
|
||||
service:
|
||||
init: true
|
||||
depends_on:
|
||||
- base
|
||||
build:
|
||||
|
||||
@@ -4,6 +4,7 @@ services:
|
||||
command: echo 'hello world'
|
||||
longrunning:
|
||||
image: alpine
|
||||
init: true
|
||||
depends_on:
|
||||
oneshot:
|
||||
condition: service_completed_successfully
|
||||
|
||||
pkg/e2e/fixtures/dependencies/deps-not-required.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
services:
|
||||
foo:
|
||||
image: bash
|
||||
command: echo "foo"
|
||||
depends_on:
|
||||
bar:
|
||||
required: false
|
||||
condition: service_healthy
|
||||
bar:
|
||||
image: nginx:alpine
|
||||
profiles: [not-required]
|
||||
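The fixture above exercises the new `required: false` flag on depends_on: if `bar` is missing or unhealthy, compose warns and keeps going instead of failing `foo`. Programmatically, the same relationship is what the Go changes build with compose-go types; a small hedged sketch, with field names taken from the hunks above and the service names borrowed from the fixture:

```go
package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/types"
)

func main() {
	// "foo" depends on "bar", but bar is optional, so a failure to start bar
	// is logged as a warning rather than aborting the whole up.
	foo := types.ServiceConfig{
		Name: "foo",
		DependsOn: types.DependsOnConfig{
			"bar": types.ServiceDependency{
				Condition: types.ServiceConditionHealthy,
				Required:  false,
			},
		},
	}
	fmt.Printf("%s waits for %d dependencies, required=%v\n",
		foo.Name, len(foo.DependsOn), foo.DependsOn["bar"].Required)
}
```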
@@ -3,6 +3,7 @@ services:
|
||||
my-service:
|
||||
image: alpine
|
||||
command: tail -f /dev/null
|
||||
init: true
|
||||
depends_on:
|
||||
nginx: {condition: service_healthy}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
services:
|
||||
foo:
|
||||
image: alpine
|
||||
init: true
|
||||
entrypoint: ["sleep", "600"]
|
||||
networks:
|
||||
default:
|
||||
@@ -9,4 +10,4 @@ networks:
|
||||
default:
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 10.1.0.0/16
|
||||
- subnet: 10.1.0.0/16
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
services:
|
||||
ping:
|
||||
image: alpine
|
||||
init: true
|
||||
command: ping localhost -c ${REPEAT:-1}
|
||||
hello:
|
||||
image: alpine
|
||||
|
||||
@@ -6,12 +6,14 @@ services:
|
||||
- MYSQL_ALLOW_EMPTY_PASSWORD=yes
|
||||
db:
|
||||
image: gtardif/sentences-db
|
||||
init: true
|
||||
networks:
|
||||
- dbnet
|
||||
- closesnetworkname1
|
||||
- closesnetworkname2
|
||||
words:
|
||||
image: gtardif/sentences-api
|
||||
init: true
|
||||
ports:
|
||||
- "8080:8080"
|
||||
networks:
|
||||
@@ -19,6 +21,7 @@ services:
|
||||
- servicenet
|
||||
web:
|
||||
image: gtardif/sentences-web
|
||||
init: true
|
||||
ports:
|
||||
- "80:80"
|
||||
labels:
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
services:
|
||||
with-restart:
|
||||
image: alpine
|
||||
init: true
|
||||
command: tail -f /dev/null
|
||||
depends_on:
|
||||
nginx: {condition: service_healthy, restart: true}
|
||||
|
||||
no-restart:
|
||||
image: alpine
|
||||
init: true
|
||||
command: tail -f /dev/null
|
||||
depends_on:
|
||||
nginx: { condition: service_healthy }
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
services:
|
||||
restart:
|
||||
image: alpine
|
||||
init: true
|
||||
command: ash -c "if [[ -f /tmp/restart.lock ]] ; then sleep infinity; else touch /tmp/restart.lock; fi"
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
services:
|
||||
db:
|
||||
image: gtardif/sentences-db
|
||||
init: true
|
||||
words:
|
||||
image: gtardif/sentences-api
|
||||
init: true
|
||||
ports:
|
||||
- "95:8080"
|
||||
web:
|
||||
image: gtardif/sentences-web
|
||||
init: true
|
||||
ports:
|
||||
- "90:80"
|
||||
labels:
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
services:
|
||||
fail:
|
||||
image: alpine
|
||||
init: true
|
||||
command: sleep infinity
|
||||
healthcheck:
|
||||
test: "false"
|
||||
@@ -8,6 +9,7 @@ services:
|
||||
retries: 3
|
||||
depends:
|
||||
image: alpine
|
||||
init: true
|
||||
command: sleep infinity
|
||||
depends_on:
|
||||
fail:
|
||||
|
||||
@@ -1,11 +1,14 @@
|
||||
services:
|
||||
safe:
|
||||
image: 'alpine'
|
||||
init: true
|
||||
command: ['/bin/sh', '-c', 'sleep infinity'] # never exiting
|
||||
failure:
|
||||
image: 'alpine'
|
||||
command: ['/bin/sh', '-c', 'sleep 2 ; echo "exiting" ; exit 42']
|
||||
init: true
|
||||
command: ['/bin/sh', '-c', 'sleep 1 ; echo "exiting with error" ; exit 42']
|
||||
test:
|
||||
image: 'alpine'
|
||||
init: true
|
||||
command: ['/bin/sh', '-c', 'sleep 99999 ; echo "tests are OK"'] # very long job
|
||||
depends_on: [safe]
|
||||
|
||||
@@ -1,6 +1,7 @@
services:
  stderr:
    image: alpine
    init: true
    command: /bin/ash /log_to_stderr.sh
    volumes:
      - ./log_to_stderr.sh:/log_to_stderr.sh
pkg/e2e/fixtures/watch/compose.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
x-dev: &x-dev
  watch:
    - action: sync
      path: ./data
      target: /app/data
      ignore:
        - '*.foo'
        - ./ignored

services:
  alpine:
    build:
      dockerfile_inline: |-
        FROM alpine
        RUN mkdir -p /app/data
    init: true
    command: sleep infinity
    x-develop: *x-dev
  busybox:
    build:
      dockerfile_inline: |-
        FROM busybox
        RUN mkdir -p /app/data
    init: true
    command: sleep infinity
    x-develop: *x-dev
  debian:
    build:
      dockerfile_inline: |-
        FROM debian
        RUN mkdir -p /app/data
    init: true
    command: sleep infinity
    x-develop: *x-dev
pkg/e2e/fixtures/watch/data/hello.txt (new file, 1 line)
@@ -0,0 +1 @@
hello world
@@ -153,3 +153,16 @@ func TestScaleDoesntRecreate(t *testing.T) {
    assert.Check(t, !strings.Contains(res.Combined(), "Recreated"))
}

func TestUpWithDependencyNotRequired(t *testing.T) {
    c := NewCLI(t)
    const projectName = "compose-e2e-dependency-not-required"
    t.Cleanup(func() {
        c.RunDockerComposeCmd(t, "--project-name", projectName, "down")
    })

    res := c.RunDockerComposeCmd(t, "-f", "./fixtures/dependencies/deps-not-required.yaml", "--project-name", projectName,
        "--profile", "not-required", "up", "-d")
    assert.Assert(t, strings.Contains(res.Combined(), "foo"), res.Combined())
    assert.Assert(t, strings.Contains(res.Combined(), " optional dependency \"bar\" failed to start"), res.Combined())
}
pkg/e2e/watch_test.go (new file, 197 lines)
@@ -0,0 +1,197 @@
/*
   Copyright 2023 Docker Compose CLI authors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package e2e

import (
    "fmt"
    "os"
    "path/filepath"
    "strconv"
    "strings"
    "sync/atomic"
    "testing"
    "time"

    "github.com/distribution/distribution/v3/uuid"
    "github.com/stretchr/testify/require"
    "gotest.tools/v3/assert"
    "gotest.tools/v3/assert/cmp"
    "gotest.tools/v3/icmd"
    "gotest.tools/v3/poll"
)

func TestWatch(t *testing.T) {
    services := []string{"alpine", "busybox", "debian"}
    t.Run("docker cp", func(t *testing.T) {
        for _, svcName := range services {
            t.Run(svcName, func(t *testing.T) {
                t.Helper()
                doTest(t, svcName, false)
            })
        }
    })

    t.Run("tar", func(t *testing.T) {
        for _, svcName := range services {
            t.Run(svcName, func(t *testing.T) {
                t.Helper()
                doTest(t, svcName, true)
            })
        }
    })
}

// NOTE: these tests all share a single Compose file but are safe to run concurrently
func doTest(t *testing.T, svcName string, tarSync bool) {
    tmpdir := t.TempDir()
    dataDir := filepath.Join(tmpdir, "data")
    writeDataFile := func(name string, contents string) {
        t.Helper()
        dest := filepath.Join(dataDir, name)
        require.NoError(t, os.MkdirAll(filepath.Dir(dest), 0o700))
        t.Logf("writing %q to %q", contents, dest)
        require.NoError(t, os.WriteFile(dest, []byte(contents+"\n"), 0o600))
    }

    composeFilePath := filepath.Join(tmpdir, "compose.yaml")
    CopyFile(t, filepath.Join("fixtures", "watch", "compose.yaml"), composeFilePath)

    projName := "e2e-watch-" + svcName
    env := []string{
        "COMPOSE_FILE=" + composeFilePath,
        "COMPOSE_PROJECT_NAME=" + projName,
        "COMPOSE_EXPERIMENTAL_WATCH_TAR=" + strconv.FormatBool(tarSync),
    }

    cli := NewCLI(t, WithEnv(env...))

    cleanup := func() {
        cli.RunDockerComposeCmd(t, "down", svcName, "--timeout=0", "--remove-orphans", "--volumes")
    }
    cleanup()
    t.Cleanup(cleanup)

    cli.RunDockerComposeCmd(t, "up", svcName, "--wait", "--build")

    cmd := cli.NewDockerComposeCmd(t, "--verbose", "alpha", "watch", svcName)
    // stream output since watch runs in the background
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    r := icmd.StartCmd(cmd)
    require.NoError(t, r.Error)
    t.Cleanup(func() {
        // IMPORTANT: watch doesn't exit on its own, don't leak processes!
        if r.Cmd.Process != nil {
            _ = r.Cmd.Process.Kill()
        }
    })
    var testComplete atomic.Bool
    go func() {
        // if the process exits abnormally before the test is done, fail the test
        if err := r.Cmd.Wait(); err != nil && !t.Failed() && !testComplete.Load() {
            assert.Check(t, cmp.Nil(err))
        }
    }()

    require.NoError(t, os.Mkdir(dataDir, 0o700))

    checkFileContents := func(path string, contents string) poll.Check {
        return func(pollLog poll.LogT) poll.Result {
            if r.Cmd.ProcessState != nil {
                return poll.Error(fmt.Errorf("watch process exited early: %s", r.Cmd.ProcessState))
            }
            res := icmd.RunCmd(cli.NewDockerComposeCmd(t, "exec", svcName, "cat", path))
            if strings.Contains(res.Stdout(), contents) {
                return poll.Success()
            }
            return poll.Continue(res.Combined())
        }
    }

    waitForFlush := func() {
        sentinelVal := uuid.Generate().String()
        writeDataFile("wait.txt", sentinelVal)
        poll.WaitOn(t, checkFileContents("/app/data/wait.txt", sentinelVal))
    }

    t.Logf("Writing to a file until Compose watch is up and running")
    poll.WaitOn(t, func(t poll.LogT) poll.Result {
        writeDataFile("hello.txt", "hello world")
        return checkFileContents("/app/data/hello.txt", "hello world")(t)
    }, poll.WithDelay(time.Second))

    t.Logf("Modifying file contents")
    writeDataFile("hello.txt", "hello watch")
    poll.WaitOn(t, checkFileContents("/app/data/hello.txt", "hello watch"))

    t.Logf("Deleting file")
    require.NoError(t, os.Remove(filepath.Join(dataDir, "hello.txt")))
    waitForFlush()
    cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/hello.txt").
        Assert(t, icmd.Expected{
            ExitCode: 1,
            Err:      "No such file or directory",
        })

    t.Logf("Writing to ignored paths")
    writeDataFile("data.foo", "ignored")
    writeDataFile(filepath.Join("ignored", "hello.txt"), "ignored")
    waitForFlush()
    cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/data.foo").
        Assert(t, icmd.Expected{
            ExitCode: 1,
            Err:      "No such file or directory",
        },
        )
    cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/ignored").
        Assert(t, icmd.Expected{
            ExitCode: 1,
            Err:      "No such file or directory",
        },
        )

    t.Logf("Creating subdirectory")
    require.NoError(t, os.Mkdir(filepath.Join(dataDir, "subdir"), 0o700))
    waitForFlush()
    cli.RunDockerComposeCmd(t, "exec", svcName, "stat", "/app/data/subdir")

    t.Logf("Writing to file in subdirectory")
    writeDataFile(filepath.Join("subdir", "file.txt"), "a")
    poll.WaitOn(t, checkFileContents("/app/data/subdir/file.txt", "a"))

    t.Logf("Writing to file multiple times")
    writeDataFile(filepath.Join("subdir", "file.txt"), "x")
    writeDataFile(filepath.Join("subdir", "file.txt"), "y")
    writeDataFile(filepath.Join("subdir", "file.txt"), "z")
    poll.WaitOn(t, checkFileContents("/app/data/subdir/file.txt", "z"))
    writeDataFile(filepath.Join("subdir", "file.txt"), "z")
    writeDataFile(filepath.Join("subdir", "file.txt"), "y")
    writeDataFile(filepath.Join("subdir", "file.txt"), "x")
    poll.WaitOn(t, checkFileContents("/app/data/subdir/file.txt", "x"))

    t.Logf("Deleting directory")
    require.NoError(t, os.RemoveAll(filepath.Join(dataDir, "subdir")))
    waitForFlush()
    cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/subdir").
        Assert(t, icmd.Expected{
            ExitCode: 1,
            Err:      "No such file or directory",
        },
        )

    testComplete.Store(true)
}
@@ -266,6 +266,20 @@ func (mr *MockServiceMockRecorder) Ps(ctx, projectName, options interface{}) *gomock.Call {
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ps", reflect.TypeOf((*MockService)(nil).Ps), ctx, projectName, options)
}

// Publish mocks base method.
func (m *MockService) Publish(ctx context.Context, project *types.Project, repository string) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "Publish", ctx, project, repository)
    ret0, _ := ret[0].(error)
    return ret0
}

// Publish indicates an expected call of Publish.
func (mr *MockServiceMockRecorder) Publish(ctx, project, repository interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Publish", reflect.TypeOf((*MockService)(nil).Publish), ctx, project, repository)
}

// Pull mocks base method.
func (m *MockService) Pull(ctx context.Context, project *types.Project, options api.PullOptions) error {
    m.ctrl.T.Helper()
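The generated mock above follows the usual gomock layout: the method records a call on the controller, and the recorder registers expectations. As a minimal usage sketch only — the mocks_test package placement, the NewMockService constructor name, the import paths, and the repository value are assumptions based on standard gomock code generation, not taken from this diff — exercising the new Publish mock could look like:

package mocks_test

import (
    "context"
    "testing"

    "github.com/compose-spec/compose-go/types"
    "github.com/golang/mock/gomock"

    "github.com/docker/compose/v2/pkg/mocks"
)

// Hypothetical test: names and values here are illustrative assumptions.
func TestPublishExpectation(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    svc := mocks.NewMockService(ctrl)
    project := &types.Project{Name: "demo"}

    // Register the expected call on the recorder, then exercise the mock.
    svc.EXPECT().
        Publish(gomock.Any(), project, "example.com/demo/app:latest").
        Return(nil)

    if err := svc.Publish(context.Background(), project, "example.com/demo/app:latest"); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
}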
@@ -73,6 +73,10 @@ func (w *ttyWriter) Stop() {
func (w *ttyWriter) Event(e Event) {
    w.mtx.Lock()
    defer w.mtx.Unlock()
    w.event(e)
}

func (w *ttyWriter) event(e Event) {
    if !utils.StringContains(w.eventIDs, e.ID) {
        w.eventIDs = append(w.eventIDs, e.ID)
    }
@@ -80,9 +84,14 @@ func (w *ttyWriter) Event(e Event) {
    last := w.events[e.ID]
    switch e.Status {
    case Done, Error, Warning:
        if last.Status != e.Status {
        if last.endTime.IsZero() {
            last.stop()
        }
    case Working:
        if !last.endTime.IsZero() {
            // already done, don't overwrite
            return
        }
    }
    last.Status = e.Status
    last.Text = e.Text
@@ -106,8 +115,10 @@ func (w *ttyWriter) Event(e Event) {
}

func (w *ttyWriter) Events(events []Event) {
    w.mtx.Lock()
    defer w.mtx.Unlock()
    for _, e := range events {
        w.Event(e)
        w.event(e)
    }
}
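The progress-writer change above turns the exported Event into a thin wrapper that takes the mutex and delegates to an unexported event, so that Events can hold the lock once for a whole batch and call the unlocked helper in its loop; because Go's sync.Mutex is not reentrant, calling the exported Event from inside that locked loop would deadlock. A minimal standalone sketch of the same lock-once/delegate pattern, with hypothetical names:

package main

import (
    "fmt"
    "sync"
)

// writer is a stand-in for ttyWriter: one mutex guards all state.
type writer struct {
    mu    sync.Mutex
    lines []string
}

// Add is the exported entry point: it takes the lock and delegates.
func (w *writer) Add(line string) {
    w.mu.Lock()
    defer w.mu.Unlock()
    w.add(line)
}

// add assumes the caller already holds w.mu.
func (w *writer) add(line string) {
    w.lines = append(w.lines, line)
}

// AddAll locks once for the whole batch and uses the unlocked helper,
// avoiding a re-entrant Lock() on a non-reentrant sync.Mutex.
func (w *writer) AddAll(lines []string) {
    w.mu.Lock()
    defer w.mu.Unlock()
    for _, l := range lines {
        w.add(l)
    }
}

func main() {
    w := &writer{}
    w.Add("one")
    w.AddAll([]string{"two", "three"})
    fmt.Println(w.lines)
}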
@@ -121,8 +121,8 @@ func newWatcher(paths []string, ignore PathMatcher) (Notify, error) {
    dw := &fseventNotify{
        ignore: ignore,
        stream: &fsevents.EventStream{
            Latency: 1 * time.Millisecond,
            Flags:   fsevents.FileEvents,
            Latency: 50 * time.Millisecond,
            Flags:   fsevents.FileEvents | fsevents.IgnoreSelf,
            // NOTE(dmiller): this corresponds to the `sinceWhen` parameter in FSEventStreamCreate
            // https://developer.apple.com/documentation/coreservices/1443980-fseventstreamcreate
            EventID: fsevents.LatestEventID(),
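The watcher tweak above raises the FSEvents coalescing latency from 1ms to 50ms and adds IgnoreSelf so the watcher does not wake up on events it caused itself. A rough macOS-only sketch of an fsnotify/fsevents stream configured the same way; the watched path and the consuming loop are assumptions, while Latency, Flags, and EventID mirror the values in the diff:

//go:build darwin

package main

import (
    "fmt"
    "time"

    "github.com/fsnotify/fsevents"
)

func main() {
    stream := &fsevents.EventStream{
        Paths:   []string{"/tmp/watched"}, // hypothetical path
        Latency: 50 * time.Millisecond,    // coalesce bursts of changes into one batch
        Flags:   fsevents.FileEvents | fsevents.IgnoreSelf,
        // Start from "now" rather than replaying historical events.
        EventID: fsevents.LatestEventID(),
    }
    stream.Start()
    defer stream.Stop()

    // Batches arrive on the Events channel after the latency window elapses.
    for batch := range stream.Events {
        for _, ev := range batch {
            fmt.Printf("event %#x on %s\n", ev.Flags, ev.Path)
        }
    }
}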