author     Akihiro Suda <suda.akihiro@lab.ntt.co.jp>  2016-12-07 07:16:48 +0000
committer  Akihiro Suda <suda.akihiro@lab.ntt.co.jp>  2017-02-28 02:10:09 +0000
commit     28797019b3f0eb288c4b201237cb992ec148dd4c (patch)
tree       58efb35d387e69e2c1114f9b50ff75eee303e05f
parent     7fb83eb7629856756e3ed7b1f20cedc87584354b (diff)
download   docker-28797019b3f0eb288c4b201237cb992ec148dd4c.tar.gz
[EXPERIMENTAL] Integration Test on Swarm

This commit adds contrib/integration-cli-on-swarm/integration-cli-on-swarm.sh, which enables the integration tests (IT) to run in parallel, using Swarm-mode and Funker. Please refer to contrib/integration-cli-on-swarm/README.md.

The test run takes about 5 to 6 minutes with 10 n1-standard-4 GCE instances:

$ ./contrib/integration-cli-on-swarm/integration-cli-on-swarm.sh --push-worker-image example.gcr.io/foo/bar --replicas 30 --shuffle
2016/12/29 08:32:15 Loaded 1618 tests (30 chunks)
2016/12/29 08:32:15 Executing 30 chunks in parallel, against "integration-cli-worker"
2016/12/29 08:32:15 Executing chunk 0 (contains 54 test filters)
..
2016/12/29 08:34:34 Finished chunk 28 [1/30] with 54 test filters in 2m19.098068003s, code=0.
2016/12/29 08:34:38 Finished chunk 12 [2/30] with 54 test filters in 2m23.088569511s, code=0.
2016/12/29 08:34:48 Finished chunk 10 [3/30] with 54 test filters in 2m33.880679079s, code=0.
2016/12/29 08:34:54 Finished chunk 20 [4/30] with 54 test filters in 2m39.973747028s, code=0.
2016/12/29 08:35:11 Finished chunk 18 [5/30] with 54 test filters in 2m56.28384361s, code=0.
2016/12/29 08:35:11 Finished chunk 29 [6/30] with 52 test filters in 2m56.54047088s, code=0.
2016/12/29 08:35:15 Finished chunk 1 [7/30] with 54 test filters in 3m0.285044426s, code=0.
2016/12/29 08:35:22 Finished chunk 6 [8/30] with 54 test filters in 3m7.211775338s, code=0.
2016/12/29 08:35:24 Finished chunk 25 [9/30] with 54 test filters in 3m9.938413009s, code=0.
2016/12/29 08:35:30 Finished chunk 27 [10/30] with 54 test filters in 3m15.219834368s, code=0.
2016/12/29 08:35:36 Finished chunk 9 [11/30] with 54 test filters in 3m21.615434162s, code=0.
2016/12/29 08:35:41 Finished chunk 13 [12/30] with 54 test filters in 3m26.576907401s, code=0.
2016/12/29 08:35:45 Finished chunk 17 [13/30] with 54 test filters in 3m30.290752537s, code=0.
2016/12/29 08:35:53 Finished chunk 2 [14/30] with 54 test filters in 3m38.148423321s, code=0.
2016/12/29 08:35:55 Finished chunk 24 [15/30] with 54 test filters in 3m40.09669137s, code=0.
2016/12/29 08:35:57 Finished chunk 8 [16/30] with 54 test filters in 3m42.299945108s, code=0.
2016/12/29 08:35:57 Finished chunk 22 [17/30] with 54 test filters in 3m42.946558809s, code=0.
2016/12/29 08:35:59 Finished chunk 23 [18/30] with 54 test filters in 3m44.232557165s, code=0.
2016/12/29 08:36:02 Finished chunk 3 [19/30] with 54 test filters in 3m47.112051358s, code=0.
2016/12/29 08:36:11 Finished chunk 15 [20/30] with 54 test filters in 3m56.340656645s, code=0.
2016/12/29 08:36:11 Finished chunk 11 [21/30] with 54 test filters in 3m56.882401231s, code=0.
2016/12/29 08:36:22 Finished chunk 19 [22/30] with 54 test filters in 4m7.551093516s, code=0.
2016/12/29 08:36:23 Finished chunk 21 [23/30] with 54 test filters in 4m8.221093446s, code=0.
2016/12/29 08:36:25 Finished chunk 16 [24/30] with 54 test filters in 4m10.450451705s, code=0.
2016/12/29 08:36:27 Finished chunk 5 [25/30] with 54 test filters in 4m12.162272692s, code=0.
2016/12/29 08:36:28 Finished chunk 14 [26/30] with 54 test filters in 4m13.977801031s, code=0.
2016/12/29 08:36:29 Finished chunk 0 [27/30] with 54 test filters in 4m14.34086812s, code=0.
2016/12/29 08:36:49 Finished chunk 26 [28/30] with 54 test filters in 4m34.437085539s, code=0.
2016/12/29 08:37:14 Finished chunk 7 [29/30] with 54 test filters in 4m59.22902721s, code=0.
2016/12/29 08:37:20 Finished chunk 4 [30/30] with 54 test filters in 5m5.103469214s, code=0.
2016/12/29 08:37:20 Executed 30 chunks in 5m5.104379119s. PASS: 30, FAIL: 0.

Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>

-rw-r--r--  .dockerignore                                                                        2
-rw-r--r--  .gitignore                                                                           1
-rw-r--r--  Makefile                                                                            24
-rw-r--r--  hack/integration-cli-on-swarm/README.md                                             66
-rw-r--r--  hack/integration-cli-on-swarm/agent/Dockerfile                                       6
-rw-r--r--  hack/integration-cli-on-swarm/agent/master/call.go                                 132
-rw-r--r--  hack/integration-cli-on-swarm/agent/master/master.go                                65
-rw-r--r--  hack/integration-cli-on-swarm/agent/master/set.go                                   28
-rw-r--r--  hack/integration-cli-on-swarm/agent/master/set_test.go                              63
-rw-r--r--  hack/integration-cli-on-swarm/agent/types/types.go                                  18
-rw-r--r--  hack/integration-cli-on-swarm/agent/vendor.conf                                      2
-rw-r--r--  hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/LICENSE     191
-rw-r--r--  hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/call.go      50
-rw-r--r--  hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/handle.go    54
-rw-r--r--  hack/integration-cli-on-swarm/agent/worker/executor.go                             109
-rw-r--r--  hack/integration-cli-on-swarm/agent/worker/worker.go                                68
-rw-r--r--  hack/integration-cli-on-swarm/host/compose.go                                      121
-rw-r--r--  hack/integration-cli-on-swarm/host/dockercmd.go                                     64
-rw-r--r--  hack/integration-cli-on-swarm/host/enumerate.go                                     55
-rw-r--r--  hack/integration-cli-on-swarm/host/enumerate_test.go                                84
-rw-r--r--  hack/integration-cli-on-swarm/host/host.go                                         190
-rw-r--r--  hack/integration-cli-on-swarm/host/volume.go                                        88
-rw-r--r--  hack/make/.integration-test-helpers                                                  1
-rw-r--r--  integration-cli/daemon/daemon.go                                                     9
24 files changed, 1486 insertions(+), 5 deletions(-)
diff --git a/.dockerignore b/.dockerignore
index 2ce8321a7e..4a56f2e00c 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -3,3 +3,5 @@ bundles
vendor/pkg
.go-pkg-cache
.git
+hack/integration-cli-on-swarm/integration-cli-on-swarm
+
diff --git a/.gitignore b/.gitignore
index be8b03d17b..218339eccd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,3 +31,4 @@ man/man1
man/man5
man/man8
vendor/pkg/
+hack/integration-cli-on-swarm/integration-cli-on-swarm
diff --git a/Makefile b/Makefile
index 65589227a8..b1c25b334c 100644
--- a/Makefile
+++ b/Makefile
@@ -58,10 +58,11 @@ DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docke
DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)
# enable package cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e.DOCKER_HOST) are set
-PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo
+PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64 goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo
PKGCACHE_VOLROOT := dockerdev-go-pkg-cache
PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-)
-DOCKER_MOUNT := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_MOUNT) $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),$(DOCKER_MOUNT))
+DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),)
+DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE)
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
@@ -74,6 +75,9 @@ export BUILD_APT_MIRROR
SWAGGER_DOCS_PORT ?= 9000
+INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master)
+INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker)
+
# if this session isn't interactive, then we don't want to allocate a
# TTY, which would fail, but if it is interactive, we do want to attach
# so that the user can send e.g. ^C through.
@@ -172,3 +176,19 @@ swagger-docs: ## preview the API documentation
-e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
-p $(SWAGGER_DOCS_PORT):80 \
bfirsh/redoc:1.6.2
+
+build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel
+ @echo "Building hack/integration-cli-on-swarm"
+ go build -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host
+ @echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)"
+ docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent
+# For the worker, we don't use `docker build`, so as to enable DOCKER_INCREMENTAL_BINARY and so on
+ @echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)"
+ $(eval tmp := integration-cli-worker-tmp)
+# We mount pkgcache, but not bundle (bundle needs to be baked into the image)
+# To avoid baking DOCKER_GRAPHDRIVER and so on into the image, we cannot use $(DOCKER_ENVS) here
+ docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS -e DOCKER_INCREMENTAL_BINARY --privileged $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_IMAGE) top
+ docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary
+ docker exec $(tmp) go build -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker
+ docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE)
+ docker rm -f $(tmp)
diff --git a/hack/integration-cli-on-swarm/README.md b/hack/integration-cli-on-swarm/README.md
new file mode 100644
index 0000000000..0c4b47700c
--- /dev/null
+++ b/hack/integration-cli-on-swarm/README.md
@@ -0,0 +1,66 @@
+# Integration Testing on Swarm
+
+IT on Swarm allows you to execute the integration tests in parallel across a Docker Swarm cluster.
+
+## Architecture
+
+### Master service
+
+ - Works as a funker caller
+ - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (the full list is passed to the master as a file via the `-input` flag, typically `/mnt/input`)
+
+### Worker service
+
+ - Works as a funker callee
+ - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
+
+### Client
+
+ - Controls master and workers via `docker stack`
+ - No need to have a local daemon
+
+Typically, the master and workers run in a cloud environment,
+while the client runs on a laptop, e.g. Docker for Mac/Windows.
+
+## Requirements
+
+ - Docker daemon 1.13 or later
+ - A private registry (only needed for distributed execution across multiple nodes)
+
+## Usage
+
+### Step 1: Prepare images
+
+ $ make build-integration-cli-on-swarm
+
+The following environment variables are known to work in this step:
+
+ - `BUILDFLAGS`
+ - `DOCKER_INCREMENTAL_BINARY`
+
+### Step 2: Execute tests
+
+ $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
+
+The following environment variables are known to work in this step:
+
+ - `DOCKER_GRAPHDRIVER`
+ - `DOCKER_EXPERIMENTAL`
+
+#### Flags
+
+Basic flags:
+
+ - `-replicas N`: the number of worker service replicas, i.e. the degree of parallelism.
+ - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
+ - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only a single node, and hence do not need a private registry, you do not need to specify `-push-worker-image`.
+
+Experimental flags for mitigating makespan nonuniformity:
+
+ - `-shuffle`: Shuffle the test filter strings
+
+Flags for debugging IT on Swarm itself:
+
+ - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default (0), the current timestamp is used.
+ - `-filters-file FILE`: the file containing `-check.f` filter strings. By default, the file is generated automatically.
+ - `-dry-run`: skip the actual workload
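
The master/worker interaction described in the README above is the Funker call/handle pattern from `bfirsh/funker-go`, carrying the `types.Args`/`types.Result` payloads added by this commit. Below is a minimal standalone sketch of that round trip (not part of this patch; the service name "worker" and the handler body are illustrative placeholders):

    package main

    import (
    	"encoding/json"
    	"log"

    	"github.com/bfirsh/funker-go"
    	"github.com/docker/docker/hack/integration-cli-on-swarm/agent/types"
    )

    // Worker side: register a handler that receives one chunk of test filters
    // and returns a Result (mirrors agent/worker/worker.go).
    func workerSide() error {
    	return funker.Handle(func(args *types.Args) types.Result {
    		log.Printf("would run %d test filters for chunk %d", len(args.Tests), args.ChunkID)
    		return types.Result{ChunkID: args.ChunkID, Code: 0, RawLog: "dry run"}
    	})
    }

    // Master side: call the worker service by name. funker.Call returns an
    // interface{}, so the payload is re-marshalled into types.Result
    // (mirrors executeTestChunk in agent/master/call.go).
    func masterSide() (types.Result, error) {
    	ret, err := funker.Call("worker", types.Args{
    		ChunkID: 0,
    		Tests:   []string{"DockerSuite.TestFoo$", "DockerSuite.TestBar$"},
    	})
    	if err != nil {
    		return types.Result{}, err
    	}
    	tmp, err := json.Marshal(ret)
    	if err != nil {
    		return types.Result{}, err
    	}
    	var result types.Result
    	err = json.Unmarshal(tmp, &result)
    	return result, err
    }

    func main() {
    	// In the real stack the two halves run in separate Swarm services;
    	// they are only shown side by side here.
    	_ = workerSide
    	_ = masterSide
    }

In the deployed stack, the master resolves the worker by its Swarm service name over the stack's overlay network, and each worker container serves exactly one request before exiting, relying on the service's restart policy (`condition: any`, see compose.go below) to bring up a fresh callee for the next chunk.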
diff --git a/hack/integration-cli-on-swarm/agent/Dockerfile b/hack/integration-cli-on-swarm/agent/Dockerfile
new file mode 100644
index 0000000000..c2bc2f195f
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/Dockerfile
@@ -0,0 +1,6 @@
+# This Dockerfile is used solely for the master image.
+# Please refer to the top-level Makefile for the worker image.
+FROM golang:1.7
+ADD . /go/src/github.com/docker/docker/hack/integration-cli-on-swarm/agent
+RUN go build -o /master github.com/docker/docker/hack/integration-cli-on-swarm/agent/master
+ENTRYPOINT ["/master"]
diff --git a/hack/integration-cli-on-swarm/agent/master/call.go b/hack/integration-cli-on-swarm/agent/master/call.go
new file mode 100644
index 0000000000..858c2c0724
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/master/call.go
@@ -0,0 +1,132 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/bfirsh/funker-go"
+ "github.com/docker/docker/hack/integration-cli-on-swarm/agent/types"
+)
+
+const (
+ // funkerRetryTimeout is for the issue https://github.com/bfirsh/funker/issues/3
+ // When all the funker replicas are busy with their own jobs, we cannot connect to funker.
+ funkerRetryTimeout = 1 * time.Hour
+ funkerRetryDuration = 1 * time.Second
+)
+
+// ticker is needed for some CI environments (e.g., on Travis, the job is aborted when no output is emitted for 10 minutes)
+func ticker(d time.Duration) chan struct{} {
+ t := time.NewTicker(d)
+ stop := make(chan struct{})
+ go func() {
+ for {
+ select {
+ case <-t.C:
+ log.Printf("tick (just for keeping CI job active) per %s", d.String())
+ case <-stop:
+ t.Stop()
+ }
+ }
+ }()
+ return stop
+}
+
+func executeTests(funkerName string, testChunks [][]string) error {
+ tickerStopper := ticker(9*time.Minute + 55*time.Second)
+ defer func() {
+ close(tickerStopper)
+ }()
+ begin := time.Now()
+ log.Printf("Executing %d chunks in parallel, against %q", len(testChunks), funkerName)
+ var wg sync.WaitGroup
+ var passed, failed uint32
+ for chunkID, tests := range testChunks {
+ log.Printf("Executing chunk %d (contains %d test filters)", chunkID, len(tests))
+ wg.Add(1)
+ go func(chunkID int, tests []string) {
+ defer wg.Done()
+ chunkBegin := time.Now()
+ result, err := executeTestChunkWithRetry(funkerName, types.Args{
+ ChunkID: chunkID,
+ Tests: tests,
+ })
+ if result.RawLog != "" {
+ for _, s := range strings.Split(result.RawLog, "\n") {
+ log.Printf("Log (chunk %d): %s", chunkID, s)
+ }
+ }
+ if err != nil {
+ log.Printf("Error while executing chunk %d: %v",
+ chunkID, err)
+ atomic.AddUint32(&failed, 1)
+ } else {
+ if result.Code == 0 {
+ atomic.AddUint32(&passed, 1)
+ } else {
+ atomic.AddUint32(&failed, 1)
+ }
+ log.Printf("Finished chunk %d [%d/%d] with %d test filters in %s, code=%d.",
+ chunkID, passed+failed, len(testChunks), len(tests),
+ time.Now().Sub(chunkBegin), result.Code)
+ }
+ }(chunkID, tests)
+ }
+ wg.Wait()
+ // TODO: print actual tests rather than chunks
+ log.Printf("Executed %d chunks in %s. PASS: %d, FAIL: %d.",
+ len(testChunks), time.Now().Sub(begin), passed, failed)
+ if failed > 0 {
+ return fmt.Errorf("%d chunks failed", failed)
+ }
+ return nil
+}
+
+func executeTestChunk(funkerName string, args types.Args) (types.Result, error) {
+ ret, err := funker.Call(funkerName, args)
+ if err != nil {
+ return types.Result{}, err
+ }
+ tmp, err := json.Marshal(ret)
+ if err != nil {
+ return types.Result{}, err
+ }
+ var result types.Result
+ err = json.Unmarshal(tmp, &result)
+ return result, err
+}
+
+func executeTestChunkWithRetry(funkerName string, args types.Args) (types.Result, error) {
+ begin := time.Now()
+ for i := 0; time.Now().Sub(begin) < funkerRetryTimeout; i++ {
+ result, err := executeTestChunk(funkerName, args)
+ if err == nil {
+ log.Printf("executeTestChunk(%q, %d) returned code %d in trial %d", funkerName, args.ChunkID, result.Code, i)
+ return result, nil
+ }
+ if errorSeemsInteresting(err) {
+ log.Printf("Error while calling executeTestChunk(%q, %d), will retry (trial %d): %v",
+ funkerName, args.ChunkID, i, err)
+ }
+ // TODO: non-constant sleep
+ time.Sleep(funkerRetryDuration)
+ }
+ return types.Result{}, fmt.Errorf("could not call executeTestChunk(%q, %d) in %v", funkerName, args.ChunkID, funkerRetryTimeout)
+}
+
+// errorSeemsInteresting returns true if err does not seem about https://github.com/bfirsh/funker/issues/3
+func errorSeemsInteresting(err error) bool {
+ boringSubstrs := []string{"connection refused", "connection reset by peer", "no such host", "transport endpoint is not connected", "no route to host"}
+ errS := err.Error()
+ for _, boringS := range boringSubstrs {
+ if strings.Contains(errS, boringS) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/hack/integration-cli-on-swarm/agent/master/master.go b/hack/integration-cli-on-swarm/agent/master/master.go
new file mode 100644
index 0000000000..a0d9a0d381
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/master/master.go
@@ -0,0 +1,65 @@
+package main
+
+import (
+ "errors"
+ "flag"
+ "io/ioutil"
+ "log"
+ "strings"
+)
+
+func main() {
+ if err := xmain(); err != nil {
+ log.Fatalf("fatal error: %v", err)
+ }
+}
+
+func xmain() error {
+ workerService := flag.String("worker-service", "", "Name of worker service")
+ chunks := flag.Int("chunks", 0, "Number of chunks")
+ input := flag.String("input", "", "Path to input file")
+ randSeed := flag.Int64("rand-seed", int64(0), "Random seed")
+ shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity")
+ flag.Parse()
+ if *workerService == "" {
+ return errors.New("worker-service unset")
+ }
+ if *chunks == 0 {
+ return errors.New("chunks unset")
+ }
+ if *input == "" {
+ return errors.New("input unset")
+ }
+
+ tests, err := loadTests(*input)
+ if err != nil {
+ return err
+ }
+ testChunks := chunkTests(tests, *chunks, *shuffle, *randSeed)
+ log.Printf("Loaded %d tests (%d chunks)", len(tests), len(testChunks))
+ return executeTests(*workerService, testChunks)
+}
+
+func chunkTests(tests []string, numChunks int, shuffle bool, randSeed int64) [][]string {
+ // Shuffling (experimental) mitigates makespan nonuniformity.
+ // Not sure whether this can cause some locality problem.
+ if shuffle {
+ shuffleStrings(tests, randSeed)
+ }
+ return chunkStrings(tests, numChunks)
+}
+
+func loadTests(filename string) ([]string, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ var tests []string
+ for _, line := range strings.Split(string(b), "\n") {
+ s := strings.TrimSpace(line)
+ if s != "" {
+ tests = append(tests, s)
+ }
+ }
+ return tests, nil
+}
diff --git a/hack/integration-cli-on-swarm/agent/master/set.go b/hack/integration-cli-on-swarm/agent/master/set.go
new file mode 100644
index 0000000000..d28c41da7f
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/master/set.go
@@ -0,0 +1,28 @@
+package main
+
+import (
+ "math/rand"
+)
+
+// chunkStrings chunks the string slice
+func chunkStrings(x []string, numChunks int) [][]string {
+ var result [][]string
+ chunkSize := (len(x) + numChunks - 1) / numChunks
+ for i := 0; i < len(x); i += chunkSize {
+ ub := i + chunkSize
+ if ub > len(x) {
+ ub = len(x)
+ }
+ result = append(result, x[i:ub])
+ }
+ return result
+}
+
+// shuffleStrings shuffles strings
+func shuffleStrings(x []string, seed int64) {
+ r := rand.New(rand.NewSource(seed))
+ for i := range x {
+ j := r.Intn(i + 1)
+ x[i], x[j] = x[j], x[i]
+ }
+}
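
`chunkStrings` above uses a ceiling division for the chunk size, so the final chunk may be shorter, and fewer than `numChunks` chunks are returned when the input is smaller than the requested chunk count. A small standalone illustration (not part of this patch; the function body is copied verbatim from set.go):

    package main

    import "fmt"

    // chunkStrings is copied from agent/master/set.go for this standalone example.
    func chunkStrings(x []string, numChunks int) [][]string {
    	var result [][]string
    	chunkSize := (len(x) + numChunks - 1) / numChunks
    	for i := 0; i < len(x); i += chunkSize {
    		ub := i + chunkSize
    		if ub > len(x) {
    			ub = len(x)
    		}
    		result = append(result, x[i:ub])
    	}
    	return result
    }

    func main() {
    	tests := []string{"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9"}
    	// chunkSize = ceil(10/3) = 4, so the chunks hold 4, 4, and 2 elements.
    	fmt.Println(chunkStrings(tests, 3)) // [[t0 t1 t2 t3] [t4 t5 t6 t7] [t8 t9]]
    }

This also matches `TestChunkStrings_1_4` below: a single input string yields a single chunk even though four chunks are requested.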
diff --git a/hack/integration-cli-on-swarm/agent/master/set_test.go b/hack/integration-cli-on-swarm/agent/master/set_test.go
new file mode 100644
index 0000000000..dfb7a0b4f8
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/master/set_test.go
@@ -0,0 +1,63 @@
+package main
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func generateInput(inputLen int) []string {
+ input := []string{}
+ for i := 0; i < inputLen; i++ {
+ input = append(input, fmt.Sprintf("s%d", i))
+ }
+
+ return input
+}
+
+func testChunkStrings(t *testing.T, inputLen, numChunks int) {
+ t.Logf("inputLen=%d, numChunks=%d", inputLen, numChunks)
+ input := generateInput(inputLen)
+ result := chunkStrings(input, numChunks)
+ t.Logf("result has %d chunks", len(result))
+ inputReconstructedFromResult := []string{}
+ for i, chunk := range result {
+ t.Logf("chunk %d has %d elements", i, len(chunk))
+ inputReconstructedFromResult = append(inputReconstructedFromResult, chunk...)
+ }
+ if !reflect.DeepEqual(input, inputReconstructedFromResult) {
+ t.Fatal("input != inputReconstructedFromResult")
+ }
+}
+
+func TestChunkStrings_4_4(t *testing.T) {
+ testChunkStrings(t, 4, 4)
+}
+
+func TestChunkStrings_4_1(t *testing.T) {
+ testChunkStrings(t, 4, 1)
+}
+
+func TestChunkStrings_1_4(t *testing.T) {
+ testChunkStrings(t, 1, 4)
+}
+
+func TestChunkStrings_1000_8(t *testing.T) {
+ testChunkStrings(t, 1000, 8)
+}
+
+func TestChunkStrings_1000_9(t *testing.T) {
+ testChunkStrings(t, 1000, 9)
+}
+
+func testShuffleStrings(t *testing.T, inputLen int, seed int64) {
+ t.Logf("inputLen=%d, seed=%d", inputLen, seed)
+ x := generateInput(inputLen)
+ shuffleStrings(x, seed)
+ t.Logf("shuffled: %v", x)
+}
+
+func TestShuffleStrings_100(t *testing.T) {
+ testShuffleStrings(t, 100, time.Now().UnixNano())
+}
diff --git a/hack/integration-cli-on-swarm/agent/types/types.go b/hack/integration-cli-on-swarm/agent/types/types.go
new file mode 100644
index 0000000000..fc598f0330
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/types/types.go
@@ -0,0 +1,18 @@
+package types
+
+// Args is the type for funker args
+type Args struct {
+ // ChunkID is a unique number for the chunk
+ ChunkID int `json:"chunk_id"`
+ // Tests is the set of strings that are passed as `-check.f` filters
+ Tests []string `json:"tests"`
+}
+
+// Result is the type for funker result
+type Result struct {
+ // ChunkID corresponds to Args.ChunkID
+ ChunkID int `json:"chunk_id"`
+ // Code is the exit code
+ Code int `json:"code"`
+ RawLog string `json:"raw_log"`
+}
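
The structs above define the JSON payloads that the master and worker exchange through Funker. A tiny standalone sketch of the wire format (not part of this patch; the values are illustrative):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/docker/docker/hack/integration-cli-on-swarm/agent/types"
    )

    func main() {
    	// What the master sends to a worker for one chunk.
    	args, _ := json.Marshal(types.Args{
    		ChunkID: 3,
    		Tests:   []string{"DockerSuite.TestFoo$", "DockerSuite.TestBar$"},
    	})
    	fmt.Println(string(args))
    	// {"chunk_id":3,"tests":["DockerSuite.TestFoo$","DockerSuite.TestBar$"]}

    	// What the worker sends back when the chunk finishes.
    	result, _ := json.Marshal(types.Result{ChunkID: 3, Code: 0, RawLog: "..."})
    	fmt.Println(string(result))
    	// {"chunk_id":3,"code":0,"raw_log":"..."}
    }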
diff --git a/hack/integration-cli-on-swarm/agent/vendor.conf b/hack/integration-cli-on-swarm/agent/vendor.conf
new file mode 100644
index 0000000000..efd6d6d049
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/vendor.conf
@@ -0,0 +1,2 @@
+# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
+github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773
diff --git a/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/LICENSE b/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/LICENSE
new file mode 100644
index 0000000000..75191a4dc7
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/call.go b/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/call.go
new file mode 100644
index 0000000000..ac0338d2d7
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/call.go
@@ -0,0 +1,50 @@
+package funker
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net"
+ "time"
+)
+
+// Call a Funker function
+func Call(name string, args interface{}) (interface{}, error) {
+ argsJSON, err := json.Marshal(args)
+ if err != nil {
+ return nil, err
+ }
+
+ addr, err := net.ResolveTCPAddr("tcp", name+":9999")
+ if err != nil {
+ return nil, err
+ }
+
+ conn, err := net.DialTCP("tcp", nil, addr)
+ if err != nil {
+ return nil, err
+ }
+ // Keepalive is a workaround for docker/docker#29655 .
+ // The implementation of FIN_WAIT2 seems weird on Swarm-mode.
+ // It seems to always refuse any packet after 60 seconds.
+ //
+ // TODO: remove this workaround if the issue gets resolved on the Docker side
+ if err := conn.SetKeepAlive(true); err != nil {
+ return nil, err
+ }
+ if err := conn.SetKeepAlivePeriod(30 * time.Second); err != nil {
+ return nil, err
+ }
+ if _, err = conn.Write(argsJSON); err != nil {
+ return nil, err
+ }
+ if err = conn.CloseWrite(); err != nil {
+ return nil, err
+ }
+ retJSON, err := ioutil.ReadAll(conn)
+ if err != nil {
+ return nil, err
+ }
+ var ret interface{}
+ err = json.Unmarshal(retJSON, &ret)
+ return ret, err
+}
diff --git a/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/handle.go b/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/handle.go
new file mode 100644
index 0000000000..89878994dc
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/vendor/github.com/bfirsh/funker-go/handle.go
@@ -0,0 +1,54 @@
+package funker
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "reflect"
+)
+
+// Handle a Funker function.
+func Handle(handler interface{}) error {
+ handlerValue := reflect.ValueOf(handler)
+ handlerType := handlerValue.Type()
+ if handlerType.Kind() != reflect.Func || handlerType.NumIn() != 1 || handlerType.NumOut() != 1 {
+ return fmt.Errorf("Handler must be a function with a single parameter and single return value.")
+ }
+ argsValue := reflect.New(handlerType.In(0))
+
+ listener, err := net.Listen("tcp", ":9999")
+ if err != nil {
+ return err
+ }
+ conn, err := listener.Accept()
+ if err != nil {
+ return err
+ }
+ // We close listener, because we only allow single request.
+ // Note that TCP "backlog" cannot be used for that purpose.
+ // http://www.perlmonks.org/?node_id=940662
+ if err = listener.Close(); err != nil {
+ return err
+ }
+ argsJSON, err := ioutil.ReadAll(conn)
+ if err != nil {
+ return err
+ }
+ err = json.Unmarshal(argsJSON, argsValue.Interface())
+ if err != nil {
+ return err
+ }
+
+ ret := handlerValue.Call([]reflect.Value{argsValue.Elem()})[0].Interface()
+ retJSON, err := json.Marshal(ret)
+ if err != nil {
+ return err
+ }
+
+ if _, err = conn.Write(retJSON); err != nil {
+ return err
+ }
+
+ return conn.Close()
+}
diff --git a/hack/integration-cli-on-swarm/agent/worker/executor.go b/hack/integration-cli-on-swarm/agent/worker/executor.go
new file mode 100644
index 0000000000..fc9960e91d
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/worker/executor.go
@@ -0,0 +1,109 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/client"
+ "github.com/docker/docker/pkg/stdcopy"
+)
+
+// testChunkExecutor executes integration-cli binary.
+// image needs to be the worker image itself. tests is an OR-set of regexps used as `-check.f` filters.
+type testChunkExecutor func(image string, tests []string) (int64, string, error)
+
+func dryTestChunkExecutor(image string, tests []string) (int64, string, error) {
+ return 0, fmt.Sprintf("DRY RUN (image=%q, tests=%v)", image, tests), nil
+}
+
+// privilegedTestChunkExecutor invokes a privileged container from the worker
+// service via the bind-mounted API socket so as to execute the test chunk
+func privilegedTestChunkExecutor(image string, tests []string) (int64, string, error) {
+ cli, err := client.NewEnvClient()
+ if err != nil {
+ return 0, "", err
+ }
+ // propagate variables from the host (needs to be defined in the compose file)
+ experimental := os.Getenv("DOCKER_EXPERIMENTAL")
+ graphdriver := os.Getenv("DOCKER_GRAPHDRIVER")
+ if graphdriver == "" {
+ info, err := cli.Info(context.Background())
+ if err != nil {
+ return 0, "", err
+ }
+ graphdriver = info.Driver
+ }
+ // `daemon_dest` is similar to `$DEST` (e.g. `bundles/VERSION/test-integration-cli`)
+ // but it exists outside of `bundles` so as to make `$DOCKER_GRAPHDRIVER` work.
+ //
+ // Without this hack, `$DOCKER_GRAPHDRIVER` fails because of (e.g.) `overlay2 is not supported over overlayfs`
+ //
+ // see integration-cli/daemon/daemon.go
+ daemonDest := "/daemon_dest"
+ config := container.Config{
+ Image: image,
+ Env: []string{
+ "TESTFLAGS=-check.f " + strings.Join(tests, "|"),
+ "KEEPBUNDLE=1",
+ "DOCKER_INTEGRATION_TESTS_VERIFIED=1", // for avoiding rebuilding integration-cli
+ "DOCKER_EXPERIMENTAL=" + experimental,
+ "DOCKER_GRAPHDRIVER=" + graphdriver,
+ "DOCKER_INTEGRATION_DAEMON_DEST=" + daemonDest,
+ },
+ // TODO: set label?
+ Entrypoint: []string{"hack/dind"},
+ Cmd: []string{"hack/make.sh", "test-integration-cli"},
+ }
+ hostConfig := container.HostConfig{
+ AutoRemove: true,
+ Privileged: true,
+ Mounts: []mount.Mount{
+ {
+ Type: mount.TypeVolume,
+ Target: daemonDest,
+ },
+ },
+ }
+ id, stream, err := runContainer(context.Background(), cli, config, hostConfig)
+ if err != nil {
+ return 0, "", err
+ }
+ var b bytes.Buffer
+ teeContainerStream(&b, os.Stdout, os.Stderr, stream)
+ rc, err := cli.ContainerWait(context.Background(), id)
+ if err != nil {
+ return 0, "", err
+ }
+ return rc, b.String(), nil
+}
+
+func runContainer(ctx context.Context, cli *client.Client, config container.Config, hostConfig container.HostConfig) (string, io.ReadCloser, error) {
+ created, err := cli.ContainerCreate(context.Background(),
+ &config, &hostConfig, nil, "")
+ if err != nil {
+ return "", nil, err
+ }
+ if err = cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil {
+ return "", nil, err
+ }
+ stream, err := cli.ContainerLogs(ctx,
+ created.ID,
+ types.ContainerLogsOptions{
+ ShowStdout: true,
+ ShowStderr: true,
+ Follow: true,
+ })
+ return created.ID, stream, err
+}
+
+func teeContainerStream(w, stdout, stderr io.Writer, stream io.ReadCloser) {
+ stdcopy.StdCopy(io.MultiWriter(w, stdout), io.MultiWriter(w, stderr), stream)
+ stream.Close()
+}
diff --git a/hack/integration-cli-on-swarm/agent/worker/worker.go b/hack/integration-cli-on-swarm/agent/worker/worker.go
new file mode 100644
index 0000000000..4b3c6bdfa8
--- /dev/null
+++ b/hack/integration-cli-on-swarm/agent/worker/worker.go
@@ -0,0 +1,68 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/bfirsh/funker-go"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/hack/integration-cli-on-swarm/agent/types"
+)
+
+func main() {
+ if err := xmain(); err != nil {
+ log.Fatalf("fatal error: %v", err)
+ }
+}
+
+func validImageDigest(s string) bool {
+ return reference.DigestRegexp.FindString(s) != ""
+}
+
+func xmain() error {
+ workerImageDigest := flag.String("worker-image-digest", "", "Needs to be the digest of this worker image itself")
+ dryRun := flag.Bool("dry-run", false, "Dry run")
+ flag.Parse()
+ if !validImageDigest(*workerImageDigest) {
+ // Because of issue #29582.
+ // `docker service create localregistry.example.com/blahblah:latest` pulls the image data to the local node, but does not create the tag.
+ // So, `docker run localregistry.example.com/blahblah:latest` fails: `Unable to find image 'localregistry.example.com/blahblah:latest' locally`
+ return fmt.Errorf("worker-image-digest must be a digest, got %q", *workerImageDigest)
+ }
+ executor := privilegedTestChunkExecutor
+ if *dryRun {
+ executor = dryTestChunkExecutor
+ }
+ return handle(*workerImageDigest, executor)
+}
+
+func handle(workerImageDigest string, executor testChunkExecutor) error {
+ log.Printf("Waiting for a funker request")
+ return funker.Handle(func(args *types.Args) types.Result {
+ log.Printf("Executing chunk %d, contains %d test filters",
+ args.ChunkID, len(args.Tests))
+ begin := time.Now()
+ code, rawLog, err := executor(workerImageDigest, args.Tests)
+ if err != nil {
+ log.Printf("Error while executing chunk %d: %v", args.ChunkID, err)
+ if code == 0 {
+ // Make sure this is a failure
+ code = 1
+ }
+ return types.Result{
+ ChunkID: args.ChunkID,
+ Code: int(code),
+ RawLog: rawLog,
+ }
+ }
+ elapsed := time.Now().Sub(begin)
+ log.Printf("Finished chunk %d, code=%d, elapsed=%v", args.ChunkID, code, elapsed)
+ return types.Result{
+ ChunkID: args.ChunkID,
+ Code: int(code),
+ RawLog: rawLog,
+ }
+ })
+}
diff --git a/hack/integration-cli-on-swarm/host/compose.go b/hack/integration-cli-on-swarm/host/compose.go
new file mode 100644
index 0000000000..7b5f955150
--- /dev/null
+++ b/hack/integration-cli-on-swarm/host/compose.go
@@ -0,0 +1,121 @@
+package main
+
+import (
+ "context"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "text/template"
+
+ "github.com/docker/docker/client"
+)
+
+const composeTemplate = `# generated by integration-cli-on-swarm
+version: "3"
+
+services:
+ worker:
+ image: "{{.WorkerImage}}"
+ command: ["-worker-image-digest={{.WorkerImageDigest}}", "-dry-run={{.DryRun}}"]
+ networks:
+ - net
+ volumes:
+# Bind-mount the API socket so that we can invoke "docker run --privileged" within the service containers
+ - /var/run/docker.sock:/var/run/docker.sock
+ environment:
+ - DOCKER_GRAPHDRIVER={{.EnvDockerGraphDriver}}
+ - DOCKER_EXPERIMENTAL={{.EnvDockerExperimental}}
+ deploy:
+ mode: replicated
+ replicas: {{.Replicas}}
+ restart_policy:
+# The restart condition needs to be "any" for the funker function (each worker container exits after serving a single request)
+ condition: any
+
+ master:
+ image: "{{.MasterImage}}"
+ command: ["-worker-service=worker", "-input=/mnt/input", "-chunks={{.Chunks}}", "-shuffle={{.Shuffle}}", "-rand-seed={{.RandSeed}}"]
+ networks:
+ - net
+ volumes:
+ - {{.Volume}}:/mnt
+ deploy:
+ mode: replicated
+ replicas: 1
+ restart_policy:
+ condition: none
+ placement:
+# Make sure the master can access the volume
+ constraints: [node.id == {{.SelfNodeID}}]
+
+networks:
+ net:
+
+volumes:
+ {{.Volume}}:
+ external: true
+`
+
+type composeOptions struct {
+ Replicas int
+ Chunks int
+ MasterImage string
+ WorkerImage string
+ Volume string
+ Shuffle bool
+ RandSeed int64
+ DryRun bool
+}
+
+type composeTemplateOptions struct {
+ composeOptions
+ WorkerImageDigest string
+ SelfNodeID string
+ EnvDockerGraphDriver string
+ EnvDockerExperimental string
+}
+
+// createCompose creates "dir/docker-compose.yml".
+// If dir is empty, TempDir() is used.
+func createCompose(dir string, cli *client.Client, opts composeOptions) (string, error) {
+ if dir == "" {
+ var err error
+ dir, err = ioutil.TempDir("", "integration-cli-on-swarm-")
+ if err != nil {
+ return "", err
+ }
+ }
+ resolved := composeTemplateOptions{}
+ resolved.composeOptions = opts
+ workerImageInspect, _, err := cli.ImageInspectWithRaw(context.Background(), defaultWorkerImageName)
+ if err != nil {
+ return "", err
+ }
+ if len(workerImageInspect.RepoDigests) > 0 {
+ resolved.WorkerImageDigest = workerImageInspect.RepoDigests[0]
+ } else {
+ // fall back for non-pushed image
+ resolved.WorkerImageDigest = workerImageInspect.ID
+ }
+ info, err := cli.Info(context.Background())
+ if err != nil {
+ return "", err
+ }
+ resolved.SelfNodeID = info.Swarm.NodeID
+ resolved.EnvDockerGraphDriver = os.Getenv("DOCKER_GRAPHDRIVER")
+ resolved.EnvDockerExperimental = os.Getenv("DOCKER_EXPERIMENTAL")
+ composeFilePath := filepath.Join(dir, "docker-compose.yml")
+ tmpl, err := template.New("").Parse(composeTemplate)
+ if err != nil {
+ return "", err
+ }
+ f, err := os.Create(composeFilePath)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ if err = tmpl.Execute(f, resolved); err != nil {
+ return "", err
+ }
+ return composeFilePath, nil
+}
diff --git a/hack/integration-cli-on-swarm/host/dockercmd.go b/hack/integration-cli-on-swarm/host/dockercmd.go
new file mode 100644
index 0000000000..10ea0ecc24
--- /dev/null
+++ b/hack/integration-cli-on-swarm/host/dockercmd.go
@@ -0,0 +1,64 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/client"
+)
+
+func system(commands [][]string) error {
+ for _, c := range commands {
+ cmd := exec.Command(c[0], c[1:]...)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ cmd.Env = os.Environ()
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func pushImage(unusedCli *client.Client, remote, local string) error {
+ // FIXME: eliminate os/exec (but it is hard to pass auth without os/exec ...)
+ return system([][]string{
+ {"docker", "image", "tag", local, remote},
+ {"docker", "image", "push", remote},
+ })
+}
+
+func deployStack(unusedCli *client.Client, stackName, composeFilePath string) error {
+ // FIXME: eliminate os/exec (but stack is implemented in CLI ...)
+ return system([][]string{
+ {"docker", "stack", "deploy",
+ "--compose-file", composeFilePath,
+ "--with-registry-auth",
+ stackName},
+ })
+}
+
+func hasStack(unusedCli *client.Client, stackName string) bool {
+ // FIXME: eliminate os/exec (but stack is implemented in CLI ...)
+ out, err := exec.Command("docker", "stack", "ls").Output()
+ if err != nil {
+ panic(fmt.Errorf("`docker stack ls` failed with: %s", string(out)))
+ }
+ // FIXME: not accurate
+ return strings.Contains(string(out), stackName)
+}
+
+func removeStack(unusedCli *client.Client, stackName string) error {
+ // FIXME: eliminate os/exec (but stack is implemented in CLI ...)
+ if err := system([][]string{
+ {"docker", "stack", "rm", stackName},
+ }); err != nil {
+ return err
+ }
+ // FIXME
+ time.Sleep(10 * time.Second)
+ return nil
+}
diff --git a/hack/integration-cli-on-swarm/host/enumerate.go b/hack/integration-cli-on-swarm/host/enumerate.go
new file mode 100644
index 0000000000..08e5ac7f19
--- /dev/null
+++ b/hack/integration-cli-on-swarm/host/enumerate.go
@@ -0,0 +1,55 @@
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "regexp"
+)
+
+var testFuncRegexp *regexp.Regexp
+
+func init() {
+ testFuncRegexp = regexp.MustCompile(`(?m)^\s*func\s+\(\w*\s*\*(\w+Suite)\)\s+(Test\w+)`)
+}
+
+func enumerateTestsForBytes(b []byte) ([]string, error) {
+ var tests []string
+ submatches := testFuncRegexp.FindAllSubmatch(b, -1)
+ for _, submatch := range submatches {
+ if len(submatch) == 3 {
+ tests = append(tests, fmt.Sprintf("%s.%s$", submatch[1], submatch[2]))
+ }
+ }
+ return tests, nil
+}
+
+// enumerateTests enumerates valid `-check.f` strings for all the test functions.
+// Note that we use regexp rather than parsing Go files for performance reasons.
+// (Try `TESTFLAGS=-check.list make test-integration-cli` to see the slowness of parsing.)
+// The files need to be `gofmt`-ed.
+//
+// The result will be as follows, but unsorted ('$' is appended because they are regexp for `-check.f`):
+// "DockerAuthzSuite.TestAuthZPluginAPIDenyResponse$"
+// "DockerAuthzSuite.TestAuthZPluginAllowEventStream$"
+// ...
+// "DockerTrustedSwarmSuite.TestTrustedServiceUpdate$"
+func enumerateTests(wd string) ([]string, error) {
+ testGoFiles, err := filepath.Glob(filepath.Join(wd, "integration-cli", "*_test.go"))
+ if err != nil {
+ return nil, err
+ }
+ var allTests []string
+ for _, testGoFile := range testGoFiles {
+ b, err := ioutil.ReadFile(testGoFile)
+ if err != nil {
+ return nil, err
+ }
+ tests, err := enumerateTestsForBytes(b)
+ if err != nil {
+ return nil, err
+ }
+ allTests = append(allTests, tests...)
+ }
+ return allTests, nil
+}
diff --git a/hack/integration-cli-on-swarm/host/enumerate_test.go b/hack/integration-cli-on-swarm/host/enumerate_test.go
new file mode 100644
index 0000000000..d6049ae52e
--- /dev/null
+++ b/hack/integration-cli-on-swarm/host/enumerate_test.go
@@ -0,0 +1,84 @@
+package main
+
+import (
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+)
+
+func getRepoTopDir(t *testing.T) string {
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+ wd = filepath.Clean(wd)
+ suffix := "hack/integration-cli-on-swarm/host"
+ if !strings.HasSuffix(wd, suffix) {
+ t.Skipf("cwd seems strange (needs to have suffix %s): %v", suffix, wd)
+ }
+ return filepath.Clean(filepath.Join(wd, "../../.."))
+}
+
+func TestEnumerateTests(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ tests, err := enumerateTests(getRepoTopDir(t))
+ if err != nil {
+ t.Fatal(err)
+ }
+ sort.Strings(tests)
+ t.Logf("enumerated %d test filter strings:", len(tests))
+ for _, s := range tests {
+ t.Logf("- %q", s)
+ }
+}
+
+func TestEnumerateTestsForBytes(t *testing.T) {
+ b := []byte(`package main
+import (
+ "github.com/go-check/check"
+)
+
+func (s *FooSuite) TestA(c *check.C) {
+}
+
+func (s *FooSuite) TestAAA(c *check.C) {
+}
+
+func (s *BarSuite) TestBar(c *check.C) {
+}
+
+func (x *FooSuite) TestC(c *check.C) {
+}
+
+func (*FooSuite) TestD(c *check.C) {
+}
+
+// should not be counted
+func (s *FooSuite) testE(c *check.C) {
+}
+
+// counted, although we don't support ungofmt file
+ func (s *FooSuite) TestF (c *check.C){}
+`)
+ expected := []string{
+ "FooSuite.TestA$",
+ "FooSuite.TestAAA$",
+ "BarSuite.TestBar$",
+ "FooSuite.TestC$",
+ "FooSuite.TestD$",
+ "FooSuite.TestF$",
+ }
+
+ actual, err := enumerateTestsForBytes(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("expected %q, got %q", expected, actual)
+ }
+}
diff --git a/hack/integration-cli-on-swarm/host/host.go b/hack/integration-cli-on-swarm/host/host.go
new file mode 100644
index 0000000000..254cf0dbbd
--- /dev/null
+++ b/hack/integration-cli-on-swarm/host/host.go
@@ -0,0 +1,190 @@
+package main
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/client"
+ "github.com/docker/docker/pkg/stdcopy"
+)
+
+const (
+ defaultStackName = "integration-cli-on-swarm"
+ defaultVolumeName = "integration-cli-on-swarm"
+ defaultMasterImageName = "integration-cli-master"
+ defaultWorkerImageName = "integration-cli-worker"
+)
+
+func main() {
+ if err := xmain(); err != nil {
+ logrus.Fatalf("fatal error: %v", err)
+ }
+}
+
+// xmain can call os.Exit()
+func xmain() error {
+ // Should we use cobra maybe?
+ replicas := flag.Int("replicas", 1, "Number of worker service replicas")
+ chunks := flag.Int("chunks", 0, "Number of test chunks executed in batch (0 == replicas)")
+ pushWorkerImage := flag.String("push-worker-image", "", "Push the worker image to the registry. Required for distributed execution. (empty == not to push)")
+ shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity")
+ // flags below are rarely used
+ randSeed := flag.Int64("rand-seed", int64(0), "Random seed used for shuffling (0 == current time)")
+ filtersFile := flag.String("filters-file", "", "Path to optional file composed of `-check.f` filter strings")
+ dryRun := flag.Bool("dry-run", false, "Dry run")
+ flag.Parse()
+ if *chunks == 0 {
+ *chunks = *replicas
+ }
+ if *randSeed == int64(0) {
+ *randSeed = time.Now().UnixNano()
+ }
+ cli, err := client.NewEnvClient()
+ if err != nil {
+ return err
+ }
+ if hasStack(cli, defaultStackName) {
+ logrus.Infof("Removing stack %s", defaultStackName)
+ removeStack(cli, defaultStackName)
+ }
+ if hasVolume(cli, defaultVolumeName) {
+ logrus.Infof("Removing volume %s", defaultVolumeName)
+ removeVolume(cli, defaultVolumeName)
+ }
+ if err = ensureImages(cli, []string{defaultWorkerImageName, defaultMasterImageName}); err != nil {
+ return err
+ }
+ workerImageForStack := defaultWorkerImageName
+ if *pushWorkerImage != "" {
+ logrus.Infof("Pushing %s to %s", defaultWorkerImageName, *pushWorkerImage)
+ if err = pushImage(cli, *pushWorkerImage, defaultWorkerImageName); err != nil {
+ return err
+ }
+ workerImageForStack = *pushWorkerImage
+ }
+ compose, err := createCompose("", cli, composeOptions{
+ Replicas: *replicas,
+ Chunks: *chunks,
+ MasterImage: defaultMasterImageName,
+ WorkerImage: workerImageForStack,
+ Volume: defaultVolumeName,
+ Shuffle: *shuffle,
+ RandSeed: *randSeed,
+ DryRun: *dryRun,
+ })
+ if err != nil {
+ return err
+ }
+ filters, err := filtersBytes(*filtersFile)
+ if err != nil {
+ return err
+ }
+ logrus.Infof("Creating volume %s with input data", defaultVolumeName)
+ if err = createVolumeWithData(cli,
+ defaultVolumeName,
+ map[string][]byte{"/input": filters},
+ defaultMasterImageName); err != nil {
+ return err
+ }
+ logrus.Infof("Deploying stack %s from %s", defaultStackName, compose)
+ defer func() {
+ logrus.Infof("NOTE: You may want to inspect or clean up following resources:")
+ logrus.Infof(" - Stack: %s", defaultStackName)
+ logrus.Infof(" - Volume: %s", defaultVolumeName)
+ logrus.Infof(" - Compose file: %s", compose)
+ logrus.Infof(" - Master image: %s", defaultMasterImageName)
+ logrus.Infof(" - Worker image: %s", workerImageForStack)
+ }()
+ if err = deployStack(cli, defaultStackName, compose); err != nil {
+ return err
+ }
+ logrus.Infof("The log will be displayed here after some duration. "+
+ "You can watch the live status via `docker service logs %s_worker`",
+ defaultStackName)
+ masterContainerID, err := waitForMasterUp(cli, defaultStackName)
+ if err != nil {
+ return err
+ }
+ rc, err := waitForContainerCompletion(cli, os.Stdout, os.Stderr, masterContainerID)
+ if err != nil {
+ return err
+ }
+ logrus.Infof("Exit status: %d", rc)
+ os.Exit(int(rc))
+ return nil
+}
+
+func ensureImages(cli *client.Client, images []string) error {
+ for _, image := range images {
+ _, _, err := cli.ImageInspectWithRaw(context.Background(), image)
+ if err != nil {
+ return fmt.Errorf("could not find image %s, please run `make build-integration-cli-on-swarm`: %v",
+ image, err)
+ }
+ }
+ return nil
+}
+
+func filtersBytes(optionalFiltersFile string) ([]byte, error) {
+ var b []byte
+ if optionalFiltersFile == "" {
+ tests, err := enumerateTests(".")
+ if err != nil {
+ return b, err
+ }
+ b = []byte(strings.Join(tests, "\n") + "\n")
+ } else {
+ var err error
+ b, err = ioutil.ReadFile(optionalFiltersFile)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func waitForMasterUp(cli *client.Client, stackName string) (string, error) {
+ // FIXME(AkihiroSuda): it should retry until master is up, rather than pre-sleeping
+ time.Sleep(10 * time.Second)
+
+ fil := filters.NewArgs()
+ fil.Add("label", "com.docker.stack.namespace="+stackName)
+ // FIXME(AkihiroSuda): we should not rely on internal service naming convention
+ fil.Add("label", "com.docker.swarm.service.name="+stackName+"_master")
+ masters, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
+ All: true,
+ Filters: fil,
+ })
+ if err != nil {
+ return "", err
+ }
+ if len(masters) == 0 {
+ return "", fmt.Errorf("master not running in stack %s?", stackName)
+ }
+ return masters[0].ID, nil
+}
+
+func waitForContainerCompletion(cli *client.Client, stdout, stderr io.Writer, containerID string) (int64, error) {
+ stream, err := cli.ContainerLogs(context.Background(),
+ containerID,
+ types.ContainerLogsOptions{
+ ShowStdout: true,
+ ShowStderr: true,
+ Follow: true,
+ })
+ if err != nil {
+ return 1, err
+ }
+ stdcopy.StdCopy(stdout, stderr, stream)
+ stream.Close()
+ return cli.ContainerWait(context.Background(), containerID)
+}
diff --git a/hack/integration-cli-on-swarm/host/volume.go b/hack/integration-cli-on-swarm/host/volume.go
new file mode 100644
index 0000000000..c2f96984a0
--- /dev/null
+++ b/hack/integration-cli-on-swarm/host/volume.go
@@ -0,0 +1,88 @@
+package main
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "io"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/volume"
+ "github.com/docker/docker/client"
+)
+
+func createTar(data map[string][]byte) (io.Reader, error) {
+ var b bytes.Buffer
+ tw := tar.NewWriter(&b)
+ for path, datum := range data {
+ hdr := tar.Header{
+ Name: path,
+ Mode: 0644,
+ Size: int64(len(datum)),
+ }
+ if err := tw.WriteHeader(&hdr); err != nil {
+ return nil, err
+ }
+ _, err := tw.Write(datum)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return &b, nil
+}
+
+// createVolumeWithData creates a volume with the given data (e.g. data["/foo"] = []byte("bar"))
+// Internally, a container is created from the image so as to provision the data to the volume,
+// which is attached to the container.
+func createVolumeWithData(cli *client.Client, volumeName string, data map[string][]byte, image string) error {
+ _, err := cli.VolumeCreate(context.Background(),
+ volume.VolumesCreateBody{
+ Driver: "local",
+ Name: volumeName,
+ })
+ if err != nil {
+ return err
+ }
+ mnt := "/mnt"
+ miniContainer, err := cli.ContainerCreate(context.Background(),
+ &container.Config{
+ Image: image,
+ },
+ &container.HostConfig{
+ Mounts: []mount.Mount{
+ {
+ Type: mount.TypeVolume,
+ Source: volumeName,
+ Target: mnt,
+ },
+ },
+ }, nil, "")
+ if err != nil {
+ return err
+ }
+ tr, err := createTar(data)
+ if err != nil {
+ return err
+ }
+ if err = cli.CopyToContainer(context.Background(),
+ miniContainer.ID, mnt, tr, types.CopyToContainerOptions{}); err != nil {
+ return err
+ }
+ return cli.ContainerRemove(context.Background(),
+ miniContainer.ID,
+ types.ContainerRemoveOptions{})
+}
+
+func hasVolume(cli *client.Client, volumeName string) bool {
+ _, err := cli.VolumeInspect(context.Background(), volumeName)
+ return err == nil
+}
+
+func removeVolume(cli *client.Client, volumeName string) error {
+ return cli.VolumeRemove(context.Background(), volumeName, true)
+}
diff --git a/hack/make/.integration-test-helpers b/hack/make/.integration-test-helpers
index d9ba88009f..7a086d5fe8 100644
--- a/hack/make/.integration-test-helpers
+++ b/hack/make/.integration-test-helpers
@@ -60,6 +60,7 @@ test_env() {
# use "env -i" to tightly control the environment variables that bleed into the tests
env -i \
DEST="$DEST" \
+ DOCKER_INTEGRATION_DAEMON_DEST="$DOCKER_INTEGRATION_DAEMON_DEST" \
DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \
DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \
DOCKER_ENGINE_GOARCH="$DOCKER_ENGINE_GOARCH" \
diff --git a/integration-cli/daemon/daemon.go b/integration-cli/daemon/daemon.go
index 37104293b6..87e848852d 100644
--- a/integration-cli/daemon/daemon.go
+++ b/integration-cli/daemon/daemon.go
@@ -77,12 +77,15 @@ type clientConfig struct {
}
// New returns a Daemon instance to be used for testing.
-// This will create a directory such as d123456789 in the folder specified by $DEST.
+// This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST.
// The daemon will not automatically start.
func New(t testingT, dockerBinary string, dockerdBinary string, config Config) *Daemon {
- dest := os.Getenv("DEST")
+ dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
if dest == "" {
- t.Fatalf("Please set the DEST environment variable")
+ dest = os.Getenv("DEST")
+ }
+ if dest == "" {
+ t.Fatalf("Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable")
}
if err := os.MkdirAll(SockRoot, 0700); err != nil {