summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--CONTRIBUTING.md2
-rw-r--r--Makefile2
-rw-r--r--api/api.go3
-rw-r--r--api/client.go5
-rw-r--r--archive/archive.go4
-rw-r--r--archive/archive_test.go2
-rw-r--r--archive/changes.go2
-rw-r--r--archive/diff.go44
-rw-r--r--archive/wrap.go2
-rw-r--r--container.go7
-rw-r--r--docker/docker.go3
-rw-r--r--docs/sources/reference/api/remote_api_client_libraries.rst13
-rw-r--r--docs/sources/reference/commandline/cli.rst5
-rw-r--r--engine/engine_test.go13
-rw-r--r--hack/infrastructure/docker-ci.rst56
-rw-r--r--hack/infrastructure/docker-ci/Dockerfile66
-rw-r--r--hack/infrastructure/docker-ci/README.rst75
-rw-r--r--hack/infrastructure/docker-ci/VERSION2
-rw-r--r--hack/infrastructure/docker-ci/buildbot/README.rst1
-rw-r--r--hack/infrastructure/docker-ci/buildbot/buildbot.conf18
-rw-r--r--hack/infrastructure/docker-ci/buildbot/github.py9
-rw-r--r--hack/infrastructure/docker-ci/buildbot/master.cfg173
-rw-r--r--hack/infrastructure/docker-ci/buildbot/requirements.txt9
-rwxr-xr-xhack/infrastructure/docker-ci/buildbot/setup.sh59
-rw-r--r--hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml22
-rw-r--r--hack/infrastructure/docker-ci/dcr/prod/settings.yml5
-rw-r--r--hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml22
-rw-r--r--hack/infrastructure/docker-ci/dcr/stage/settings.yml5
-rwxr-xr-xhack/infrastructure/docker-ci/deployment.py171
-rwxr-xr-xhack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh32
-rw-r--r--hack/infrastructure/docker-ci/docker-test/Dockerfile25
-rwxr-xr-xhack/infrastructure/docker-ci/docker-test/test_docker.sh33
l---------hack/infrastructure/docker-ci/dockertest/docker1
l---------hack/infrastructure/docker-ci/dockertest/docker-registry1
-rwxr-xr-xhack/infrastructure/docker-ci/dockertest/nightlyrelease13
-rwxr-xr-xhack/infrastructure/docker-ci/dockertest/project8
-rw-r--r--hack/infrastructure/docker-ci/nginx/nginx.conf12
-rw-r--r--hack/infrastructure/docker-ci/nightlyrelease/Dockerfile30
-rw-r--r--hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh40
-rw-r--r--hack/infrastructure/docker-ci/registry-coverage/Dockerfile18
-rwxr-xr-xhack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh18
-rwxr-xr-xhack/infrastructure/docker-ci/setup.sh54
-rw-r--r--hack/infrastructure/docker-ci/testbuilder/Dockerfile12
-rwxr-xr-xhack/infrastructure/docker-ci/testbuilder/docker-registry.sh12
-rwxr-xr-xhack/infrastructure/docker-ci/testbuilder/docker.sh18
-rwxr-xr-xhack/infrastructure/docker-ci/testbuilder/testbuilder.sh40
-rwxr-xr-xhack/infrastructure/docker-ci/tool/backup.py47
-rwxr-xr-xhack/vendor.sh8
-rw-r--r--integration/api_test.go101
-rw-r--r--integration/utils_test.go2
-rw-r--r--networkdriver/network_test.go2
-rw-r--r--pkg/netlink/MAINTAINERS2
-rw-r--r--reflink_copy_linux.go55
-rw-r--r--reflink_copy_unsupported.go16
-rw-r--r--server.go2
-rw-r--r--tags_unit_test.go2
-rw-r--r--utils/tarsum.go2
-rw-r--r--utils_test.go2
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go304
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go79
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go402
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go425
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go20
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go20
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go32
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go284
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tarbin0 -> 3072 bytes
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tarbin0 -> 1024 bytes
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tarbin0 -> 10240 bytes
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt1
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt1
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tarbin0 -> 3072 bytes
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tarbin0 -> 2048 bytes
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tarbin0 -> 3584 bytes
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tarbin0 -> 4096 bytes
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tarbin0 -> 3584 bytes
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tarbin0 -> 5120 bytes
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go383
-rw-r--r--vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go433
79 files changed, 3051 insertions, 741 deletions
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 339f6f9122..c4095641cb 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -129,7 +129,7 @@ pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below:
```
-Docker Developer Grant and Certificate of Origin 1.1
+Docker Developer Certificate of Origin 1.1
By making a contribution to the Docker Project ("Project"), I represent and
warrant that:
diff --git a/Makefile b/Makefile
index 168707a80f..e124d1d7e6 100644
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,7 @@
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
DOCKER_IMAGE := docker:$(GIT_BRANCH)
DOCKER_DOCS_IMAGE := docker-docs:$(GIT_BRANCH)
-DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles "$(DOCKER_IMAGE)"
+DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v "$(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles" "$(DOCKER_IMAGE)"
default: binary
diff --git a/api/api.go b/api/api.go
index 2cfc75631e..2ba27288c0 100644
--- a/api/api.go
+++ b/api/api.go
@@ -943,6 +943,9 @@ func postContainersCopy(eng *engine.Engine, version float64, w http.ResponseWrit
streamJSON(job, w, false)
if err := job.Run(); err != nil {
utils.Errorf("%s", err.Error())
+ if strings.Contains(err.Error(), "No such container") {
+ w.WriteHeader(http.StatusNotFound)
+ }
}
return nil
}
diff --git a/api/client.go b/api/client.go
index f1f336d5f4..605970c0fd 100644
--- a/api/client.go
+++ b/api/client.go
@@ -136,7 +136,7 @@ func (cli *DockerCli) CmdInsert(args ...string) error {
func (cli *DockerCli) CmdBuild(args ...string) error {
cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH")
tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success")
- suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress verbose build output")
+ suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
rm := cmd.Bool([]string{"#rm", "-rm"}, false, "Remove intermediate containers after a successful build")
if err := cmd.Parse(args); err != nil {
@@ -1961,6 +1961,9 @@ func (cli *DockerCli) CmdCp(args ...string) error {
if stream != nil {
defer stream.Close()
}
+ if statusCode == 404 {
+ return fmt.Errorf("No such container: %v", info[0])
+ }
if err != nil {
return err
}
diff --git a/archive/archive.go b/archive/archive.go
index 3bd3af2761..c0551d9fa3 100644
--- a/archive/archive.go
+++ b/archive/archive.go
@@ -1,8 +1,8 @@
package archive
import (
- "archive/tar"
"bytes"
+ "code.google.com/p/go/src/pkg/archive/tar"
"compress/bzip2"
"compress/gzip"
"errors"
@@ -186,7 +186,7 @@ func addTarFile(path, name string, tw *tar.Writer) error {
return nil
}
-func createTarFile(path, extractDir string, hdr *tar.Header, reader *tar.Reader) error {
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) error {
switch hdr.Typeflag {
case tar.TypeDir:
// Create directory unless it exists as a directory already.
diff --git a/archive/archive_test.go b/archive/archive_test.go
index 164fc8fe27..8badd58bd7 100644
--- a/archive/archive_test.go
+++ b/archive/archive_test.go
@@ -1,8 +1,8 @@
package archive
import (
- "archive/tar"
"bytes"
+ "code.google.com/p/go/src/pkg/archive/tar"
"fmt"
"io"
"io/ioutil"
diff --git a/archive/changes.go b/archive/changes.go
index 25406f5cec..b46b13bbe7 100644
--- a/archive/changes.go
+++ b/archive/changes.go
@@ -1,7 +1,7 @@
package archive
import (
- "archive/tar"
+ "code.google.com/p/go/src/pkg/archive/tar"
"fmt"
"github.com/dotcloud/docker/utils"
"io"
diff --git a/archive/diff.go b/archive/diff.go
index de1efacf34..6a778390bb 100644
--- a/archive/diff.go
+++ b/archive/diff.go
@@ -1,8 +1,10 @@
package archive
import (
- "archive/tar"
+ "code.google.com/p/go/src/pkg/archive/tar"
+ "fmt"
"io"
+ "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -42,6 +44,9 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
var dirs []*tar.Header
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
@@ -72,6 +77,22 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, ".wh..wh.") {
+ // Regular files inside /.wh..wh.plnk can be used as hardlink targets
+ // We don't want this directory, but we need the files in them so that
+ // such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+ return err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr); err != nil {
+ return err
+ }
+ }
continue
}
@@ -96,7 +117,26 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
}
}
- if err := createTarFile(path, dest, hdr, tr); err != nil {
+ srcData := io.Reader(tr)
+ srcHdr := hdr
+
+ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+ // we manually retarget these into the temporary files we extracted them into
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ if err := createTarFile(path, dest, srcHdr, srcData); err != nil {
return err
}
diff --git a/archive/wrap.go b/archive/wrap.go
index dfb335c0b6..981420b3fe 100644
--- a/archive/wrap.go
+++ b/archive/wrap.go
@@ -1,8 +1,8 @@
package archive
import (
- "archive/tar"
"bytes"
+ "code.google.com/p/go/src/pkg/archive/tar"
"io/ioutil"
)
diff --git a/container.go b/container.go
index 3740a7fb73..f55b8c3c7b 100644
--- a/container.go
+++ b/container.go
@@ -1256,6 +1256,13 @@ func (container *Container) Stop(seconds int) error {
}
func (container *Container) Restart(seconds int) error {
+ // Avoid unnecessarily unmounting and then directly mounting
+ // the container when the container stops and then starts
+ // again
+ if err := container.Mount(); err == nil {
+ defer container.Unmount()
+ }
+
if err := container.Stop(seconds); err != nil {
return err
}
diff --git a/docker/docker.go b/docker/docker.go
index b4d7879397..a552b8318a 100644
--- a/docker/docker.go
+++ b/docker/docker.go
@@ -58,6 +58,9 @@ func main() {
// If we do not have a host, default to unix socket
defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET)
}
+ if _, err := api.ValidateHost(defaultHost); err != nil {
+ log.Fatal(err)
+ }
flHosts.Set(defaultHost)
}
diff --git a/docs/sources/reference/api/remote_api_client_libraries.rst b/docs/sources/reference/api/remote_api_client_libraries.rst
index c7ced0055e..362fa6fe3d 100644
--- a/docs/sources/reference/api/remote_api_client_libraries.rst
+++ b/docs/sources/reference/api/remote_api_client_libraries.rst
@@ -1,6 +1,6 @@
:title: Remote API Client Libraries
:description: Various client libraries available to use with the Docker remote API
-:keywords: API, Docker, index, registry, REST, documentation, clients, Python, Ruby, Javascript, Erlang, Go
+:keywords: API, Docker, index, registry, REST, documentation, clients, Python, Ruby, JavaScript, Erlang, Go
==================================
@@ -21,15 +21,18 @@ and we will add the libraries here.
+----------------------+----------------+--------------------------------------------+----------+
| Ruby | docker-api | https://github.com/swipely/docker-api | Active |
+----------------------+----------------+--------------------------------------------+----------+
-| Javascript (NodeJS) | docker.io | https://github.com/appersonlabs/docker.io | Active |
+| JavaScript (NodeJS) | dockerode | https://github.com/apocas/dockerode | Active |
+| | | Install via NPM: `npm install dockerode` | |
++----------------------+----------------+--------------------------------------------+----------+
+| JavaScript (NodeJS) | docker.io | https://github.com/appersonlabs/docker.io | Active |
| | | Install via NPM: `npm install docker.io` | |
+----------------------+----------------+--------------------------------------------+----------+
-| Javascript | docker-js | https://github.com/dgoujard/docker-js | Active |
+| JavaScript | docker-js | https://github.com/dgoujard/docker-js | Active |
+----------------------+----------------+--------------------------------------------+----------+
-| Javascript (Angular) | docker-cp | https://github.com/13W/docker-cp | Active |
+| JavaScript (Angular) | docker-cp | https://github.com/13W/docker-cp | Active |
| **WebUI** | | | |
+----------------------+----------------+--------------------------------------------+----------+
-| Javascript (Angular) | dockerui | https://github.com/crosbymichael/dockerui | Active |
+| JavaScript (Angular) | dockerui | https://github.com/crosbymichael/dockerui | Active |
| **WebUI** | | | |
+----------------------+----------------+--------------------------------------------+----------+
| Java | docker-java | https://github.com/kpelykh/docker-java | Active |
diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst
index 032076b941..7ba0123065 100644
--- a/docs/sources/reference/commandline/cli.rst
+++ b/docs/sources/reference/commandline/cli.rst
@@ -186,7 +186,7 @@ Examples:
Build a new container image from the source code at PATH
-t, --time="": Repository name (and optionally a tag) to be applied
to the resulting image in case of success.
- -q, --quiet=false: Suppress verbose build output.
+ -q, --quiet=false: Suppress the verbose output generated by the containers.
--no-cache: Do not use the cache when building the image.
--rm: Remove intermediate containers after a successful build
@@ -194,7 +194,8 @@ The files at ``PATH`` or ``URL`` are called the "context" of the build. The
build process may refer to any of the files in the context, for example when
using an :ref:`ADD <dockerfile_add>` instruction. When a single ``Dockerfile``
is given as ``URL``, then no context is set. When a Git repository is set as
-``URL``, then the repository is used as the context
+``URL``, then the repository is used as the context. Git repositories are
+cloned with their submodules (`git clone --recursive`).
.. _cli_build_examples:
diff --git a/engine/engine_test.go b/engine/engine_test.go
index 065a19f492..da59610727 100644
--- a/engine/engine_test.go
+++ b/engine/engine_test.go
@@ -4,6 +4,7 @@ import (
"io/ioutil"
"os"
"path"
+ "path/filepath"
"testing"
)
@@ -64,6 +65,18 @@ func TestEngineRoot(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
+ // We expect Root to resolve to an absolute path.
+ // FIXME: this should not be necessary.
+ // Until the above FIXME is implemented, let's check for the
+ // current behavior.
+ tmp, err = filepath.EvalSymlinks(tmp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tmp, err = filepath.Abs(tmp)
+ if err != nil {
+ t.Fatal(err)
+ }
dir := path.Join(tmp, "dir")
eng, err := New(dir)
if err != nil {
diff --git a/hack/infrastructure/docker-ci.rst b/hack/infrastructure/docker-ci.rst
deleted file mode 100644
index 0be530d302..0000000000
--- a/hack/infrastructure/docker-ci.rst
+++ /dev/null
@@ -1,56 +0,0 @@
-docker-ci
-=========
-
-docker-ci is our buildbot continuous integration server,
-building and testing docker, hosted on EC2 and reachable at
-http://docker-ci.dotcloud.com
-
-
-Deployment
-==========
-
-# Load AWS credentials
-export AWS_ACCESS_KEY_ID=''
-export AWS_SECRET_ACCESS_KEY=''
-export AWS_KEYPAIR_NAME=''
-export AWS_SSH_PRIVKEY=''
-
-# Load buildbot credentials and config
-export BUILDBOT_PWD=''
-export IRC_PWD=''
-export IRC_CHANNEL='docker-dev'
-export SMTP_USER=''
-export SMTP_PWD=''
-export EMAIL_RCP=''
-
-# Load registry test credentials
-export REGISTRY_USER=''
-export REGISTRY_PWD=''
-
-cd docker/testing
-vagrant up --provider=aws
-
-
-github pull request
-===================
-
-The entire docker pull request test workflow is event driven by github. Its
-usage is fully automatic and the results are logged in docker-ci.dotcloud.com
-
-Each time there is a pull request on docker's github project, github connects
-to docker-ci using github's rest API documented in http://developer.github.com/v3/repos/hooks
-The issued command to program github's notification PR event was:
-curl -u GITHUB_USER:GITHUB_PASSWORD -d '{"name":"web","active":true,"events":["pull_request"],"config":{"url":"http://docker-ci.dotcloud.com:8011/change_hook/github?project=docker"}}' https://api.github.com/repos/dotcloud/docker/hooks
-
-buildbot (0.8.7p1) was patched using ./testing/buildbot/github.py, so it
-can understand the PR data github sends to it. Originally PR #1603 (ee64e099e0)
-implemented this capability. Also we added a new scheduler to exclusively filter
-PRs. and the 'pullrequest' builder to rebase the PR on top of master and test it.
-
-
-nighthly release
-================
-
-The nightly release process is done by buildbot, running a DinD container that downloads
-the docker repository and builds the release container. The resulting docker
-binary is then tested, and if everything is fine, the release is done.
diff --git a/hack/infrastructure/docker-ci/Dockerfile b/hack/infrastructure/docker-ci/Dockerfile
index d894330ffa..fd795f4d45 100644
--- a/hack/infrastructure/docker-ci/Dockerfile
+++ b/hack/infrastructure/docker-ci/Dockerfile
@@ -1,47 +1,29 @@
-# VERSION: 0.25
-# DOCKER-VERSION 0.6.6
-# AUTHOR: Daniel Mizyrycki <daniel@docker.com>
-# DESCRIPTION: Deploy docker-ci on Digital Ocean
-# COMMENTS:
-# CONFIG_JSON is an environment variable json string loaded as:
-#
-# export CONFIG_JSON='
-# { "DROPLET_NAME": "docker-ci",
-# "DO_CLIENT_ID": "Digital_Ocean_client_id",
-# "DO_API_KEY": "Digital_Ocean_api_key",
-# "DOCKER_KEY_ID": "Digital_Ocean_ssh_key_id",
-# "DOCKER_CI_KEY_PATH": "docker-ci_private_key_path",
-# "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)",
-# "DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)",
-# "BUILDBOT_PWD": "Buildbot_server_password",
-# "IRC_PWD": "Buildbot_IRC_password",
-# "SMTP_USER": "SMTP_server_user",
-# "SMTP_PWD": "SMTP_server_password",
-# "PKG_ACCESS_KEY": "Docker_release_S3_bucket_access_key",
-# "PKG_SECRET_KEY": "Docker_release_S3_bucket_secret_key",
-# "PKG_GPG_PASSPHRASE": "Docker_release_gpg_passphrase",
-# "INDEX_AUTH": "Index_encripted_user_password",
-# "REGISTRY_USER": "Registry_test_user",
-# "REGISTRY_PWD": "Registry_test_password",
-# "REGISTRY_BUCKET": "Registry_S3_bucket_name",
-# "REGISTRY_ACCESS_KEY": "Registry_S3_bucket_access_key",
-# "REGISTRY_SECRET_KEY": "Registry_S3_bucket_secret_key",
-# "IRC_CHANNEL": "Buildbot_IRC_channel",
-# "EMAIL_RCP": "Buildbot_mailing_receipient" }'
-#
-#
-# TO_BUILD: docker build -t docker-ci .
-# TO_DEPLOY: docker run -e CONFIG_JSON="${CONFIG_JSON}" docker-ci
+# DOCKER-VERSION: 0.7.6
+# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
+# DESCRIPTION: docker-ci continuous integration service
+# TO_BUILD: docker build -rm -t docker-ci/docker-ci .
+# TO_RUN: docker run -rm -i -t -p 8000:80 -p 2222:22 -v /run:/var/socket \
+# -v /data/docker-ci:/data/docker-ci docker-ci/docker-ci
from ubuntu:12.04
+maintainer Daniel Mizyrycki <daniel@dotcloud.com>
-run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' \
- > /etc/apt/sources.list
-run apt-get update; apt-get install -y git python2.7 python-dev libevent-dev \
- python-pip ssh rsync less vim
-run pip install requests fabric
+ENV DEBIAN_FRONTEND noninteractive
+RUN echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \
+ /etc/apt/sources.list; apt-get update
+RUN apt-get install -y --no-install-recommends python2.7 python-dev \
+ libevent-dev git supervisor ssh rsync less vim sudo gcc wget nginx
+RUN cd /tmp; wget http://python-distribute.org/distribute_setup.py
+RUN cd /tmp; python distribute_setup.py; easy_install pip; rm distribute_setup.py
-# Add deployment code and set default container command
-add . /docker-ci
-cmd "/docker-ci/deployment.py"
+RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+RUN echo 'deb http://get.docker.io/ubuntu docker main' > \
+ /etc/apt/sources.list.d/docker.list; apt-get update
+RUN apt-get install -y lxc-docker-0.8.0
+RUN pip install SQLAlchemy==0.7.10 buildbot buildbot-slave pyopenssl boto
+RUN ln -s /var/socket/docker.sock /run/docker.sock
+ADD . /docker-ci
+RUN /docker-ci/setup.sh
+
+ENTRYPOINT ["supervisord", "-n"]
diff --git a/hack/infrastructure/docker-ci/README.rst b/hack/infrastructure/docker-ci/README.rst
index 33a14359bf..3e429ffdd5 100644
--- a/hack/infrastructure/docker-ci/README.rst
+++ b/hack/infrastructure/docker-ci/README.rst
@@ -1,26 +1,65 @@
-=======
-testing
-=======
+=========
+docker-ci
+=========
-This directory contains docker-ci testing related files.
+This directory contains docker-ci continuous integration system.
+As expected, it is a fully dockerized and deployed using
+docker-container-runner.
+docker-ci is based on Buildbot, a continuous integration system designed
+to automate the build/test cycle. By automatically rebuilding and testing
+the tree each time something has changed, build problems are pinpointed
+quickly, before other developers are inconvenienced by the failure.
+We are running buildbot at Rackspace to verify docker and docker-registry
+pass tests, and check for coverage code details.
+docker-ci instance is at https://docker-ci.docker.io/waterfall
-Buildbot
-========
+Inside docker-ci container we have the following directory structure:
-Buildbot is a continuous integration system designed to automate the
-build/test cycle. By automatically rebuilding and testing the tree each time
-something has changed, build problems are pinpointed quickly, before other
-developers are inconvenienced by the failure.
+/docker-ci source code of docker-ci
+/data/backup/docker-ci/ daily backup (replicated over S3)
+/data/docker-ci/coverage/{docker,docker-registry}/ mapped to host volumes
+/data/buildbot/{master,slave}/ main docker-ci buildbot config and database
+/var/socket/{docker.sock} host volume access to docker socket
-We are running buildbot in Amazon's EC2 to verify docker passes all
-tests when commits get pushed to the master branch and building
-nightly releases using Docker in Docker awesome implementation made
-by Jerome Petazzoni.
-https://github.com/jpetazzo/dind
+Production deployment
+=====================
-Docker's buildbot instance is at http://docker-ci.dotcloud.com/waterfall
+::
-For deployment instructions, please take a look at
-hack/infrastructure/docker-ci/Dockerfile
+ # Clone docker-ci repository
+ git clone https://github.com/dotcloud/docker
+ cd docker/hack/infrastructure/docker-ci
+
+ export DOCKER_PROD=[PRODUCTION_SERVER_IP]
+
+ # Create data host volume. (only once)
+ docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \
+ mkdir -p /data/docker-ci/coverage/docker
+ docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \
+ mkdir -p /data/docker-ci/coverage/docker-registry
+ docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \
+ chown -R 1000.1000 /data/docker-ci
+
+ # dcr deployment. Define credentials and special environment dcr variables
+ # ( retrieved at /hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml )
+ export WEB_USER=[DOCKER-CI-WEBSITE-USERNAME]
+ export WEB_IRC_PWD=[DOCKER-CI-WEBSITE-PASSWORD]
+ export BUILDBOT_PWD=[BUILDSLAVE_PASSWORD]
+ export AWS_ACCESS_KEY=[DOCKER_RELEASE_S3_ACCESS]
+ export AWS_SECRET_KEY=[DOCKER_RELEASE_S3_SECRET]
+ export GPG_PASSPHRASE=[DOCKER_RELEASE_PASSPHRASE]
+ export BACKUP_AWS_ID=[S3_BUCKET_CREDENTIAL_ACCESS]
+ export BACKUP_AWS_SECRET=[S3_BUCKET_CREDENTIAL_SECRET]
+ export SMTP_USER=[MAILGUN_SMTP_USERNAME]
+ export SMTP_PWD=[MAILGUN_SMTP_PASSWORD]
+ export EMAIL_RCP=[EMAIL_FOR_BUILD_ERRORS]
+
+ # Build docker-ci and testbuilder docker images
+ docker -H $DOCKER_PROD build -rm -t docker-ci/docker-ci .
+ (cd testbuilder; docker -H $DOCKER_PROD build -rm -t docker-ci/testbuilder .)
+
+ # Run docker-ci container ( assuming no previous container running )
+ (cd dcr/prod; dcr docker-ci.yml start)
+ (cd dcr/prod; dcr docker-ci.yml register docker-ci.docker.io)
diff --git a/hack/infrastructure/docker-ci/VERSION b/hack/infrastructure/docker-ci/VERSION
index 0bfccb0804..b49b25336d 100644
--- a/hack/infrastructure/docker-ci/VERSION
+++ b/hack/infrastructure/docker-ci/VERSION
@@ -1 +1 @@
-0.4.5
+0.5.6
diff --git a/hack/infrastructure/docker-ci/buildbot/README.rst b/hack/infrastructure/docker-ci/buildbot/README.rst
deleted file mode 100644
index 6cbcb8d93a..0000000000
--- a/hack/infrastructure/docker-ci/buildbot/README.rst
+++ /dev/null
@@ -1 +0,0 @@
-Buildbot configuration and setup files
diff --git a/hack/infrastructure/docker-ci/buildbot/buildbot.conf b/hack/infrastructure/docker-ci/buildbot/buildbot.conf
deleted file mode 100644
index e07b2e3c8c..0000000000
--- a/hack/infrastructure/docker-ci/buildbot/buildbot.conf
+++ /dev/null
@@ -1,18 +0,0 @@
-[program:buildmaster]
-command=twistd --nodaemon --no_save -y buildbot.tac
-directory=/data/buildbot/master
-chown= root:root
-redirect_stderr=true
-stdout_logfile=/var/log/supervisor/buildbot-master.log
-stderr_logfile=/var/log/supervisor/buildbot-master.log
-
-[program:buildworker]
-command=twistd --nodaemon --no_save -y buildbot.tac
-directory=/data/buildbot/slave
-chown= root:root
-redirect_stderr=true
-stdout_logfile=/var/log/supervisor/buildbot-slave.log
-stderr_logfile=/var/log/supervisor/buildbot-slave.log
-
-[group:buildbot]
-programs=buildmaster,buildworker
diff --git a/hack/infrastructure/docker-ci/buildbot/github.py b/hack/infrastructure/docker-ci/buildbot/github.py
index ff6b6c62dd..5316e13282 100644
--- a/hack/infrastructure/docker-ci/buildbot/github.py
+++ b/hack/infrastructure/docker-ci/buildbot/github.py
@@ -17,7 +17,7 @@
"""
github_buildbot.py is based on git_buildbot.py
-github_buildbot.py will determine the repository information from the JSON
+github_buildbot.py will determine the repository information from the JSON
HTTP POST it receives from github.com and build the appropriate repository.
If your github repository is private, you must add a ssh key to the github
repository for the user who initiated the build on the buildslave.
@@ -88,7 +88,8 @@ def getChanges(request, options = None):
payload = json.loads(request.args['payload'][0])
import urllib,datetime
fname = str(datetime.datetime.now()).replace(' ','_').replace(':','-')[:19]
- open('github_{0}.json'.format(fname),'w').write(json.dumps(json.loads(urllib.unquote(request.args['payload'][0])), sort_keys = True, indent = 2))
+ # Github event debug
+ # open('github_{0}.json'.format(fname),'w').write(json.dumps(json.loads(urllib.unquote(request.args['payload'][0])), sort_keys = True, indent = 2))
if 'pull_request' in payload:
user = payload['pull_request']['user']['login']
@@ -142,13 +143,13 @@ def process_change(payload, user, repo, repo_url, project):
'category' : 'github_pullrequest',
'who' : '{0} - PR#{1}'.format(user,payload['number']),
'files' : [],
- 'comments' : payload['pull_request']['title'],
+ 'comments' : payload['pull_request']['title'],
'revision' : newrev,
'when' : convertTime(payload['pull_request']['updated_at']),
'branch' : branch,
'revlink' : '{0}/commit/{1}'.format(repo_url,newrev),
'repository' : repo_url,
- 'project' : project }]
+ 'project' : project }]
return changes
for commit in payload['commits']:
files = []
diff --git a/hack/infrastructure/docker-ci/buildbot/master.cfg b/hack/infrastructure/docker-ci/buildbot/master.cfg
index 9ca5fc035a..75605da8ab 100644
--- a/hack/infrastructure/docker-ci/buildbot/master.cfg
+++ b/hack/infrastructure/docker-ci/buildbot/master.cfg
@@ -1,4 +1,4 @@
-import os
+import os, re
from buildbot.buildslave import BuildSlave
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.schedulers.basic import SingleBranchScheduler
@@ -6,127 +6,156 @@ from buildbot.schedulers.timed import Nightly
from buildbot.changes import filter
from buildbot.config import BuilderConfig
from buildbot.process.factory import BuildFactory
-from buildbot.process.properties import Interpolate
+from buildbot.process.properties import Property
from buildbot.steps.shell import ShellCommand
from buildbot.status import html, words
from buildbot.status.web import authz, auth
from buildbot.status.mail import MailNotifier
-PORT_WEB = 80 # Buildbot webserver port
-PORT_GITHUB = 8011 # Buildbot github hook port
-PORT_MASTER = 9989 # Port where buildbot master listen buildworkers
-TEST_USER = 'buildbot' # Credential to authenticate build triggers
-TEST_PWD = 'docker' # Credential to authenticate build triggers
-GITHUB_DOCKER = 'github.com/dotcloud/docker'
-BUILDBOT_PATH = '/data/buildbot'
-DOCKER_PATH = '/go/src/github.com/dotcloud/docker'
-DOCKER_CI_PATH = '/docker-ci'
+
+def ENV(x):
+ '''Promote an environment variable for global use returning its value'''
+ retval = os.environ.get(x, '')
+ globals()[x] = retval
+ return retval
+
+
+class TestCommand(ShellCommand):
+ '''Extend ShellCommand with optional summary logs'''
+ def __init__(self, *args, **kwargs):
+ super(TestCommand, self).__init__(*args, **kwargs)
+
+ def createSummary(self, log):
+ exit_status = re.sub(r'.+\n\+ exit (\d+).+',
+ r'\1', log.getText()[-100:], flags=re.DOTALL)
+ if exit_status != '0':
+ return
+ # Infer coverage path from log
+ if '+ COVERAGE_PATH' in log.getText():
+ path = re.sub(r'.+\+ COVERAGE_PATH=((.+?)-\d+).+',
+ r'\2/\1', log.getText(), flags=re.DOTALL)
+ url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path)
+ self.addURL('coverage', url)
+ elif 'COVERAGE_FILE' in log.getText():
+ path = re.sub(r'.+\+ COVERAGE_FILE=((.+?)-\d+).+',
+ r'\2/\1', log.getText(), flags=re.DOTALL)
+ url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path)
+ self.addURL('coverage', url)
+
+
+PORT_WEB = 8000 # Buildbot webserver port
+PORT_GITHUB = 8011 # Buildbot github hook port
+PORT_MASTER = 9989 # Port where buildbot master listen buildworkers
+
+BUILDBOT_URL = '//localhost:{}/'.format(PORT_WEB)
+DOCKER_REPO = 'https://github.com/docker-test/docker'
+DOCKER_TEST_ARGV = 'HEAD {}'.format(DOCKER_REPO)
+REGISTRY_REPO = 'https://github.com/docker-test/docker-registry'
+REGISTRY_TEST_ARGV = 'HEAD {}'.format(REGISTRY_REPO)
+if ENV('DEPLOYMENT') == 'staging':
+ BUILDBOT_URL = "//docker-ci-stage.docker.io/"
+if ENV('DEPLOYMENT') == 'production':
+ BUILDBOT_URL = '//docker-ci.docker.io/'
+ DOCKER_REPO = 'https://github.com/dotcloud/docker'
+ DOCKER_TEST_ARGV = ''
+ REGISTRY_REPO = 'https://github.com/dotcloud/docker-registry'
+ REGISTRY_TEST_ARGV = ''
# Credentials set by setup.sh from deployment.py
-BUILDBOT_PWD = ''
-IRC_PWD = ''
-IRC_CHANNEL = ''
-SMTP_USER = ''
-SMTP_PWD = ''
-EMAIL_RCP = ''
+ENV('WEB_USER')
+ENV('WEB_IRC_PWD')
+ENV('BUILDBOT_PWD')
+ENV('SMTP_USER')
+ENV('SMTP_PWD')
+ENV('EMAIL_RCP')
+ENV('IRC_CHANNEL')
c = BuildmasterConfig = {}
-c['title'] = "Docker"
+c['title'] = "docker-ci"
c['titleURL'] = "waterfall"
-c['buildbotURL'] = "http://docker-ci.dotcloud.com/"
+c['buildbotURL'] = BUILDBOT_URL
c['db'] = {'db_url':"sqlite:///state.sqlite"}
c['slaves'] = [BuildSlave('buildworker', BUILDBOT_PWD)]
c['slavePortnum'] = PORT_MASTER
# Schedulers
-c['schedulers'] = [ForceScheduler(name='trigger', builderNames=['docker',
- 'index','registry','docker-coverage','registry-coverage','nightlyrelease'])]
-c['schedulers'] += [SingleBranchScheduler(name="all", treeStableTimer=None,
+c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[
+ 'docker', 'docker-registry', 'nightlyrelease', 'backup'])]
+c['schedulers'] += [SingleBranchScheduler(name="docker", treeStableTimer=None,
change_filter=filter.ChangeFilter(branch='master',
- repository='https://github.com/dotcloud/docker'), builderNames=['docker'])]
-c['schedulers'] += [SingleBranchScheduler(name='pullrequest',
- change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None,
- builderNames=['pullrequest'])]
-c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['nightlyrelease',
- 'docker-coverage','registry-coverage'], hour=7, minute=00)]
-c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'],
- hour=range(0,24,4), minute=15)]
+ repository=DOCKER_REPO), builderNames=['docker'])]
+c['schedulers'] += [SingleBranchScheduler(name="registry", treeStableTimer=None,
+ change_filter=filter.ChangeFilter(branch='master',
+ repository=REGISTRY_REPO), builderNames=['docker-registry'])]
+c['schedulers'] += [SingleBranchScheduler(name='docker-pr', treeStableTimer=None,
+ change_filter=filter.ChangeFilter(category='github_pullrequest',
+ project='docker'), builderNames=['docker-pr'])]
+c['schedulers'] += [SingleBranchScheduler(name='docker-registry-pr', treeStableTimer=None,
+ change_filter=filter.ChangeFilter(category='github_pullrequest',
+ project='docker-registry'), builderNames=['docker-registry-pr'])]
+c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=[
+ 'nightlyrelease', 'backup'], hour=7, minute=00)]
+
# Builders
-# Docker commit test
-test_cmd = ('docker run -privileged mzdaniel/test_docker hack/dind'
- ' test_docker.sh %(src::revision)s')
-factory = BuildFactory()
-factory.addStep(ShellCommand(description='Docker', logEnviron=False,
- usePTY=True, command=["sh", "-c", Interpolate(test_cmd)]))
-c['builders'] = [BuilderConfig(name='docker',slavenames=['buildworker'],
- factory=factory)]
-# Docker pull request test
-test_cmd = ('docker run -privileged mzdaniel/test_docker hack/dind'
- ' test_docker.sh %(src::revision)s %(src::repository)s %(src::branch)s')
+# Backup
factory = BuildFactory()
-factory.addStep(ShellCommand(description='pull_request', logEnviron=False,
- usePTY=True, command=["sh", "-c", Interpolate(test_cmd)]))
-c['builders'] += [BuilderConfig(name='pullrequest',slavenames=['buildworker'],
+factory.addStep(TestCommand(description='backup', logEnviron=False,
+ usePTY=True, command='/docker-ci/tool/backup.py'))
+c['builders'] = [BuilderConfig(name='backup',slavenames=['buildworker'],
factory=factory)]
-# Docker coverage test
+# Docker test
factory = BuildFactory()
-factory.addStep(ShellCommand(description='docker-coverage', logEnviron=False,
- usePTY=True, command='{0}/docker-coverage/coverage-docker.sh'.format(
- DOCKER_CI_PATH)))
-c['builders'] += [BuilderConfig(name='docker-coverage',slavenames=['buildworker'],
+factory.addStep(TestCommand(description='docker', logEnviron=False,
+ usePTY=True, command='/docker-ci/dockertest/docker {}'.format(DOCKER_TEST_ARGV)))
+c['builders'] += [BuilderConfig(name='docker',slavenames=['buildworker'],
factory=factory)]
-# Docker registry coverage test
+# Docker pull request test
factory = BuildFactory()
-factory.addStep(ShellCommand(description='registry-coverage', logEnviron=False,
- usePTY=True, command='docker run registry_coverage'.format(
- DOCKER_CI_PATH)))
-c['builders'] += [BuilderConfig(name='registry-coverage',slavenames=['buildworker'],
+factory.addStep(TestCommand(description='docker-pr', logEnviron=False,
+ usePTY=True, command=['/docker-ci/dockertest/docker',
+ Property('revision'), Property('repository'), Property('branch')]))
+c['builders'] += [BuilderConfig(name='docker-pr',slavenames=['buildworker'],
factory=factory)]
-# Registry functional test
+# docker-registry test
factory = BuildFactory()
-factory.addStep(ShellCommand(description='registry', logEnviron=False,
- command='. {0}/master/credentials.cfg; '
- '{1}/functionaltests/test_registry.sh'.format(BUILDBOT_PATH, DOCKER_CI_PATH),
- usePTY=True))
-c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],
+factory.addStep(TestCommand(description='docker-registry', logEnviron=False,
+ usePTY=True, command='/docker-ci/dockertest/docker-registry {}'.format(REGISTRY_TEST_ARGV)))
+c['builders'] += [BuilderConfig(name='docker-registry',slavenames=['buildworker'],
factory=factory)]
-# Index functional test
+# Docker registry pull request test
factory = BuildFactory()
-factory.addStep(ShellCommand(description='index', logEnviron=False,
- command='. {0}/master/credentials.cfg; '
- '{1}/functionaltests/test_index.py'.format(BUILDBOT_PATH, DOCKER_CI_PATH),
- usePTY=True))
-c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'],
+factory.addStep(TestCommand(description='docker-registry-pr', logEnviron=False,
+ usePTY=True, command=['/docker-ci/dockertest/docker-registry',
+ Property('revision'), Property('repository'), Property('branch')]))
+c['builders'] += [BuilderConfig(name='docker-registry-pr',slavenames=['buildworker'],
factory=factory)]
# Docker nightly release
-nightlyrelease_cmd = ('docker version; docker run -i -t -privileged -e AWS_S3_BUCKET='
- 'test.docker.io dockerbuilder hack/dind dockerbuild.sh')
factory = BuildFactory()
factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False,
- usePTY=True, command=nightlyrelease_cmd))
+ usePTY=True, command=['/docker-ci/dockertest/nightlyrelease']))
c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'],
factory=factory)]
# Status
-authz_cfg = authz.Authz(auth=auth.BasicAuth([(TEST_USER, TEST_PWD)]),
+authz_cfg = authz.Authz(auth=auth.BasicAuth([(WEB_USER, WEB_IRC_PWD)]),
forceBuild='auth')
c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)]
c['status'].append(html.WebStatus(http_port=PORT_GITHUB, allowForce=True,
change_hook_dialects={ 'github': True }))
-c['status'].append(MailNotifier(fromaddr='buildbot@docker.io',
+c['status'].append(MailNotifier(fromaddr='docker-test@docker.io',
sendToInterestedUsers=False, extraRecipients=[EMAIL_RCP],
mode='failing', relayhost='smtp.mailgun.org', smtpPort=587, useTls=True,
smtpUser=SMTP_USER, smtpPassword=SMTP_PWD))
c['status'].append(words.IRC("irc.freenode.net", "dockerqabot",
- channels=[IRC_CHANNEL], password=IRC_PWD, allowForce=True,
+ channels=[IRC_CHANNEL], password=WEB_IRC_PWD, allowForce=True,
notify_events={'exception':1, 'successToFailure':1, 'failureToSuccess':1}))
diff --git a/hack/infrastructure/docker-ci/buildbot/requirements.txt b/hack/infrastructure/docker-ci/buildbot/requirements.txt
deleted file mode 100644
index d2dcf1d125..0000000000
--- a/hack/infrastructure/docker-ci/buildbot/requirements.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-sqlalchemy<=0.7.9
-sqlalchemy-migrate>=0.7.2
-buildbot==0.8.7p1
-buildbot_slave==0.8.7p1
-nose==1.2.1
-requests==1.1.0
-flask==0.10.1
-simplejson==2.3.2
-selenium==2.35.0
diff --git a/hack/infrastructure/docker-ci/buildbot/setup.sh b/hack/infrastructure/docker-ci/buildbot/setup.sh
deleted file mode 100755
index c5d9cb988e..0000000000
--- a/hack/infrastructure/docker-ci/buildbot/setup.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env bash
-
-# Setup of buildbot configuration. Package installation is being done by
-# Vagrantfile
-# Dependencies: buildbot, buildbot-slave, supervisor
-
-USER=$1
-CFG_PATH=$2
-DOCKER_PATH=$3
-BUILDBOT_PWD=$4
-IRC_PWD=$5
-IRC_CHANNEL=$6
-SMTP_USER=$7
-SMTP_PWD=$8
-EMAIL_RCP=$9
-REGISTRY_USER=${10}
-REGISTRY_PWD=${11}
-REGISTRY_BUCKET=${12}
-REGISTRY_ACCESS_KEY=${13}
-REGISTRY_SECRET_KEY=${14}
-BUILDBOT_PATH="/data/buildbot"
-SLAVE_NAME="buildworker"
-SLAVE_SOCKET="localhost:9989"
-
-export PATH="/bin:sbin:/usr/bin:/usr/sbin:/usr/local/bin"
-
-function run { su $USER -c "$1"; }
-
-# Exit if buildbot has already been installed
-[ -d "$BUILDBOT_PATH" ] && exit 0
-
-# Setup buildbot
-run "mkdir -p $BUILDBOT_PATH"
-cd $BUILDBOT_PATH
-run "buildbot create-master master"
-run "cp $CFG_PATH/master.cfg master"
-run "sed -i -E 's#(BUILDBOT_PWD = ).+#\1\"$BUILDBOT_PWD\"#' master/master.cfg"
-run "sed -i -E 's#(IRC_PWD = ).+#\1\"$IRC_PWD\"#' master/master.cfg"
-run "sed -i -E 's#(IRC_CHANNEL = ).+#\1\"$IRC_CHANNEL\"#' master/master.cfg"
-run "sed -i -E 's#(SMTP_USER = ).+#\1\"$SMTP_USER\"#' master/master.cfg"
-run "sed -i -E 's#(SMTP_PWD = ).+#\1\"$SMTP_PWD\"#' master/master.cfg"
-run "sed -i -E 's#(EMAIL_RCP = ).+#\1\"$EMAIL_RCP\"#' master/master.cfg"
-run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"
-run "echo 'export DOCKER_CREDS=\"$REGISTRY_USER:$REGISTRY_PWD\"' > $BUILDBOT_PATH/master/credentials.cfg"
-run "echo 'export S3_BUCKET=\"$REGISTRY_BUCKET\"' >> $BUILDBOT_PATH/master/credentials.cfg"
-run "echo 'export S3_ACCESS_KEY=\"$REGISTRY_ACCESS_KEY\"' >> $BUILDBOT_PATH/master/credentials.cfg"
-run "echo 'export S3_SECRET_KEY=\"$REGISTRY_SECRET_KEY\"' >> $BUILDBOT_PATH/master/credentials.cfg"
-
-# Patch github webstatus to capture pull requests
-cp $CFG_PATH/github.py /usr/local/lib/python2.7/dist-packages/buildbot/status/web/hooks
-
-# Allow buildbot subprocesses (docker tests) to properly run in containers,
-# in particular with docker -u
-run "sed -i 's/^umask = None/umask = 000/' slave/buildbot.tac"
-
-# Setup supervisor
-cp $CFG_PATH/buildbot.conf /etc/supervisor/conf.d/buildbot.conf
-sed -i -E "s/^chmod=0700.+/chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf
-kill -HUP $(pgrep -f "/usr/bin/python /usr/bin/supervisord")
diff --git a/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml b/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml
new file mode 100644
index 0000000000..523535446a
--- /dev/null
+++ b/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml
@@ -0,0 +1,22 @@
+docker-ci:
+ image: "docker-ci/docker-ci"
+ release_name: "docker-ci-0.5.6"
+ ports: ["80","2222:22","8011:8011"]
+ register: "80"
+ volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"]
+ command: []
+ env:
+ - "DEPLOYMENT=production"
+ - "IRC_CHANNEL=docker-testing"
+ - "BACKUP_BUCKET=backup-ci"
+ - "$WEB_USER"
+ - "$WEB_IRC_PWD"
+ - "$BUILDBOT_PWD"
+ - "$AWS_ACCESS_KEY"
+ - "$AWS_SECRET_KEY"
+ - "$GPG_PASSPHRASE"
+ - "$BACKUP_AWS_ID"
+ - "$BACKUP_AWS_SECRET"
+ - "$SMTP_USER"
+ - "$SMTP_PWD"
+ - "$EMAIL_RCP"
diff --git a/hack/infrastructure/docker-ci/dcr/prod/settings.yml b/hack/infrastructure/docker-ci/dcr/prod/settings.yml
new file mode 100644
index 0000000000..9831afa6dd
--- /dev/null
+++ b/hack/infrastructure/docker-ci/dcr/prod/settings.yml
@@ -0,0 +1,5 @@
+default:
+ hipaches: ['192.168.100.67:6379']
+ daemons: ['192.168.100.67:4243']
+ use_ssh: False
+
diff --git a/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml b/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml
new file mode 100644
index 0000000000..8eba84825c
--- /dev/null
+++ b/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml
@@ -0,0 +1,22 @@
+docker-ci:
+ image: "docker-ci/docker-ci"
+ release_name: "docker-ci-stage"
+ ports: ["80","2222:22","8011:8011"]
+ register: "80"
+ volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"]
+ command: []
+ env:
+ - "DEPLOYMENT=staging"
+ - "IRC_CHANNEL=docker-testing-staging"
+ - "BACKUP_BUCKET=ci-backup-stage"
+ - "$BACKUP_AWS_ID"
+ - "$BACKUP_AWS_SECRET"
+ - "$WEB_USER"
+ - "$WEB_IRC_PWD"
+ - "$BUILDBOT_PWD"
+ - "$AWS_ACCESS_KEY"
+ - "$AWS_SECRET_KEY"
+ - "$GPG_PASSPHRASE"
+ - "$SMTP_USER"
+ - "$SMTP_PWD"
+ - "$EMAIL_RCP"
diff --git a/hack/infrastructure/docker-ci/dcr/stage/settings.yml b/hack/infrastructure/docker-ci/dcr/stage/settings.yml
new file mode 100644
index 0000000000..a7d37acff3
--- /dev/null
+++ b/hack/infrastructure/docker-ci/dcr/stage/settings.yml
@@ -0,0 +1,5 @@
+default:
+ hipaches: ['192.168.100.65:6379']
+ daemons: ['192.168.100.65:4243']
+ use_ssh: False
+
diff --git a/hack/infrastructure/docker-ci/deployment.py b/hack/infrastructure/docker-ci/deployment.py
deleted file mode 100755
index fd0fdb0fe8..0000000000
--- a/hack/infrastructure/docker-ci/deployment.py
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/usr/bin/env python
-
-import os, sys, re, json, requests, base64
-from subprocess import call
-from fabric import api
-from fabric.api import cd, run, put, sudo
-from os import environ as env
-from datetime import datetime
-from time import sleep
-
-# Remove SSH private key as it needs more processing
-CONFIG = json.loads(re.sub(r'("DOCKER_CI_KEY".+?"(.+?)",)','',
- env['CONFIG_JSON'], flags=re.DOTALL))
-
-# Populate environment variables
-for key in CONFIG:
- env[key] = CONFIG[key]
-
-# Load SSH private key
-env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1',
- env['CONFIG_JSON'],flags=re.DOTALL)
-
-DROPLET_NAME = env.get('DROPLET_NAME','docker-ci')
-TIMEOUT = 120 # Seconds before timeout droplet creation
-IMAGE_ID = 1004145 # Docker on Ubuntu 13.04
-REGION_ID = 4 # New York 2
-SIZE_ID = 62 # memory 2GB
-DO_IMAGE_USER = 'root' # Image user on Digital Ocean
-API_URL = 'https://api.digitalocean.com/'
-DOCKER_PATH = '/go/src/github.com/dotcloud/docker'
-DOCKER_CI_PATH = '/docker-ci'
-CFG_PATH = '{}/buildbot'.format(DOCKER_CI_PATH)
-
-
-class DigitalOcean():
-
- def __init__(self, key, client):
- '''Set default API parameters'''
- self.key = key
- self.client = client
- self.api_url = API_URL
-
- def api(self, cmd_path, api_arg={}):
- '''Make api call'''
- api_arg.update({'api_key':self.key, 'client_id':self.client})
- resp = requests.get(self.api_url + cmd_path, params=api_arg).text
- resp = json.loads(resp)
- if resp['status'] != 'OK':
- raise Exception(resp['error_message'])
- return resp
-
- def droplet_data(self, name):
- '''Get droplet data'''
- data = self.api('droplets')
- data = [droplet for droplet in data['droplets']
- if droplet['name'] == name]
- return data[0] if data else {}
-
-
-def json_fmt(data):
- '''Format json output'''
- return json.dumps(data, sort_keys = True, indent = 2)
-
-
-do = DigitalOcean(env['DO_API_KEY'], env['DO_CLIENT_ID'])
-
-# Get DROPLET_NAME data
-data = do.droplet_data(DROPLET_NAME)
-
-# Stop processing if DROPLET_NAME exists on Digital Ocean
-if data:
- print ('Droplet: {} already deployed. Not further processing.'
- .format(DROPLET_NAME))
- exit(1)
-
-# Create droplet
-do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID,
- 'image_id':IMAGE_ID, 'size_id':SIZE_ID,
- 'ssh_key_ids':[env['DOCKER_KEY_ID']]})
-
-# Wait for droplet to be created.
-start_time = datetime.now()
-while (data.get('status','') != 'active' and (
- datetime.now()-start_time).seconds < TIMEOUT):
- data = do.droplet_data(DROPLET_NAME)
- print data['status']
- sleep(3)
-
-# Wait for the machine to boot
-sleep(15)
-
-# Get droplet IP
-ip = str(data['ip_address'])
-print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip)
-
-# Create docker-ci ssh private key so docker-ci docker container can communicate
-# with its EC2 instance
-os.makedirs('/root/.ssh')
-open('/root/.ssh/id_rsa','w').write(env['DOCKER_CI_KEY'])
-os.chmod('/root/.ssh/id_rsa',0600)
-open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n')
-
-api.env.host_string = ip
-api.env.user = DO_IMAGE_USER
-api.env.key_filename = '/root/.ssh/id_rsa'
-
-# Correct timezone
-sudo('echo "America/Los_Angeles" >/etc/timezone')
-sudo('dpkg-reconfigure --frontend noninteractive tzdata')
-
-# Load public docker-ci key
-sudo("echo '{}' >> /root/.ssh/authorized_keys".format(env['DOCKER_CI_PUB']))
-
-# Create docker nightly release credentials file
-credentials = {
- 'AWS_ACCESS_KEY': env['PKG_ACCESS_KEY'],
- 'AWS_SECRET_KEY': env['PKG_SECRET_KEY'],
- 'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE']}
-open(DOCKER_CI_PATH + '/nightlyrelease/release_credentials.json', 'w').write(
- base64.b64encode(json.dumps(credentials)))
-
-# Transfer docker
-sudo('mkdir -p ' + DOCKER_CI_PATH)
-sudo('chown {}.{} {}'.format(DO_IMAGE_USER, DO_IMAGE_USER, DOCKER_CI_PATH))
-call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, DO_IMAGE_USER, ip,
- os.path.dirname(DOCKER_CI_PATH)), shell=True)
-
-# Install Docker and Buildbot dependencies
-sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker')
-sudo('apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9')
-sudo('echo deb https://get.docker.io/ubuntu docker main >'
- ' /etc/apt/sources.list.d/docker.list')
-sudo('echo -e "deb http://archive.ubuntu.com/ubuntu raring main universe\n'
- 'deb http://us.archive.ubuntu.com/ubuntu/ raring-security main universe\n"'
- ' > /etc/apt/sources.list; apt-get update')
-sudo('DEBIAN_FRONTEND=noninteractive apt-get install -q -y wget python-dev'
- ' python-pip supervisor git mercurial linux-image-extra-$(uname -r)'
- ' aufs-tools make libfontconfig libevent-dev libsqlite3-dev libssl-dev')
-sudo('wget -O - https://go.googlecode.com/files/go1.2.linux-amd64.tar.gz | '
- 'tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go')
-sudo('GOPATH=/go go get -d github.com/dotcloud/docker')
-sudo('pip install -r {}/requirements.txt'.format(CFG_PATH))
-
-# Install docker and testing dependencies
-sudo('apt-get install -y -q lxc-docker')
-sudo('curl -s https://phantomjs.googlecode.com/files/'
- 'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin'
- ' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs')
-
-# Build docker-ci containers
-sudo('cd {}; docker build -t docker .'.format(DOCKER_PATH))
-sudo('cd {}; docker build -t docker-ci .'.format(DOCKER_CI_PATH))
-sudo('cd {}/nightlyrelease; docker build -t dockerbuilder .'.format(
- DOCKER_CI_PATH))
-sudo('cd {}/registry-coverage; docker build -t registry_coverage .'.format(
- DOCKER_CI_PATH))
-
-# Download docker-ci testing container
-sudo('docker pull mzdaniel/test_docker')
-
-# Setup buildbot
-sudo('mkdir /data')
-sudo('{0}/setup.sh root {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'
- ' {11} {12}'.format(CFG_PATH, DOCKER_PATH, env['BUILDBOT_PWD'],
- env['IRC_PWD'], env['IRC_CHANNEL'], env['SMTP_USER'],
- env['SMTP_PWD'], env['EMAIL_RCP'], env['REGISTRY_USER'],
- env['REGISTRY_PWD'], env['REGISTRY_BUCKET'], env['REGISTRY_ACCESS_KEY'],
- env['REGISTRY_SECRET_KEY']))
-
-# Preventively reboot docker-ci daily
-sudo('ln -s /sbin/reboot /etc/cron.daily')
diff --git a/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh b/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh
deleted file mode 100755
index c29ede5b81..0000000000
--- a/hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env bash
-
-set -x
-# Generate a random string of $1 characters
-function random {
- cat /dev/urandom | tr -cd 'a-f0-9' | head -c $1
-}
-
-# Compute test paths
-BASE_PATH=`pwd`/test_docker_$(random 12)
-DOCKER_PATH=$BASE_PATH/go/src/github.com/dotcloud/docker
-export GOPATH=$BASE_PATH/go:$DOCKER_PATH/vendor
-
-# Fetch latest master
-mkdir -p $DOCKER_PATH
-cd $DOCKER_PATH
-git init .
-git fetch -q http://github.com/dotcloud/docker master
-git reset --hard FETCH_HEAD
-
-# Fetch go coverage
-cd $BASE_PATH/go
-GOPATH=$BASE_PATH/go go get github.com/axw/gocov/gocov
-sudo -E GOPATH=$GOPATH ./bin/gocov test -deps -exclude-goroot -v\
- -exclude github.com/gorilla/context,github.com/gorilla/mux,github.com/kr/pty,\
-code.google.com/p/go.net/websocket\
- github.com/dotcloud/docker | ./bin/gocov report; exit_status=$?
-
-# Cleanup testing directory
-rm -rf $BASE_PATH
-
-exit $exit_status
diff --git a/hack/infrastructure/docker-ci/docker-test/Dockerfile b/hack/infrastructure/docker-ci/docker-test/Dockerfile
deleted file mode 100644
index 0f3a63f5f1..0000000000
--- a/hack/infrastructure/docker-ci/docker-test/Dockerfile
+++ /dev/null
@@ -1,25 +0,0 @@
-# VERSION: 0.4
-# DOCKER-VERSION 0.6.6
-# AUTHOR: Daniel Mizyrycki <daniel@docker.com>
-# DESCRIPTION: Testing docker PRs and commits on top of master using
-# REFERENCES: This code reuses the excellent implementation of
-# Docker in Docker made by Jerome Petazzoni.
-# https://github.com/jpetazzo/dind
-# COMMENTS:
-# This Dockerfile adapts /Dockerfile to enable docker PRs and commits testing
-# Optional arguments:
-# [commit] (default: 'HEAD')
-# [repo] (default: 'http://github.com/dotcloud/docker')
-# [branch] (default: 'master')
-# TO_BUILD: docker build -t test_docker .
-# TO_RUN: docker run -privileged test_docker hack/dind test_docker.sh [commit] [repo] [branch]
-
-from docker
-maintainer Daniel Mizyrycki <daniel@docker.com>
-
-# Setup go in PATH. Extracted from /Dockerfile
-env PATH /usr/local/go/bin:$PATH
-
-# Add test_docker.sh
-add test_docker.sh /usr/bin/test_docker.sh
-run chmod +x /usr/bin/test_docker.sh
diff --git a/hack/infrastructure/docker-ci/docker-test/test_docker.sh b/hack/infrastructure/docker-ci/docker-test/test_docker.sh
deleted file mode 100755
index 14816706ed..0000000000
--- a/hack/infrastructure/docker-ci/docker-test/test_docker.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-
-set -x
-COMMIT=${1-HEAD}
-REPO=${2-http://github.com/dotcloud/docker}
-BRANCH=${3-master}
-
-# Compute test paths
-DOCKER_PATH=/go/src/github.com/dotcloud/docker
-
-# Timestamp
-echo
-date; echo
-
-# Fetch latest master
-cd /
-rm -rf /go
-git clone -q -b master http://github.com/dotcloud/docker $DOCKER_PATH
-cd $DOCKER_PATH
-
-# Merge commit
-git fetch -q "$REPO" "$BRANCH"
-git merge --no-edit $COMMIT || exit 255
-
-# Test commit
-./hack/make.sh test; exit_status=$?
-
-# Display load if test fails
-if [ $exit_status -ne 0 ] ; then
- uptime; echo; free
-fi
-
-exit $exit_status
diff --git a/hack/infrastructure/docker-ci/dockertest/docker b/hack/infrastructure/docker-ci/dockertest/docker
new file mode 120000
index 0000000000..e3f094ee63
--- /dev/null
+++ b/hack/infrastructure/docker-ci/dockertest/docker
@@ -0,0 +1 @@
+project \ No newline at end of file
diff --git a/hack/infrastructure/docker-ci/dockertest/docker-registry b/hack/infrastructure/docker-ci/dockertest/docker-registry
new file mode 120000
index 0000000000..e3f094ee63
--- /dev/null
+++ b/hack/infrastructure/docker-ci/dockertest/docker-registry
@@ -0,0 +1 @@
+project \ No newline at end of file
diff --git a/hack/infrastructure/docker-ci/dockertest/nightlyrelease b/hack/infrastructure/docker-ci/dockertest/nightlyrelease
new file mode 100755
index 0000000000..475b088065
--- /dev/null
+++ b/hack/infrastructure/docker-ci/dockertest/nightlyrelease
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+if [ "$DEPLOYMENT" == "production" ]; then
+ AWS_S3_BUCKET='test.docker.io'
+else
+ AWS_S3_BUCKET='get-staging.docker.io'
+fi
+
+docker run -rm -privileged -v /run:/var/socket \
+ -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY \
+ -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE \
+ -e DOCKER_RELEASE=1 -e DEPLOYMENT=$DEPLOYMENT docker-ci/testbuilder docker
+
diff --git a/hack/infrastructure/docker-ci/dockertest/project b/hack/infrastructure/docker-ci/dockertest/project
new file mode 100755
index 0000000000..160f2d5d59
--- /dev/null
+++ b/hack/infrastructure/docker-ci/dockertest/project
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+set -x
+
+PROJECT_NAME=$(basename $0)
+
+docker run -rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \
+ -v /home/docker-ci/coverage/$PROJECT_NAME:/data docker-ci/testbuilder $PROJECT_NAME $1 $2 $3
+
diff --git a/hack/infrastructure/docker-ci/nginx/nginx.conf b/hack/infrastructure/docker-ci/nginx/nginx.conf
new file mode 100644
index 0000000000..6649741134
--- /dev/null
+++ b/hack/infrastructure/docker-ci/nginx/nginx.conf
@@ -0,0 +1,12 @@
+server {
+ listen 80;
+ root /data/docker-ci;
+
+ location / {
+ proxy_pass http://localhost:8000/;
+ }
+
+ location /coverage {
+ root /data/docker-ci;
+ }
+}
diff --git a/hack/infrastructure/docker-ci/nightlyrelease/Dockerfile b/hack/infrastructure/docker-ci/nightlyrelease/Dockerfile
deleted file mode 100644
index 2100a9e8e9..0000000000
--- a/hack/infrastructure/docker-ci/nightlyrelease/Dockerfile
+++ /dev/null
@@ -1,30 +0,0 @@
-# VERSION: 1.6
-# DOCKER-VERSION 0.6.6
-# AUTHOR: Daniel Mizyrycki <daniel@docker.com>
-# DESCRIPTION: Build docker nightly release using Docker in Docker.
-# REFERENCES: This code reuses the excellent implementation of docker in docker
-# made by Jerome Petazzoni. https://github.com/jpetazzo/dind
-# COMMENTS:
-# release_credentials.json is a base64 json encoded file containing:
-# { "AWS_ACCESS_KEY": "Test_docker_AWS_S3_bucket_id",
-# "AWS_SECRET_KEY": "Test_docker_AWS_S3_bucket_key",
-# "GPG_PASSPHRASE": "Test_docker_GPG_passphrase_signature" }
-# TO_BUILD: docker build -t dockerbuilder .
-# TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder hack/dind dockerbuild.sh
-
-from docker
-maintainer Daniel Mizyrycki <daniel@docker.com>
-
-# Add docker dependencies and downloading packages
-run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
-run apt-get update; apt-get install -y -q wget python2.7
-
-# Add production docker binary
-run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker
-
-# Add proto docker builder
-add ./dockerbuild.sh /usr/bin/dockerbuild.sh
-run chmod +x /usr/bin/dockerbuild.sh
-
-# Add release credentials
-add ./release_credentials.json /root/release_credentials.json
diff --git a/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh b/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh
deleted file mode 100644
index d5e58da7e1..0000000000
--- a/hack/infrastructure/docker-ci/nightlyrelease/dockerbuild.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env bash
-
-# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY and PG_PASSPHRASE are decoded
-# from /root/release_credentials.json
-# Variable AWS_S3_BUCKET is passed to the environment from docker run -e
-
-# Turn debug off to load credentials from the environment
-set +x
-eval $(cat /root/release_credentials.json | python -c '
-import sys,json,base64;
-d=json.loads(base64.b64decode(sys.stdin.read()));
-exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
-
-# Fetch docker master branch
-set -x
-cd /
-rm -rf /go
-git clone -q -b master http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
-cd /go/src/github.com/dotcloud/docker
-
-# Launch docker daemon using dind inside the container
-/usr/bin/docker version
-/usr/bin/docker -d &
-sleep 5
-
-# Build Docker release container
-docker build -t docker .
-
-# Test docker and if everything works well, release
-echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX docker hack/release.sh
-set +x
-docker run -privileged -i -t -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh
-exit_status=$?
-
-# Display load if test fails
-set -x
-if [ $exit_status -ne 0 ] ; then
- uptime; echo; free
- exit 1
-fi
diff --git a/hack/infrastructure/docker-ci/registry-coverage/Dockerfile b/hack/infrastructure/docker-ci/registry-coverage/Dockerfile
deleted file mode 100644
index e544645b67..0000000000
--- a/hack/infrastructure/docker-ci/registry-coverage/Dockerfile
+++ /dev/null
@@ -1,18 +0,0 @@
-# VERSION: 0.1
-# DOCKER-VERSION 0.6.4
-# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
-# DESCRIPTION: Docker registry coverage
-# COMMENTS: Add registry coverage into the docker-ci image
-# TO_BUILD: docker build -t registry_coverage .
-# TO_RUN: docker run registry_coverage
-
-from docker-ci
-maintainer Daniel Mizyrycki <daniel@dotcloud.com>
-
-# Add registry_coverager.sh and dependencies
-run pip install coverage flask pyyaml requests simplejson python-glanceclient \
- blinker redis boto gevent rsa mock
-add registry_coverage.sh /usr/bin/registry_coverage.sh
-run chmod +x /usr/bin/registry_coverage.sh
-
-cmd "/usr/bin/registry_coverage.sh"
diff --git a/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh b/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh
deleted file mode 100755
index c67b17eba0..0000000000
--- a/hack/infrastructure/docker-ci/registry-coverage/registry_coverage.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env bash
-
-set -x
-
-# Setup the environment
-REGISTRY_PATH=/data/docker-registry
-export SETTINGS_FLAVOR=test
-export DOCKER_REGISTRY_CONFIG=config_test.yml
-export PYTHONPATH=$REGISTRY_PATH/test
-
-# Fetch latest docker-registry master
-rm -rf $REGISTRY_PATH
-git clone https://github.com/dotcloud/docker-registry -b master $REGISTRY_PATH
-cd $REGISTRY_PATH
-
-# Generate coverage
-coverage run -m unittest discover test || exit 1
-coverage report --include='./*' --omit='./test/*'
diff --git a/hack/infrastructure/docker-ci/setup.sh b/hack/infrastructure/docker-ci/setup.sh
new file mode 100755
index 0000000000..65a00f6dd0
--- /dev/null
+++ b/hack/infrastructure/docker-ci/setup.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+# Set timezone
+echo "GMT" >/etc/timezone
+dpkg-reconfigure --frontend noninteractive tzdata
+
+# Set ssh superuser
+mkdir -p /data/buildbot /var/run/sshd /run
+useradd -m -d /home/sysadmin -s /bin/bash -G sudo,docker -p '*' sysadmin
+sed -Ei 's/(\%sudo.*) ALL/\1 NOPASSWD:ALL/' /etc/sudoers
+cd /home/sysadmin
+mkdir .ssh
+chmod 700 .ssh
+cat > .ssh/authorized_keys << 'EOF'
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7ALVhwQ68q1SjrKaAduOuOEAcWmb8kDZf5qA7T1fM8AP07EDC7nSKRJ8PXUBGTOQfxm89coJDuSJsTAZ+1PvglXhA0Mq6+knc6ZrZY+SuZlDIDAk4TOdVPoDZnmR1YW2McxHkhcGIOKeC8MMig5NeEjtgQwXzauUSPqeh8HMlLZRMooFYyyluIpn7NaCLzyWjwAQz2s3KyI7VE7hl+ncCrW86v+dciEdwqtzNoUMFb3iDpPxaiCl3rv+SB7co/5eUDTs1FZvUcYMXKQuf8R+2ZKzXOpwr0Zs8sKQXvXavCeWykwGgXLBjVkvrDcHuDD6UXCW63UKgmRECpLZaMBVIIRWLEEgTS5OSQTcxpMVe5zUW6sDvXHTcdPwWrcn1dE9F/0vLC0HJ4ADKelLX5zyTpmXGbuZuntIf1JO67D/K/P++uV1rmVIH+zgtOf23w5rX2zKb4BSTqP0sv61pmWV7MEVoEz6yXswcTjS92tb775v7XLU9vKAkt042ORFdE4/++hejhL/Lj52IRgjt1CJZHZsR9JywJZrz3kYuf8eU2J2FYh0Cpz5gmf0f+12Rt4HztnZxGPP4KuMa66e4+hpx1jynjMZ7D5QUnNYEmuvJByopn8HSluuY/kS5MMyZCZtJLEPGX4+yECX0Di/S0vCRl2NyqfCBqS+yXXT5SA1nFw== docker-test@docker.io
+EOF
+chmod 600 .ssh/authorized_keys
+chown -R sysadmin .ssh
+
+# Fix docker group id for use of host dockerd by sysadmin
+sed -Ei 's/(docker:x:)[^:]+/\1999/' /etc/group
+
+# Create buildbot configuration
+cd /data/buildbot; buildbot create-master master
+cp -a /data/buildbot/master/master.cfg.sample \
+ /data/buildbot/master/master.cfg
+cd /data/buildbot; \
+ buildslave create-slave slave localhost:9989 buildworker pass
+cp /docker-ci/buildbot/master.cfg /data/buildbot/master
+
+# Patch github webstatus to capture pull requests
+cp /docker-ci/buildbot/github.py /usr/local/lib/python2.7/dist-packages/buildbot/status/web/hooks
+chown -R sysadmin.sysadmin /data
+
+# Create nginx configuration
+rm /etc/nginx/sites-enabled/default
+cp /docker-ci/nginx/nginx.conf /etc/nginx/conf.d/buildbot.conf
+/bin/echo -e '\ndaemon off;\n' >> /etc/nginx/nginx.conf
+
+# Set supervisord buildbot, nginx and sshd processes
+/bin/echo -e "\
+[program:buildmaster]\n\
+command=twistd --nodaemon --no_save -y buildbot.tac\n\
+directory=/data/buildbot/master\n\
+user=sysadmin\n\n\
+[program:buildworker]\n\
+command=twistd --nodaemon --no_save -y buildbot.tac\n\
+directory=/data/buildbot/slave\n\
+user=sysadmin\n" > \
+ /etc/supervisor/conf.d/buildbot.conf
+/bin/echo -e "[program:nginx]\ncommand=/usr/sbin/nginx\n" > \
+ /etc/supervisor/conf.d/nginx.conf
+/bin/echo -e "[program:sshd]\ncommand=/usr/sbin/sshd -D\n" > \
+ /etc/supervisor/conf.d/sshd.conf
diff --git a/hack/infrastructure/docker-ci/testbuilder/Dockerfile b/hack/infrastructure/docker-ci/testbuilder/Dockerfile
new file mode 100644
index 0000000000..a008da6843
--- /dev/null
+++ b/hack/infrastructure/docker-ci/testbuilder/Dockerfile
@@ -0,0 +1,12 @@
+# TO_BUILD: docker build -rm -no-cache -t docker-ci/testbuilder .
+# TO_RUN: docker run -rm -u sysadmin \
+# -v /run:/var/socket docker-ci/testbuilder docker-registry
+#
+
+FROM docker-ci/docker-ci
+ENV HOME /home/sysadmin
+
+RUN mkdir /testbuilder
+ADD . /testbuilder
+
+ENTRYPOINT ["/testbuilder/testbuilder.sh"]
diff --git a/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh b/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh
new file mode 100755
index 0000000000..72087462ad
--- /dev/null
+++ b/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+set -x
+set -e
+PROJECT_PATH=$1
+
+# Build the docker project
+cd /data/$PROJECT_PATH
+sg docker -c "docker build -q -rm -t registry ."
+cd test; sg docker -c "docker build -q -rm -t docker-registry-test ."
+
+# Run the tests
+sg docker -c "docker run -rm -v /home/docker-ci/coverage/docker-registry:/data docker-registry-test"
diff --git a/hack/infrastructure/docker-ci/testbuilder/docker.sh b/hack/infrastructure/docker-ci/testbuilder/docker.sh
new file mode 100755
index 0000000000..b365dd7eaf
--- /dev/null
+++ b/hack/infrastructure/docker-ci/testbuilder/docker.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+set -x
+set -e
+PROJECT_PATH=$1
+
+# Build the docker project
+cd /data/$PROJECT_PATH
+sg docker -c "docker build -q -rm -t docker ."
+
+if [ "$DOCKER_RELEASE" == "1" ]; then
+ # Do nightly release
+ echo sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY= -e AWS_SECRET_KEY= -e GPG_PASSPHRASE= docker hack/release.sh"
+ set +x
+ sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh"
+else
+ # Run the tests
+ sg docker -c "docker run -rm -privileged -v /home/docker-ci/coverage/docker:/data docker ./hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh"
+fi
diff --git a/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh b/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh
new file mode 100755
index 0000000000..70701343c2
--- /dev/null
+++ b/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+# Download, build and run a docker project's tests
+# Environment variables: DEPLOYMENT
+
+cat $0
+set -e
+set -x
+
+PROJECT=$1
+COMMIT=${2-HEAD}
+REPO=${3-https://github.com/dotcloud/$PROJECT}
+BRANCH=${4-master}
+REPO_PROJ="https://github.com/docker-test/$PROJECT"
+if [ "$DEPLOYMENT" == "production" ]; then
+ REPO_PROJ="https://github.com/dotcloud/$PROJECT"
+fi
+set +x
+
+# Generate a random string of $1 characters
+function random {
+ cat /dev/urandom | tr -cd 'a-f0-9' | head -c $1
+}
+
+PROJECT_PATH="$PROJECT-tmp-$(random 12)"
+
+# Set docker-test git user
+set -x
+git config --global user.email "docker-test@docker.io"
+git config --global user.name "docker-test"
+
+# Fetch project
+git clone -q $REPO_PROJ -b master /data/$PROJECT_PATH
+cd /data/$PROJECT_PATH
+echo "Git commit: $(git rev-parse HEAD)"
+git fetch -q $REPO $BRANCH
+git merge --no-edit $COMMIT
+
+# Build the project dockertest
+/testbuilder/$PROJECT.sh $PROJECT_PATH
+rm -rf /data/$PROJECT_PATH
diff --git a/hack/infrastructure/docker-ci/tool/backup.py b/hack/infrastructure/docker-ci/tool/backup.py
new file mode 100755
index 0000000000..2db633e526
--- /dev/null
+++ b/hack/infrastructure/docker-ci/tool/backup.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+import os,sys,json
+from datetime import datetime
+from filecmp import cmp
+from subprocess import check_call
+from boto.s3.key import Key
+from boto.s3.connection import S3Connection
+
+def ENV(x):
+ '''Promote an environment variable for global use returning its value'''
+ retval = os.environ.get(x, '')
+ globals()[x] = retval
+ return retval
+
+ROOT_PATH = '/data/backup/docker-ci'
+TODAY = str(datetime.today())[:10]
+BACKUP_FILE = '{}/docker-ci_{}.tgz'.format(ROOT_PATH, TODAY)
+BACKUP_LINK = '{}/docker-ci.tgz'.format(ROOT_PATH)
+ENV('BACKUP_BUCKET')
+ENV('BACKUP_AWS_ID')
+ENV('BACKUP_AWS_SECRET')
+
+'''Create full master buildbot backup, avoiding duplicates'''
+# Ensure backup path exist
+if not os.path.exists(ROOT_PATH):
+ os.makedirs(ROOT_PATH)
+# Make actual backups
+check_call('/bin/tar czf {} -C /data --exclude=backup --exclude=buildbot/slave'
+ ' . 1>/dev/null 2>&1'.format(BACKUP_FILE),shell=True)
+# remove previous dump if it is the same as the latest
+if (os.path.exists(BACKUP_LINK) and cmp(BACKUP_FILE, BACKUP_LINK) and
+ os.path._resolve_link(BACKUP_LINK) != BACKUP_FILE):
+ os.unlink(os.path._resolve_link(BACKUP_LINK))
+# Recreate backup link pointing to latest backup
+try:
+ os.unlink(BACKUP_LINK)
+except:
+ pass
+os.symlink(BACKUP_FILE, BACKUP_LINK)
+
+# Make backup on S3
+bucket = S3Connection(BACKUP_AWS_ID,BACKUP_AWS_SECRET).get_bucket(BACKUP_BUCKET)
+k = Key(bucket)
+k.key = BACKUP_FILE
+k.set_contents_from_filename(BACKUP_FILE)
+bucket.copy_key(os.path.basename(BACKUP_LINK),BACKUP_BUCKET,BACKUP_FILE[1:])
diff --git a/hack/vendor.sh b/hack/vendor.sh
index d3e7ea9f43..184cb750a5 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -50,3 +50,11 @@ clone git github.com/syndtr/gocapability 3454319be2
clone hg code.google.com/p/go.net 84a4013f96e0
clone hg code.google.com/p/gosqlite 74691fb6f837
+
+# get Go tip's archive/tar, for xattr support
+# TODO after Go 1.3 drops, bump our minimum supported version and drop this vendored dep
+clone hg code.google.com/p/go a15f344a9efa
+mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar
+rm -rf src/code.google.com/p/go
+mkdir -p src/code.google.com/p/go/src/pkg/archive
+mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar
diff --git a/integration/api_test.go b/integration/api_test.go
index c587f111a2..5779e6b226 100644
--- a/integration/api_test.go
+++ b/integration/api_test.go
@@ -1,9 +1,9 @@
package docker
import (
- "archive/tar"
"bufio"
"bytes"
+ "code.google.com/p/go/src/pkg/archive/tar"
"encoding/json"
"fmt"
"github.com/dotcloud/docker"
@@ -389,6 +389,77 @@ func TestGetContainersExport(t *testing.T) {
}
}
+func TestSaveImageAndThenLoad(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkRuntimeFromEngine(eng, t).Nuke()
+
+ // save image
+ r := httptest.NewRecorder()
+ req, err := http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusOK {
+ t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
+ }
+ tarball := r.Body
+
+ // delete the image
+ r = httptest.NewRecorder()
+ req, err = http.NewRequest("DELETE", "/images/"+unitTestImageID, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusOK {
+ t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
+ }
+
+ // make sure there is no image
+ r = httptest.NewRecorder()
+ req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusNotFound {
+ t.Fatalf("%d NotFound expected, received %d\n", http.StatusNotFound, r.Code)
+ }
+
+ // load the image
+ r = httptest.NewRecorder()
+ req, err = http.NewRequest("POST", "/images/load", tarball)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusOK {
+ t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
+ }
+
+ // finally make sure the image is there
+ r = httptest.NewRecorder()
+ req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusOK {
+ t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
+ }
+}
+
func TestGetContainersChanges(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
@@ -1217,6 +1288,34 @@ func TestPostContainersCopy(t *testing.T) {
}
}
+func TestPostContainersCopyWhenContainerNotFound(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer mkRuntimeFromEngine(eng, t).Nuke()
+
+ r := httptest.NewRecorder()
+
+ var copyData engine.Env
+ copyData.Set("Resource", "/test.txt")
+ copyData.Set("HostPath", ".")
+
+ jsonData := bytes.NewBuffer(nil)
+ if err := copyData.Encode(jsonData); err != nil {
+ t.Fatal(err)
+ }
+
+ req, err := http.NewRequest("POST", "/containers/id_not_found/copy", jsonData)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.Header.Add("Content-Type", "application/json")
+ if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if r.Code != http.StatusNotFound {
+ t.Fatalf("404 expected for id_not_found Container, received %v", r.Code)
+ }
+}
+
// Mocked types for tests
type NopConn struct {
io.ReadCloser
diff --git a/integration/utils_test.go b/integration/utils_test.go
index 77328b2511..947ace11d9 100644
--- a/integration/utils_test.go
+++ b/integration/utils_test.go
@@ -1,8 +1,8 @@
package docker
import (
- "archive/tar"
"bytes"
+ "code.google.com/p/go/src/pkg/archive/tar"
"fmt"
"io"
"io/ioutil"
diff --git a/networkdriver/network_test.go b/networkdriver/network_test.go
index c15f8b1cf5..6224c2dffb 100644
--- a/networkdriver/network_test.go
+++ b/networkdriver/network_test.go
@@ -105,7 +105,7 @@ func TestNetworkOverlaps(t *testing.T) {
//netY starts before and ends at same IP of netX
AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t)
//netY starts before and ends outside of netX
- AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t)
+ AssertOverlap("172.16.1.1/24", "172.16.0.1/22", t)
//netY starts and ends before netX
AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t)
//netX starts and ends before netY
diff --git a/pkg/netlink/MAINTAINERS b/pkg/netlink/MAINTAINERS
new file mode 100644
index 0000000000..e53d933d47
--- /dev/null
+++ b/pkg/netlink/MAINTAINERS
@@ -0,0 +1,2 @@
+Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
+Guillaume Charmes <guillaume@dotcloud.com> (@creack)
diff --git a/reflink_copy_linux.go b/reflink_copy_linux.go
deleted file mode 100644
index 74a0cb98f7..0000000000
--- a/reflink_copy_linux.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// +build amd64
-
-package docker
-
-// FIXME: This could be easily rewritten in pure Go
-
-/*
-#include <sys/ioctl.h>
-#include <linux/fs.h>
-#include <errno.h>
-
-// See linux.git/fs/btrfs/ioctl.h
-#define BTRFS_IOCTL_MAGIC 0x94
-#define BTRFS_IOC_CLONE _IOW(BTRFS_IOCTL_MAGIC, 9, int)
-
-int
-btrfs_reflink(int fd_out, int fd_in)
-{
- int res;
- res = ioctl(fd_out, BTRFS_IOC_CLONE, fd_in);
- if (res < 0)
- return errno;
- return 0;
-}
-
-*/
-import "C"
-
-import (
- "io"
- "os"
- "syscall"
-)
-
-// FIXME: Move this to btrfs package?
-
-func BtrfsReflink(fd_out, fd_in uintptr) error {
- res := C.btrfs_reflink(C.int(fd_out), C.int(fd_in))
- if res != 0 {
- return syscall.Errno(res)
- }
- return nil
-}
-
-func CopyFile(dstFile, srcFile *os.File) error {
- err := BtrfsReflink(dstFile.Fd(), srcFile.Fd())
- if err == nil {
- return nil
- }
-
- // Fall back to normal copy
- // FIXME: Check the return of Copy and compare with dstFile.Stat().Size
- _, err = io.Copy(dstFile, srcFile)
- return err
-}
diff --git a/reflink_copy_unsupported.go b/reflink_copy_unsupported.go
deleted file mode 100644
index 271ed0178f..0000000000
--- a/reflink_copy_unsupported.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build !linux !amd64
-
-package docker
-
-import (
- "io"
- "os"
-)
-
-func CopyFile(dstFile, srcFile *os.File) error {
- // No BTRFS reflink suppport, Fall back to normal copy
-
- // FIXME: Check the return of Copy and compare with dstFile.Stat().Size
- _, err := io.Copy(dstFile, srcFile)
- return err
-}
diff --git a/server.go b/server.go
index bad8d8bfb5..46ab33b467 100644
--- a/server.go
+++ b/server.go
@@ -457,7 +457,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status {
}
defer os.RemoveAll(root)
- if output, err := exec.Command("git", "clone", remoteURL, root).CombinedOutput(); err != nil {
+ if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
return job.Errorf("Error trying to use git: %s (%s)", err, output)
}
diff --git a/tags_unit_test.go b/tags_unit_test.go
index 1341b989fe..b6236280a8 100644
--- a/tags_unit_test.go
+++ b/tags_unit_test.go
@@ -31,6 +31,8 @@ func mkTestTagStore(root string, t *testing.T) *TagStore {
t.Fatal(err)
}
img := &Image{ID: testImageID}
+ // FIXME: this fails on Darwin with:
+ // tags_unit_test.go:36: mkdir /var/folders/7g/b3ydb5gx4t94ndr_cljffbt80000gq/T/docker-test569b-tRunner-075013689/vfs/dir/foo/etc/postgres: permission denied
if err := graph.Register(nil, archive, img); err != nil {
t.Fatal(err)
}
diff --git a/utils/tarsum.go b/utils/tarsum.go
index 786196b6b4..ddeecfb450 100644
--- a/utils/tarsum.go
+++ b/utils/tarsum.go
@@ -1,8 +1,8 @@
package utils
import (
- "archive/tar"
"bytes"
+ "code.google.com/p/go/src/pkg/archive/tar"
"compress/gzip"
"crypto/sha256"
"encoding/hex"
diff --git a/utils_test.go b/utils_test.go
index 4b8cfba39f..6917007575 100644
--- a/utils_test.go
+++ b/utils_test.go
@@ -1,8 +1,8 @@
package docker
import (
- "archive/tar"
"bytes"
+ "code.google.com/p/go/src/pkg/archive/tar"
"io"
)
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
new file mode 100644
index 0000000000..e8b973c1fa
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
@@ -0,0 +1,304 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tar implements access to tar archives.
+// It aims to cover most of the variations, including those produced
+// by GNU and BSD tars.
+//
+// References:
+// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
+// http://www.gnu.org/software/tar/manual/html_node/Standard.html
+// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
+package tar
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "time"
+)
+
+const (
+ blockSize = 512
+
+ // Types
+ TypeReg = '0' // regular file
+ TypeRegA = '\x00' // regular file
+ TypeLink = '1' // hard link
+ TypeSymlink = '2' // symbolic link
+ TypeChar = '3' // character device node
+ TypeBlock = '4' // block device node
+ TypeDir = '5' // directory
+ TypeFifo = '6' // fifo node
+ TypeCont = '7' // reserved
+ TypeXHeader = 'x' // extended header
+ TypeXGlobalHeader = 'g' // global extended header
+ TypeGNULongName = 'L' // Next file has a long name
+ TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
+)
+
+// A Header represents a single header in a tar archive.
+// Some fields may not be populated.
+type Header struct {
+ Name string // name of header file entry
+ Mode int64 // permission and mode bits
+ Uid int // user id of owner
+ Gid int // group id of owner
+ Size int64 // length in bytes
+ ModTime time.Time // modified time
+ Typeflag byte // type of header entry
+ Linkname string // target name of link
+ Uname string // user name of owner
+ Gname string // group name of owner
+ Devmajor int64 // major number of character or block device
+ Devminor int64 // minor number of character or block device
+ AccessTime time.Time // access time
+ ChangeTime time.Time // status change time
+ Xattrs map[string]string
+}
+
+// File name constants from the tar spec.
+const (
+ fileNameSize = 100 // Maximum number of bytes in a standard tar name.
+ fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
+)
+
+// FileInfo returns an os.FileInfo for the Header.
+func (h *Header) FileInfo() os.FileInfo {
+ return headerFileInfo{h}
+}
+
+// headerFileInfo implements os.FileInfo.
+type headerFileInfo struct {
+ h *Header
+}
+
+func (fi headerFileInfo) Size() int64 { return fi.h.Size }
+func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
+func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
+func (fi headerFileInfo) Sys() interface{} { return fi.h }
+
+// Name returns the base name of the file.
+func (fi headerFileInfo) Name() string {
+ if fi.IsDir() {
+ return path.Base(path.Clean(fi.h.Name))
+ }
+ return path.Base(fi.h.Name)
+}
+
+// Mode returns the permission and mode bits for the headerFileInfo.
+func (fi headerFileInfo) Mode() (mode os.FileMode) {
+ // Set file permission bits.
+ mode = os.FileMode(fi.h.Mode).Perm()
+
+ // Set setuid, setgid and sticky bits.
+ if fi.h.Mode&c_ISUID != 0 {
+ // setuid
+ mode |= os.ModeSetuid
+ }
+ if fi.h.Mode&c_ISGID != 0 {
+ // setgid
+ mode |= os.ModeSetgid
+ }
+ if fi.h.Mode&c_ISVTX != 0 {
+ // sticky
+ mode |= os.ModeSticky
+ }
+
+ // Set file mode bits.
+ // clear perm, setuid, setgid and sticky bits.
+ m := os.FileMode(fi.h.Mode) &^ 07777
+ if m == c_ISDIR {
+ // directory
+ mode |= os.ModeDir
+ }
+ if m == c_ISFIFO {
+ // named pipe (FIFO)
+ mode |= os.ModeNamedPipe
+ }
+ if m == c_ISLNK {
+ // symbolic link
+ mode |= os.ModeSymlink
+ }
+ if m == c_ISBLK {
+ // device file
+ mode |= os.ModeDevice
+ }
+ if m == c_ISCHR {
+ // Unix character device
+ mode |= os.ModeDevice
+ mode |= os.ModeCharDevice
+ }
+ if m == c_ISSOCK {
+ // Unix domain socket
+ mode |= os.ModeSocket
+ }
+
+ switch fi.h.Typeflag {
+ case TypeLink, TypeSymlink:
+ // hard link, symbolic link
+ mode |= os.ModeSymlink
+ case TypeChar:
+ // character device node
+ mode |= os.ModeDevice
+ mode |= os.ModeCharDevice
+ case TypeBlock:
+ // block device node
+ mode |= os.ModeDevice
+ case TypeDir:
+ // directory
+ mode |= os.ModeDir
+ case TypeFifo:
+ // fifo node
+ mode |= os.ModeNamedPipe
+ }
+
+ return mode
+}
+
+// sysStat, if non-nil, populates h from system-dependent fields of fi.
+var sysStat func(fi os.FileInfo, h *Header) error
+
+// Mode constants from the tar spec.
+const (
+ c_ISUID = 04000 // Set uid
+ c_ISGID = 02000 // Set gid
+ c_ISVTX = 01000 // Save text (sticky bit)
+ c_ISDIR = 040000 // Directory
+ c_ISFIFO = 010000 // FIFO
+ c_ISREG = 0100000 // Regular file
+ c_ISLNK = 0120000 // Symbolic link
+ c_ISBLK = 060000 // Block special file
+ c_ISCHR = 020000 // Character special file
+ c_ISSOCK = 0140000 // Socket
+)
+
+// Keywords for the PAX Extended Header
+const (
+ paxAtime = "atime"
+ paxCharset = "charset"
+ paxComment = "comment"
+ paxCtime = "ctime" // please note that ctime is not a valid pax header.
+ paxGid = "gid"
+ paxGname = "gname"
+ paxLinkpath = "linkpath"
+ paxMtime = "mtime"
+ paxPath = "path"
+ paxSize = "size"
+ paxUid = "uid"
+ paxUname = "uname"
+ paxXattr = "SCHILY.xattr."
+ paxNone = ""
+)
+
+// FileInfoHeader creates a partially-populated Header from fi.
+// If fi describes a symlink, FileInfoHeader records link as the link target.
+// If fi describes a directory, a slash is appended to the name.
+// Because os.FileInfo's Name method returns only the base name of
+// the file it describes, it may be necessary to modify the Name field
+// of the returned header to provide the full path name of the file.
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
+ if fi == nil {
+ return nil, errors.New("tar: FileInfo is nil")
+ }
+ fm := fi.Mode()
+ h := &Header{
+ Name: fi.Name(),
+ ModTime: fi.ModTime(),
+ Mode: int64(fm.Perm()), // or'd with c_IS* constants later
+ }
+ switch {
+ case fm.IsRegular():
+ h.Mode |= c_ISREG
+ h.Typeflag = TypeReg
+ h.Size = fi.Size()
+ case fi.IsDir():
+ h.Typeflag = TypeDir
+ h.Mode |= c_ISDIR
+ h.Name += "/"
+ case fm&os.ModeSymlink != 0:
+ h.Typeflag = TypeSymlink
+ h.Mode |= c_ISLNK
+ h.Linkname = link
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ h.Mode |= c_ISCHR
+ h.Typeflag = TypeChar
+ } else {
+ h.Mode |= c_ISBLK
+ h.Typeflag = TypeBlock
+ }
+ case fm&os.ModeNamedPipe != 0:
+ h.Typeflag = TypeFifo
+ h.Mode |= c_ISFIFO
+ case fm&os.ModeSocket != 0:
+ h.Mode |= c_ISSOCK
+ default:
+ return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
+ }
+ if fm&os.ModeSetuid != 0 {
+ h.Mode |= c_ISUID
+ }
+ if fm&os.ModeSetgid != 0 {
+ h.Mode |= c_ISGID
+ }
+ if fm&os.ModeSticky != 0 {
+ h.Mode |= c_ISVTX
+ }
+ if sysStat != nil {
+ return h, sysStat(fi, h)
+ }
+ return h, nil
+}
+
+var zeroBlock = make([]byte, blockSize)
+
+// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
+// We compute and return both.
+func checksum(header []byte) (unsigned int64, signed int64) {
+ for i := 0; i < len(header); i++ {
+ if i == 148 {
+ // The chksum field (header[148:156]) is special: it should be treated as space bytes.
+ unsigned += ' ' * 8
+ signed += ' ' * 8
+ i += 7
+ continue
+ }
+ unsigned += int64(header[i])
+ signed += int64(int8(header[i]))
+ }
+ return
+}
+
+type slicer []byte
+
+func (sp *slicer) next(n int) (b []byte) {
+ s := *sp
+ b, *sp = s[0:n], s[n:]
+ return
+}
+
+func isASCII(s string) bool {
+ for _, c := range s {
+ if c >= 0x80 {
+ return false
+ }
+ }
+ return true
+}
+
+func toASCII(s string) string {
+ if isASCII(s) {
+ return s
+ }
+ var buf bytes.Buffer
+ for _, c := range s {
+ if c < 0x80 {
+ buf.WriteByte(byte(c))
+ }
+ }
+ return buf.String()
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
new file mode 100644
index 0000000000..351eaa0e6c
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
@@ -0,0 +1,79 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar_test
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+)
+
+func Example() {
+ // Create a buffer to write our archive to.
+ buf := new(bytes.Buffer)
+
+ // Create a new tar archive.
+ tw := tar.NewWriter(buf)
+
+ // Add some files to the archive.
+ var files = []struct {
+ Name, Body string
+ }{
+ {"readme.txt", "This archive contains some text files."},
+ {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
+ {"todo.txt", "Get animal handling licence."},
+ }
+ for _, file := range files {
+ hdr := &tar.Header{
+ Name: file.Name,
+ Size: int64(len(file.Body)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ log.Fatalln(err)
+ }
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
+ log.Fatalln(err)
+ }
+ }
+ // Make sure to check the error on Close.
+ if err := tw.Close(); err != nil {
+ log.Fatalln(err)
+ }
+
+ // Open the tar archive for reading.
+ r := bytes.NewReader(buf.Bytes())
+ tr := tar.NewReader(r)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ log.Fatalln(err)
+ }
+ fmt.Printf("Contents of %s:\n", hdr.Name)
+ if _, err := io.Copy(os.Stdout, tr); err != nil {
+ log.Fatalln(err)
+ }
+ fmt.Println()
+ }
+
+ // Output:
+ // Contents of readme.txt:
+ // This archive contains some text files.
+ // Contents of gopher.txt:
+ // Gopher names:
+ // George
+ // Geoffrey
+ // Gonzo
+ // Contents of todo.txt:
+ // Get animal handling licence.
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
new file mode 100644
index 0000000000..7cb6e649c7
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
@@ -0,0 +1,402 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - pax extensions
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ ErrHeader = errors.New("archive/tar: invalid tar header")
+)
+
+const maxNanoSecondIntSize = 9
+
+// A Reader provides sequential access to the contents of a tar archive.
+// A tar archive consists of a sequence of files.
+// The Next method advances to the next file in the archive (including the first),
+// and then it can be treated as an io.Reader to access the file's data.
+type Reader struct {
+ r io.Reader
+ err error
+ nb int64 // number of unread bytes for current file entry
+ pad int64 // amount of padding (ignored) after current file entry
+}
+
+// NewReader creates a new Reader reading from r.
+func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
+
+// Next advances to the next entry in the tar archive.
+func (tr *Reader) Next() (*Header, error) {
+ var hdr *Header
+ if tr.err == nil {
+ tr.skipUnread()
+ }
+ if tr.err != nil {
+ return hdr, tr.err
+ }
+ hdr = tr.readHeader()
+ if hdr == nil {
+ return hdr, tr.err
+ }
+ // Check for PAX/GNU header.
+ switch hdr.Typeflag {
+ case TypeXHeader:
+ // PAX extended header
+ headers, err := parsePAX(tr)
+ if err != nil {
+ return nil, err
+ }
+ // We actually read the whole file,
+ // but this skips alignment padding
+ tr.skipUnread()
+ hdr = tr.readHeader()
+ mergePAX(hdr, headers)
+ return hdr, nil
+ case TypeGNULongName:
+ // We have a GNU long name header. Its contents are the real file name.
+ realname, err := ioutil.ReadAll(tr)
+ if err != nil {
+ return nil, err
+ }
+ hdr, err := tr.Next()
+ hdr.Name = cString(realname)
+ return hdr, err
+ case TypeGNULongLink:
+ // We have a GNU long link header.
+ realname, err := ioutil.ReadAll(tr)
+ if err != nil {
+ return nil, err
+ }
+ hdr, err := tr.Next()
+ hdr.Linkname = cString(realname)
+ return hdr, err
+ }
+ return hdr, tr.err
+}
+
+// mergePAX merges well known headers according to PAX standard.
+// In general headers with the same name as those found
+// in the header struct overwrite those found in the header
+// struct with higher precision or longer values. Esp. useful
+// for name and linkname fields.
+func mergePAX(hdr *Header, headers map[string]string) error {
+ for k, v := range headers {
+ switch k {
+ case paxPath:
+ hdr.Name = v
+ case paxLinkpath:
+ hdr.Linkname = v
+ case paxGname:
+ hdr.Gname = v
+ case paxUname:
+ hdr.Uname = v
+ case paxUid:
+ uid, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = int(uid)
+ case paxGid:
+ gid, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Gid = int(gid)
+ case paxAtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.AccessTime = t
+ case paxMtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.ModTime = t
+ case paxCtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.ChangeTime = t
+ case paxSize:
+ size, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Size = int64(size)
+ default:
+ if strings.HasPrefix(k, paxXattr) {
+ if hdr.Xattrs == nil {
+ hdr.Xattrs = make(map[string]string)
+ }
+ hdr.Xattrs[k[len(paxXattr):]] = v
+ }
+ }
+ }
+ return nil
+}
+
+// parsePAXTime takes a string of the form %d.%d as described in
+// the PAX specification.
+func parsePAXTime(t string) (time.Time, error) {
+ buf := []byte(t)
+ pos := bytes.IndexByte(buf, '.')
+ var seconds, nanoseconds int64
+ var err error
+ if pos == -1 {
+ seconds, err = strconv.ParseInt(t, 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ } else {
+ seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ nano_buf := string(buf[pos+1:])
+ // Pad as needed before converting to a decimal.
+ // For example .030 -> .030000000 -> 30000000 nanoseconds
+ if len(nano_buf) < maxNanoSecondIntSize {
+ // Right pad
+ nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
+ } else if len(nano_buf) > maxNanoSecondIntSize {
+ // Right truncate
+ nano_buf = nano_buf[:maxNanoSecondIntSize]
+ }
+ nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ }
+ ts := time.Unix(seconds, nanoseconds)
+ return ts, nil
+}
+
+// parsePAX parses PAX headers.
+// If an extended header (type 'x') is invalid, ErrHeader is returned
+func parsePAX(r io.Reader) (map[string]string, error) {
+ buf, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ headers := make(map[string]string)
+ // Each record is constructed as
+ // "%d %s=%s\n", length, keyword, value
+ for len(buf) > 0 {
+ // or the header was empty to start with.
+ var sp int
+ // The size field ends at the first space.
+ sp = bytes.IndexByte(buf, ' ')
+ if sp == -1 {
+ return nil, ErrHeader
+ }
+ // Parse the first token as a decimal integer.
+ n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ // Extract everything between the decimal and the n -1 on the
+		// beginning to eat the ' ', -1 on the end to skip the newline.
+ var record []byte
+ record, buf = buf[sp+1:n-1], buf[n:]
+ // The first equals is guaranteed to mark the end of the key.
+ // Everything else is value.
+ eq := bytes.IndexByte(record, '=')
+ if eq == -1 {
+ return nil, ErrHeader
+ }
+ key, value := record[:eq], record[eq+1:]
+ headers[string(key)] = string(value)
+ }
+ return headers, nil
+}
+
+// cString parses bytes as a NUL-terminated C-style string.
+// If a NUL byte is not found then the whole slice is returned as a string.
+func cString(b []byte) string {
+ n := 0
+ for n < len(b) && b[n] != 0 {
+ n++
+ }
+ return string(b[0:n])
+}
+
+func (tr *Reader) octal(b []byte) int64 {
+ // Check for binary format first.
+ if len(b) > 0 && b[0]&0x80 != 0 {
+ var x int64
+ for i, c := range b {
+ if i == 0 {
+				c &= 0x7f // ignore sign bit in first byte
+ }
+ x = x<<8 | int64(c)
+ }
+ return x
+ }
+
+ // Because unused fields are filled with NULs, we need
+ // to skip leading NULs. Fields may also be padded with
+ // spaces or NULs.
+ // So we remove leading and trailing NULs and spaces to
+ // be sure.
+ b = bytes.Trim(b, " \x00")
+
+ if len(b) == 0 {
+ return 0
+ }
+ x, err := strconv.ParseUint(cString(b), 8, 64)
+ if err != nil {
+ tr.err = err
+ }
+ return int64(x)
+}
+
+// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
+func (tr *Reader) skipUnread() {
+ nr := tr.nb + tr.pad // number of bytes to skip
+ tr.nb, tr.pad = 0, 0
+ if sr, ok := tr.r.(io.Seeker); ok {
+ if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
+ return
+ }
+ }
+ _, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
+}
+
+func (tr *Reader) verifyChecksum(header []byte) bool {
+ if tr.err != nil {
+ return false
+ }
+
+ given := tr.octal(header[148:156])
+ unsigned, signed := checksum(header)
+ return given == unsigned || given == signed
+}
+
+func (tr *Reader) readHeader() *Header {
+ header := make([]byte, blockSize)
+ if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
+ return nil
+ }
+
+ // Two blocks of zero bytes marks the end of the archive.
+ if bytes.Equal(header, zeroBlock[0:blockSize]) {
+ if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
+ return nil
+ }
+ if bytes.Equal(header, zeroBlock[0:blockSize]) {
+ tr.err = io.EOF
+ } else {
+ tr.err = ErrHeader // zero block and then non-zero block
+ }
+ return nil
+ }
+
+ if !tr.verifyChecksum(header) {
+ tr.err = ErrHeader
+ return nil
+ }
+
+ // Unpack
+ hdr := new(Header)
+ s := slicer(header)
+
+ hdr.Name = cString(s.next(100))
+ hdr.Mode = tr.octal(s.next(8))
+ hdr.Uid = int(tr.octal(s.next(8)))
+ hdr.Gid = int(tr.octal(s.next(8)))
+ hdr.Size = tr.octal(s.next(12))
+ hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
+ s.next(8) // chksum
+ hdr.Typeflag = s.next(1)[0]
+ hdr.Linkname = cString(s.next(100))
+
+ // The remainder of the header depends on the value of magic.
+ // The original (v7) version of tar had no explicit magic field,
+ // so its magic bytes, like the rest of the block, are NULs.
+ magic := string(s.next(8)) // contains version field as well.
+ var format string
+ switch magic {
+ case "ustar\x0000": // POSIX tar (1003.1-1988)
+ if string(header[508:512]) == "tar\x00" {
+ format = "star"
+ } else {
+ format = "posix"
+ }
+ case "ustar \x00": // old GNU tar
+ format = "gnu"
+ }
+
+ switch format {
+ case "posix", "gnu", "star":
+ hdr.Uname = cString(s.next(32))
+ hdr.Gname = cString(s.next(32))
+ devmajor := s.next(8)
+ devminor := s.next(8)
+ if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
+ hdr.Devmajor = tr.octal(devmajor)
+ hdr.Devminor = tr.octal(devminor)
+ }
+ var prefix string
+ switch format {
+ case "posix", "gnu":
+ prefix = cString(s.next(155))
+ case "star":
+ prefix = cString(s.next(131))
+ hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0)
+ hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0)
+ }
+ if len(prefix) > 0 {
+ hdr.Name = prefix + "/" + hdr.Name
+ }
+ }
+
+ if tr.err != nil {
+ tr.err = ErrHeader
+ return nil
+ }
+
+ // Maximum value of hdr.Size is 64 GB (12 octal digits),
+ // so there's no risk of int64 overflowing.
+ tr.nb = int64(hdr.Size)
+ tr.pad = -tr.nb & (blockSize - 1) // blockSize is a power of two
+
+ return hdr
+}
+
+// Read reads from the current entry in the tar archive.
+// It returns 0, io.EOF when it reaches the end of that entry,
+// until Next is called to advance to the next entry.
+func (tr *Reader) Read(b []byte) (n int, err error) {
+ if tr.nb == 0 {
+ // file consumed
+ return 0, io.EOF
+ }
+
+ if int64(len(b)) > tr.nb {
+ b = b[0:tr.nb]
+ }
+ n, err = tr.r.Read(b)
+ tr.nb -= int64(n)
+
+ if err == io.EOF && tr.nb > 0 {
+ err = io.ErrUnexpectedEOF
+ }
+ tr.err = err
+ return
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
new file mode 100644
index 0000000000..f84dbebe98
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
@@ -0,0 +1,425 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "bytes"
+ "crypto/md5"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+type untarTest struct {
+ file string
+ headers []*Header
+ cksums []string
+}
+
+var gnuTarTest = &untarTest{
+ file: "testdata/gnu.tar",
+ headers: []*Header{
+ {
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ ModTime: time.Unix(1244428340, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ {
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ ModTime: time.Unix(1244436044, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ },
+ cksums: []string{
+ "e38b27eaccb4391bdec553a7f3ae6b2f",
+ "c65bd2e50a56a2138bf1716f2fd56fe9",
+ },
+}
+
+var untarTests = []*untarTest{
+ gnuTarTest,
+ {
+ file: "testdata/star.tar",
+ headers: []*Header{
+ {
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ ModTime: time.Unix(1244592783, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ AccessTime: time.Unix(1244592783, 0),
+ ChangeTime: time.Unix(1244592783, 0),
+ },
+ {
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ ModTime: time.Unix(1244592783, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ AccessTime: time.Unix(1244592783, 0),
+ ChangeTime: time.Unix(1244592783, 0),
+ },
+ },
+ },
+ {
+ file: "testdata/v7.tar",
+ headers: []*Header{
+ {
+ Name: "small.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ ModTime: time.Unix(1244593104, 0),
+ Typeflag: '\x00',
+ },
+ {
+ Name: "small2.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ ModTime: time.Unix(1244593104, 0),
+ Typeflag: '\x00',
+ },
+ },
+ },
+ {
+ file: "testdata/pax.tar",
+ headers: []*Header{
+ {
+ Name: "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
+ Mode: 0664,
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "shane",
+ Gname: "shane",
+ Size: 7,
+ ModTime: time.Unix(1350244992, 23960108),
+ ChangeTime: time.Unix(1350244992, 23960108),
+ AccessTime: time.Unix(1350244992, 23960108),
+ Typeflag: TypeReg,
+ },
+ {
+ Name: "a/b",
+ Mode: 0777,
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "shane",
+ Gname: "shane",
+ Size: 0,
+ ModTime: time.Unix(1350266320, 910238425),
+ ChangeTime: time.Unix(1350266320, 910238425),
+ AccessTime: time.Unix(1350266320, 910238425),
+ Typeflag: TypeSymlink,
+ Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
+ },
+ },
+ },
+ {
+ file: "testdata/nil-uid.tar", // golang.org/issue/5290
+ headers: []*Header{
+ {
+ Name: "P1050238.JPG.log",
+ Mode: 0664,
+ Uid: 0,
+ Gid: 0,
+ Size: 14,
+ ModTime: time.Unix(1365454838, 0),
+ Typeflag: TypeReg,
+ Linkname: "",
+ Uname: "eyefi",
+ Gname: "eyefi",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ },
+ },
+ {
+ file: "testdata/xattrs.tar",
+ headers: []*Header{
+ {
+ Name: "small.txt",
+ Mode: 0644,
+ Uid: 1000,
+ Gid: 10,
+ Size: 5,
+ ModTime: time.Unix(1386065770, 448252320),
+ Typeflag: '0',
+ Uname: "alex",
+ Gname: "wheel",
+ AccessTime: time.Unix(1389782991, 419875220),
+ ChangeTime: time.Unix(1389782956, 794414986),
+ Xattrs: map[string]string{
+ "user.key": "value",
+ "user.key2": "value2",
+ // Interestingly, selinux encodes the terminating null inside the xattr
+ "security.selinux": "unconfined_u:object_r:default_t:s0\x00",
+ },
+ },
+ {
+ Name: "small2.txt",
+ Mode: 0644,
+ Uid: 1000,
+ Gid: 10,
+ Size: 11,
+ ModTime: time.Unix(1386065770, 449252304),
+ Typeflag: '0',
+ Uname: "alex",
+ Gname: "wheel",
+ AccessTime: time.Unix(1389782991, 419875220),
+ ChangeTime: time.Unix(1386065770, 449252304),
+ Xattrs: map[string]string{
+ "security.selinux": "unconfined_u:object_r:default_t:s0\x00",
+ },
+ },
+ },
+ },
+}
+
+func TestReader(t *testing.T) {
+testLoop:
+ for i, test := range untarTests {
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ continue
+ }
+ defer f.Close()
+ tr := NewReader(f)
+ for j, header := range test.headers {
+ hdr, err := tr.Next()
+ if err != nil || hdr == nil {
+ t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
+ f.Close()
+ continue testLoop
+ }
+ if !reflect.DeepEqual(*hdr, *header) {
+ t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
+ i, j, *hdr, *header)
+ }
+ }
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ continue testLoop
+ }
+ if hdr != nil || err != nil {
+ t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err)
+ }
+ }
+}
+
+func TestPartialRead(t *testing.T) {
+ f, err := os.Open("testdata/gnu.tar")
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ tr := NewReader(f)
+
+ // Read the first four bytes; Next() should skip the last byte.
+ hdr, err := tr.Next()
+ if err != nil || hdr == nil {
+ t.Fatalf("Didn't get first file: %v", err)
+ }
+ buf := make([]byte, 4)
+ if _, err := io.ReadFull(tr, buf); err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if expected := []byte("Kilt"); !bytes.Equal(buf, expected) {
+ t.Errorf("Contents = %v, want %v", buf, expected)
+ }
+
+ // Second file
+ hdr, err = tr.Next()
+ if err != nil || hdr == nil {
+ t.Fatalf("Didn't get second file: %v", err)
+ }
+ buf = make([]byte, 6)
+ if _, err := io.ReadFull(tr, buf); err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if expected := []byte("Google"); !bytes.Equal(buf, expected) {
+ t.Errorf("Contents = %v, want %v", buf, expected)
+ }
+}
+
+func TestIncrementalRead(t *testing.T) {
+ test := gnuTarTest
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ tr := NewReader(f)
+
+ headers := test.headers
+ cksums := test.cksums
+ nread := 0
+
+ // loop over all files
+ for ; ; nread++ {
+ hdr, err := tr.Next()
+ if hdr == nil || err == io.EOF {
+ break
+ }
+
+ // check the header
+ if !reflect.DeepEqual(*hdr, *headers[nread]) {
+ t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
+ *hdr, headers[nread])
+ }
+
+		// read file contents in little chunks until EOF,
+ // checksumming all the way
+ h := md5.New()
+ rdbuf := make([]uint8, 8)
+ for {
+ nr, err := tr.Read(rdbuf)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Errorf("Read: unexpected error %v\n", err)
+ break
+ }
+ h.Write(rdbuf[0:nr])
+ }
+ // verify checksum
+ have := fmt.Sprintf("%x", h.Sum(nil))
+ want := cksums[nread]
+ if want != have {
+ t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
+ }
+ }
+ if nread != len(headers) {
+ t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
+ }
+}
+
+func TestNonSeekable(t *testing.T) {
+ test := gnuTarTest
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ type readerOnly struct {
+ io.Reader
+ }
+ tr := NewReader(readerOnly{f})
+ nread := 0
+
+ for ; ; nread++ {
+ _, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ }
+
+ if nread != len(test.headers) {
+ t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread)
+ }
+}
+
+func TestParsePAXHeader(t *testing.T) {
+ paxTests := [][3]string{
+ {"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
+		{"a", "a=name", "9 a=name\n"},  // Test case involving multiple acceptable lengths
+ {"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}}
+ for _, test := range paxTests {
+ key, expected, raw := test[0], test[1], test[2]
+ reader := bytes.NewReader([]byte(raw))
+ headers, err := parsePAX(reader)
+ if err != nil {
+ t.Errorf("Couldn't parse correctly formatted headers: %v", err)
+ continue
+ }
+ if strings.EqualFold(headers[key], expected) {
+ t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected)
+ continue
+ }
+ trailer := make([]byte, 100)
+ n, err := reader.Read(trailer)
+ if err != io.EOF || n != 0 {
+ t.Error("Buffer wasn't consumed")
+ }
+ }
+ badHeader := bytes.NewReader([]byte("3 somelongkey="))
+ if _, err := parsePAX(badHeader); err != ErrHeader {
+ t.Fatal("Unexpected success when parsing bad header")
+ }
+}
+
+func TestParsePAXTime(t *testing.T) {
+ // Some valid PAX time values
+ timestamps := map[string]time.Time{
+		"1350244992.023960108":  time.Unix(1350244992, 23960108), // The common case
+ "1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value
+ "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value
+ "1350244992": time.Unix(1350244992, 0), // Low precision value
+ }
+ for input, expected := range timestamps {
+ ts, err := parsePAXTime(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !ts.Equal(expected) {
+ t.Fatalf("Time parsing failure %s %s", ts, expected)
+ }
+ }
+}
+
+func TestMergePAX(t *testing.T) {
+ hdr := new(Header)
+ // Test a string, integer, and time based value.
+ headers := map[string]string{
+ "path": "a/b/c",
+ "uid": "1000",
+ "mtime": "1350244992.023960108",
+ }
+ err := mergePAX(hdr, headers)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &Header{
+ Name: "a/b/c",
+ Uid: 1000,
+ ModTime: time.Unix(1350244992, 23960108),
+ }
+ if !reflect.DeepEqual(hdr, want) {
+ t.Errorf("incorrect merge: got %+v, want %+v", hdr, want)
+ }
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
new file mode 100644
index 0000000000..cf9cc79c59
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux dragonfly openbsd solaris
+
+package tar
+
+import (
+ "syscall"
+ "time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Atim.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Ctim.Unix())
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
new file mode 100644
index 0000000000..6f17dbe307
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd netbsd
+
+package tar
+
+import (
+ "syscall"
+ "time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Atimespec.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Ctimespec.Unix())
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
new file mode 100644
index 0000000000..cb843db4cf
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin dragonfly freebsd openbsd netbsd solaris
+
+package tar
+
+import (
+ "os"
+ "syscall"
+)
+
+func init() {
+ sysStat = statUnix
+}
+
+func statUnix(fi os.FileInfo, h *Header) error {
+ sys, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+ h.Uid = int(sys.Uid)
+ h.Gid = int(sys.Gid)
+ // TODO(bradfitz): populate username & group. os/user
+ // doesn't cache LookupId lookups, and lacks group
+ // lookup functions.
+ h.AccessTime = statAtime(sys)
+ h.ChangeTime = statCtime(sys)
+ // TODO(bradfitz): major/minor device numbers?
+ return nil
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
new file mode 100644
index 0000000000..ed333f3ea4
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
@@ -0,0 +1,284 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "path"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestFileInfoHeader(t *testing.T) {
+ fi, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ h, err := FileInfoHeader(fi, "")
+ if err != nil {
+ t.Fatalf("FileInfoHeader: %v", err)
+ }
+ if g, e := h.Name, "small.txt"; g != e {
+ t.Errorf("Name = %q; want %q", g, e)
+ }
+ if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
+ t.Errorf("Mode = %#o; want %#o", g, e)
+ }
+ if g, e := h.Size, int64(5); g != e {
+ t.Errorf("Size = %v; want %v", g, e)
+ }
+ if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
+ t.Errorf("ModTime = %v; want %v", g, e)
+ }
+ // FileInfoHeader should error when passing nil FileInfo
+ if _, err := FileInfoHeader(nil, ""); err == nil {
+ t.Fatalf("Expected error when passing nil to FileInfoHeader")
+ }
+}
+
+func TestFileInfoHeaderDir(t *testing.T) {
+ fi, err := os.Stat("testdata")
+ if err != nil {
+ t.Fatal(err)
+ }
+ h, err := FileInfoHeader(fi, "")
+ if err != nil {
+ t.Fatalf("FileInfoHeader: %v", err)
+ }
+ if g, e := h.Name, "testdata/"; g != e {
+ t.Errorf("Name = %q; want %q", g, e)
+ }
+ // Ignoring c_ISGID for golang.org/issue/4867
+ if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
+ t.Errorf("Mode = %#o; want %#o", g, e)
+ }
+ if g, e := h.Size, int64(0); g != e {
+ t.Errorf("Size = %v; want %v", g, e)
+ }
+ if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
+ t.Errorf("ModTime = %v; want %v", g, e)
+ }
+}
+
+func TestFileInfoHeaderSymlink(t *testing.T) {
+ h, err := FileInfoHeader(symlink{}, "some-target")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if g, e := h.Name, "some-symlink"; g != e {
+ t.Errorf("Name = %q; want %q", g, e)
+ }
+ if g, e := h.Linkname, "some-target"; g != e {
+ t.Errorf("Linkname = %q; want %q", g, e)
+ }
+}
+
+type symlink struct{}
+
+func (symlink) Name() string { return "some-symlink" }
+func (symlink) Size() int64 { return 0 }
+func (symlink) Mode() os.FileMode { return os.ModeSymlink }
+func (symlink) ModTime() time.Time { return time.Time{} }
+func (symlink) IsDir() bool { return false }
+func (symlink) Sys() interface{} { return nil }
+
+func TestRoundTrip(t *testing.T) {
+ data := []byte("some file contents")
+
+ var b bytes.Buffer
+ tw := NewWriter(&b)
+ hdr := &Header{
+ Name: "file.txt",
+ Uid: 1 << 21, // too big for 8 octal digits
+ Size: int64(len(data)),
+ ModTime: time.Now(),
+ }
+ // tar only supports second precision.
+ hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatalf("tw.WriteHeader: %v", err)
+ }
+ if _, err := tw.Write(data); err != nil {
+ t.Fatalf("tw.Write: %v", err)
+ }
+ if err := tw.Close(); err != nil {
+ t.Fatalf("tw.Close: %v", err)
+ }
+
+ // Read it back.
+ tr := NewReader(&b)
+ rHdr, err := tr.Next()
+ if err != nil {
+ t.Fatalf("tr.Next: %v", err)
+ }
+ if !reflect.DeepEqual(rHdr, hdr) {
+ t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
+ }
+ rData, err := ioutil.ReadAll(tr)
+ if err != nil {
+ t.Fatalf("Read: %v", err)
+ }
+ if !bytes.Equal(rData, data) {
+ t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
+ }
+}
+
+type headerRoundTripTest struct {
+ h *Header
+ fm os.FileMode
+}
+
+func TestHeaderRoundTrip(t *testing.T) {
+ golden := []headerRoundTripTest{
+ // regular file.
+ {
+ h: &Header{
+ Name: "test.txt",
+ Mode: 0644 | c_ISREG,
+ Size: 12,
+ ModTime: time.Unix(1360600916, 0),
+ Typeflag: TypeReg,
+ },
+ fm: 0644,
+ },
+ // hard link.
+ {
+ h: &Header{
+ Name: "hard.txt",
+ Mode: 0644 | c_ISLNK,
+ Size: 0,
+ ModTime: time.Unix(1360600916, 0),
+ Typeflag: TypeLink,
+ },
+ fm: 0644 | os.ModeSymlink,
+ },
+ // symbolic link.
+ {
+ h: &Header{
+ Name: "link.txt",
+ Mode: 0777 | c_ISLNK,
+ Size: 0,
+ ModTime: time.Unix(1360600852, 0),
+ Typeflag: TypeSymlink,
+ },
+ fm: 0777 | os.ModeSymlink,
+ },
+ // character device node.
+ {
+ h: &Header{
+ Name: "dev/null",
+ Mode: 0666 | c_ISCHR,
+ Size: 0,
+ ModTime: time.Unix(1360578951, 0),
+ Typeflag: TypeChar,
+ },
+ fm: 0666 | os.ModeDevice | os.ModeCharDevice,
+ },
+ // block device node.
+ {
+ h: &Header{
+ Name: "dev/sda",
+ Mode: 0660 | c_ISBLK,
+ Size: 0,
+ ModTime: time.Unix(1360578954, 0),
+ Typeflag: TypeBlock,
+ },
+ fm: 0660 | os.ModeDevice,
+ },
+ // directory.
+ {
+ h: &Header{
+ Name: "dir/",
+ Mode: 0755 | c_ISDIR,
+ Size: 0,
+ ModTime: time.Unix(1360601116, 0),
+ Typeflag: TypeDir,
+ },
+ fm: 0755 | os.ModeDir,
+ },
+ // fifo node.
+ {
+ h: &Header{
+ Name: "dev/initctl",
+ Mode: 0600 | c_ISFIFO,
+ Size: 0,
+ ModTime: time.Unix(1360578949, 0),
+ Typeflag: TypeFifo,
+ },
+ fm: 0600 | os.ModeNamedPipe,
+ },
+ // setuid.
+ {
+ h: &Header{
+ Name: "bin/su",
+ Mode: 0755 | c_ISREG | c_ISUID,
+ Size: 23232,
+ ModTime: time.Unix(1355405093, 0),
+ Typeflag: TypeReg,
+ },
+ fm: 0755 | os.ModeSetuid,
+ },
+ // setguid.
+ {
+ h: &Header{
+ Name: "group.txt",
+ Mode: 0750 | c_ISREG | c_ISGID,
+ Size: 0,
+ ModTime: time.Unix(1360602346, 0),
+ Typeflag: TypeReg,
+ },
+ fm: 0750 | os.ModeSetgid,
+ },
+ // sticky.
+ {
+ h: &Header{
+ Name: "sticky.txt",
+ Mode: 0600 | c_ISREG | c_ISVTX,
+ Size: 7,
+ ModTime: time.Unix(1360602540, 0),
+ Typeflag: TypeReg,
+ },
+ fm: 0600 | os.ModeSticky,
+ },
+ }
+
+ for i, g := range golden {
+ fi := g.h.FileInfo()
+ h2, err := FileInfoHeader(fi, "")
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if strings.Contains(fi.Name(), "/") {
+ t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
+ }
+ name := path.Base(g.h.Name)
+ if fi.IsDir() {
+ name += "/"
+ }
+ if got, want := h2.Name, name; got != want {
+ t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
+ }
+ if got, want := h2.Size, g.h.Size; got != want {
+ t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
+ }
+ if got, want := h2.Mode, g.h.Mode; got != want {
+ t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
+ }
+ if got, want := fi.Mode(), g.fm; got != want {
+ t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
+ }
+ if got, want := h2.ModTime, g.h.ModTime; got != want {
+ t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
+ }
+ if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
+ t.Errorf("i=%d: Sys didn't return original *Header", i)
+ }
+ }
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar
new file mode 100644
index 0000000000..fc899dc8dc
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar
new file mode 100644
index 0000000000..cc9cfaa33c
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar
new file mode 100644
index 0000000000..9bc24b6587
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
new file mode 100644
index 0000000000..b249bfc518
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
@@ -0,0 +1 @@
+Kilts \ No newline at end of file
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
new file mode 100644
index 0000000000..394ee3ecd0
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
@@ -0,0 +1 @@
+Google.com
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar
new file mode 100644
index 0000000000..59e2d4e604
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar
new file mode 100644
index 0000000000..29679d9a30
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar
new file mode 100644
index 0000000000..eb65fc9410
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar
new file mode 100644
index 0000000000..753e883ceb
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar
new file mode 100644
index 0000000000..e6d816ad07
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar
new file mode 100644
index 0000000000..9701950edd
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
new file mode 100644
index 0000000000..9ee9499297
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
@@ -0,0 +1,383 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - catch more errors (no first header, etc.)
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ ErrWriteTooLong = errors.New("archive/tar: write too long")
+ ErrFieldTooLong = errors.New("archive/tar: header field too long")
+ ErrWriteAfterClose = errors.New("archive/tar: write after close")
+ errNameTooLong = errors.New("archive/tar: name too long")
+ errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
+)
+
+// A Writer provides sequential writing of a tar archive in POSIX.1 format.
+// A tar archive consists of a sequence of files.
+// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
+// writing at most hdr.Size bytes in total.
+type Writer struct {
+ w io.Writer
+ err error
+ nb int64 // number of unwritten bytes for current file entry
+ pad int64 // amount of padding to write after current file entry
+ closed bool
+ usedBinary bool // whether the binary numeric field extension was used
+ preferPax bool // use pax header instead of binary numeric header
+}
+
+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
+
+// Flush finishes writing the current file (optional).
+func (tw *Writer) Flush() error {
+ if tw.nb > 0 {
+ tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
+ return tw.err
+ }
+
+ n := tw.nb + tw.pad
+ for n > 0 && tw.err == nil {
+ nr := n
+ if nr > blockSize {
+ nr = blockSize
+ }
+ var nw int
+ nw, tw.err = tw.w.Write(zeroBlock[0:nr])
+ n -= int64(nw)
+ }
+ tw.nb = 0
+ tw.pad = 0
+ return tw.err
+}
+
+// Write s into b, terminating it with a NUL if there is room.
+// If the value is too long for the field and allowPax is true add a paxheader record instead
+func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
+ needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
+ if needsPaxHeader {
+ paxHeaders[paxKeyword] = s
+ return
+ }
+ if len(s) > len(b) {
+ if tw.err == nil {
+ tw.err = ErrFieldTooLong
+ }
+ return
+ }
+ ascii := toASCII(s)
+ copy(b, ascii)
+ if len(ascii) < len(b) {
+ b[len(ascii)] = 0
+ }
+}
+
+// Encode x as an octal ASCII string and write it into b with leading zeros.
+func (tw *Writer) octal(b []byte, x int64) {
+ s := strconv.FormatInt(x, 8)
+ // leading zeros, but leave room for a NUL.
+ for len(s)+1 < len(b) {
+ s = "0" + s
+ }
+ tw.cString(b, s, false, paxNone, nil)
+}
+
+// Write x into b, either as octal or as binary (GNUtar/star extension).
+// If the value is too long for the field and writingPax is enabled both for the field and the add a paxheader record instead
+func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
+ // Try octal first.
+ s := strconv.FormatInt(x, 8)
+ if len(s) < len(b) {
+ tw.octal(b, x)
+ return
+ }
+
+ // If it is too long for octal, and pax is preferred, use a pax header
+ if allowPax && tw.preferPax {
+ tw.octal(b, 0)
+ s := strconv.FormatInt(x, 10)
+ paxHeaders[paxKeyword] = s
+ return
+ }
+
+ // Too big: use binary (big-endian).
+ tw.usedBinary = true
+ for i := len(b) - 1; x > 0 && i >= 0; i-- {
+ b[i] = byte(x)
+ x >>= 8
+ }
+ b[0] |= 0x80 // highest bit indicates binary format
+}
+
+var (
+ minTime = time.Unix(0, 0)
+ // There is room for 11 octal digits (33 bits) of mtime.
+ maxTime = minTime.Add((1<<33 - 1) * time.Second)
+)
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// WriteHeader calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+ return tw.writeHeader(hdr, true)
+}
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// WriteHeader calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+// As this method is called internally by writePax header to allow it to
+// suppress writing the pax header.
+func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
+ if tw.closed {
+ return ErrWriteAfterClose
+ }
+ if tw.err == nil {
+ tw.Flush()
+ }
+ if tw.err != nil {
+ return tw.err
+ }
+
+ // a map to hold pax header records, if any are needed
+ paxHeaders := make(map[string]string)
+
+ // TODO(shanemhansen): we might want to use PAX headers for
+ // subsecond time resolution, but for now let's just capture
+ // too long fields or non ascii characters
+
+ header := make([]byte, blockSize)
+ s := slicer(header)
+
+ // keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
+ pathHeaderBytes := s.next(fileNameSize)
+
+ tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)
+
+ // Handle out of range ModTime carefully.
+ var modTime int64
+ if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
+ modTime = hdr.ModTime.Unix()
+ }
+
+ tw.octal(s.next(8), hdr.Mode) // 100:108
+ tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
+ tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
+ tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136
+ tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity
+ s.next(8) // chksum (148:156)
+ s.next(1)[0] = hdr.Typeflag // 156:157
+
+ tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)
+
+ copy(s.next(8), []byte("ustar\x0000")) // 257:265
+ tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
+ tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
+ tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337
+ tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345
+
+ // keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
+ prefixHeaderBytes := s.next(155)
+ tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix
+
+ // Use the GNU magic instead of POSIX magic if we used any GNU extensions.
+ if tw.usedBinary {
+ copy(header[257:265], []byte("ustar \x00"))
+ }
+
+ _, paxPathUsed := paxHeaders[paxPath]
+ // try to use a ustar header when only the name is too long
+ if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
+ suffix := hdr.Name
+ prefix := ""
+ if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
+ var err error
+ prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
+ if err == nil {
+ // ok we can use a ustar long name instead of pax, now correct the fields
+
+ // remove the path field from the pax header. this will suppress the pax header
+ delete(paxHeaders, paxPath)
+
+ // update the path fields
+ tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
+ tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
+
+ // Use the ustar magic if we used ustar long names.
+ if len(prefix) > 0 {
+ copy(header[257:265], []byte("ustar\000"))
+ }
+ }
+ }
+ }
+
+ // The chksum field is terminated by a NUL and a space.
+ // This is different from the other octal fields.
+ chksum, _ := checksum(header)
+ tw.octal(header[148:155], chksum)
+ header[155] = ' '
+
+ if tw.err != nil {
+ // problem with header; probably integer too big for a field.
+ return tw.err
+ }
+
+ if allowPax {
+ for k, v := range hdr.Xattrs {
+ paxHeaders[paxXattr+k] = v
+ }
+ }
+
+ if len(paxHeaders) > 0 {
+ if !allowPax {
+ return errInvalidHeader
+ }
+ if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
+ return err
+ }
+ }
+ tw.nb = int64(hdr.Size)
+ tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
+
+ _, tw.err = tw.w.Write(header)
+ return tw.err
+}
+
+// writeUSTARLongName splits a USTAR long name hdr.Name.
+// name must be < 256 characters. errNameTooLong is returned
+// if hdr.Name can't be split. The splitting heuristic
+// is compatible with gnu tar.
+func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
+ length := len(name)
+ if length > fileNamePrefixSize+1 {
+ length = fileNamePrefixSize + 1
+ } else if name[length-1] == '/' {
+ length--
+ }
+ i := strings.LastIndex(name[:length], "/")
+ // nlen contains the resulting length in the name field.
+ // plen contains the resulting length in the prefix field.
+ nlen := len(name) - i - 1
+ plen := i
+ if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
+ err = errNameTooLong
+ return
+ }
+ prefix, suffix = name[:i], name[i+1:]
+ return
+}
+
+// writePaxHeader writes an extended pax header to the
+// archive.
+func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
+ // Prepare extended header
+ ext := new(Header)
+ ext.Typeflag = TypeXHeader
+ // Setting ModTime is required for reader parsing to
+ // succeed, and seems harmless enough.
+ ext.ModTime = hdr.ModTime
+ // The spec asks that we namespace our pseudo files
+ // with the current pid.
+ pid := os.Getpid()
+ dir, file := path.Split(hdr.Name)
+ fullName := path.Join(dir,
+ fmt.Sprintf("PaxHeaders.%d", pid), file)
+
+ ascii := toASCII(fullName)
+ if len(ascii) > 100 {
+ ascii = ascii[:100]
+ }
+ ext.Name = ascii
+ // Construct the body
+ var buf bytes.Buffer
+
+ for k, v := range paxHeaders {
+ fmt.Fprint(&buf, paxHeader(k+"="+v))
+ }
+
+ ext.Size = int64(len(buf.Bytes()))
+ if err := tw.writeHeader(ext, false); err != nil {
+ return err
+ }
+ if _, err := tw.Write(buf.Bytes()); err != nil {
+ return err
+ }
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// paxHeader formats a single pax record, prefixing it with the appropriate length
+func paxHeader(msg string) string {
+ const padding = 2 // Extra padding for space and newline
+ size := len(msg) + padding
+ size += len(strconv.Itoa(size))
+ record := fmt.Sprintf("%d %s\n", size, msg)
+ if len(record) != size {
+ // Final adjustment if adding size increased
+ // the number of digits in size
+ size = len(record)
+ record = fmt.Sprintf("%d %s\n", size, msg)
+ }
+ return record
+}
+
+// Write writes to the current entry in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// hdr.Size bytes are written after WriteHeader.
+func (tw *Writer) Write(b []byte) (n int, err error) {
+ if tw.closed {
+ err = ErrWriteTooLong
+ return
+ }
+ overwrite := false
+ if int64(len(b)) > tw.nb {
+ b = b[0:tw.nb]
+ overwrite = true
+ }
+ n, err = tw.w.Write(b)
+ tw.nb -= int64(n)
+ if err == nil && overwrite {
+ err = ErrWriteTooLong
+ return
+ }
+ tw.err = err
+ return
+}
+
+// Close closes the tar archive, flushing any unwritten
+// data to the underlying writer.
+func (tw *Writer) Close() error {
+ if tw.err != nil || tw.closed {
+ return tw.err
+ }
+ tw.Flush()
+ tw.closed = true
+ if tw.err != nil {
+ return tw.err
+ }
+
+ // trailer: two zero blocks
+ for i := 0; i < 2; i++ {
+ _, tw.err = tw.w.Write(zeroBlock)
+ if tw.err != nil {
+ break
+ }
+ }
+ return tw.err
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
new file mode 100644
index 0000000000..2b9ea658db
--- /dev/null
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
@@ -0,0 +1,433 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+ "testing/iotest"
+ "time"
+)
+
+type writerTestEntry struct {
+ header *Header
+ contents string
+}
+
+type writerTest struct {
+ file string // filename of expected output
+ entries []*writerTestEntry
+}
+
+var writerTests = []*writerTest{
+ // The writer test file was produced with this command:
+ // tar (GNU tar) 1.26
+ // ln -s small.txt link.txt
+ // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
+ {
+ file: "testdata/writer.tar",
+ entries: []*writerTestEntry{
+ {
+ header: &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ ModTime: time.Unix(1246508266, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Kilts",
+ },
+ {
+ header: &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ ModTime: time.Unix(1245217492, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Google.com\n",
+ },
+ {
+ header: &Header{
+ Name: "link.txt",
+ Mode: 0777,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 0,
+ ModTime: time.Unix(1314603082, 0),
+ Typeflag: '2',
+ Linkname: "small.txt",
+ Uname: "strings",
+ Gname: "strings",
+ },
+ // no contents
+ },
+ },
+ },
+ // The truncated test file was produced using these commands:
+ // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
+ // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
+ {
+ file: "testdata/writer-big.tar",
+ entries: []*writerTestEntry{
+ {
+ header: &Header{
+ Name: "tmp/16gig.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 16 << 30,
+ ModTime: time.Unix(1254699560, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ // fake contents
+ contents: strings.Repeat("\x00", 4<<10),
+ },
+ },
+ },
+ // This file was produced using gnu tar 1.17
+ // gnutar -b 4 --format=ustar (longname/)*15 + file.txt
+ {
+ file: "testdata/ustar.tar",
+ entries: []*writerTestEntry{
+ {
+ header: &Header{
+ Name: strings.Repeat("longname/", 15) + "file.txt",
+ Mode: 0644,
+ Uid: 0765,
+ Gid: 024,
+ Size: 06,
+ ModTime: time.Unix(1360135598, 0),
+ Typeflag: '0',
+ Uname: "shane",
+ Gname: "staff",
+ },
+ contents: "hello\n",
+ },
+ },
+ },
+}
+
+// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection.
+func bytestr(offset int, b []byte) string {
+ const rowLen = 32
+ s := fmt.Sprintf("%04x ", offset)
+ for _, ch := range b {
+ switch {
+ case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
+ s += fmt.Sprintf(" %c", ch)
+ default:
+ s += fmt.Sprintf(" %02x", ch)
+ }
+ }
+ return s
+}
+
+// Render a pseudo-diff between two blocks of bytes.
+func bytediff(a []byte, b []byte) string {
+ const rowLen = 32
+ s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
+ for offset := 0; len(a)+len(b) > 0; offset += rowLen {
+ na, nb := rowLen, rowLen
+ if na > len(a) {
+ na = len(a)
+ }
+ if nb > len(b) {
+ nb = len(b)
+ }
+ sa := bytestr(offset, a[0:na])
+ sb := bytestr(offset, b[0:nb])
+ if sa != sb {
+ s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
+ }
+ a = a[na:]
+ b = b[nb:]
+ }
+ return s
+}
+
+func TestWriter(t *testing.T) {
+testLoop:
+ for i, test := range writerTests {
+ expected, err := ioutil.ReadFile(test.file)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ continue
+ }
+
+ buf := new(bytes.Buffer)
+ tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
+ big := false
+ for j, entry := range test.entries {
+ big = big || entry.header.Size > 1<<10
+ if err := tw.WriteHeader(entry.header); err != nil {
+ t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
+ continue testLoop
+ }
+ if _, err := io.WriteString(tw, entry.contents); err != nil {
+ t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
+ continue testLoop
+ }
+ }
+ // Only interested in Close failures for the small tests.
+ if err := tw.Close(); err != nil && !big {
+ t.Errorf("test %d: Failed closing archive: %v", i, err)
+ continue testLoop
+ }
+
+ actual := buf.Bytes()
+ if !bytes.Equal(expected, actual) {
+ t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
+ i, bytediff(expected, actual))
+ }
+ if testing.Short() { // The second test is expensive.
+ break
+ }
+ }
+}
+
+func TestPax(t *testing.T) {
+ // Create an archive with a large name
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("os.Stat: %v", err)
+ }
+ // Force a PAX long name to be written
+ longName := strings.Repeat("ab", 100)
+ contents := strings.Repeat(" ", int(hdr.Size))
+ hdr.Name = longName
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if _, err = writer.Write([]byte(contents)); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Simple test to make sure PAX extensions are in effect
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
+ t.Fatal("Expected at least one PAX header to be written.")
+ }
+ // Test that we can get a long name back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Name != longName {
+ t.Fatal("Couldn't recover long file name")
+ }
+}
+
+func TestPaxSymlink(t *testing.T) {
+ // Create an archive with a large linkname
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdr, err := FileInfoHeader(fileinfo, "")
+ hdr.Typeflag = TypeSymlink
+ if err != nil {
+ t.Fatalf("os.Stat:1 %v", err)
+ }
+ // Force a PAX long linkname to be written
+ longLinkname := strings.Repeat("1234567890/1234567890", 10)
+ hdr.Linkname = longLinkname
+
+ hdr.Size = 0
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Simple test to make sure PAX extensions are in effect
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
+ t.Fatal("Expected at least one PAX header to be written.")
+ }
+ // Test that we can get a long name back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Linkname != longLinkname {
+ t.Fatal("Couldn't recover long link name")
+ }
+}
+
+func TestPaxNonAscii(t *testing.T) {
+ // Create an archive with non ascii. These should trigger a pax header
+ // because pax headers have a defined utf-8 encoding.
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("os.Stat:1 %v", err)
+ }
+
+ // some sample data
+ chineseFilename := "文件名"
+ chineseGroupname := "組"
+ chineseUsername := "用戶名"
+
+ hdr.Name = chineseFilename
+ hdr.Gname = chineseGroupname
+ hdr.Uname = chineseUsername
+
+ contents := strings.Repeat(" ", int(hdr.Size))
+
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if _, err = writer.Write([]byte(contents)); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Simple test to make sure PAX extensions are in effect
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
+ t.Fatal("Expected at least one PAX header to be written.")
+ }
+ // Test that we can get a long name back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Name != chineseFilename {
+ t.Fatal("Couldn't recover unicode name")
+ }
+ if hdr.Gname != chineseGroupname {
+ t.Fatal("Couldn't recover unicode group")
+ }
+ if hdr.Uname != chineseUsername {
+ t.Fatal("Couldn't recover unicode user")
+ }
+}
+
+func TestPaxXattrs(t *testing.T) {
+ xattrs := map[string]string{
+ "user.key": "value",
+ }
+
+ // Create an archive with an xattr
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("os.Stat: %v", err)
+ }
+ contents := "Kilts"
+ hdr.Xattrs = xattrs
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if _, err = writer.Write([]byte(contents)); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Test that we can get the xattrs back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
+ t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
+ hdr.Xattrs, xattrs)
+ }
+}
+
+func TestPAXHeader(t *testing.T) {
+ medName := strings.Repeat("CD", 50)
+ longName := strings.Repeat("AB", 100)
+ paxTests := [][2]string{
+ {paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"},
+ {"a=b", "6 a=b\n"}, // Single digit length
+ {"a=names", "11 a=names\n"}, // Test case involving carries
+ {paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)},
+ {paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}}
+
+ for _, test := range paxTests {
+ key, expected := test[0], test[1]
+ if result := paxHeader(key); result != expected {
+ t.Fatalf("paxHeader: got %s, expected %s", result, expected)
+ }
+ }
+}
+
+func TestUSTARLongName(t *testing.T) {
+ // Create an archive with a path that failed to split with USTAR extension in previous versions.
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdr, err := FileInfoHeader(fileinfo, "")
+ hdr.Typeflag = TypeDir
+ if err != nil {
+ t.Fatalf("os.Stat:1 %v", err)
+ }
+ // Force a PAX long name to be written. The name was taken from a practical example
+ // that fails and replaced ever char through numbers to anonymize the sample.
+ longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
+ hdr.Name = longName
+
+ hdr.Size = 0
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Test that we can get a long name back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Name != longName {
+ t.Fatal("Couldn't recover long name")
+ }
+}