author    Sam Thursfield <sam.thursfield@codethink.co.uk>  2014-07-03 14:13:57 +0000
committer Sam Thursfield <sam@afuera.me.uk>                2014-09-05 13:00:20 +0000
commit    fdbba013130f028359baf8af4b7fb4171ea70bbf (patch)
tree      c6e755410e7d3f790bdd8aaea6ff9d12a7da5874
parent    149352b7dbdb955f79ced2a9f7364e8ebdb1a75f (diff)
download  morph-fdbba013130f028359baf8af4b7fb4171ea70bbf.tar.gz
more docker.write
-rw-r--r--  morphlib/exts/bgtunnel.py     3
-rw-r--r--  morphlib/exts/docker.write    291
2 files changed, 161 insertions, 133 deletions
diff --git a/morphlib/exts/bgtunnel.py b/morphlib/exts/bgtunnel.py
index 6aa79670..8fefbb83 100644
--- a/morphlib/exts/bgtunnel.py
+++ b/morphlib/exts/bgtunnel.py
@@ -248,7 +248,8 @@ class SSHTunnelForwarderThread(threading.Thread, UnicodeMagicMixin):
validate_ssh_cmd_exists(self.ssh_path)
# The path to the private key file to use
- self.identity_file = normalize_path(identity_file or '') or None
+ self.identity_file = None
+ #normalize_path(identity_file or '') or None
super(SSHTunnelForwarderThread, self).__init__()
diff --git a/morphlib/exts/docker.write b/morphlib/exts/docker.write
index 508b3d9c..0e933cc3 100644
--- a/morphlib/exts/docker.write
+++ b/morphlib/exts/docker.write
@@ -17,17 +17,22 @@
'''A Morph deployment write extension for deploying to Docker hosts'''
+# bgtunnel: From https://github.com/jmagnusson/bgtunnel
+# (using Paramiko would be better, but it's not in Baserock yet. Its
+# demos/forward.py demonstrates what we need).
+import bgtunnel
+
+# From https://github.com/dotcloud/docker-py
+import docker
+
import cliapp
import contextlib
+import gzip
import logging
import os
-import shutil
-import subprocess
import sys
import tarfile
-import threading
-import time
import urlparse
import morphlib.writeexts
@@ -37,17 +42,39 @@ class DockerWriteExtension(morphlib.writeexts.WriteExtension):
'''Create a Docker image or container from a Morph deployment.
- THIS IS A PROTOTYPE. IT USES NETCAT TO SEND THE IMAGE WHICH IS RATHER
- FRAGILE AND SUCKY.
+ THIS IS A PROTOTYPE!!!
+
+ This extension assumes you are accessing a remote Docker service. It uses
+ the Docker remote API. The Docker remote API cannot be exposed directly
+ over TCP in a secure way, so instead you should configure the Docker
+ daemon on the server to listen on a local-only TCP socket. Morph will then
+ use SSH to forward this port securely while the write extension runs.
+
+ Docker doesn't listen on a TCP socket by default. Run the Docker service
+ as follows (2375 is an arbitrary number):
+
+ docker -d -H='tcp://127.0.0.1:2375'
The location command line argument is a network location that should be
accessible over SSH, followed by the name of the image to be created.
- docker://HOST/IMAGE
+ docker+ssh://[USER@]HOST:PORT/IMAGE
+
+ Where
+
+ * USER is your username on the remote Docker server
+ * HOST is the hostname of the remote Docker server
+ * PORT is the local-only TCP port on which Docker is listening (2375 in
+ the above example)
+ * IMAGE is the name of the image to create.
Docker image names commonly follow the form 'owner/name'. If
a VERSION_LABEL setting is supplied, this will be used to tag the image.
+ See also:
+ http://blog.tutum.co/2013/11/21/remote-and-secure-use-of-docker-api-with-python-part-1/
+ http://coreos.com/docs/launching-containers/building/customizing-docker/
+
'''
def process_args(self, args):
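The docker+ssh:// location described above maps directly onto Python's standard URL parsing, which is what the reworked parse_location() later in this diff relies on. A minimal sketch, assuming Python 2's urlparse module; the user, host, port and image name are illustrative values only:

import urlparse

# Illustrative location string in the documented format
location = 'docker+ssh://sam@droopy:2375/baserock/devel-system'

x = urlparse.urlparse(location)
user, host, port, image_name = x.username, x.hostname, x.port, x.path[1:]

print user, host, port, image_name
# -> sam droopy 2375 baserock/devel-system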
@@ -56,13 +83,79 @@ class DockerWriteExtension(morphlib.writeexts.WriteExtension):
temp_root, location = args
- # config parameters:
- # image name
-
- docker_host, image_name = self.parse_location(location)
-
- self.import_image(temp_root, docker_host, image_name)
-
+ if not location.startswith('docker+ssh://'):
+ raise cliapp.AppException(
+ 'Sorry, currently this extension only supports remote '
+ 'access to Docker using a port forwarded by SSH.')
+
+ user, host, port, image_name = self.parse_location(location)
+
+ # FIXME: is the tunnel cleaned up? do we need a 'with' ?
+ self.status(msg='Connecting to Docker service at %s:%s' % (host, port))
+ docker_client = self.create_docker_client_with_remote_ssh_tunnel(
+ user, host, port)
+
+ tar_read_fd, tar_write_fd = os.pipe()
+
+ tar_read_fileobj = os.fdopen(tar_read_fd, 'r')
+
+ print docker_client.info()
+
+ # FIXME: hack! The docker-py library should let us pass in a fileobj and
+ # have it handle buffering automatically, i.e. this hack should be
+ # sent upstream as an improvement instead. Still, it's kind of cool
+ # that Python enables such easy workarounds!
+ #
+ # For reference, the Ruby client can already do this:
+ # https://github.com/swipely/docker-api/blob/master/lib/docker/image.rb
+ import_url = docker_client._url('/images/create')
+
+ logging.debug('Open tar write FD')
+ tar_write_fileobj = os.fdopen(tar_write_fd, 'w')
+
+ logging.debug('Create tar thread')
+ tar_bytes = 0
+ import threading
+ tar_thread = threading.Thread(
+ target=self.write_system_as_tar, args=[temp_root, tar_write_fileobj])
+ tar_thread.start()
+ print tar_thread
+ print tar_thread.is_alive()
+
+ import select
+ def batch_fileobj(fileobj, batch_size):
+ '''Split a fileobj up into batches of at most 'batch_size' bytes.'''
+ i = 0
+ # This is hard, we need to signal end ...
+ while True:
+ data = fileobj.read(batch_size)
+ yield data
+ print "End of fileobj"
+ yield []
+ print "Yielded None, called again ..."
+
+ #logging.debug('Prepare request...')
+ #import_request_prepped = docker_client.prepare_request(import_request)
+ logging.debug('Send request...')
+ # FOR SOME REASON THIS SEEMS NEVER TO EXIT!
+
+ #docker_client.send(import_request_prepped)
+ docker_client.post(
+ import_url,
+ data=batch_fileobj(tar_read_fileobj, 10240),
+ params={
+ 'fromSrc': '-',
+ 'repo': image_name
+ },
+ headers = {
+ 'Content-Type': 'application/tar',
+ 'Transfer-Encoding': 'chunked',
+ }
+ )
+
+ print "OK! Wow, that surely didn't actually work."
+
+ ###
autostart = self.get_environment_boolean('AUTOSTART')
self.status(
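As the comments above note, the batch_fileobj generator never signals end-of-file, which is one likely reason the chunked POST above never seems to finish. A minimal sketch of a generator that does terminate, assuming the same read-and-yield approach over the tar pipe:

def batch_fileobj(fileobj, batch_size):
    '''Yield successive chunks of 'fileobj', stopping at end of file.'''
    while True:
        data = fileobj.read(batch_size)
        if not data:
            # read() returns '' once the writing end of the pipe is closed,
            # so stop iterating and let the chunked request finish.
            return
        yield data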
@@ -73,124 +166,58 @@ class DockerWriteExtension(morphlib.writeexts.WriteExtension):
'''Parse the location argument to get relevant data.'''
x = urlparse.urlparse(location)
- return x.netloc, x.path[1:]
-
- def import_image(self, fs_root, ssh_host, image_name):
- '''Transfer disk image to a Docker image on a remote host.
-
- This is currently done using SSH and netcat, rather than the Docker
- remote API. While the Docker daemon can be bound directly to a TCP
- port, this socket provides root access on the host for anyone that
- can access that port.
-
- '''
-
- self.status(msg='Transferring disk image')
-
- port = '2222'
-
- tarpipe_read, tarpipe_write = os.pipe()
-
- def create_tar():
- try:
- # using tarfile.TarFile.gzopen() and passing compresslevel=1
- # seems to result in compresslevel=9 anyway. That's completely
- # unusable on ARM CPUs so it's important to force
- # compresslevel=1 or something low.
- import gzip
- gzstream = gzip.GzipFile(
- mode='wb',
- compresslevel=1,
- fileobj=os.fdopen(tarpipe_write, 'w'))
- tar = tarfile.TarFile.gzopen(
- name='docker.write-temp',
- mode='w',
- compresslevel=1,
- fileobj=gzstream)
- logging.debug("Creating tar of rootfs")
- tar.add(fs_root, recursive=True)
- tar.close()
- logging.debug('Tar complete')
- except IOError as e:
- # Most probably due to SIGPIPE due to the send process
- # dying.
- logging.debug('Writing image data failed due to %s', e)
-
- @contextlib.contextmanager
- def send():
- hostname = ssh_host.split('@')[-1]
- tarpipe_read_file = os.fdopen(tarpipe_read, 'r')
- process = subprocess.Popen(
- ['nc', hostname, port],
- stdin=tarpipe_read_file,
- stderr=subprocess.PIPE)
- try:
- yield process
- except BaseException as e:
- if process.poll() is None:
- logging.debug('Killing send process due to %s', e)
- process.kill()
- raise
- else:
- process.terminate()
- finally:
- process.wait()
- tarpipe_read_file.close()
-
- @contextlib.contextmanager
- def receive():
- # Open subprocess to pipe the tar file using netcat
- receiver_cmd = cliapp.shell_quote(
- 'nc -l -p %s > /tmp/sam-docker.img' % port)
- #'nc -l -p %s | sudo docker import - %s' % (port, image_name))
- logging.debug('Runcmd: %s', receiver_cmd)
- # -t just so I can run sudo at the other end for now
- process = subprocess.Popen(
- ['ssh', '-oNumberOfPasswordPrompts=0', ssh_host, 'sh', '-c', receiver_cmd],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- try:
- yield process
- except BaseException as e:
- if process.poll() is None:
- logging.debug('Killing receive process due to %s', e)
- process.kill()
- raise
- finally:
- process.wait()
-
- with receive() as receive_process:
- # Oh god! WAit for the netcat process to start ... we hope.
- time.sleep(1)
- with send() as send_process:
- tar_thread = threading.Thread(
- name='tar-create', target=create_tar)
- print "Starting to create the tar ..."
- tar_thread.start()
- while tar_thread.is_alive():
- time.sleep(1)
- print "Asleep"
-
- code = receive_process.poll()
- if code is not None:
- output = receive_process.stderr.read()
- output += '\n' + receive_process.stdout.read()
- raise cliapp.AppException(
- 'Receive process exited with code %i, output %s' %
- (code, output))
-
- code = send_process.poll()
- if code is not None:
- output = send_process.stderr.read()
- raise cliapp.AppException(
- 'Send process exited with code %i, output %s' %
- (code, output))
-
- # FIXME: make path relative using filter
- # You can't do this, of course, because the buffer of the pipe gets
- # full ....
- # What if you did a netcat locally to test??? That's a good idea!
-
- print "OK!"
+ return x.username, x.hostname, x.port, x.path[1:]
+
+ def create_docker_client_with_remote_ssh_tunnel(self, user, host, port):
+ # Taken from: https://gist.github.com/hamiltont/10950399
+ # Local bind port is randomly chosen.
+
+ #tunnel = bgtunnel.open(
+ # ssh_user=user,
+ # ssh_address=host,
+ # host_port=port,
+ # expect_hello=False,
+ # # Block for 5 seconds then fail
+ # timeout=5,
+ # # Work around 'TypeError: must be encoded string without NULL
+ # # bytes, not str'. This is due to a bug in bgtunnel where it
+ # # fetches the SSH path as a Unicode string, then passes it to
+ # # shlex.split() which returns something horrid. Should be
+ # # fixed and the patch sent upstream.
+ # ssh_path=str('/usr/bin/ssh'))
+
+ #docker_client = docker.Client(
+ # base_url='http://127.0.0.1:%d' % tunnel.bind_port)
+
+ # FIXME: bgtunnel seems broken, do this manually for now in a separate
+ # terminal:
+ # /usr/bin/ssh -T -p 22 -L 127.0.0.1:57714:127.0.0.1:2375 sam@droopy
+
+ docker_client = docker.Client(
+ base_url='http://127.0.0.1:57714')
+
+ return docker_client
+
+ def write_system_as_tar(self, fs_root, fileobj):
+ # Using tarfile.TarFile.gzopen() and passing compresslevel=1
+ # seems to result in compresslevel=9 anyway. That's completely
+ # unusable on ARM CPUs so it's important to force
+ # compresslevel=1 or something low.
+ logging.debug('Writing system as a tar!')
+ #gzip_stream = gzip.GzipFile(
+ # mode='wb',
+ # compresslevel=1,
+ # fileobj=fileobj)
+ tar_stream = tarfile.TarFile.gzopen(
+ name='docker.write-temp',
+ mode='w',
+ compresslevel=1,
+ fileobj=fileobj)#gzip_stream)
+ logging.debug("Creating tar of rootfs")
+ tar_stream.add(fs_root, recursive=True)
+ tar_stream.close()
+ logging.debug('Tar complete')
+ tar_finished = True
DockerWriteExtension().run()
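The compresslevel workaround described in write_system_as_tar() amounts to compressing the stream ourselves rather than relying on tarfile.TarFile.gzopen(): wrap the output file object in a GzipFile at level 1 and write a plain, uncompressed TarFile into it. A minimal sketch of that wiring, assuming a writable file object and a root directory:

import gzip
import tarfile

def write_system_as_tar(fs_root, fileobj):
    # Compress at level 1 explicitly; level 9 is unusably slow on ARM CPUs.
    gzip_stream = gzip.GzipFile(mode='wb', compresslevel=1, fileobj=fileobj)
    # An uncompressed tar written into the GzipFile gives a .tar.gz stream
    # whose compression level is under our control.
    tar_stream = tarfile.TarFile(name='docker.write-temp', mode='w',
                                 fileobj=gzip_stream)
    tar_stream.add(fs_root, recursive=True)
    tar_stream.close()
    gzip_stream.close()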