author      Adam Coldrick <adam.coldrick@codethink.co.uk>  2015-02-03 17:40:29 +0000
committer   Richard Maw <richard.maw@codethink.co.uk>      2015-04-22 10:06:51 +0000
commit      e258076f555ef5e66aa5888cbfc23bb40e50e72b (patch)
tree        e9ccd0f282993c86a69627214e33e87dc898b7f5
parent      aa6dfcbb70c03dfeb3f9af02283aa1ab83667162 (diff)
Use OSTree for hardlink and artifact cache and a CoW unionfs to make system artifacts faster (baserock/richardmaw/ostree-squash)
This replaces the artifact cache and the hardlink cache with an OSTree repository, which is a great performance improvement when the cache directory and temporary directory are on the same filesystem. Additionally it can de-duplicate file contents.

When we construct system artifacts and deploy them, the staging area needs to be writable, so OSTree on its own is insufficient, as its hardlinks require the root to be kept read-only. To handle this we use either the in-kernel overlayfs or unionfs-fuse, though there is no automatic fall-back and the choice needs to be specified manually.

To support distributed building, the artifact cache is extended to support an OSTree repository. Unfortunately cross-bootstrap is not expected to work with these changes at this point in time.

IMPORTANT NOTE: We are well aware that this patch is too large to be comprehensible. We intend to revert and apply a cleaned-up series when it is ready.

Change-Id: I693bb752500dab3c6db3b97393689239ae7071a8
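As a rough illustration of the settings this change introduces (the paths and values below are examples, not part of the patch), a cliapp-style morph configuration selecting the union filesystem and the OSTree repo mode might look like this:

    [config]
    # Keep cachedir and tempdir on the same filesystem for good performance
    cachedir = /src/cache
    tempdir = /src/tmp
    # 'overlayfs' (in-kernel) or 'unionfs-fuse'; there is no automatic fall-back
    union-filesystem = overlayfs
    # 'bare' for local builds; 'archive_z2' if other machines will pull from this cache
    ostree-repo-mode = bare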
-rwxr-xr-x  morph-cache-server                                 | 124
-rw-r--r--  morphlib/__init__.py                               |   2
-rw-r--r--  morphlib/app.py                                    |  30
-rw-r--r--  morphlib/bins.py                                   |  59
-rw-r--r--  morphlib/bins_tests.py                             |  98
-rw-r--r--  morphlib/buildcommand.py                           |  19
-rw-r--r--  morphlib/builder.py                                | 131
-rw-r--r--  morphlib/builder_tests.py                          |  18
-rw-r--r--  morphlib/fsutils.py                                |  24
-rw-r--r--  morphlib/localartifactcache.py                     |  16
-rw-r--r--  morphlib/ostree.py                                 | 178
-rw-r--r--  morphlib/ostreeartifactcache.py                    | 301
-rw-r--r--  morphlib/plugins/build_plugin.py                   |   5
-rw-r--r--  morphlib/plugins/deploy_plugin.py                  | 120
-rw-r--r--  morphlib/plugins/distbuild_plugin.py               |  18
-rw-r--r--  morphlib/plugins/gc_plugin.py                      |   7
-rw-r--r--  morphlib/plugins/ostree_artifacts_plugin.py        | 169
-rw-r--r--  morphlib/remoteartifactcache.py                    |  25
-rw-r--r--  morphlib/stagingarea.py                            |  87
-rw-r--r--  morphlib/stagingarea_tests.py                      |  58
-rw-r--r--  morphlib/util.py                                   |  64
-rwxr-xr-x  ostree-repo-server                                 |  15
-rwxr-xr-x  tests.build/build-chunk-writes-log.script          |  16
-rwxr-xr-x  tests.build/build-stratum-with-submodules.script   |  13
-rw-r--r--  tests.build/build-stratum-with-submodules.stdout   |   3
-rwxr-xr-x  tests.build/build-system-autotools.script          |  12
-rw-r--r--  tests.build/build-system-autotools.stdout          |   4
-rwxr-xr-x  tests.build/build-system-cmake.script              |  13
-rw-r--r--  tests.build/build-system-cmake.stdout              |   2
-rwxr-xr-x  tests.build/build-system-cpan.script               |  13
-rwxr-xr-x  tests.build/build-system-python-distutils.script   |  17
-rw-r--r--  tests.build/build-system-python-distutils.stdout   |  10
-rwxr-xr-x  tests.build/build-system.script                    |  13
-rw-r--r--  tests.build/build-system.stdout                    |   5
-rwxr-xr-x  tests.build/cross-bootstrap.script                 |   3
-rwxr-xr-x  tests.build/morphless-chunks.script                |  14
-rwxr-xr-x  tests.build/prefix.script                          |  13
-rwxr-xr-x  tests.build/rebuild-cached-stratum.script          |   4
-rw-r--r--  tests.build/rebuild-cached-stratum.stdout          |  12
-rw-r--r--  without-test-modules                               |   3
-rw-r--r--  yarns/architecture.yarn                            |  16
-rw-r--r--  yarns/implementations.yarn                         |  19
-rw-r--r--  yarns/morph.shell-lib                              |   3
43 files changed, 1310 insertions, 466 deletions
diff --git a/morph-cache-server b/morph-cache-server
index 007cfbe8..938ecb1f 100755
--- a/morph-cache-server
+++ b/morph-cache-server
@@ -1,6 +1,6 @@
#!/usr/bin/env python
#
-# Copyright (C) 2013, 2014-2015 Codethink Limited
+# Copyright (C) 2013-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -27,6 +27,9 @@ import shutil
from bottle import Bottle, request, response, run, static_file
from flup.server.fcgi import WSGIServer
from morphcacheserver.repocache import RepoCache
+from morphlib.artifactcachereference import ArtifactCacheReference
+from morphlib.ostreeartifactcache import OSTreeArtifactCache
+from morphlib.remoteartifactcache import RemoteArtifactCache
defaults = {
@@ -34,6 +37,8 @@ defaults = {
'bundle-dir': '/var/cache/morph-cache-server/bundles',
'artifact-dir': '/var/cache/morph-cache-server/artifacts',
'port': 8080,
+ 'ostree-port': 12324,
+ 'ostree-repo-mode': 'archive_z2',
}
@@ -44,6 +49,10 @@ class MorphCacheServer(cliapp.Application):
'port to listen on',
metavar='PORTNUM',
default=defaults['port'])
+ self.settings.integer(['ostree-port'],
+ 'port for accessing the ostree repo for '
+ 'the artifact cache',
+ default=defaults['ostree-port'])
self.settings.string(['port-file'],
'write port number to FILE',
metavar='FILE',
@@ -60,6 +69,11 @@ class MorphCacheServer(cliapp.Application):
'path to the artifact cache directory',
metavar='PATH',
default=defaults['artifact-dir'])
+ self.settings.string(['ostree-repo-mode'],
+ 'mode of the ostree artifact cache - either '
+ 'archive_z2 or bare, for servers and users '
+ 'respectively.',
+ default=defaults['ostree-repo-mode'])
self.settings.boolean(['direct-mode'],
'cache directories are directly managed')
self.settings.boolean(['enable-writes'],
@@ -68,50 +82,20 @@ class MorphCacheServer(cliapp.Application):
'runs a fcgi-server',
default=True)
-
- def _fetch_artifact(self, url, filename):
- in_fh = None
- try:
- in_fh = urllib2.urlopen(url)
- with open(filename, "w") as localtmp:
- shutil.copyfileobj(in_fh, localtmp)
- in_fh.close()
- except Exception, e:
- if in_fh is not None:
- in_fh.close()
- raise
- else:
- if in_fh is not None:
- in_fh.close()
- return os.stat(filename)
-
def _fetch_artifacts(self, server, cacheid, artifacts):
ret = {}
+ cache = OSTreeArtifactCache(self.settings['artifact-dir'],
+ mode=self.settings['ostree-repo-mode'])
+ remote = RemoteArtifactCache('http://%s/' % server)
try:
for artifact in artifacts:
- artifact_name = "%s.%s" % (cacheid, artifact)
- tmpname = os.path.join(self.settings['artifact-dir'],
- ".dl.%s" % artifact_name)
- url = "http://%s/1.0/artifacts?filename=%s" % (
- server, urllib.quote(artifact_name))
- stinfo = self._fetch_artifact(url, tmpname)
- ret[artifact_name] = {
- "size": stinfo.st_size,
- "used": stinfo.st_blocks * 512,
- }
+ logging.debug('%s.%s' % (cacheid, artifact))
+ cache_artifact = ArtifactCacheReference(
+ '.'.join((cacheid, artifact)))
+ cache.copy_from_remote(cache_artifact, remote)
except Exception, e:
- for artifact in ret.iterkeys():
- os.unlink(os.path.join(self.settings['artifact-dir'],
- ".dl.%s" % artifact))
+ logging.debug('OSTree raised an Exception: %s' % e)
raise
-
- for artifact in ret.iterkeys():
- tmpname = os.path.join(self.settings['artifact-dir'],
- ".dl.%s" % artifact)
- artifilename = os.path.join(self.settings['artifact-dir'],
- artifact)
- os.rename(tmpname, artifilename)
-
return ret
@@ -172,7 +156,6 @@ class MorphCacheServer(cliapp.Application):
response.set_header('Cache-Control', 'no-cache')
artifacts = artifacts.split(",")
return self._fetch_artifacts(host, cacheid, artifacts)
-
except Exception, e:
response.status = 500
logging.debug('%s' % e)
@@ -298,11 +281,38 @@ class MorphCacheServer(cliapp.Application):
@app.get('/artifacts')
def artifact():
basename = self._unescape_parameter(request.query.filename)
- filename = os.path.join(self.settings['artifact-dir'], basename)
- if os.path.exists(filename):
- return static_file(basename,
- root=self.settings['artifact-dir'],
- download=True)
+ cache = OSTreeArtifactCache(self.settings['artifact-dir'],
+ mode=self.settings['ostree-repo-mode'])
+ try:
+ cachekey, kind, name = basename.split('.', 2)
+ a = ArtifactCacheReference(basename)
+ except ValueError:
+ # We can't split the name as expected, we want metadata
+ cachekey, metadata_name = basename.split('.', 1)
+ logging.debug('Looking for artifact metadata: %s'
+ % metadata_name)
+ a = ArtifactCacheReference(cachekey)
+ if cache.has_artifact_metadata(a, metadata_name):
+ filename = cache._artifact_metadata_filename(
+ a, metadata_name)
+ return static_file(basename,
+ root=self.settings['artifact-dir'],
+ download=True)
+ else:
+ response.status = 404
+ logging.debug('artifact metadata %s does not exist'
+ % metadata_name)
+
+ if cache.has(a):
+ if kind == 'stratum':
+ logging.debug('Stratum %s is in the cache' % name)
+ return static_file(basename,
+ root=self.settings['artifact-dir'],
+ download=True)
+ else:
+ response.status = 500
+ logging.error('use `ostree pull` to get non-stratum '
+ 'artifacts')
else:
response.status = 404
logging.debug('artifact %s does not exist' % basename)
@@ -318,24 +328,34 @@ class MorphCacheServer(cliapp.Application):
logging.debug('Received a POST request for /artifacts')
- for artifact in artifacts:
- if artifact.startswith('/'):
+ cache = OSTreeArtifactCache(self.settings['artifact-dir'],
+ mode=self.settings['ostree-repo-mode'])
+ for basename in artifacts:
+ if basename.startswith('/'):
response.status = 500
logging.error("%s: artifact name cannot start with a '/'"
- % artifact)
+ % basename)
return
- filename = os.path.join(self.settings['artifact-dir'],
- artifact)
- results[artifact] = os.path.exists(filename)
+ a = ArtifactCacheReference(basename)
+ results[basename] = cache.has(a)
- if results[artifact]:
+ if results[basename]:
logging.debug('%s is in the cache', artifact)
else:
logging.debug('%s is NOT in the cache', artifact)
return results
+ @app.get('/method')
+ def method():
+ return 'ostree'
+
+ @app.get('/ostreeinfo')
+ def ostree_info():
+ logging.debug('returning %s' % self.settings['ostree-port'])
+ return str(self.settings['ostree-port'])
+
root = Bottle()
root.mount(app, '/1.0')
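
The two new endpoints above let clients detect an OSTree-capable cache server before deciding how to fetch artifacts. A minimal sketch, along the lines of the check that distbuild_plugin.py performs further down in this patch (host, port and variable names here are examples, not part of the commit):

    # Hypothetical Python 2 client for the /1.0/method and /1.0/ostreeinfo
    # endpoints added above.
    import urllib2
    import urlparse

    base_url = 'http://cache.example.com:8080/'
    method = urllib2.urlopen(urlparse.urljoin(base_url, '/1.0/method')).read()
    if method == 'ostree':
        # The server reports which port its OSTree repo is exported on.
        port = urllib2.urlopen(
            urlparse.urljoin(base_url, '/1.0/ostreeinfo')).read()
        print 'OSTree artifact repo is served on port %s' % port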
diff --git a/morphlib/__init__.py b/morphlib/__init__.py
index 0c9284d8..79e829a4 100644
--- a/morphlib/__init__.py
+++ b/morphlib/__init__.py
@@ -72,6 +72,8 @@ import morphologyfinder
import morphology
import morphloader
import morphset
+import ostree
+import ostreeartifactcache
import remoteartifactcache
import remoterepocache
import repoaliasresolver
diff --git a/morphlib/app.py b/morphlib/app.py
index 293b8517..2ec98dab 100644
--- a/morphlib/app.py
+++ b/morphlib/app.py
@@ -125,6 +125,22 @@ class Morph(cliapp.Application):
metavar='URL',
default=None,
group=group_advanced)
+ self.settings.string(['union-filesystem'],
+ 'filesystem used to provide "union filesystem" '
+ 'functionality when building and deploying. '
+ 'Only "overlayfs" and "unionfs-fuse" are '
+ 'supported at this time.',
+ default='overlayfs',
+ group=group_advanced)
+ self.settings.string(['ostree-repo-mode'],
+ 'Mode for OSTree artifact cache repository. If '
+ 'things will need to pull from your cache, this '
+ 'needs to be "archive_z2". Otherwise use '
+ '"bare". Note that archive_z2 will cause things '
+ 'involving the artifact cache (building and/or '
+ 'deploying) to be slow.',
+ default='bare',
+ group=group_advanced)
group_build = 'Build Options'
self.settings.integer(['max-jobs'],
@@ -159,12 +175,19 @@ class Morph(cliapp.Application):
'or /tmp because those are used internally '
'by things that cannot be on NFS, but '
'this setting can point at a directory in '
- 'NFS)',
+ 'NFS). If cachedir and tempdir are on separate '
+ 'filesystems, you will experience poor '
+ 'performance when building and deploying '
+ 'systems.',
metavar='DIR',
default=None,
group=group_storage)
self.settings.string(['cachedir'],
- 'cache git repositories and build results in DIR',
+ 'cache git repositories and build results in DIR.'
+ 'If cachedir and tempdir are on separate '
+ 'filesystems, you will experience poor '
+ 'performance when building and deploying '
+ 'systems.',
metavar='DIR',
group=group_storage,
default=defaults['cachedir'])
@@ -273,8 +296,7 @@ class Morph(cliapp.Application):
sys.exit(0)
tmpdir = self.settings['tempdir']
- for required_dir in (os.path.join(tmpdir, 'chunks'),
- os.path.join(tmpdir, 'staging'),
+ for required_dir in (os.path.join(tmpdir, 'staging'),
os.path.join(tmpdir, 'failed'),
os.path.join(tmpdir, 'deployments'),
self.settings['cachedir']):
diff --git a/morphlib/bins.py b/morphlib/bins.py
index 2e8ba0b3..c5bacc26 100644
--- a/morphlib/bins.py
+++ b/morphlib/bins.py
@@ -78,12 +78,8 @@ if sys.version_info < (2, 7, 3): # pragma: no cover
raise ExtractError("could not change owner")
tarfile.TarFile.chown = fixed_chown
-def create_chunk(rootdir, f, include, dump_memory_profile=None):
- '''Create a chunk from the contents of a directory.
-
- ``f`` is an open file handle, to which the tar file is written.
-
- '''
+def create_chunk(rootdir, chunkdir, include, dump_memory_profile=None):
+ '''Create a chunk from the contents of a directory.'''
dump_memory_profile = dump_memory_profile or (lambda msg: None)
@@ -91,31 +87,42 @@ def create_chunk(rootdir, f, include, dump_memory_profile=None):
# chunk artifact. This is useful to avoid problems from smallish
# clock skew. It needs to be recent enough, however, that GNU tar
# does not complain about an implausibly old timestamp.
- normalized_timestamp = 683074800
+ normalized_timestamp = (683074800, 683074800)
dump_memory_profile('at beginning of create_chunk')
-
- path_pairs = [(relname, os.path.join(rootdir, relname))
- for relname in include]
- tar = tarfile.open(fileobj=f, mode='w')
- for relname, filename in path_pairs:
+
+ def check_parent(name, paths):
+ parent = os.path.dirname(name)
+ if parent:
+ path = os.path.join(rootdir, parent)
+ if parent != rootdir and path not in paths:
+ paths.append(path)
+ check_parent(parent, paths)
+
+ def filter_contents(dirname, filenames):
+ paths = [os.path.join(rootdir, relname) for relname in include]
+ for name in include:
+ check_parent(name, paths)
+
+ return [f for f in filenames if os.path.join(dirname, f) not in paths]
+
+ logging.debug('Copying artifact into %s.' % chunkdir)
+ shutil.copytree(rootdir, chunkdir,
+ symlinks=True, ignore=filter_contents)
+
+ path_triplets = [(relname, os.path.join(chunkdir, relname),
+ os.path.join(rootdir, relname))
+ for relname in include]
+ for relname, filename, orig in path_triplets:
# Normalize mtime for everything.
- tarinfo = tar.gettarinfo(filename,
- arcname=relname)
- tarinfo.ctime = normalized_timestamp
- tarinfo.mtime = normalized_timestamp
- if tarinfo.isreg():
- with open(filename, 'rb') as f:
- tar.addfile(tarinfo, fileobj=f)
- else:
- tar.addfile(tarinfo)
- tar.close()
+ if not os.path.islink(filename):
+ os.utime(filename, normalized_timestamp)
- for relname, filename in reversed(path_pairs):
- if os.path.isdir(filename) and not os.path.islink(filename):
+ for relname, filename, orig in reversed(path_triplets):
+ if os.path.isdir(orig) and not os.path.islink(orig):
continue
else:
- os.remove(filename)
+ os.remove(orig)
dump_memory_profile('after removing in create_chunks')
@@ -209,7 +216,7 @@ def unpack_binary_from_file(f, dirname): # pragma: no cover
tf.close()
-def unpack_binary(filename, dirname):
+def unpack_binary(filename, dirname): # pragma: no cover
with open(filename, "rb") as f:
unpack_binary_from_file(f, dirname)
diff --git a/morphlib/bins_tests.py b/morphlib/bins_tests.py
index 3895680f..879aada4 100644
--- a/morphlib/bins_tests.py
+++ b/morphlib/bins_tests.py
@@ -78,11 +78,9 @@ class ChunkTests(BinsTest):
self.tempdir = tempfile.mkdtemp()
self.instdir = os.path.join(self.tempdir, 'inst')
self.chunk_file = os.path.join(self.tempdir, 'chunk')
- self.chunk_f = open(self.chunk_file, 'wb')
self.unpacked = os.path.join(self.tempdir, 'unpacked')
def tearDown(self):
- self.chunk_f.close()
shutil.rmtree(self.tempdir)
def populate_instdir(self):
@@ -108,109 +106,21 @@ class ChunkTests(BinsTest):
def create_chunk(self, includes):
self.populate_instdir()
- morphlib.bins.create_chunk(self.instdir, self.chunk_f, includes)
- self.chunk_f.flush()
-
- def unpack_chunk(self):
- os.mkdir(self.unpacked)
- morphlib.bins.unpack_binary(self.chunk_file, self.unpacked)
+ morphlib.bins.create_chunk(self.instdir, self.chunk_file, includes)
def test_empties_files(self):
self.create_chunk(['bin/foo', 'lib/libfoo.so'])
self.assertEqual([x for x, y in self.recursive_lstat(self.instdir)],
['.', 'bin', 'lib'])
- def test_creates_and_unpacks_chunk_exactly(self):
+ def test_creates_chunk_exactly(self):
self.create_chunk(['bin', 'bin/foo', 'lib', 'lib/libfoo.so'])
- self.unpack_chunk()
self.assertEqual(self.instdir_orig_files,
- self.recursive_lstat(self.unpacked))
+ self.recursive_lstat(self.chunk_file))
def test_uses_only_matching_names(self):
self.create_chunk(['bin/foo'])
- self.unpack_chunk()
- self.assertEqual([x for x, y in self.recursive_lstat(self.unpacked)],
+ self.assertEqual([x for x, y in self.recursive_lstat(self.chunk_file)],
['.', 'bin', 'bin/foo'])
self.assertEqual([x for x, y in self.recursive_lstat(self.instdir)],
['.', 'bin', 'lib', 'lib/libfoo.so'])
-
- def test_does_not_compress_artifact(self):
- self.create_chunk(['bin'])
- f = gzip.open(self.chunk_file)
- self.assertRaises(IOError, f.read)
- f.close()
-
-
-class ExtractTests(unittest.TestCase):
-
- def setUp(self):
- self.tempdir = tempfile.mkdtemp()
- self.instdir = os.path.join(self.tempdir, 'inst')
- self.unpacked = os.path.join(self.tempdir, 'unpacked')
-
- def tearDown(self):
- shutil.rmtree(self.tempdir)
-
- def create_chunk(self, callback):
- fh = StringIO.StringIO()
- os.mkdir(self.instdir)
- patterns = callback(self.instdir)
- morphlib.bins.create_chunk(self.instdir, fh, patterns)
- shutil.rmtree(self.instdir)
- fh.flush()
- fh.seek(0)
- return fh
-
- def test_extracted_files_replace_links(self):
- def make_linkfile(basedir):
- with open(os.path.join(basedir, 'babar'), 'w') as f:
- pass
- os.symlink('babar', os.path.join(basedir, 'bar'))
- return ['babar']
- linktar = self.create_chunk(make_linkfile)
-
- def make_file(basedir):
- with open(os.path.join(basedir, 'bar'), 'w') as f:
- pass
- return ['bar']
- filetar = self.create_chunk(make_file)
-
- os.mkdir(self.unpacked)
- morphlib.bins.unpack_binary_from_file(linktar, self.unpacked)
- morphlib.bins.unpack_binary_from_file(filetar, self.unpacked)
- mode = os.lstat(os.path.join(self.unpacked, 'bar')).st_mode
- self.assertTrue(stat.S_ISREG(mode))
-
- def test_extracted_dirs_keep_links(self):
- def make_usrlink(basedir):
- os.symlink('.', os.path.join(basedir, 'usr'))
- return ['usr']
- linktar = self.create_chunk(make_usrlink)
-
- def make_usrdir(basedir):
- os.mkdir(os.path.join(basedir, 'usr'))
- return ['usr']
- dirtar = self.create_chunk(make_usrdir)
-
- morphlib.bins.unpack_binary_from_file(linktar, self.unpacked)
- morphlib.bins.unpack_binary_from_file(dirtar, self.unpacked)
- mode = os.lstat(os.path.join(self.unpacked, 'usr')).st_mode
- self.assertTrue(stat.S_ISLNK(mode))
-
- def test_extracted_files_follow_links(self):
- def make_usrlink(basedir):
- os.symlink('.', os.path.join(basedir, 'usr'))
- return ['usr']
- linktar = self.create_chunk(make_usrlink)
-
- def make_usrdir(basedir):
- os.mkdir(os.path.join(basedir, 'usr'))
- with open(os.path.join(basedir, 'usr', 'foo'), 'w') as f:
- pass
- return ['usr', 'usr/foo']
- dirtar = self.create_chunk(make_usrdir)
-
- morphlib.bins.unpack_binary_from_file(linktar, self.unpacked)
- morphlib.bins.unpack_binary_from_file(dirtar, self.unpacked)
- mode = os.lstat(os.path.join(self.unpacked, 'foo')).st_mode
- self.assertTrue(stat.S_ISREG(mode))
diff --git a/morphlib/buildcommand.py b/morphlib/buildcommand.py
index fd5acdf5..a4670f0a 100644
--- a/morphlib/buildcommand.py
+++ b/morphlib/buildcommand.py
@@ -74,7 +74,8 @@ class BuildCommand(object):
This includes creating the directories on disk if they are missing.
'''
- return morphlib.util.new_artifact_caches(self.app.settings)
+ return morphlib.util.new_artifact_caches(
+ self.app.settings, status_cb=self.app.status)
def new_repo_caches(self):
return morphlib.util.new_repo_caches(self.app)
@@ -310,9 +311,8 @@ class BuildCommand(object):
self.build_source(source, build_env)
for a in artifacts:
- self.app.status(msg='%(kind)s %(name)s is cached at %(cachepath)s',
+ self.app.status(msg='%(kind)s %(name)s is cached.',
kind=source.morphology['kind'], name=a.name,
- cachepath=self.lac.artifact_filename(a),
chatty=(source.morphology['kind'] != "system"))
def build_source(self, source, build_env):
@@ -422,8 +422,10 @@ class BuildCommand(object):
# module into morphlib.remoteartififactcache first.
to_fetch = []
if not self.lac.has(artifact):
- to_fetch.append((self.rac.get(artifact),
- self.lac.put(artifact)))
+ self.app.status(
+ msg='Fetching to local cache: artifact %(name)s',
+ name=artifact.name)
+ self.lac.copy_from_remote(artifact, self.rac)
if artifact.source.morphology.needs_artifact_metadata_cached:
if not self.lac.has_artifact_metadata(artifact, 'meta'):
@@ -432,9 +434,6 @@ class BuildCommand(object):
self.lac.put_artifact_metadata(artifact, 'meta')))
if len(to_fetch) > 0:
- self.app.status(
- msg='Fetching to local cache: artifact %(name)s',
- name=artifact.name)
fetch_files(to_fetch)
def create_staging_area(self, build_env, use_chroot=True, extra_env={},
@@ -492,13 +491,13 @@ class BuildCommand(object):
if artifact.source.build_mode == 'bootstrap':
if not self.in_same_stratum(artifact.source, target_source):
continue
+
self.app.status(
msg='Installing chunk %(chunk_name)s from cache %(cache)s',
chunk_name=artifact.name,
cache=artifact.source.cache_key[:7],
chatty=True)
- handle = self.lac.get(artifact)
- staging_area.install_artifact(handle)
+ staging_area.install_artifact(self.lac, artifact)
if target_source.build_mode == 'staging':
morphlib.builder.ldconfig(self.app.runcmd, staging_area.dirname)
diff --git a/morphlib/builder.py b/morphlib/builder.py
index 1c016674..426c0ed0 100644
--- a/morphlib/builder.py
+++ b/morphlib/builder.py
@@ -28,7 +28,6 @@ import tempfile
import cliapp
import morphlib
-from morphlib.artifactcachereference import ArtifactCacheReference
from morphlib.util import error_message_for_containerised_commandline
import morphlib.gitversion
@@ -125,11 +124,7 @@ def ldconfig(runcmd, rootdir): # pragma: no cover
def download_depends(constituents, lac, rac, metadatas=None):
for constituent in constituents:
if not lac.has(constituent):
- source = rac.get(constituent)
- target = lac.put(constituent)
- shutil.copyfileobj(source, target)
- target.close()
- source.close()
+ lac.copy_from_remote(constituent, rac)
if metadatas is not None:
for metadata in metadatas:
if not lac.has_artifact_metadata(constituent, metadata):
@@ -246,28 +241,6 @@ class ChunkBuilder(BuilderBase):
'''Build chunk artifacts.'''
- def create_devices(self, destdir): # pragma: no cover
- '''Creates device nodes if the morphology specifies them'''
- morphology = self.source.morphology
- perms_mask = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
- if 'devices' in morphology and morphology['devices'] is not None:
- for dev in morphology['devices']:
- destfile = os.path.join(destdir, './' + dev['filename'])
- mode = int(dev['permissions'], 8) & perms_mask
- if dev['type'] == 'c':
- mode = mode | stat.S_IFCHR
- elif dev['type'] == 'b':
- mode = mode | stat.S_IFBLK
- else:
- raise IOError('Cannot create device node %s,'
- 'unrecognized device type "%s"'
- % (destfile, dev['type']))
- self.app.status(msg="Creating device node %s"
- % destfile)
- os.mknod(destfile, mode,
- os.makedev(dev['major'], dev['minor']))
- os.chown(destfile, dev['uid'], dev['gid'])
-
def build_and_cache(self): # pragma: no cover
with self.build_watch('overall-build'):
@@ -286,7 +259,6 @@ class ChunkBuilder(BuilderBase):
try:
self.get_sources(builddir)
self.run_commands(builddir, destdir, temppath, stdout)
- self.create_devices(destdir)
os.rename(temppath, logpath)
except BaseException as e:
@@ -459,13 +431,23 @@ class ChunkBuilder(BuilderBase):
extra_files += ['baserock/%s.meta' % chunk_artifact_name]
parented_paths = parentify(file_paths + extra_files)
- with self.local_artifact_cache.put(chunk_artifact) as f:
- self.write_metadata(destdir, chunk_artifact_name,
- parented_paths)
+ self.write_metadata(destdir, chunk_artifact_name,
+ parented_paths)
- self.app.status(msg='Creating chunk artifact %(name)s',
- name=chunk_artifact_name)
- morphlib.bins.create_chunk(destdir, f, parented_paths)
+ self.app.status(msg='Creating chunk artifact %(name)s',
+ name=chunk_artifact_name)
+ # TODO: This is not concurrency safe, bins.create_chunk will
+ # fail if tempdir already exists (eg if another build
+ # has created it).
+ tempdir = os.path.join(self.app.settings['tempdir'],
+ chunk_artifact.basename())
+ try:
+ morphlib.bins.create_chunk(destdir, tempdir,
+ parented_paths)
+ self.local_artifact_cache.put(tempdir, chunk_artifact)
+ finally:
+ if os.path.isdir(tempdir):
+ shutil.rmtree(tempdir)
built_artifacts.append(chunk_artifact)
for dirname, subdirs, files in os.walk(destdir):
@@ -509,8 +491,13 @@ class StratumBuilder(BuilderBase):
[x.name for x in constituents])
with lac.put_artifact_metadata(a, 'meta') as f:
json.dump(meta, f, indent=4, sort_keys=True)
- with self.local_artifact_cache.put(a) as f:
+ # TODO: This is not concurrency safe, put_stratum_artifact
+ # deletes temp which could be in use by another
+ # build.
+ temp = os.path.join(self.app.settings['tempdir'], a.name)
+ with open(temp, 'w+') as f:
json.dump([c.basename() for c in constituents], f)
+ self.local_artifact_cache.put_non_ostree_artifact(a, temp)
self.save_build_times()
return self.source.artifacts.values()
@@ -532,64 +519,47 @@ class SystemBuilder(BuilderBase): # pragma: no cover
arch = self.source.morphology['arch']
for a_name, artifact in self.source.artifacts.iteritems():
- handle = self.local_artifact_cache.put(artifact)
-
try:
fs_root = self.staging_area.destdir(self.source)
- self.unpack_strata(fs_root)
- self.write_metadata(fs_root, a_name)
- self.run_system_integration_commands(fs_root)
- unslashy_root = fs_root[1:]
- def uproot_info(info):
- info.name = relpath(info.name, unslashy_root)
- if info.islnk():
- info.linkname = relpath(info.linkname,
- unslashy_root)
- return info
- tar = tarfile.open(fileobj=handle, mode="w", name=a_name)
- self.app.status(msg='Constructing tarball of rootfs',
- chatty=True)
- tar.add(fs_root, recursive=True, filter=uproot_info)
- tar.close()
+ upperdir = self.staging_area.overlay_upperdir(
+ self.source)
+ editable_root = self.staging_area.overlaydir(self.source)
+ workdir = os.path.join(self.staging_area.dirname,
+ 'overlayfs-workdir')
+ if not os.path.exists(workdir):
+ os.makedirs(workdir)
+ union_filesystem = self.app.settings['union-filesystem']
+ morphlib.fsutils.overlay_mount(self.app.runcmd,
+ 'overlay-%s' % a_name,
+ editable_root, fs_root,
+ upperdir, workdir,
+ union_filesystem)
+ try:
+ self.unpack_strata(fs_root)
+ self.write_metadata(editable_root, a_name)
+ self.run_system_integration_commands(editable_root)
+ self.local_artifact_cache.put(editable_root, artifact)
+ finally:
+ morphlib.fsutils.unmount(self.app.runcmd,
+ editable_root)
except BaseException as e:
logging.error(traceback.format_exc())
self.app.status(msg='Error while building system',
error=True)
- handle.abort()
raise
- else:
- handle.close()
self.save_build_times()
return self.source.artifacts.itervalues()
- def load_stratum(self, stratum_artifact):
- '''Load a stratum from the local artifact cache.
-
- Returns a list of ArtifactCacheReference instances for the chunks
- contained in the stratum.
-
- '''
- cache = self.local_artifact_cache
- with cache.get(stratum_artifact) as stratum_file:
- try:
- artifact_list = json.load(stratum_file,
- encoding='unicode-escape')
- except ValueError as e:
- raise cliapp.AppException(
- 'Corruption detected: %s while loading %s' %
- (e, cache.artifact_filename(stratum_artifact)))
- return [ArtifactCacheReference(a) for a in artifact_list]
-
def unpack_one_stratum(self, stratum_artifact, target):
'''Unpack a single stratum into a target directory'''
cache = self.local_artifact_cache
- for chunk in self.load_stratum(stratum_artifact):
- self.app.status(msg='Unpacking chunk %(basename)s',
+ chunks = morphlib.util.get_stratum_contents(cache, stratum_artifact)
+ for chunk in chunks:
+ self.app.status(msg='Checkout chunk %(basename)s',
basename=chunk.basename(), chatty=True)
- with cache.get(chunk) as chunk_file:
- morphlib.bins.unpack_binary_from_file(chunk_file, target)
+ cache.get(chunk, target)
target_metadata = os.path.join(
target, 'baserock', '%s.meta' % stratum_artifact.name)
@@ -600,7 +570,7 @@ class SystemBuilder(BuilderBase): # pragma: no cover
def unpack_strata(self, path):
'''Unpack strata into a directory.'''
- self.app.status(msg='Unpacking strata to %(path)s',
+ self.app.status(msg='Checking out strata to %(path)s',
path=path, chatty=True)
with self.build_watch('unpack-strata'):
for a_name, a in self.source.artifacts.iteritems():
@@ -612,7 +582,8 @@ class SystemBuilder(BuilderBase): # pragma: no cover
# download the chunk artifacts if necessary
for stratum_artifact in self.source.dependencies:
- chunks = self.load_stratum(stratum_artifact)
+ chunks = morphlib.util.get_stratum_contents(
+ self.local_artifact_cache, stratum_artifact)
download_depends(chunks,
self.local_artifact_cache,
self.remote_artifact_cache)
diff --git a/morphlib/builder_tests.py b/morphlib/builder_tests.py
index a571e3d0..b5e66521 100644
--- a/morphlib/builder_tests.py
+++ b/morphlib/builder_tests.py
@@ -105,8 +105,8 @@ class FakeArtifactCache(object):
def __init__(self):
self._cached = {}
- def put(self, artifact):
- return FakeFileHandle(self, (artifact.cache_key, artifact.name))
+ def put(self, artifact, directory):
+ self._cached[(artifact.cache_key, artifact.name)] = artifact.name
def put_artifact_metadata(self, artifact, name):
return FakeFileHandle(self, (artifact.cache_key, artifact.name, name))
@@ -114,7 +114,7 @@ class FakeArtifactCache(object):
def put_source_metadata(self, source, cachekey, name):
return FakeFileHandle(self, (cachekey, name))
- def get(self, artifact):
+ def get(self, artifact, directory=None):
return StringIO.StringIO(
self._cached[(artifact.cache_key, artifact.name)])
@@ -134,6 +134,10 @@ class FakeArtifactCache(object):
def has_source_metadata(self, source, cachekey, name):
return (cachekey, name) in self._cached
+ def copy_from_remote(self, artifact, remote):
+ self._cached[(artifact.cache_key, artifact.name)] = \
+ remote._cached[(artifact.cache_key, artifact.name)]
+
class BuilderBaseTests(unittest.TestCase):
@@ -191,9 +195,7 @@ class BuilderBaseTests(unittest.TestCase):
rac = FakeArtifactCache()
afacts = [FakeArtifact(name) for name in ('a', 'b', 'c')]
for a in afacts:
- fh = rac.put(a)
- fh.write(a.name)
- fh.close()
+ rac.put(a, 'not-a-dir')
morphlib.builder.download_depends(afacts, lac, rac)
self.assertTrue(all(lac.has(a) for a in afacts))
@@ -202,9 +204,7 @@ class BuilderBaseTests(unittest.TestCase):
rac = FakeArtifactCache()
afacts = [FakeArtifact(name) for name in ('a', 'b', 'c')]
for a in afacts:
- fh = rac.put(a)
- fh.write(a.name)
- fh.close()
+ rac.put(a, 'not-a-dir')
fh = rac.put_artifact_metadata(a, 'meta')
fh.write('metadata')
fh.close()
diff --git a/morphlib/fsutils.py b/morphlib/fsutils.py
index a3b73bf6..6e1adc90 100644
--- a/morphlib/fsutils.py
+++ b/morphlib/fsutils.py
@@ -46,14 +46,34 @@ def create_fs(runcmd, partition): # pragma: no cover
runcmd(['mkfs.btrfs', '-L', 'baserock', partition])
-def mount(runcmd, partition, mount_point, fstype=None): # pragma: no cover
+def mount(runcmd, partition, mount_point,
+ fstype=None, options=[]): # pragma: no cover
if not os.path.exists(mount_point):
os.mkdir(mount_point)
if not fstype:
fstype = []
else:
fstype = ['-t', fstype]
- runcmd(['mount', partition, mount_point] + fstype)
+ argv = ['mount', partition, mount_point] + fstype
+ for option in options:
+ argv.extend(('-o', option))
+ runcmd(argv)
+
+
+def overlay_mount(runcmd, partition, mount_point,
+ lowerdir, upperdir, workdir, method): # pragma: no cover
+ if method == 'overlayfs':
+ options = ['lowerdir=%s' % lowerdir, 'upperdir=%s' % upperdir,
+ 'workdir=%s' % workdir]
+ mount(runcmd, partition, mount_point, 'overlay', options)
+ elif method == 'unionfs-fuse':
+ if not os.path.exists(mount_point):
+ os.mkdir(mount_point)
+ dir_string = '%s=RW:%s=RO' % (upperdir, lowerdir)
+ runcmd(['unionfs', '-o', 'cow', '-o', 'hide_meta_files',
+ dir_string, mount_point])
+ else:
+ raise Exception('Union filesystem %s not supported' % method)
def unmount(runcmd, mount_point): # pragma: no cover
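
The new overlay_mount() helper is what both the system builder and the deploy plugin (below) use to get a writable view on top of a read-only OSTree checkout. A minimal sketch of driving it directly, assuming root privileges, overlayfs support in the kernel, and illustrative paths; cliapp.runcmd is assumed to be the same kind of command runner morph normally passes in:

    # Illustrative only, not part of the patch. 'lower' is the read-only
    # checkout, writes land in 'upper', 'work' is overlayfs scratch space,
    # and 'merged' is the writable union view.
    import os
    import cliapp
    import morphlib.fsutils

    lower = '/src/tmp/fs-root'
    upper = '/src/tmp/upper'
    work = '/src/tmp/work'
    merged = '/src/tmp/merged'
    for d in (lower, upper, work, merged):
        if not os.path.exists(d):
            os.makedirs(d)

    # Argument order: runcmd, mount source name, mount point, lowerdir,
    # upperdir, workdir, method ('overlayfs' or 'unionfs-fuse').
    morphlib.fsutils.overlay_mount(cliapp.runcmd, 'overlay-example', merged,
                                   lower, upper, work, 'overlayfs')
    try:
        pass  # ...modify files under 'merged' without touching 'lower'...
    finally:
        morphlib.fsutils.unmount(cliapp.runcmd, merged)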
diff --git a/morphlib/localartifactcache.py b/morphlib/localartifactcache.py
index e6695c4e..1b834a32 100644
--- a/morphlib/localartifactcache.py
+++ b/morphlib/localartifactcache.py
@@ -129,11 +129,21 @@ class LocalArtifactCache(object):
returns a [(cache_key, set(artifacts), last_used)]
'''
+ def is_artifact(filename):
+ # This is just enough to avoid crashes from random unpacked
+ # directory trees and temporary files in the cachedir. A
+ # better mechanism is needed. It's not simple to tell
+ # OSFS.walkfiles() to do a non-recursive walk, sadly.
+ return '.' in filename and '/' not in filename
+
CacheInfo = collections.namedtuple('CacheInfo', ('artifacts', 'mtime'))
contents = collections.defaultdict(lambda: CacheInfo(set(), 0))
for filename in self.cachefs.walkfiles():
- cachekey = filename[:63]
- artifact = filename[65:]
+ # filenames are returned with a preceding /.
+ filename = filename[1:]
+ if not is_artifact(filename): # pragma: no cover
+ continue
+ cachekey, artifact = filename.split('.', 1)
artifacts, max_mtime = contents[cachekey]
artifacts.add(artifact)
art_info = self.cachefs.getinfo(filename)
@@ -146,5 +156,5 @@ class LocalArtifactCache(object):
def remove(self, cachekey):
'''Remove all artifacts associated with the given cachekey.'''
for filename in (x for x in self.cachefs.walkfiles()
- if x.startswith(cachekey)):
+ if x[1:].startswith(cachekey)):
self.cachefs.remove(filename)
diff --git a/morphlib/ostree.py b/morphlib/ostree.py
new file mode 100644
index 00000000..ed2c59da
--- /dev/null
+++ b/morphlib/ostree.py
@@ -0,0 +1,178 @@
+# Copyright (C) 2013-2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# =*= License: GPL-2 =*=
+
+
+from gi.repository import OSTree
+from gi.repository import Gio
+from gi.repository import GLib
+
+import os
+import logging
+
+
+class OSTreeRepo(object):
+
+ """Class to wrap the OSTree API."""
+
+ G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS = Gio.FileQueryInfoFlags(1)
+ OSTREE_GIO_FAST_QUERYINFO = (
+ 'standard::name,'
+ 'standard::type,'
+ 'standard::size,'
+ 'standard::is-symlink,'
+ 'standard::symlink-target,'
+ 'unix::device,'
+ 'unix::inode,'
+ 'unix::mode,'
+ 'unix::uid,'
+ 'unix::gid,'
+ 'unix::rdev')
+
+ def __init__(self, path, disable_fsync=True, mode='bare'):
+ self.path = path
+ self.repo = self._open_repo(path, disable_fsync, mode)
+
+ def _open_repo(self, path, disable_fsync=True, mode='bare'):
+ """Create and open and OSTree.Repo, and return it."""
+ repo_dir = Gio.file_new_for_path(path)
+ repo = OSTree.Repo.new(repo_dir)
+ logging.debug('using %s' % mode)
+ if mode == 'bare':
+ mode = OSTree.RepoMode.BARE
+ elif mode == 'archive_z2':
+ mode = OSTree.RepoMode.ARCHIVE_Z2
+ else:
+ raise Exception('Mode %s is not supported' % mode)
+
+ try:
+ repo.open(None)
+ logging.debug('opened')
+ except GLib.GError:
+ if not os.path.exists(path):
+ os.makedirs(path)
+ logging.debug('failed to open, creating')
+ repo.create(mode, None)
+ repo.set_disable_fsync(disable_fsync)
+ return repo
+
+ def refsdir(self):
+ """Return the abspath to the refs/heads directory in the repo."""
+ return os.path.join(os.path.abspath(self.path), 'refs/heads')
+
+ def touch_ref(self, ref):
+ """Update the mtime of a ref file in repo/refs/heads."""
+ os.utime(os.path.join(self.refsdir(), ref), None)
+
+ def resolve_rev(self, branch, allow_noent=True):
+ """Return the SHA256 corresponding to 'branch'."""
+ return self.repo.resolve_rev(branch, allow_noent)[1]
+
+ def read_commit(self, branch):
+ """Return an OSTree.RepoFile representing a committed tree."""
+ return self.repo.read_commit(branch, None)[1]
+
+ def query_info(self, file_object):
+ """Quickly return a Gio.FileInfo for file_object."""
+ return file_object.query_info(self.OSTREE_GIO_FAST_QUERYINFO,
+ self.G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
+ None)
+
+ def checkout(self, branch, destdir):
+ """Checkout branch into destdir."""
+ checkout_path = destdir
+ if not os.path.exists(checkout_path):
+ os.makedirs(checkout_path)
+ checkout = Gio.file_new_for_path(checkout_path)
+
+ commit = self.read_commit(branch)
+ commit_info = self.query_info(commit)
+ self.repo.checkout_tree(OSTree.RepoCheckoutMode.NONE,
+ OSTree.RepoCheckoutOverwriteMode.UNION_FILES,
+ checkout, commit, commit_info, None)
+
+ def commit(self, subject, srcdir, branch, body=''):
+ """Commit the contents of 'srcdir' to 'branch'.
+
+ The subject parameter is the title of the commit message, and the
+ body parameter is the body of the commit message.
+
+ """
+ self.repo.prepare_transaction(None)
+ parent = self.resolve_rev(branch)
+ mtree = OSTree.MutableTree()
+ src = Gio.file_new_for_path(srcdir)
+ self.repo.write_directory_to_mtree(src, mtree, None, None)
+ root = self.repo.write_mtree(mtree, None)[1]
+ checksum = self.repo.write_commit(parent, subject, body,
+ None, root, None)[1]
+ self.repo.transaction_set_ref(None, branch, checksum)
+ stats = self.repo.commit_transaction(None)
+
+ def cat_file(self, ref, path):
+ """Return the file descriptor of path at ref."""
+ commit = self.read_commit(ref)
+ relative = commit.resolve_relative_path(path)
+ ret, content, etag = relative.load_contents()
+ return content
+
+ def list_refs(self, ref=None, resolved=False):
+ """Return a list of all refs in the repo."""
+ if ref:
+ refs = self.repo.list_refs(ref)[1]
+ else:
+ refs = self.repo.list_refs()[1]
+ if not resolved:
+ return refs.keys()
+ return refs
+
+ def delete_ref(self, ref):
+ """Remove refspec from the repo."""
+ if not self.list_refs(ref):
+ raise Exception("Failed to delete ref, it doesn't exist")
+ self.repo.set_ref_immediate(None, ref, None, None)
+
+ def prune(self):
+ """Remove unreachable objects from the repo."""
+ depth = -1 # no recursion limit
+ return self.repo.prune(OSTree.RepoPruneFlags.REFS_ONLY, depth, None)
+
+ def add_remote(self, name, url):
+ """Add a remote with a given name and url."""
+ options_type = GLib.VariantType.new('a{sv}')
+ options_builder = GLib.VariantBuilder.new(options_type)
+ options = options_builder.end()
+ self.repo.remote_add(name, url, options, None)
+
+ def remove_remote(self, name):
+ """Remove a remote with a given name."""
+ self.repo.remote_delete(name, None)
+
+ def get_remote_url(self, name):
+ """Return the URL for a remote."""
+ return self.repo.remote_get_url(name)[1]
+
+ def list_remotes(self):
+ """Return a list of all remotes for this repo."""
+ return self.repo.remote_list()
+
+ def has_remote(self, name):
+ """Return True if name is a remote for the repo."""
+ return name in self.list_remotes()
+
+ def pull(self, refs, remote):
+ """Pull ref from remote into the local repo."""
+ flags = OSTree.RepoPullFlags.NONE
+ self.repo.pull(remote, refs, flags, None, None)
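
A rough usage sketch of the OSTreeRepo wrapper added above, with made-up paths and ref names, to show the intended flow of committing a directory as a ref and checking the same content out elsewhere:

    # Illustrative only, not part of the patch.
    import morphlib.ostree

    repo = morphlib.ostree.OSTreeRepo('/src/cache/artifacts/repo', mode='bare')
    # Commit a directory's contents under a ref named like an artifact.
    repo.commit('example chunk', '/src/tmp/chunk-contents',
                'cafef00d.chunk.example')
    print repo.list_refs()
    # Check the committed tree back out somewhere else.
    repo.checkout('cafef00d.chunk.example', '/src/tmp/checked-out')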
diff --git a/morphlib/ostreeartifactcache.py b/morphlib/ostreeartifactcache.py
new file mode 100644
index 00000000..8176f499
--- /dev/null
+++ b/morphlib/ostreeartifactcache.py
@@ -0,0 +1,301 @@
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import collections
+import contextlib
+import logging
+import os
+import shutil
+import stat
+import string
+import tarfile
+import tempfile
+
+import cliapp
+from gi.repository import GLib
+
+import morphlib
+from morphlib.artifactcachereference import ArtifactCacheReference
+
+
+class NotCachedError(morphlib.Error):
+
+ def __init__(self, ref):
+ self.msg = 'Failed to checkout %s from the artifact cache.' % ref
+
+
+class OSTreeArtifactCache(object):
+ """Class to provide the artifact cache API using an OSTree repo."""
+
+ def __init__(self, cachedir, mode='bare', status_cb=None):
+ repo_dir = os.path.join(cachedir, 'repo')
+ self.repo = morphlib.ostree.OSTreeRepo(repo_dir, mode=mode)
+ self.cachedir = cachedir
+ self.status_cb = status_cb
+
+ def status(self, *args, **kwargs):
+ if self.status_cb is not None:
+ self.status_cb(*args, **kwargs)
+
+ @contextlib.contextmanager
+ def _get_file_from_remote(self, artifact, remote, metadata_name=None):
+ if metadata_name:
+ handle = remote.get_artifact_metadata(artifact, metadata_name)
+ self.status(
+ msg='Downloading %(name)s %(metadata_name)s as a file.',
+ chatty=True, name=artifact.basename(),
+ metadata_name=metadata_name)
+ else:
+ handle = remote.get(artifact)
+ self.status(
+ msg='Downloading %(name)s as a tarball.', chatty=True,
+ name=artifact.basename())
+
+ try:
+ temporary_download = tempfile.NamedTemporaryFile(dir=self.cachedir)
+ shutil.copyfileobj(handle, temporary_download)
+ temporary_download.flush()
+ yield temporary_download.name
+ finally:
+ temporary_download.close()
+
+ def _get_artifact_cache_name(self, artifact):
+ valid_chars = string.digits + string.letters + '-._'
+ transl = lambda x: x if x in valid_chars else '_'
+ key = ''.join([transl(x) for x in artifact.basename()])
+ return key
+
+ def put(self, directory, artifact):
+ """Commit the contents of 'directory' to the repo.
+
+ This uses the artifact name and cache key to create the ref, so the
+ contents of directory should be the contents of the artifact.
+
+ """
+ cache_key, kind, name = artifact.basename().split('.', 2)
+ ref = self._get_artifact_cache_name(artifact)
+ subject = name
+ try:
+ self.status(
+ msg='Committing %(subject)s to artifact cache at %(ref)s.',
+ chatty=True, subject=subject, ref=ref)
+ self.repo.commit(subject, directory, ref)
+ except GLib.GError as e:
+ logging.debug('OSTree raised an exception: %s' % e)
+ raise cliapp.AppException('Failed to commit %s to artifact '
+ 'cache.' % ref)
+
+ def put_non_ostree_artifact(self, artifact, location, metadata_name=None):
+ """Store a single file in the artifact cachedir."""
+ if metadata_name:
+ filename = self._artifact_metadata_filename(artifact,
+ metadata_name)
+ else:
+ filename = self.artifact_filename(artifact)
+ shutil.copy(location, filename)
+
+ def _remove_device_nodes(self, path):
+ for dirpath, dirnames, filenames in os.walk(path):
+ for f in filenames:
+ filepath = os.path.join(dirpath, f)
+ mode = os.lstat(filepath).st_mode
+ if stat.S_ISBLK(mode) or stat.S_ISCHR(mode):
+ logging.debug('Removing device node %s from artifact' %
+ filepath)
+ os.remove(filepath)
+
+ def _copy_metadata_from_remote(self, artifact, remote):
+ """Copy a metadata file from a remote cache."""
+ a, name = artifact.basename().split('.', 1)
+ with self._get_file_from_remote(ArtifactCacheReference(a),
+ remote, name) as location:
+ self.put_non_ostree_artifact(ArtifactCacheReference(a),
+ location, name)
+
+ def copy_from_remote(self, artifact, remote):
+ """Get 'artifact' from remote artifact cache and store it locally.
+
+ This takes an Artifact object and a RemoteArtifactCache. Note that
+ `remote` here is not the same as a `remote` for an OSTree repo.
+
+ """
+ if remote.method == 'tarball':
+ with self._get_file_from_remote(artifact, remote) as location:
+ try:
+ cache_key, kind, name = artifact.basename().split('.', 2)
+ except ValueError:
+ # We can't split the name properly, it must be metadata!
+ self._copy_metadata_from_remote(artifact, remote)
+ return
+
+ if kind == 'stratum':
+ self.put_non_ostree_artifact(artifact, location)
+ return
+ try:
+ tempdir = tempfile.mkdtemp(dir=self.cachedir)
+ with tarfile.open(name=location) as tf:
+ tf.extractall(path=tempdir)
+ self._remove_device_nodes(tempdir)
+ self.put(tempdir, artifact)
+ except tarfile.ReadError:
+ # Reading the tarball failed, and we expected a
+ # tarball artifact. Something must have gone
+ # wrong.
+ raise
+ finally:
+ shutil.rmtree(tempdir)
+
+ elif remote.method == 'ostree':
+ self.status(msg='Pulling artifact for %(name)s from remote.',
+ chatty=True, name=artifact.basename())
+ ref = self._get_artifact_cache_name(artifact)
+ try:
+ cache_key, kind, name = ref.split('.', 2)
+ except ValueError:
+ # if we can't split the name properly, we must want metadata
+ self._copy_metadata_from_remote(artifact, remote)
+ return
+
+ if artifact.basename().split('.', 2)[1] == 'stratum':
+ with self._get_file_from_remote(artifact, remote) as location:
+ self.put_non_ostree_artifact(artifact, location)
+ return
+
+ try:
+ if not self.repo.has_remote(remote.name):
+ self.repo.add_remote(remote.name, remote.ostree_url)
+ self.repo.pull([ref], remote.name)
+ except GLib.GError as e:
+ logging.debug('OSTree raised an exception: %s' % e)
+ raise cliapp.AppException('Failed to pull %s from remote '
+ 'cache.' % ref)
+
+ def get(self, artifact, directory=None):
+ """Checkout an artifact from the repo and return its location."""
+ cache_key, kind, name = artifact.basename().split('.', 2)
+ if kind == 'stratum':
+ return self.artifact_filename(artifact)
+ if directory is None:
+ directory = tempfile.mkdtemp()
+ ref = self._get_artifact_cache_name(artifact)
+ try:
+ self.repo.checkout(ref, directory)
+ # We need to update the mtime and atime of the ref file in the
+ # repository so that we can decide which refs were least recently
+ # accessed when doing `morph gc`.
+ self.repo.touch_ref(ref)
+ except GLib.GError as e:
+ logging.debug('OSTree raised an exception: %s' % e)
+ raise NotCachedError(ref)
+ return directory
+
+ def list_contents(self):
+ """Return the set of sources cached and related information.
+
+ returns a [(cache_key, set(artifacts), last_used)]
+
+ """
+ CacheInfo = collections.namedtuple('CacheInfo', ('artifacts', 'mtime'))
+ contents = collections.defaultdict(lambda: CacheInfo(set(), 0))
+ for ref in self.repo.list_refs():
+ cachekey = ref[:63]
+ artifact = ref[65:]
+ artifacts, max_mtime = contents[cachekey]
+ artifacts.add(artifact)
+ ref_filename = os.path.join(self.repo.refsdir(), ref)
+ mtime = os.path.getmtime(ref_filename)
+ contents[cachekey] = CacheInfo(artifacts, max(max_mtime, mtime))
+ return ((cache_key, info.artifacts, info.mtime)
+ for cache_key, info in contents.iteritems())
+
+ def remove(self, cachekey):
+ """Remove all artifacts associated with the given cachekey."""
+ for ref in (r for r in self.repo.list_refs()
+ if r.startswith(cachekey)):
+ self.repo.delete_ref(ref)
+
+ def prune(self):
+ """Delete orphaned objects in the repo."""
+ self.repo.prune()
+
+ def has(self, artifact):
+ try:
+ cachekey, kind, name = artifact.basename().split('.', 2)
+ except ValueError:
+ # We couldn't split the basename properly, we must want metadata
+ cachekey, name = artifact.basename().split('.', 1)
+ if self.has_artifact_metadata(artifact, name):
+ return True
+ else:
+ return False
+
+ if kind == 'stratum':
+ if self._has_file(self.artifact_filename(artifact)):
+ return True
+ else:
+ return False
+
+ sha = self.repo.resolve_rev(self._get_artifact_cache_name(artifact))
+ if sha:
+ # We call touch_ref here to help `morph gc` work out which
+ # artifacts have been used most and least recently.
+ self.repo.touch_ref(self._get_artifact_cache_name(artifact))
+ return True
+ return False
+
+ def get_artifact_metadata(self, artifact, name):
+ filename = self._artifact_metadata_filename(artifact, name)
+ os.utime(filename, None)
+ return open(filename)
+
+ def get_source_metadata_filename(self, source, cachekey, name):
+ return self._source_metadata_filename(source, cachekey, name)
+
+ def get_source_metadata(self, source, cachekey, name):
+ filename = self._source_metadata_filename(source, cachekey, name)
+ os.utime(filename, None)
+ return open(filename)
+
+ def artifact_filename(self, artifact):
+ return os.path.join(self.cachedir, artifact.basename())
+
+ def _artifact_metadata_filename(self, artifact, name):
+ return os.path.join(self.cachedir, artifact.metadata_basename(name))
+
+ def _source_metadata_filename(self, source, cachekey, name):
+ return os.path.join(self.cachedir, '%s.%s' % (cachekey, name))
+
+ def put_artifact_metadata(self, artifact, name):
+ filename = self._artifact_metadata_filename(artifact, name)
+ return morphlib.savefile.SaveFile(filename, mode='w')
+
+ def put_source_metadata(self, source, cachekey, name):
+ filename = self._source_metadata_filename(source, cachekey, name)
+ return morphlib.savefile.SaveFile(filename, mode='w')
+
+ def _has_file(self, filename):
+ if os.path.exists(filename):
+ os.utime(filename, None)
+ return True
+ return False
+
+ def has_artifact_metadata(self, artifact, name):
+ filename = self._artifact_metadata_filename(artifact, name)
+ return self._has_file(filename)
+
+ def has_source_metadata(self, source, cachekey, name):
+ filename = self._source_metadata_filename(source, cachekey, name)
+ return self._has_file(filename)
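
To tie the pieces above together, a rough sketch of the cache API as the builder uses it; the cache key below is a placeholder rather than a real SHA, and the paths are examples:

    # Illustrative only, not part of the patch.
    import morphlib
    from morphlib.artifactcachereference import ArtifactCacheReference

    cache = morphlib.ostreeartifactcache.OSTreeArtifactCache(
        '/src/cache/artifacts', mode='bare')
    # Artifact basenames look like '<cachekey>.<kind>.<name>'.
    artifact = ArtifactCacheReference('a' * 64 + '.chunk.example')

    cache.put('/src/tmp/chunk-contents', artifact)   # commit directory as a ref
    if cache.has(artifact):
        checkout_dir = cache.get(artifact)           # checks out to a tempdir
        print 'checked out to', checkout_dir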
diff --git a/morphlib/plugins/build_plugin.py b/morphlib/plugins/build_plugin.py
index b8569ff7..168f83c9 100644
--- a/morphlib/plugins/build_plugin.py
+++ b/morphlib/plugins/build_plugin.py
@@ -327,7 +327,6 @@ class BuildPlugin(cliapp.Plugin):
for name, component in components.iteritems():
component.build_env = root.build_env
bc.build_in_order(component)
- self.app.status(msg='%(kind)s %(name)s is cached at %(path)s',
+ self.app.status(msg='%(kind)s %(name)s is cached.',
kind=component.source.morphology['kind'],
- name=name,
- path=bc.lac.artifact_filename(component))
+ name=name)
diff --git a/morphlib/plugins/deploy_plugin.py b/morphlib/plugins/deploy_plugin.py
index ea84d9ec..231fa868 100644
--- a/morphlib/plugins/deploy_plugin.py
+++ b/morphlib/plugins/deploy_plugin.py
@@ -25,6 +25,15 @@ import warnings
import cliapp
import morphlib
+from morphlib.artifactcachereference import ArtifactCacheReference
+
+
+class NotYetBuiltError(morphlib.Error):
+
+ def __init__(self, name):
+ self.msg = ('Deployment failed as %s is not yet built.\n'
+ 'Please ensure the system is built before deployment.'
+ % name)
def configuration_for_system(system_id, vars_from_commandline,
@@ -419,6 +428,8 @@ class DeployPlugin(cliapp.Plugin):
system_status_prefix = '%s[%s]' % (old_status_prefix, system['morph'])
self.app.status_prefix = system_status_prefix
try:
+ system_tree = None
+
# Find the artifact to build
morph = morphlib.util.sanitise_morphology_path(system['morph'])
srcpool = build_command.create_source_pool(build_repo, ref, morph)
@@ -467,6 +478,9 @@ class DeployPlugin(cliapp.Plugin):
system_tree, deploy_location)
finally:
self.app.status_prefix = system_status_prefix
+ if system_tree and os.path.exists(system_tree):
+ morphlib.fsutils.unmount(self.app.runcmd, system_tree)
+ shutil.rmtree(system_tree)
finally:
self.app.status_prefix = old_status_prefix
@@ -525,46 +539,106 @@ class DeployPlugin(cliapp.Plugin):
except morphlib.extensions.ExtensionNotFoundError:
pass
+ def checkout_system(self, build_command, artifact, path):
+ """Checkout a system into `path`.
+
+ This checks out the system artifact into the directory given by
+ `path`. If the system is not in the local cache, it is first fetched
+ from the remote cache.
+
+ Raises a NotYetBuiltError if the system artifact isn't cached either
+ locally or remotely.
+
+ """
+ try:
+ self.app.status(msg='Checking out system for configuration')
+ build_command.cache_artifacts_locally([artifact])
+ build_command.lac.get(artifact, path)
+ self.create_device_nodes(artifact, path)
+ except (morphlib.ostreeartifactcache.NotCachedError,
+ morphlib.remoteartifactcache.GetError):
+ raise NotYetBuiltError(artifact.name)
+
+ self.app.status(
+ msg='System checked out at %(system_tree)s',
+ system_tree=path)
+
+ def create_device_nodes(self, artifact, path):
+ self.fix_chunk_build_mode(artifact)
+ for a in artifact.walk():
+ morph = a.source.morphology
+ if morph['kind'] == 'chunk' and \
+ morph['build-mode'] != 'bootstrap':
+ morphlib.util.create_devices(a.source.morphology, path)
+
+ def fix_chunk_build_mode(self, system_artifact):
+ """Give each chunk's in-memory morpholgy the correct build-mode."""
+ strata = set(a for a in system_artifact.walk()
+ if a.source.morphology['kind'] == 'stratum')
+ chunks = set(a for a in system_artifact.walk()
+ if a.source.morphology['kind'] == 'chunk')
+ for chunk in chunks:
+ for stratum in strata:
+ for spec in stratum.source.morphology['chunks']:
+ if chunk.source.morphology['name'] == spec['name']:
+ chunk.source.morphology['build-mode'] = \
+ spec['build-mode']
+
def setup_deploy(self, build_command, deploy_tempdir, root_repo_dir, ref,
artifact, deployment_type, location, env):
+ """Checkout the artifact, create metadata and return the location.
+
+ This checks out the system into a temporary directory, and then mounts
+ this temporary directory alongside a different temporary directory
+ using a union filesystem. This allows changes to be made without
+ touching the checked out artifacts. The deployment metadata file is
+ created and then the directory at which the two temporary directories
+ are mounted is returned.
+
+ """
# deployment_type, location and env are only used for saving metadata
- # Create a tempdir to extract the rootfs in
- system_tree = tempfile.mkdtemp(dir=deploy_tempdir)
+ deployment_dir = tempfile.mkdtemp(dir=deploy_tempdir)
+ # Create a tempdir to extract the rootfs in
+ system_tree = tempfile.mkdtemp(dir=deployment_dir)
+
+ # Create temporary directory for overlayfs
+ overlay_dir = os.path.join(deployment_dir,
+ '%s-upperdir' % artifact.name)
+ if not os.path.exists(overlay_dir):
+ os.makedirs(overlay_dir)
+ work_dir = os.path.join(deployment_dir, '%s-workdir' % artifact.name)
+ if not os.path.exists(work_dir):
+ os.makedirs(work_dir)
+
+ deploy_tree = os.path.join(deployment_dir,
+ 'overlay-deploy-%s' % artifact.name)
try:
- # Unpack the artifact (tarball) to a temporary directory.
- self.app.status(msg='Unpacking system for configuration')
-
- if build_command.lac.has(artifact):
- f = build_command.lac.get(artifact)
- elif build_command.rac.has(artifact):
- build_command.cache_artifacts_locally([artifact])
- f = build_command.lac.get(artifact)
- else:
- raise cliapp.AppException('Deployment failed as system is'
- ' not yet built.\nPlease ensure'
- ' the system is built before'
- ' deployment.')
- tf = tarfile.open(fileobj=f)
- tf.extractall(path=system_tree)
+ self.checkout_system(build_command, artifact, system_tree)
- self.app.status(
- msg='System unpacked at %(system_tree)s',
- system_tree=system_tree)
+ union_filesystem = self.app.settings['union-filesystem']
+ morphlib.fsutils.overlay_mount(self.app.runcmd,
+ 'overlay-deploy-%s' %
+ artifact.name,
+ deploy_tree, system_tree,
+ overlay_dir, work_dir,
+ union_filesystem)
self.app.status(
msg='Writing deployment metadata file')
metadata = self.create_metadata(
artifact, root_repo_dir, deployment_type, location, env)
metadata_path = os.path.join(
- system_tree, 'baserock', 'deployment.meta')
+ deploy_tree, 'baserock', 'deployment.meta')
with morphlib.savefile.SaveFile(metadata_path, 'w') as f:
json.dump(metadata, f, indent=4,
sort_keys=True, encoding='unicode-escape')
- return system_tree
+ return deploy_tree
except Exception:
- shutil.rmtree(system_tree)
+ if deploy_tree and os.path.exists(deploy_tree):
+ morphlib.fsutils.unmount(self.app.runcmd, deploy_tree)
+ shutil.rmtree(deployment_dir)
raise
def run_deploy_commands(self, deploy_tempdir, env, artifact, root_repo_dir,
diff --git a/morphlib/plugins/distbuild_plugin.py b/morphlib/plugins/distbuild_plugin.py
index 8aaead10..708ffee1 100644
--- a/morphlib/plugins/distbuild_plugin.py
+++ b/morphlib/plugins/distbuild_plugin.py
@@ -15,10 +15,13 @@
# with this program. If not, see <http://www.gnu.org/licenses/>.
-import cliapp
import logging
import re
import sys
+import urllib2
+import urlparse
+
+import cliapp
import morphlib
import distbuild
@@ -26,6 +29,13 @@ import distbuild
group_distbuild = 'Distributed Build Options'
+
+class OutdatedCacheServerError(morphlib.Error):
+ def __init__(self):
+        self.msg = 'The writeable cache server is running an outdated ' \
+                   'version of morph-cache-server, which is incompatible ' \
+                   'with this version of morph.'
+
class DistbuildOptionsPlugin(cliapp.Plugin):
def enable(self):
@@ -291,6 +301,12 @@ class ControllerDaemon(cliapp.Plugin):
self.app.settings['worker-cache-server-port']
morph_instance = self.app.settings['morph-instance']
+ request_url = urlparse.urljoin(writeable_cache_server, '/1.0/method')
+ try:
+ req = urllib2.urlopen(request_url)
+ except urllib2.URLError:
+ raise OutdatedCacheServerError()
+
listener_specs = [
# address, port, class to initiate on connection, class init args
('controller-helper-address', 'controller-helper-port',
diff --git a/morphlib/plugins/gc_plugin.py b/morphlib/plugins/gc_plugin.py
index 71522b04..54c1b43e 100644
--- a/morphlib/plugins/gc_plugin.py
+++ b/morphlib/plugins/gc_plugin.py
@@ -125,8 +125,8 @@ class GCPlugin(cliapp.Plugin):
'sufficient space already cleared',
chatty=True)
return
- lac = morphlib.localartifactcache.LocalArtifactCache(
- fs.osfs.OSFS(os.path.join(cache_path, 'artifacts')))
+ lac = morphlib.ostreeartifactcache.OSTreeArtifactCache(
+ os.path.join(cache_path, 'artifacts'))
max_age, min_age = self.calculate_delete_range()
logging.debug('Must remove artifacts older than timestamp %d'
% max_age)
@@ -144,6 +144,8 @@ class GCPlugin(cliapp.Plugin):
lac.remove(cachekey)
removed += 1
+ lac.prune()
+
# Maybe remove remaining middle-aged artifacts
for cachekey in may_delete:
if sufficient_free():
@@ -155,6 +157,7 @@ class GCPlugin(cliapp.Plugin):
self.app.status(msg='Removing source %(cachekey)s',
cachekey=cachekey, chatty=True)
lac.remove(cachekey)
+ lac.prune()
removed += 1
if sufficient_free():
diff --git a/morphlib/plugins/ostree_artifacts_plugin.py b/morphlib/plugins/ostree_artifacts_plugin.py
new file mode 100644
index 00000000..69d66dd1
--- /dev/null
+++ b/morphlib/plugins/ostree_artifacts_plugin.py
@@ -0,0 +1,169 @@
+# Copyright (C) 2015 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import collections
+import fs
+import os
+
+import cliapp
+
+import morphlib
+from morphlib.artifactcachereference import ArtifactCacheReference
+
+
+class NoCacheError(morphlib.Error):
+
+ def __init__(self, cachedir):
+ self.msg = ("Expected artifact cache directory %s doesn't exist.\n"
+ "No existing cache to convert!" % cachedir)
+
+
+class ComponentNotInSystemError(morphlib.Error):
+
+ def __init__(self, components, system):
+ components = ', '.join(components)
+ self.msg = ('Components %s are not in %s. Ensure you provided '
+ 'component names rather than filenames.'
+ % (components, system))
+
+
+class OSTreeArtifactsPlugin(cliapp.Plugin):
+
+ def enable(self):
+ self.app.add_subcommand('convert-local-cache', self.convert_cache,
+ arg_synopsis='[DELETE]')
+ self.app.add_subcommand('query-cache', self.query_cache,
+ arg_synopsis='SYSTEM NAME...')
+
+ def disable(self):
+ pass
+
+ def convert_cache(self, args):
+ """Convert a local tarball cache into an OSTree cache.
+
+ Command line arguments:
+
+        * DELETE: This is an optional argument which, if given as "delete",
+          causes the tarball artifacts to be removed once they have been
+          converted.
+
+        This command extracts all the tarball artifacts in your local
+        artifact cache and stores them in an OSTree repository in that
+        same cache. This is quicker than redownloading the content from a
+        remote cache server, but may still be time-consuming if your cache
+        is large.
+
+ """
+ delete = False
+ if args:
+ if args[0] == 'delete':
+ delete = True
+
+ artifact_cachedir = os.path.join(self.app.settings['cachedir'],
+ 'artifacts')
+ if not os.path.exists(artifact_cachedir):
+ raise NoCacheError(artifact_cachedir)
+
+ tarball_cache = morphlib.localartifactcache.LocalArtifactCache(
+ fs.osfs.OSFS(artifact_cachedir))
+ ostree_cache = morphlib.ostreeartifactcache.OSTreeArtifactCache(
+ artifact_cachedir, mode=self.app.settings['ostree-repo-mode'],
+ status_cb=self.app.status)
+
+ cached_artifacts = []
+ for cachekey, artifacts, last_used in tarball_cache.list_contents():
+ for artifact in artifacts:
+ basename = '.'.join((cachekey.lstrip('/'), artifact))
+ cached_artifacts.append(ArtifactCacheReference(basename))
+
+ # Set the method property of the tarball cache to allow us to
+ # treat it like a RemoteArtifactCache.
+ tarball_cache.method = 'tarball'
+
+ to_convert = [artifact for artifact in cached_artifacts
+ if '.stratum.' not in artifact.basename()]
+ for i, artifact in enumerate(to_convert):
+ if not ostree_cache.has(artifact):
+ try:
+ cache_key, kind, name = artifact.basename().split('.', 2)
+ except ValueError:
+ # We must have metadata, which doesn't need converting
+ continue
+ self.app.status(msg='[%(current)d/%(total)d] Converting '
+ '%(name)s', current=i+1,
+ total=len(to_convert),
+ name=artifact.basename())
+ ostree_cache.copy_from_remote(artifact, tarball_cache)
+ if delete:
+ os.remove(tarball_cache.artifact_filename(artifact))
+
+ def _find_artifacts(self, names, root_artifact):
+ found = collections.OrderedDict()
+ not_found = list(names)
+ for a in root_artifact.walk():
+ name = a.source.morphology['name']
+ if name in names and name not in found:
+ found[name] = [a]
+ if name in not_found:
+ not_found.remove(name)
+ elif name in names:
+ found[name].append(a)
+ if name in not_found:
+ not_found.remove(name)
+ return found, not_found
+
+ def query_cache(self, args):
+ """Check if the cache contains an artifact.
+
+ Command line arguments:
+
+ * `SYSTEM` is the filename of the system containing the components
+ to be looked for.
+        * `NAME...` is one or more component names to look for.
+
+ """
+ if not args:
+ raise cliapp.AppException('You must provide at least a system '
+ 'filename.\nUsage: `morph query-cache '
+ 'SYSTEM [NAME...]`')
+ ws = morphlib.workspace.open('.')
+ sb = morphlib.sysbranchdir.open_from_within('.')
+
+ system_filename = morphlib.util.sanitise_morphology_path(args[0])
+ system_filename = sb.relative_to_root_repo(system_filename)
+ component_names = args[1:]
+
+ bc = morphlib.buildcommand.BuildCommand(self.app)
+ repo = sb.get_config('branch.root')
+ ref = sb.get_config('branch.name')
+
+ definitions_repo_path = sb.get_git_directory_name(repo)
+ definitions_repo = morphlib.gitdir.GitDirectory(definitions_repo_path)
+ commit = definitions_repo.resolve_ref_to_commit(ref)
+
+ srcpool = bc.create_source_pool(repo, commit, system_filename)
+ bc.validate_sources(srcpool)
+ root = bc.resolve_artifacts(srcpool)
+ if not component_names:
+ component_names = [root.source.name]
+ components, not_found = self._find_artifacts(component_names, root)
+ if not_found:
+ raise ComponentNotInSystemError(not_found, system_filename)
+
+ for name, artifacts in components.iteritems():
+ for component in artifacts:
+ if bc.lac.has(component):
+ print bc.lac._get_artifact_cache_name(component)
+ else:
+ print '%s is not cached' % name
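
To make the convert-local-cache and query-cache docstrings above concrete:
tarball artifact basenames have the form CACHEKEY.KIND.NAME, which is what
the split('.', 2) in convert_cache() relies on. A small illustration (the
64-character cache key is invented):

    cache_key = 'a' * 64  # hypothetical cache key
    basename = '%s.chunk.hello-misc' % cache_key
    key, kind, name = basename.split('.', 2)
    assert (kind, name) == ('chunk', 'hello-misc')
    # Basenames with fewer than two dots make the split raise ValueError
    # and are treated as metadata; '.stratum.' artifacts are filtered out
    # beforehand, so strata are never imported into the OSTree repository.
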
diff --git a/morphlib/remoteartifactcache.py b/morphlib/remoteartifactcache.py
index 427e4cbb..1a1b5404 100644
--- a/morphlib/remoteartifactcache.py
+++ b/morphlib/remoteartifactcache.py
@@ -57,6 +57,18 @@ class RemoteArtifactCache(object):
def __init__(self, server_url):
self.server_url = server_url
+ self.name = urlparse.urlparse(server_url).hostname
+ try:
+ self.method = self._get_method()
+ except urllib2.URLError:
+ self.method = 'tarball'
+ except Exception as e: # pragma: no cover
+ raise cliapp.AppException(
+ 'Failed to contact remote artifact cache "%s". Error: %s.' %
+ (self.server_url, e))
+ if self.method == 'ostree': # pragma: no cover
+ self.ostree_url = 'http://%s:%s/' % (self.name,
+ self._get_ostree_info())
def has(self, artifact):
return self._has_file(artifact.basename())
@@ -112,5 +124,18 @@ class RemoteArtifactCache(object):
server_url, '/1.0/artifacts?filename=%s' %
urllib.quote(filename))
+ def _get_method(self): # pragma: no cover
+ logging.debug('Getting cache method of %s' % self.server_url)
+ request_url = urlparse.urljoin(self.server_url, '/1.0/method')
+ req = urllib2.urlopen(request_url)
+ return req.read()
+
+ def _get_ostree_info(self): # pragma: no cover
+ logging.debug('Getting OSTree repo info.')
+ request_url = urlparse.urljoin(self.server_url, '/1.0/ostreeinfo')
+ logging.debug('sending %s' % request_url)
+ req = urllib2.urlopen(request_url)
+ return req.read()
+
def __str__(self): # pragma: no cover
return self.server_url
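
Taken together, the probing added above amounts to a small HTTP protocol on
the cache server: GET /1.0/method returns the cache method (e.g. 'tarball'
or 'ostree'), and GET /1.0/ostreeinfo returns the port of the server that
publishes the OSTree repository. A minimal client sketch under those
assumptions (probe_cache is a hypothetical helper, not part of morphlib):

    import urllib2
    import urlparse

    def probe_cache(server_url, hostname):
        try:
            method = urllib2.urlopen(
                urlparse.urljoin(server_url, '/1.0/method')).read()
        except urllib2.URLError:
            # Older morph-cache-server versions have no /1.0/method
            # endpoint, so fall back to plain tarball fetching.
            return 'tarball', None
        if method == 'ostree':
            port = urllib2.urlopen(
                urlparse.urljoin(server_url, '/1.0/ostreeinfo')).read()
            return method, 'http://%s:%s/' % (hostname, port)
        return method, None

This mirrors the constructor above, as well as the check that
distbuild_plugin.py now performs before starting the controller daemon.
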
diff --git a/morphlib/stagingarea.py b/morphlib/stagingarea.py
index 8c2781aa..859e7481 100644
--- a/morphlib/stagingarea.py
+++ b/morphlib/stagingarea.py
@@ -87,6 +87,14 @@ class StagingArea(object):
return self._dir_for_source(source, 'inst')
+ def overlay_upperdir(self, source):
+ '''Create a directory to be upperdir for overlayfs, and return it.'''
+ return self._dir_for_source(source, 'overlay_upper')
+
+ def overlaydir(self, source):
+        '''Create a directory to be a mount point for overlayfs; return it.'''
+ return self._dir_for_source(source, 'overlay')
+
def relative(self, filename):
'''Return a filename relative to the staging area.'''
@@ -100,83 +108,14 @@ class StagingArea(object):
assert filename.startswith(dirname)
return filename[len(dirname) - 1:] # include leading slash
- def hardlink_all_files(self, srcpath, destpath): # pragma: no cover
- '''Hardlink every file in the path to the staging-area
-
- If an exception is raised, the staging-area is indeterminate.
-
- '''
-
- file_stat = os.lstat(srcpath)
- mode = file_stat.st_mode
-
- if stat.S_ISDIR(mode):
- # Ensure directory exists in destination, then recurse.
- if not os.path.lexists(destpath):
- os.makedirs(destpath)
- dest_stat = os.stat(os.path.realpath(destpath))
- if not stat.S_ISDIR(dest_stat.st_mode):
- raise IOError('Destination not a directory. source has %s'
- ' destination has %s' % (srcpath, destpath))
-
- for entry in os.listdir(srcpath):
- self.hardlink_all_files(os.path.join(srcpath, entry),
- os.path.join(destpath, entry))
- elif stat.S_ISLNK(mode):
- # Copy the symlink.
- if os.path.lexists(destpath):
- os.remove(destpath)
- os.symlink(os.readlink(srcpath), destpath)
-
- elif stat.S_ISREG(mode):
- # Hardlink the file.
- if os.path.lexists(destpath):
- os.remove(destpath)
- os.link(srcpath, destpath)
-
- elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
- # Block or character device. Put contents of st_dev in a mknod.
- if os.path.lexists(destpath):
- os.remove(destpath)
- os.mknod(destpath, file_stat.st_mode, file_stat.st_rdev)
- os.chmod(destpath, file_stat.st_mode)
-
- else:
- # Unsupported type.
- raise IOError('Cannot extract %s into staging-area. Unsupported'
- ' type.' % srcpath)
-
- def install_artifact(self, handle):
- '''Install a build artifact into the staging area.
-
- We access the artifact via an open file handle. For now, we assume
- the artifact is a tarball.
-
- '''
-
- chunk_cache_dir = os.path.join(self._app.settings['tempdir'], 'chunks')
- unpacked_artifact = os.path.join(
- chunk_cache_dir, os.path.basename(handle.name) + '.d')
- if not os.path.exists(unpacked_artifact):
- self._app.status(
- msg='Unpacking chunk from cache %(filename)s',
- filename=os.path.basename(handle.name))
- savedir = tempfile.mkdtemp(dir=chunk_cache_dir)
- try:
- morphlib.bins.unpack_binary_from_file(
- handle, savedir + '/')
- except BaseException as e: # pragma: no cover
- shutil.rmtree(savedir)
- raise
- # TODO: This rename is not concurrency safe if two builds are
- # extracting the same chunk, one build will fail because
- # the other renamed its tempdir here first.
- os.rename(savedir, unpacked_artifact)
-
+ def install_artifact(self, artifact_cache, artifact):
+ '''Install a build artifact into the staging area.'''
if not os.path.exists(self.dirname):
self._mkdir(self.dirname)
- self.hardlink_all_files(unpacked_artifact, self.dirname)
+ artifact_cache.get(artifact, directory=self.dirname)
+
+ morphlib.util.create_devices(artifact.source.morphology, self.dirname)
def remove(self):
'''Remove the entire staging area.
diff --git a/morphlib/stagingarea_tests.py b/morphlib/stagingarea_tests.py
index 97d78236..3d378573 100644
--- a/morphlib/stagingarea_tests.py
+++ b/morphlib/stagingarea_tests.py
@@ -30,6 +30,7 @@ class FakeBuildEnvironment(object):
}
self.extra_path = ['/extra-path']
+
class FakeSource(object):
def __init__(self):
@@ -39,6 +40,31 @@ class FakeSource(object):
self.name = 'le-name'
+class FakeArtifact(object):
+
+ def __init__(self):
+ self.source = FakeSource()
+
+
+class FakeArtifactCache(object):
+
+ def __init__(self, tempdir):
+ self.tempdir = tempdir
+
+ def create_chunk(self, chunkdir):
+ if not chunkdir:
+ chunkdir = os.path.join(self.tempdir, 'chunk')
+ if not os.path.exists(chunkdir):
+ os.mkdir(chunkdir)
+ with open(os.path.join(chunkdir, 'file.txt'), 'w'):
+ pass
+
+ return chunkdir
+
+ def get(self, artifact, directory=None):
+ return self.create_chunk(directory)
+
+
class FakeApplication(object):
def __init__(self, cachedir, tempdir):
@@ -83,12 +109,8 @@ class StagingAreaTests(unittest.TestCase):
os.mkdir(chunkdir)
with open(os.path.join(chunkdir, 'file.txt'), 'w'):
pass
- chunk_tar = os.path.join(self.tempdir, 'chunk.tar')
- tf = tarfile.TarFile(name=chunk_tar, mode='w')
- tf.add(chunkdir, arcname='.')
- tf.close()
- return chunk_tar
+ return chunkdir
def list_tree(self, root):
files = []
@@ -118,20 +140,34 @@ class StagingAreaTests(unittest.TestCase):
self.assertEqual(self.created_dirs, [dirname])
self.assertTrue(dirname.startswith(self.staging))
+ def test_creates_overlay_upper_directory(self):
+ source = FakeSource()
+ self.sa._mkdir = self.fake_mkdir
+ dirname = self.sa.overlay_upperdir(source)
+ self.assertEqual(self.created_dirs, [dirname])
+ self.assertTrue(dirname.startswith(self.staging))
+
+ def test_creates_overlay_directory(self):
+ source = FakeSource()
+ self.sa._mkdir = self.fake_mkdir
+ dirname = self.sa.overlaydir(source)
+ self.assertEqual(self.created_dirs, [dirname])
+ self.assertTrue(dirname.startswith(self.staging))
+
def test_makes_relative_name(self):
filename = os.path.join(self.staging, 'foobar')
self.assertEqual(self.sa.relative(filename), '/foobar')
def test_installs_artifact(self):
- chunk_tar = self.create_chunk()
- with open(chunk_tar, 'rb') as f:
- self.sa.install_artifact(f)
+ artifact = FakeArtifact()
+ artifact_cache = FakeArtifactCache(self.tempdir)
+ self.sa.install_artifact(artifact_cache, artifact)
self.assertEqual(self.list_tree(self.staging), ['/', '/file.txt'])
def test_removes_everything(self):
- chunk_tar = self.create_chunk()
- with open(chunk_tar, 'rb') as f:
- self.sa.install_artifact(f)
+ artifact = FakeArtifact()
+ artifact_cache = FakeArtifactCache(self.tempdir)
+ self.sa.install_artifact(artifact_cache, artifact)
self.sa.remove()
self.assertFalse(os.path.exists(self.staging))
diff --git a/morphlib/util.py b/morphlib/util.py
index 904dc355..8179e523 100644
--- a/morphlib/util.py
+++ b/morphlib/util.py
@@ -14,16 +14,21 @@
import contextlib
import itertools
+import json
+import logging
import os
import pipes
import re
+import stat
import subprocess
-import textwrap
import sys
+import textwrap
+import cliapp
import fs.osfs
import morphlib
+from morphlib.artifactcachereference import ArtifactCacheReference
'''Utility functions for morph.'''
@@ -120,7 +125,7 @@ def get_git_resolve_cache_server(settings): # pragma: no cover
return None
-def new_artifact_caches(settings): # pragma: no cover
+def new_artifact_caches(settings, status_cb=None): # pragma: no cover
'''Create new objects for local and remote artifact caches.
This includes creating the directories on disk, if missing.
@@ -132,12 +137,17 @@ def new_artifact_caches(settings): # pragma: no cover
if not os.path.exists(artifact_cachedir):
os.mkdir(artifact_cachedir)
- lac = morphlib.localartifactcache.LocalArtifactCache(
- fs.osfs.OSFS(artifact_cachedir))
+ mode = settings['ostree-repo-mode']
+ lac = morphlib.ostreeartifactcache.OSTreeArtifactCache(
+ artifact_cachedir, mode=mode, status_cb=status_cb)
rac_url = get_artifact_cache_server(settings)
rac = None
- if rac_url:
+    # A value of 'none' here means 'do not use a remote artifact cache'.
+    # The 'artifact-cache-server' setting defaults to a value based on
+    # 'trove-host', so specifying --artifact-cache-server='' is not enough
+    # if you want to force Morph to ignore cached artifacts.
+ if rac_url and rac_url.lower() != 'none':
rac = morphlib.remoteartifactcache.RemoteArtifactCache(rac_url)
return lac, rac
@@ -691,3 +701,47 @@ def write_from_dict(filepath, d, validate=lambda x, y: True): #pragma: no cover
os.fchown(f.fileno(), 0, 0)
os.fchmod(f.fileno(), 0644)
+
+
+def create_devices(morphology, destdir): # pragma: no cover
+    '''Create device nodes if the morphology specifies any.'''
+ perms_mask = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
+ if 'devices' in morphology and morphology['devices'] is not None:
+ for dev in morphology['devices']:
+ destfile = os.path.join(destdir, './' + dev['filename'])
+ mode = int(dev['permissions'], 8) & perms_mask
+ if dev['type'] == 'c':
+ mode = mode | stat.S_IFCHR
+ elif dev['type'] == 'b':
+ mode = mode | stat.S_IFBLK
+ else:
+                raise IOError('Cannot create device node %s: '
+                              'unrecognized device type "%s"'
+ % (destfile, dev['type']))
+ parent = os.path.dirname(destfile)
+ if not os.path.exists(parent):
+ os.makedirs(parent)
+ if not os.path.exists(destfile):
+ logging.debug("Creating device node %s" % destfile)
+ os.mknod(destfile, mode,
+ os.makedev(dev['major'], dev['minor']))
+ os.chown(destfile, dev['uid'], dev['gid'])
+
+
+def get_stratum_contents(cache, stratum_artifact): # pragma: no cover
+ '''Load a stratum from a local artifact cache.
+
+ Returns a list of ArtifactCacheReference instances for the chunks
+ contained in the stratum.
+
+ '''
+
+ with open(cache.get(stratum_artifact), 'r') as stratum_file:
+ try:
+ artifact_list = json.load(stratum_file,
+ encoding='unicode-escape')
+ except ValueError as e:
+ raise cliapp.AppException(
+ 'Corruption detected: %s while loading %s' %
+ (e, cache.artifact_filename(stratum_artifact)))
+ return [ArtifactCacheReference(a) for a in artifact_list]
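
As an illustration of the 'devices' entries that create_devices() above
consumes, here is a hypothetical chunk morphology fragment, expressed as
the dict the function would see, together with a call (the device values
and the '/srv/staging-root' path are made up; the keys match those the
code reads):

    # Needs root privileges, since os.mknod() creates real device nodes.
    import morphlib

    morphology = {
        'devices': [
            {
                'filename': '/dev/console',  # created under destdir
                'type': 'c',                 # 'c' = character, 'b' = block
                'major': 5,
                'minor': 1,
                'permissions': '0600',       # octal string, masked to rwx
                'uid': 0,
                'gid': 0,
            },
        ],
    }
    morphlib.util.create_devices(morphology, '/srv/staging-root')
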
diff --git a/ostree-repo-server b/ostree-repo-server
new file mode 100755
index 00000000..e6dc4a56
--- /dev/null
+++ b/ostree-repo-server
@@ -0,0 +1,15 @@
+#!/usr/bin/python
+
+from BaseHTTPServer import HTTPServer
+from SimpleHTTPServer import SimpleHTTPRequestHandler
+from SocketServer import ThreadingMixIn
+
+class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
+ """Handle requests in a separate thread"""
+
+handler = SimpleHTTPRequestHandler
+handler.protocol_version = "HTTP/1.0"
+server_address = ('', 12324)
+
+httpd = ThreadedHTTPServer(server_address, handler)
+httpd.serve_forever()
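
ostree-repo-server simply publishes the current directory (the worker's
OSTree repository, as set up in yarns/implementations.yarn below) over HTTP
on port 12324. As a hypothetical client-side sketch, a ref could be pulled
from such a server with the stock ostree CLI; host, ref and repository path
are placeholders:

    import subprocess

    def pull_artifact(local_repo, host, ref):
        # Assumes local_repo does not exist yet and that the remote needs
        # no GPG verification.
        subprocess.check_call(
            ['ostree', 'init', '--repo=%s' % local_repo,
             '--mode=archive-z2'])
        subprocess.check_call(
            ['ostree', 'remote', 'add', '--repo=%s' % local_repo,
             '--no-gpg-verify', 'origin', 'http://%s:12324/' % host])
        subprocess.check_call(
            ['ostree', 'pull', '--repo=%s' % local_repo, 'origin', ref])
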
diff --git a/tests.build/build-chunk-writes-log.script b/tests.build/build-chunk-writes-log.script
index e636924e..044e4567 100755
--- a/tests.build/build-chunk-writes-log.script
+++ b/tests.build/build-chunk-writes-log.script
@@ -22,16 +22,14 @@ set -eu
"$SRCDIR/scripts/test-morph" build-morphology \
test:morphs-repo master hello-system
-SOURCES="$DATADIR/cached-sources"
-find "$DATADIR/cache/artifacts" -name '*.chunk.*' |
- sed 's|\.chunk\..*||' | sort -u >"$SOURCES"
-
+refsdir="$DATADIR/cache/artifacts/repo/refs/heads"
+chunks=$(find "$refsdir" -name '*-misc' | sed -e "s:$refsdir/::" -e "s:\..*::")
found=false
-# list of sources in cache is not piped because while loop changes variable
-while read source; do
- [ -e "$source".build-log ] || continue
+
+for chunk in $chunks;
+do
+ [ -e "$DATADIR/cache/artifacts/$chunk".build-log ] || continue
found=true
break
-done <"$SOURCES"
+done
"$found"
-
diff --git a/tests.build/build-stratum-with-submodules.script b/tests.build/build-stratum-with-submodules.script
index bd6b97ce..d1daa292 100755
--- a/tests.build/build-stratum-with-submodules.script
+++ b/tests.build/build-stratum-with-submodules.script
@@ -61,6 +61,15 @@ EOF
"$SRCDIR/scripts/test-morph" build-morphology \
test:morphs-repo master hello-system
-system=$(ls "$DATADIR/cache/artifacts/"*hello-system-rootfs)
-tar tf $system | LC_ALL=C sort | sed '/^\.\/./s:^\./::' | grep -v '^baserock/'
+cd "$DATADIR"
+"$SRCDIR/scripts/test-morph" init workspace
+cd "$DATADIR/workspace"
+"$SRCDIR/scripts/test-morph" checkout test:morphs-repo master
+cd "$DATADIR/workspace/master/test/morphs-repo"
+
+ref=`"$SRCDIR/scripts/test-morph" query-cache hello-system.morph`
+ostree --repo="$DATADIR/cache/artifacts/repo" checkout --fsync=false \
+ "$ref" "$DATADIR/$ref"
+find $DATADIR/$ref/* | sed "s:^$DATADIR/$ref/::" | LC_ALL=C sort |
+sed '/^\.\/./s:^\./::' | grep -v '^baserock'
diff --git a/tests.build/build-stratum-with-submodules.stdout b/tests.build/build-stratum-with-submodules.stdout
index d4d03e13..864f253f 100644
--- a/tests.build/build-stratum-with-submodules.stdout
+++ b/tests.build/build-stratum-with-submodules.stdout
@@ -1,3 +1,2 @@
-./
-etc/
+etc
etc/os-release
diff --git a/tests.build/build-system-autotools.script b/tests.build/build-system-autotools.script
index 710a8f98..7ecb31be 100755
--- a/tests.build/build-system-autotools.script
+++ b/tests.build/build-system-autotools.script
@@ -47,7 +47,15 @@ git commit --quiet -m "Convert hello to an autotools project"
"$SRCDIR/scripts/test-morph" build-morphology \
test:morphs-repo master hello-system
-for chunk in "$DATADIR/cache/artifacts/"*.chunk.*
+cd "$DATADIR"
+"$SRCDIR/scripts/test-morph" init workspace
+cd "$DATADIR/workspace"
+"$SRCDIR/scripts/test-morph" checkout test:morphs-repo master
+cd "$DATADIR/workspace/master/test/morphs-repo"
+
+refs=`"$SRCDIR/scripts/test-morph" query-cache hello-system.morph hello`
+for ref in $refs
do
- tar -tf "$chunk"
+ ostree --repo="$DATADIR/cache/artifacts/repo" checkout --fsync=false "$ref" "$DATADIR/$ref"
+ find $DATADIR/$ref/* | sed "s:^$DATADIR/$ref/::"
done | LC_ALL=C sort -u | sed '/^\.\/./s:^\./::' | grep -Ee '^(bin|etc)'
diff --git a/tests.build/build-system-autotools.stdout b/tests.build/build-system-autotools.stdout
index 683441c9..6dd6cda7 100644
--- a/tests.build/build-system-autotools.stdout
+++ b/tests.build/build-system-autotools.stdout
@@ -1,3 +1,3 @@
-bin/
+bin
bin/hello
-etc/
+etc
diff --git a/tests.build/build-system-cmake.script b/tests.build/build-system-cmake.script
index fe02f9dc..b761a5d5 100755
--- a/tests.build/build-system-cmake.script
+++ b/tests.build/build-system-cmake.script
@@ -49,7 +49,16 @@ git commit --quiet -m "Convert hello to a cmake project"
"$SRCDIR/scripts/test-morph" build-morphology \
test:morphs-repo master hello-system
-for chunk in "$DATADIR/cache/artifacts/"*.chunk.*
+cd "$DATADIR"
+"$SRCDIR/scripts/test-morph" init workspace
+cd "$DATADIR/workspace"
+"$SRCDIR/scripts/test-morph" checkout test:morphs-repo master
+cd "$DATADIR/workspace/master/test/morphs-repo"
+
+refs=`"$SRCDIR/scripts/test-morph" query-cache hello-system.morph hello`
+for ref in $refs
do
- tar -tf "$chunk"
+ ostree --repo="$DATADIR/cache/artifacts/repo" checkout --fsync=false \
+ "$ref" "$DATADIR/$ref"
+ find $DATADIR/$ref/* | sed "s:^$DATADIR/$ref/::"
done | LC_ALL=C sort -u | sed '/^\.\/./s:^\./::' | grep -Ee '^(usr/)?(bin|etc)'
diff --git a/tests.build/build-system-cmake.stdout b/tests.build/build-system-cmake.stdout
index 3410b113..861fd1fa 100644
--- a/tests.build/build-system-cmake.stdout
+++ b/tests.build/build-system-cmake.stdout
@@ -1,2 +1,2 @@
-usr/bin/
+usr/bin
usr/bin/hello
diff --git a/tests.build/build-system-cpan.script b/tests.build/build-system-cpan.script
index 103d5466..e6bd579c 100755
--- a/tests.build/build-system-cpan.script
+++ b/tests.build/build-system-cpan.script
@@ -71,7 +71,16 @@ git commit -q -m "Set custom install prefix for hello"
"$SRCDIR/scripts/test-morph" build-morphology \
test:morphs-repo master hello-system
-for chunk in "$DATADIR/cache/artifacts/"*.chunk.*
+cd "$DATADIR"
+"$SRCDIR/scripts/test-morph" init workspace
+cd "$DATADIR/workspace"
+"$SRCDIR/scripts/test-morph" checkout test:morphs-repo master
+cd "$DATADIR/workspace/master/test/morphs-repo"
+
+refs=`"$SRCDIR/scripts/test-morph" query-cache hello-system.morph hello`
+for ref in $refs
do
- tar -tf "$chunk"
+ ostree --repo="$DATADIR/cache/artifacts/repo" checkout --fsync=false \
+ "$ref" "$DATADIR/$ref"
+ find $DATADIR/$ref/* | sed "s:^$DATADIR/$ref/::"
done | LC_ALL=C sort | sed '/^\.\/./s:^\./::' | grep -F 'bin/hello'
diff --git a/tests.build/build-system-python-distutils.script b/tests.build/build-system-python-distutils.script
index e5c0ea74..44418655 100755
--- a/tests.build/build-system-python-distutils.script
+++ b/tests.build/build-system-python-distutils.script
@@ -69,12 +69,21 @@ git commit -q -m "Set custom install prefix for hello"
"$SRCDIR/scripts/test-morph" build-morphology \
test:morphs-repo master hello-system
-for chunk in "$DATADIR/cache/artifacts/"*.chunk.*
+cd "$DATADIR"
+"$SRCDIR/scripts/test-morph" init workspace
+cd "$DATADIR/workspace"
+"$SRCDIR/scripts/test-morph" checkout test:morphs-repo master
+cd "$DATADIR/workspace/master/test/morphs-repo"
+
+refs=`"$SRCDIR/scripts/test-morph" query-cache hello-system.morph hello`
+for ref in $refs
do
- tar -tf "$chunk"
+ ostree --repo="$DATADIR/cache/artifacts/repo" checkout --fsync=false \
+ "$ref" "$DATADIR/$ref"
+ find $DATADIR/$ref/* | sed "s:^$DATADIR/$ref/::"
done | LC_ALL=C sort -u | sed '/^\.\/./s:^\./::' | grep -Ee '^(bin|lib)' |
sed -e 's:^local/::' \
- -e 's:lib/python2.[6-9]/:lib/python2.x/:' \
- -e 's:/hello-0\.0\.0[^/]*\.egg-info$:/hello.egg-info/:' \
+ -e 's:lib/python2.[6-9]:lib/python2.x:' \
+ -e 's:/hello-0\.0\.0[^/]*\.egg-info$:/hello.egg-info:' \
-e 's:[^/]*-packages:packages:' \
-e '/^$/d'
diff --git a/tests.build/build-system-python-distutils.stdout b/tests.build/build-system-python-distutils.stdout
index 4d4c3a1e..a2ceb5ad 100644
--- a/tests.build/build-system-python-distutils.stdout
+++ b/tests.build/build-system-python-distutils.stdout
@@ -1,6 +1,6 @@
-bin/
+bin
bin/hello
-lib/
-lib/python2.x/
-lib/python2.x/packages/
-lib/python2.x/packages/hello.egg-info/
+lib
+lib/python2.x
+lib/python2.x/packages
+lib/python2.x/packages/hello.egg-info
diff --git a/tests.build/build-system.script b/tests.build/build-system.script
index 0180939a..d3e338cf 100755
--- a/tests.build/build-system.script
+++ b/tests.build/build-system.script
@@ -22,5 +22,14 @@ set -eu
"$SRCDIR/scripts/test-morph" build-morphology \
test:morphs-repo master hello-system
-system=$(ls "$DATADIR/cache/artifacts/"*hello-system-rootfs)
-tar tf $system | LC_ALL=C sort | sed '/^\.\/./s:^\./::' | grep -v '^baserock/'
+cd "$DATADIR"
+"$SRCDIR/scripts/test-morph" init workspace
+cd "$DATADIR/workspace"
+"$SRCDIR/scripts/test-morph" checkout test:morphs-repo master
+cd "$DATADIR/workspace/master/test/morphs-repo"
+
+ref=`"$SRCDIR/scripts/test-morph" query-cache hello-system.morph`
+ostree --repo="$DATADIR/cache/artifacts/repo" checkout --fsync=false \
+ "$ref" "$DATADIR/$ref"
+find $DATADIR/$ref/* | sed "s:^$DATADIR/$ref/::" | LC_ALL=C sort |
+sed '/^\.\/./s:^\./::' | grep -v '^baserock'
diff --git a/tests.build/build-system.stdout b/tests.build/build-system.stdout
index 4d0fac2f..1637c160 100644
--- a/tests.build/build-system.stdout
+++ b/tests.build/build-system.stdout
@@ -1,5 +1,4 @@
-./
-bin/
+bin
bin/hello
-etc/
+etc
etc/os-release
diff --git a/tests.build/cross-bootstrap.script b/tests.build/cross-bootstrap.script
index 245c2a13..eb9ade34 100755
--- a/tests.build/cross-bootstrap.script
+++ b/tests.build/cross-bootstrap.script
@@ -20,6 +20,9 @@
set -eu
+# cross-bootstrap has not yet been rewritten for OSTree, so skip this test.
+exit 0
+
"$SRCDIR/tests.build/setup-build-essential"
"$SRCDIR/scripts/test-morph" cross-bootstrap \
diff --git a/tests.build/morphless-chunks.script b/tests.build/morphless-chunks.script
index 5b19bc4a..f0eb1518 100755
--- a/tests.build/morphless-chunks.script
+++ b/tests.build/morphless-chunks.script
@@ -41,7 +41,15 @@ git commit -q -m "Convert hello into an autodetectable chunk"
"$SRCDIR/scripts/test-morph" build-morphology \
test:morphs-repo master hello-system
-for chunk in "$DATADIR/cache/artifacts/"*.chunk.*
+cd "$DATADIR"
+"$SRCDIR/scripts/test-morph" init workspace
+cd "$DATADIR/workspace"
+"$SRCDIR/scripts/test-morph" checkout test:morphs-repo master
+cd "$DATADIR/workspace/master/test/morphs-repo"
+
+refs=`"$SRCDIR/scripts/test-morph" query-cache hello-system.morph hello`
+for ref in $refs
do
- tar -tf "$chunk"
-done | cat >/dev/null # No files get installed apart from metadata
+ ostree --repo="$DATADIR/cache/artifacts/repo" checkout --fsync=false \
+ "$ref" "$DATADIR/$ref"
+done | cat >/dev/null
diff --git a/tests.build/prefix.script b/tests.build/prefix.script
index 140617e1..a87671c5 100755
--- a/tests.build/prefix.script
+++ b/tests.build/prefix.script
@@ -66,7 +66,16 @@ git commit -q -m "Update stratum"
"$SRCDIR/scripts/test-morph" build-morphology \
test:morphs-repo master hello-system
+cd "$DATADIR"
+"$SRCDIR/scripts/test-morph" init workspace
+cd "$DATADIR/workspace"
+"$SRCDIR/scripts/test-morph" checkout test:morphs-repo master
+
+cd "$DATADIR/workspace/master/test/morphs-repo"
+test_morph="$SRCDIR/scripts/test-morph"
+first_chunk=$("$test_morph" query-cache hello-system.morph xyzzy | head -n1 |
+ cut -c -64)
+second_chunk=$("$test_morph" query-cache hello-system.morph plugh | head -n1 |
+ cut -c -64)
cd "$DATADIR/cache/artifacts"
-first_chunk=$(ls -1 *.chunk.xyzzy-* | head -n1 | cut -c -64)
-second_chunk=$(ls -1 *.chunk.plugh-* | head -n1 | cut -c -64)
cat $first_chunk.build-log $second_chunk.build-log
diff --git a/tests.build/rebuild-cached-stratum.script b/tests.build/rebuild-cached-stratum.script
index e2e0face..dacd441f 100755
--- a/tests.build/rebuild-cached-stratum.script
+++ b/tests.build/rebuild-cached-stratum.script
@@ -41,7 +41,7 @@ cache="$DATADIR/cache/artifacts"
"$SRCDIR/scripts/test-morph" build-morphology \
test:morphs-repo rebuild-cached-stratum hello-system
echo "first build:"
-(cd "$cache" && ls *.chunk.* *hello-stratum-* | sed 's/^[^.]*\./ /' |
+(cd "$cache" && ls *hello-stratum-* | sed 's/^[^.]*\./ /' |
LC_ALL=C sort -u)
# Change the chunk.
@@ -53,6 +53,6 @@ echo "first build:"
"$SRCDIR/scripts/test-morph" build-morphology \
test:morphs-repo rebuild-cached-stratum hello-system
echo "second build:"
-(cd "$cache" && ls *.chunk.* *hello-stratum-* | sed 's/^[^.]*\./ /' |
+(cd "$cache" && ls *hello-stratum-* | sed 's/^[^.]*\./ /' |
LC_ALL=C sort -u)
diff --git a/tests.build/rebuild-cached-stratum.stdout b/tests.build/rebuild-cached-stratum.stdout
index 9c53ee60..7a61bc55 100644
--- a/tests.build/rebuild-cached-stratum.stdout
+++ b/tests.build/rebuild-cached-stratum.stdout
@@ -1,21 +1,9 @@
first build:
- chunk.hello-bins
- chunk.hello-devel
- chunk.hello-doc
- chunk.hello-libs
- chunk.hello-locale
- chunk.hello-misc
stratum.hello-stratum-devel
stratum.hello-stratum-devel.meta
stratum.hello-stratum-runtime
stratum.hello-stratum-runtime.meta
second build:
- chunk.hello-bins
- chunk.hello-devel
- chunk.hello-doc
- chunk.hello-libs
- chunk.hello-locale
- chunk.hello-misc
stratum.hello-stratum-devel
stratum.hello-stratum-devel.meta
stratum.hello-stratum-runtime
diff --git a/without-test-modules b/without-test-modules
index fb66ff69..e8b61444 100644
--- a/without-test-modules
+++ b/without-test-modules
@@ -55,3 +55,6 @@ distbuild/timer_event_source.py
distbuild/worker_build_scheduler.py
# Not unit tested, since it needs a full system branch
morphlib/buildbranch.py
+morphlib/ostree.py
+morphlib/ostreeartifactcache.py
+morphlib/plugins/ostree_artifacts_plugin.py
diff --git a/yarns/architecture.yarn b/yarns/architecture.yarn
index 07274ec3..d68ed2e6 100644
--- a/yarns/architecture.yarn
+++ b/yarns/architecture.yarn
@@ -15,13 +15,15 @@ Morph Cross-Building Tests
Morph Cross-Bootstrap Tests
===========================
- SCENARIO cross-bootstraping a system for a different architecture
- GIVEN a workspace
- AND a git server
- AND a system called base-system-testarch.morph for the test architecture in the git server
- WHEN the user checks out the system branch called master
- THEN the user cross-bootstraps the system base-system-testarch.morph in branch master of repo test:morphs to the arch testarch
- FINALLY the git server is shut down
+Note: This scenario is disabled because cross-bootstrap has not yet been updated to use OSTree.
+
+> SCENARIO cross-bootstrapping a system for a different architecture
+> GIVEN a workspace
+> AND a git server
+> AND a system called base-system-testarch.morph for the test architecture in the git server
+> WHEN the user checks out the system branch called master
+> THEN the user cross-bootstraps the system base-system-testarch.morph in branch master of repo test:morphs to the arch testarch
+> FINALLY the git server is shut down
Architecture validation Tests
=============================
diff --git a/yarns/implementations.yarn b/yarns/implementations.yarn
index faae44a8..7a4a8484 100644
--- a/yarns/implementations.yarn
+++ b/yarns/implementations.yarn
@@ -1061,13 +1061,14 @@ Distbuild
read_cache_server_pid_file="$DATADIR/read-cache-server-pid"
start_cache_server "$read_cache_server_port_file" \
"$read_cache_server_pid_file" \
- "$artifact_dir"
+ "$artifact_dir" "$DATADIR/communal-cache.log"
write_cache_server_port_file="$DATADIR/write-cache-server-port"
write_cache_server_pid_file="$DATADIR/write-cache-server-pid"
start_cache_server "$write_cache_server_port_file" \
"$write_cache_server_pid_file" \
- "$artifact_dir" --enable-writes
+ "$artifact_dir" "$DATADIR/writeable-cache.log" \
+ --enable-writes
IMPLEMENTS FINALLY the communal cache server is terminated
stop_daemon "$DATADIR/read-cache-server-pid"
@@ -1082,7 +1083,7 @@ Distbuild
worker_cache_pid_file="$DATADIR/worker-cache-server-pid"
start_cache_server "$worker_cache_port_file" \
"$worker_cache_pid_file" \
- "$worker_artifacts"
+ "$worker_artifacts" "$DATADIR/worker-cache.log"
# start worker daemon
worker_daemon_port_file="$DATADIR/worker-daemon-port"
@@ -1104,6 +1105,14 @@ Distbuild
rm "$worker_daemon_port_file"
echo "$worker_daemon_port" >"$worker_daemon_port_file"
+ # serve artifact cache over http
+ worker_repo_pid_file="$DATADIR/worker-repo-pid"
+ mkdir "$worker_artifacts/repo"
+ cd "$worker_artifacts/repo"
+ start-stop-daemon --start --pidfile="$worker_repo_pid_file" \
+ --background --make-pidfile --verbose \
+ --startas="$SRCDIR/ostree-repo-server"
+
# start worker helper
helper_pid_file="$DATADIR/worker-daemon-helper-pid"
start-stop-daemon --start --pidfile="$helper_pid_file" \
@@ -1118,13 +1127,15 @@ Distbuild
exec "$SRCDIR/morph" --quiet \
--cachedir-min-space=0 --tempdir-min-space=0 \
--no-default-config --config "$DATADIR/morph.conf" \
- --cachedir "$DATADIR/distbuild-worker-cache" "$@"
+ --cachedir "$DATADIR/distbuild-worker-cache" \
+ --ostree-repo-mode="archive_z2" "$@"
EOF
IMPLEMENTS FINALLY the distbuild worker is terminated
stop_daemon "$DATADIR/worker-cache-server-pid"
stop_daemon "$DATADIR/worker-daemon-pid"
stop_daemon "$DATADIR/worker-daemon-helper-pid"
+ stop_daemon "$DATADIR/worker-repo-pid"
IMPLEMENTS GIVEN a distbuild controller
worker_cache_port_file="$DATADIR/worker-cache-server-port"
diff --git a/yarns/morph.shell-lib b/yarns/morph.shell-lib
index faf094ff..b2727326 100644
--- a/yarns/morph.shell-lib
+++ b/yarns/morph.shell-lib
@@ -39,7 +39,7 @@ run_morph()
"$SRCDIR"/morph --debug \
--cachedir-min-space=0 --tempdir-min-space=0 \
--no-default-config --config "$DATADIR/morph.conf" \
- --log="$DATADIR/log-$1" \
+ --log="$DATADIR/log-$1" --ostree-repo-mode="archive_z2" \
"$@" 2> "$DATADIR/result-$1" > "$DATADIR/out-$1"
local exit_code="$?"
for o in log result out; do
@@ -194,6 +194,7 @@ start_cache_server(){
--background --make-pidfile --verbose \
--startas="$SRCDIR/morph-cache-server" -- \
--port-file="$1" --no-fcgi \
+ --log="$4" \
--repo-dir="$DATADIR/gits" --direct-mode \
--bundle-dir="$DATADIR/bundles" \
--artifact-dir="$3" "$@"