-rw-r--r--  NEWS                                        |  14
-rw-r--r--  distbuild/__init__.py                       |   3
-rw-r--r--  distbuild/connection_machine.py             |  61
-rw-r--r--  distbuild/worker_build_scheduler.py         |   3
-rw-r--r--  morphlib/buildcommand.py                    | 102
-rwxr-xr-x  morphlib/exts/initramfs.write               |   4
-rwxr-xr-x  morphlib/exts/kvm.check                     |   2
-rwxr-xr-x  morphlib/exts/openstack.check               |   2
-rwxr-xr-x  morphlib/exts/rawdisk.check                 |  31
-rwxr-xr-x  morphlib/exts/virtualbox-ssh.check          |   2
-rw-r--r--  morphlib/plugins/cross-bootstrap_plugin.py  |  14
-rw-r--r--  morphlib/plugins/deploy_plugin.py           |   1
-rw-r--r--  morphlib/remoteartifactcache.py             |  11
-rw-r--r--  morphlib/writeexts.py                       |  13
-rw-r--r--  yarns/deployment.yarn                       |  83
-rw-r--r--  yarns/implementations.yarn                  |  43
16 files changed, 324 insertions, 65 deletions
diff --git a/NEWS b/NEWS
index ba5d8274..cb420f2e 100644
--- a/NEWS
+++ b/NEWS
@@ -4,6 +4,20 @@ NEWS for Morph
This file contains high-level summaries of user-visible changes in
each Morph release.
+Version 14.24, released 2014-06-13
+----------------------------------
+
+* rawdisk deployments check that they have the btrfs module loaded first
+* distbuild should busy-wait less
+* fetching of artifacts should be atomic, so a failure to fetch the
+ metadata of an artifact doesn't confuse the build system when we have
+ the chunk, but no metadata
+* `morph deploy` now defaults to `--no-git-update`
+* `morph gc` now cleans up failed deployments, so they aren't left around
+ if morph terminates uncleanly
+* `morph edit` now only takes the name of the chunk, rather than the
+ name of the system and stratum that chunk is in
+
Version 14.23, released 2014-06-06
----------------------------------
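To make the CLI-facing entries above concrete, here is a rough sketch of the changed invocations; the system, stratum, chunk and cluster names are hypothetical examples, not taken from this commit:

    # `morph edit` before 14.24: the system and stratum had to be named too
    morph edit devel-system-x86_64-generic core linux
    # `morph edit` from 14.24 on: the chunk name alone is enough
    morph edit linux

    # `morph deploy` now behaves as if --no-git-update were always passed,
    # and `morph gc` also removes the leftovers of deployments that failed
    morph deploy cluster.morph
    morph gc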
diff --git a/distbuild/__init__.py b/distbuild/__init__.py
index 7274f6a9..fa0c3831 100644
--- a/distbuild/__init__.py
+++ b/distbuild/__init__.py
@@ -37,7 +37,8 @@ from json_router import JsonRouter
from helper_router import (HelperRouter, HelperRequest, HelperOutput,
HelperResult)
from initiator_connection import (InitiatorConnection, InitiatorDisconnect)
-from connection_machine import ConnectionMachine, Reconnect, StopConnecting
+from connection_machine import (ConnectionMachine, InitiatorConnectionMachine,
+ Reconnect, StopConnecting)
from worker_build_scheduler import (WorkerBuildQueuer,
WorkerConnection,
WorkerBuildRequest,
diff --git a/distbuild/connection_machine.py b/distbuild/connection_machine.py
index 648ce35a..24eec8e9 100644
--- a/distbuild/connection_machine.py
+++ b/distbuild/connection_machine.py
@@ -30,12 +30,13 @@ class Reconnect(object):
class StopConnecting(object):
- pass
-
+ def __init__(self, exception=None):
+ self.exception = exception
class ConnectError(object):
- pass
+ def __init__(self, exception):
+ self.exception = exception
class ProxyEventSource(object):
@@ -63,26 +64,29 @@ class ProxyEventSource(object):
class ConnectionMachine(distbuild.StateMachine):
- def __init__(self, addr, port, machine, extra_args):
- distbuild.StateMachine.__init__(self, 'connecting')
+ def __init__(self, addr, port, machine, extra_args,
+ reconnect_interval=1, max_retries=float('inf')):
+ super(ConnectionMachine, self).__init__('connecting')
self._addr = addr
self._port = port
self._machine = machine
self._extra_args = extra_args
self._socket = None
- self.reconnect_interval = 1
+ self._reconnect_interval = reconnect_interval
+ self._numof_retries = 0
+ self._max_retries = max_retries
def setup(self):
self._sock_proxy = ProxyEventSource()
self.mainloop.add_event_source(self._sock_proxy)
self._start_connect()
- self._timer = distbuild.TimerEventSource(self.reconnect_interval)
+ self._timer = distbuild.TimerEventSource(self._reconnect_interval)
self.mainloop.add_event_source(self._timer)
spec = [
# state, source, event_class, new_state, callback
- ('connecting', self._sock_proxy, distbuild.SocketWriteable,
+ ('connecting', self._sock_proxy, distbuild.SocketWriteable,
'connected', self._connect),
('connecting', self, StopConnecting, None, self._stop),
('connected', self, Reconnect, 'connecting', self._reconnect),
@@ -118,7 +122,12 @@ class ConnectionMachine(distbuild.StateMachine):
logging.error(
'Failed to connect to %s:%s: %s' %
(self._addr, self._port, str(e)))
- self.mainloop.queue_event(self, ConnectError())
+
+ if self._numof_retries < self._max_retries:
+ self.mainloop.queue_event(self, ConnectError(e))
+ else:
+ self.mainloop.queue_event(self, StopConnecting(e))
+
return
self._sock_proxy.event_source = None
logging.info('Connected to %s:%s' % (self._addr, self._port))
@@ -128,6 +137,8 @@ class ConnectionMachine(distbuild.StateMachine):
def _reconnect(self, event_source, event):
logging.info('Reconnecting to %s:%s' % (self._addr, self._port))
+ self._numof_retries += 1
+
if self._socket is not None:
self._socket.close()
self._timer.stop()
@@ -144,3 +155,35 @@ class ConnectionMachine(distbuild.StateMachine):
def _start_timer(self, event_source, event):
self._timer.start()
+ self._sock_proxy.event_source.close()
+ self._sock_proxy.event_source = None
+
+class InitiatorConnectionMachine(ConnectionMachine):
+
+ def __init__(self, app, addr, port, machine, extra_args,
+ reconnect_interval, max_retries):
+
+ self.cm = super(InitiatorConnectionMachine, self)
+ self.cm.__init__(addr, port, machine, extra_args,
+ reconnect_interval, max_retries)
+
+ self.app = app
+
+ def _connect(self, event_source, event):
+ self.app.status(msg='Connecting to %s:%s' % (self._addr, self._port))
+ self.cm._connect(event_source, event)
+
+ def _stop(self, event_source, event):
+ if event.exception:
+ self.app.status(msg="Couldn't connect to %s:%s: %s" %
+ (self._addr, self._port, event.exception.strerror))
+
+ self.cm._stop(event_source, event)
+
+ def _start_timer(self, event_source, event):
+ self.app.status(msg="Couldn't connect to %s:%s: %s" %
+ (self._addr, self._port, event.exception.strerror))
+ self.app.status(msg="Retrying in %d seconds" %
+ self._reconnect_interval)
+
+ self.cm._start_timer(event_source, event)
diff --git a/distbuild/worker_build_scheduler.py b/distbuild/worker_build_scheduler.py
index 57cc0224..39b7f021 100644
--- a/distbuild/worker_build_scheduler.py
+++ b/distbuild/worker_build_scheduler.py
@@ -536,6 +536,9 @@ class WorkerConnection(distbuild.StateMachine):
self.mainloop.queue_event(WorkerConnection, _NeedJob(self))
def _request_caching(self, event_source, event):
+ # This code should be moved into the morphlib.remoteartifactcache
+ # module. It would be good to share it with morphlib.buildcommand,
+ # which also wants to fetch artifacts from a remote cache.
distbuild.crash_point()
logging.debug('Requesting shared artifact cache to get artifacts')
diff --git a/morphlib/buildcommand.py b/morphlib/buildcommand.py
index 7ad7909d..f68046e3 100644
--- a/morphlib/buildcommand.py
+++ b/morphlib/buildcommand.py
@@ -274,31 +274,31 @@ class BuildCommand(object):
'name': a.name,
})
- self.app.status(msg='Checking if %(kind)s needs '
- 'building %(sha1)s',
- kind=a.source.morphology['kind'],
- sha1=a.source.sha1[:7])
-
- if self.is_built(a):
- self.cache_artifacts_locally([a])
- self.app.status(
- msg='The %(kind)s is cached at %(cache)s',
- kind=a.source.morphology['kind'],
- cache=os.path.basename(self.lac.artifact_filename(a))[:7])
- else:
- self.app.status(msg='Building %(kind)s %(name)s',
- name=a.name, kind=a.source.morphology['kind'])
- self.build_artifact(a, build_env)
+ self.cache_or_build_artifact(a, build_env)
self.app.status(msg='%(kind)s %(name)s is cached at %(cachepath)s',
kind=a.source.morphology['kind'], name=a.name,
cachepath=self.lac.artifact_filename(a),
chatty=(a.source.morphology['kind'] != "system"))
+
self.app.status_prefix = old_prefix
- def is_built(self, artifact):
- '''Does either cache already have the artifact?'''
- return self.lac.has(artifact) or (self.rac and self.rac.has(artifact))
+ def cache_or_build_artifact(self, artifact, build_env):
+ '''Make the built artifact available in the local cache.
+
+ This can be done by retrieving from a remote artifact cache, or if
+ that doesn't work for some reason, by building the artifact locally.
+
+ '''
+ if self.rac is not None:
+ try:
+ self.cache_artifacts_locally([artifact])
+ except morphlib.remoteartifactcache.GetError:
+ # Error is logged by the RemoteArtifactCache object.
+ pass
+
+ if not self.lac.has(artifact):
+ self.build_artifact(artifact, build_env)
def build_artifact(self, artifact, build_env):
'''Build one artifact.
@@ -307,6 +307,10 @@ class BuildCommand(object):
in either the local or remote cache already.
'''
+ self.app.status(msg='Building %(kind)s %(name)s',
+ name=artifact.name,
+ kind=artifact.source.morphology['kind'])
+
self.get_sources(artifact)
deps = self.get_recursive_deps(artifact)
self.cache_artifacts_locally(deps)
@@ -389,27 +393,46 @@ class BuildCommand(object):
def cache_artifacts_locally(self, artifacts):
'''Get artifacts missing from local cache from remote cache.'''
- def copy(remote, local):
- shutil.copyfileobj(remote, local)
- remote.close()
- local.close()
+ def fetch_files(to_fetch):
+ '''Fetch a set of files atomically.
+
+ If an error occurs during the transfer of any files, all downloaded
+ data is deleted, to ensure integrity of the local cache.
+
+ '''
+ try:
+ for remote, local in to_fetch:
+ shutil.copyfileobj(remote, local)
+ except BaseException:
+ for remote, local in to_fetch:
+ local.abort()
+ raise
+ else:
+ for remote, local in to_fetch:
+ remote.close()
+ local.close()
for artifact in artifacts:
+ # This block should fetch all artifact files in one go, using the
+ # 1.0/artifacts method of morph-cache-server. The code to do that
+ # needs bringing in from the distbuild.worker_build_connection
+            # module into morphlib.remoteartifactcache first.
+ to_fetch = []
if not self.lac.has(artifact):
- self.app.status(msg='Fetching to local cache: '
- 'artifact %(name)s',
- name=artifact.name)
- rac_file = self.rac.get(artifact)
- lac_file = self.lac.put(artifact)
- copy(rac_file, lac_file)
+ to_fetch.append((self.rac.get(artifact),
+ self.lac.put(artifact)))
if artifact.source.morphology.needs_artifact_metadata_cached:
if not self.lac.has_artifact_metadata(artifact, 'meta'):
- self.app.status(msg='Fetching to local cache: '
- 'artifact metadata %(name)s',
- name=artifact.name)
- copy(self.rac.get_artifact_metadata(artifact, 'meta'),
- self.lac.put_artifact_metadata(artifact, 'meta'))
+ to_fetch.append((
+ self.rac.get_artifact_metadata(artifact, 'meta'),
+ self.lac.put_artifact_metadata(artifact, 'meta')))
+
+ if len(to_fetch) > 0:
+ self.app.status(
+ msg='Fetching to local cache: artifact %(name)s',
+ name=artifact.name)
+ fetch_files(to_fetch)
def create_staging_area(self, build_env, use_chroot=True, extra_env={},
extra_path=[]):
@@ -479,6 +502,9 @@ class BuildCommand(object):
class InitiatorBuildCommand(BuildCommand):
+ RECONNECT_INTERVAL = 30 # seconds
+ MAX_RETRIES = 1
+
def __init__(self, app, addr, port):
self.app = app
self.addr = addr
@@ -501,7 +527,13 @@ class InitiatorBuildCommand(BuildCommand):
self.app.status(msg='Starting distributed build')
loop = distbuild.MainLoop()
- cm = distbuild.ConnectionMachine(
- self.addr, self.port, distbuild.Initiator, [self.app] + args)
+ cm = distbuild.InitiatorConnectionMachine(self.app,
+ self.addr,
+ self.port,
+ distbuild.Initiator,
+ [self.app] + args,
+ self.RECONNECT_INTERVAL,
+ self.MAX_RETRIES)
+
loop.add_state_machine(cm)
loop.run()
diff --git a/morphlib/exts/initramfs.write b/morphlib/exts/initramfs.write
index 815772f2..f8af6d84 100755
--- a/morphlib/exts/initramfs.write
+++ b/morphlib/exts/initramfs.write
@@ -23,5 +23,5 @@ INITRAMFS_PATH="$2"
(cd "$ROOTDIR" &&
find . -print0 |
- cpio -0 -H newc -o |
- gzip -c) >"$INITRAMFS_PATH"
+ cpio -0 -H newc -o) |
+ gzip -c | install -D -m644 /dev/stdin "$INITRAMFS_PATH"
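The switch to `install -D -m644 /dev/stdin` matters because `install -D` creates any missing parent directories of the destination, so the initramfs can be written to a path (for example a nested system's `boot/` directory) that does not exist yet. A rough shell equivalent of the new final step, for illustration only:

    # approximately what `install -D -m644 /dev/stdin "$INITRAMFS_PATH"` does
    mkdir -p "$(dirname "$INITRAMFS_PATH")"   # create missing leading directories
    cat >"$INITRAMFS_PATH"                    # write the gzipped cpio archive from stdin
    chmod 644 "$INITRAMFS_PATH"               # fixed, world-readable permissions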
diff --git a/morphlib/exts/kvm.check b/morphlib/exts/kvm.check
index 957d0893..1bb4007a 100755
--- a/morphlib/exts/kvm.check
+++ b/morphlib/exts/kvm.check
@@ -31,6 +31,8 @@ class KvmPlusSshCheckExtension(morphlib.writeexts.WriteExtension):
if len(args) != 1:
raise cliapp.AppException('Wrong number of command line args')
+ self.require_btrfs_in_deployment_host_kernel()
+
upgrade = self.get_environment_boolean('UPGRADE')
if upgrade:
raise cliapp.AppException(
diff --git a/morphlib/exts/openstack.check b/morphlib/exts/openstack.check
index a9a8fe1b..b5173011 100755
--- a/morphlib/exts/openstack.check
+++ b/morphlib/exts/openstack.check
@@ -26,6 +26,8 @@ class OpenStackCheckExtension(morphlib.writeexts.WriteExtension):
if len(args) != 1:
raise cliapp.AppException('Wrong number of command line args')
+ self.require_btrfs_in_deployment_host_kernel()
+
upgrade = self.get_environment_boolean('UPGRADE')
if upgrade:
raise cliapp.AppException(
diff --git a/morphlib/exts/rawdisk.check b/morphlib/exts/rawdisk.check
new file mode 100755
index 00000000..6a656ee7
--- /dev/null
+++ b/morphlib/exts/rawdisk.check
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+'''Preparatory checks for Morph 'rawdisk' write extension'''
+
+import cliapp
+
+import morphlib.writeexts
+
+
+class RawdiskCheckExtension(morphlib.writeexts.WriteExtension):
+ def process_args(self, args):
+ if len(args) != 1:
+ raise cliapp.AppException('Wrong number of command line args')
+
+ self.require_btrfs_in_deployment_host_kernel()
+
+RawdiskCheckExtension().run()
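If this check fails, the usual remedy is to load the btrfs driver on the machine running Morph before retrying the deployment (assuming the host kernel ships btrfs as a loadable module rather than built in):

    modprobe btrfs                 # load the filesystem driver
    grep btrfs /proc/filesystems   # it should now be listed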
diff --git a/morphlib/exts/virtualbox-ssh.check b/morphlib/exts/virtualbox-ssh.check
index 1aeb8999..57d54db1 100755
--- a/morphlib/exts/virtualbox-ssh.check
+++ b/morphlib/exts/virtualbox-ssh.check
@@ -26,6 +26,8 @@ class VirtualBoxPlusSshCheckExtension(morphlib.writeexts.WriteExtension):
if len(args) != 1:
raise cliapp.AppException('Wrong number of command line args')
+ self.require_btrfs_in_deployment_host_kernel()
+
upgrade = self.get_environment_boolean('UPGRADE')
if upgrade:
raise cliapp.AppException(
diff --git a/morphlib/plugins/cross-bootstrap_plugin.py b/morphlib/plugins/cross-bootstrap_plugin.py
index ec0cfbcb..bfd0d047 100644
--- a/morphlib/plugins/cross-bootstrap_plugin.py
+++ b/morphlib/plugins/cross-bootstrap_plugin.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2013 Codethink Limited
+# Copyright (C) 2013-2014 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -299,18 +299,8 @@ class CrossBootstrapPlugin(cliapp.Plugin):
'Nothing to cross-compile. Only chunks built in \'bootstrap\' '
'mode can be cross-compiled.')
- # FIXME: merge with build-command's code
for i, a in enumerate(cross_chunks):
- if build_command.is_built(a):
- self.app.status(msg='The %(kind)s %(name)s is already built',
- kind=a.source.morphology['kind'],
- name=a.name)
- build_command.cache_artifacts_locally([a])
- else:
- self.app.status(msg='Cross-building %(kind)s %(name)s',
- kind=a.source.morphology['kind'],
- name=a.name)
- build_command.build_artifact(a, build_env)
+ build_command.cache_or_build_artifact(a, build_env)
for i, a in enumerate(native_chunks):
build_command.get_sources(a)
diff --git a/morphlib/plugins/deploy_plugin.py b/morphlib/plugins/deploy_plugin.py
index 1d582949..3afb7b17 100644
--- a/morphlib/plugins/deploy_plugin.py
+++ b/morphlib/plugins/deploy_plugin.py
@@ -274,6 +274,7 @@ class DeployPlugin(cliapp.Plugin):
self.app.settings['tempdir-min-space'],
'/', 0)
+ self.app.settings['no-git-update'] = True
cluster_name = morphlib.util.strip_morph_extension(args[0])
env_vars = args[1:]
diff --git a/morphlib/remoteartifactcache.py b/morphlib/remoteartifactcache.py
index 9f6bf69e..0f8edce8 100644
--- a/morphlib/remoteartifactcache.py
+++ b/morphlib/remoteartifactcache.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2012-2013 Codethink Limited
+# Copyright (C) 2012-2014 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -33,18 +33,19 @@ class GetError(cliapp.AppException):
cliapp.AppException.__init__(
self, 'Failed to get the artifact %s with cache key %s '
'from the artifact cache %s' %
- (artifact, artifact.cache_key, cache))
+ (artifact.basename(), artifact.cache_key, cache))
-class GetArtifactMetadataError(cliapp.AppException):
+class GetArtifactMetadataError(GetError):
def __init__(self, cache, artifact, name):
cliapp.AppException.__init__(
self, 'Failed to get metadata %s for the artifact %s '
- 'from the artifact cache %s' % (name, artifact, cache))
+ 'from the artifact cache %s' %
+ (name, artifact.basename(), cache))
-class GetSourceMetadataError(cliapp.AppException):
+class GetSourceMetadataError(GetError):
def __init__(self, cache, source, cache_key, name):
cliapp.AppException.__init__(
diff --git a/morphlib/writeexts.py b/morphlib/writeexts.py
index d6f23e0d..74587bd1 100644
--- a/morphlib/writeexts.py
+++ b/morphlib/writeexts.py
@@ -104,7 +104,18 @@ class WriteExtension(cliapp.Application):
self.output.write('%s\n' % (kwargs['msg'] % kwargs))
self.output.flush()
-
+
+ def check_for_btrfs_in_deployment_host_kernel(self):
+ with open('/proc/filesystems') as f:
+ text = f.read()
+ return '\tbtrfs\n' in text
+
+ def require_btrfs_in_deployment_host_kernel(self):
+ if not self.check_for_btrfs_in_deployment_host_kernel():
+ raise cliapp.AppException(
+ 'Error: Btrfs is required for this deployment, but was not '
+ 'detected in the kernel of the machine that is running Morph.')
+
def create_local_system(self, temp_root, raw_disk):
'''Create a raw system image locally.'''
size = self.get_disk_size()
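The string `'\tbtrfs\n'` is matched because `/proc/filesystems` lists one filesystem per line, each name preceded by a tab (and by `nodev` for filesystems without a backing device). A typical, machine-dependent listing:

    $ cat /proc/filesystems        # each name is preceded by a literal tab
    nodev	sysfs
    nodev	proc
    nodev	tmpfs
    	ext4
    	btrfs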
diff --git a/yarns/deployment.yarn b/yarns/deployment.yarn
index fc21b826..67aecce2 100644
--- a/yarns/deployment.yarn
+++ b/yarns/deployment.yarn
@@ -102,3 +102,86 @@ deployed system contains the other. Since the baserock directory is in
every system, we can check for that.
AND tarball test.tar contains var/lib/sysroots/test-system/baserock
+
+Initramfs deployments
+=====================
+
+There are a few ways of creating an initramfs. We could:
+1. Build a sysroot and:
+ 1. Have a chunk turn that into a cpio archive, written into /boot.
+ 2. Embed it in the Linux kernel image, having the initramfs as part
+ of the BSP.
+2. Deploy an existing system as a cpio archive
+ 1. As a stand-alone system, without a rootfs
+ 2. Nested inside another system
+
+1.1 and 1.2 require system engineering work, so they aren't tested here.
+
+ SCENARIO deploying a system with an initramfs
+ ASSUMING there is space for 5 512M disk images
+ GIVEN a workspace
+ AND a git server
+ WHEN the user checks out the system branch called master
+ GIVEN a cluster called C in system branch master
+ AND a system in cluster C in branch master called S
+
+2.2 needs a nested system that is deployed with the initramfs write
+extension.
+
+ GIVEN a subsystem in cluster C in branch master called S.I
+ AND subsystem S.I in cluster C in branch master builds test-system
+ AND subsystem S.I in cluster C in branch master has deployment type: initramfs
+
+The nested system needs to be placed somewhere in the parent. The
+traditional place for an initramfs is `/boot`.
+
+ AND subsystem S.I in cluster C in branch master has deployment location: boot/initramfs.gz
+
+1.1 and 2.2 need the write extension to configure the boot-loader to
+use the produced initramfs. Only write extensions that create a disk image care about this, so we'll use `rawdisk.write`.
+
+ GIVEN system S in cluster C in branch master builds test-system
+ AND system S in cluster C in branch master has deployment type: rawdisk
+ AND system S in cluster C in branch master has deployment location: test.img
+ AND system S in cluster C in branch master has deployment variable: DISK_SIZE=512M
+
+Initramfs support is triggered by the `INITRAMFS_PATH` variable. It could have been made automatic, triggering the behaviour if `/boot/initramfs.gz` exists, but:
+
+1. There are a bunch of possible names, some of which imply different formats.
+2. If we decide on one specific name, how do we pick it?
+3. If we allow multiple possible names, how do we handle more than one being present?
+4. We may need to pick a non-standard name: e.g. we have a deployment
+   where the system loads a kernel and initramfs from a disk, then boots
+   the target in KVM, so the initramfs we want to use for the guest is
+   `initramfs.gz`, while the host's initramfs is `hyp-initramfs.gz`.
+5. We may have the initramfs come from a chunk the system built, but
+ for speed, we want this particular deployment not to use an initramfs,
+ even though we have a generic image that may support one.
+
+For all these reasons, despite there being redundancy in some cases,
+we're going to set `INITRAMFS_PATH` to the same as the nested deployment's
+location.
+
+ GIVEN system S in cluster C in branch master has deployment variable: INITRAMFS_PATH=boot/initramfs.gz
+
+Fully testing that the system is bootable requires a lot more time,
+infrastructure and dependencies, so we're just going to build it and
+inspect the result of the deployment.
+
+ WHEN the user builds the system test-system in branch master
+ AND the user attempts to deploy the cluster C in branch master
+ THEN morph succeeded
+ AND file workspace/master/test/morphs/test.img exists
+
+If the initramfs write extension works, the rootfs image should contain
+`boot/initramfs.gz`.
+
+ WHEN disk image workspace/master/test/morphs/test.img is mounted at mnt
+ THEN file mnt/systems/default/run/boot/initramfs.gz exists
+
+If the `rawdisk` write extension worked, then the bootloader config file
+will mention the initramfs, and the UUID of the disk.
+
+ AND file mnt/extlinux.conf matches initramfs
+ AND file mnt/extlinux.conf matches root=UUID=
+ FINALLY mnt is unmounted
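The two `matches` checks above reduce to plain greps over the bootloader configuration that `rawdisk.write` generates; the exact contents of `extlinux.conf` depend on the write extension, but the assertions amount to roughly:

    # see the `THEN file (\S+) matches (.*)` implementation added in
    # yarns/implementations.yarn below ($DATADIR prefix omitted here)
    grep -q initramfs  mnt/extlinux.conf   # an initramfs entry is present
    grep -q root=UUID= mnt/extlinux.conf   # the root filesystem is referenced by UUID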
diff --git a/yarns/implementations.yarn b/yarns/implementations.yarn
index 0635af72..66d47bfd 100644
--- a/yarns/implementations.yarn
+++ b/yarns/implementations.yarn
@@ -724,6 +724,49 @@ Check attributes of a file on the filesystem
IMPLEMENTS THEN file (\S+) is empty
stat -c %s "$DATADIR/$MATCH_1" | grep -Fx 0
+ IMPLEMENTS THEN file (\S+) matches (.*)
+ grep -q "$MATCH_2" "$DATADIR/$MATCH_1"
+
+Disk image manipulation
+-----------------------
+
+We need to test disk images we create. In the absence of tools for
+inspecting disks without mounting them, we need commands to handle this.
+
+ IMPLEMENTS WHEN disk image (\S+) is mounted at (.*)
+ mkdir -p "$DATADIR/$MATCH_2"
+ mount -o loop "$DATADIR/$MATCH_1" "$DATADIR/$MATCH_2"
+
+ IMPLEMENTS FINALLY (\S+) is unmounted
+ umount -d "$DATADIR/$MATCH_1"
+
+We may not have enough space to run some tests that have disk images.
+
+ IMPLEMENTS ASSUMING there is space for (\d+) (\d+)(\S*) disk images?
+ # Count is included as an argument, so that if we change the disk
+ # image sizes then it's more obvious when we need to change the
+ # assumption, since it's the same value.
+ count="$MATCH_1"
+ case "$MATCH_3" in
+ '')
+ size="$MATCH_2"
+ ;;
+ M)
+ size=$(expr "$MATCH_2" '*' 1024 '*' 1024 )
+ ;;
+ G)
+ size=$(expr "$MATCH_2" '*' 1024 '*' 1024 '*' 1024 )
+ ;;
+ *)
+ echo Unrecognized size suffix: "$MATCH_3" >&2
+ exit 1
+ esac
+ total_image_size="$(expr "$size" '*' "$count" )"
+ blocks="$(stat -f -c %a "$DATADIR")"
+ block_size="$(stat -f -c %S "$DATADIR")"
+ disk_free=$(expr "$blocks" '*' "$block_size" )
+ test "$disk_free" -gt "$total_image_size"
+
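As a worked example of the arithmetic above, `ASSUMING there is space for 5 512M disk images` expands to the comparison below; the free-space side naturally depends on the machine running the yarns:

    size=$(expr 512 '*' 1024 '*' 1024)       # 536870912 bytes per image
    total_image_size=$(expr "$size" '*' 5)   # 2684354560 bytes (2.5 GiB) in total
    # the assumption holds only if the filesystem backing $DATADIR reports
    # more free space than that:
    test "$disk_free" -gt "$total_image_size"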
Check contents of a file
------------------------