summaryrefslogtreecommitdiff
path: root/nova/virt/images.py
diff options
context:
space:
mode:
authorMatthew Booth <mbooth@redhat.com>2016-06-08 10:13:00 +0100
committerMatthew Booth <mbooth@redhat.com>2016-07-21 10:52:30 +0100
commitcc2ae7526b35155687a5a1abbe4f2f80618c8f87 (patch)
tree6308dea92ae599374a655ea65d497da4ccc6a71c /nova/virt/images.py
parenteec3a2b9e8df358178eaea7069107fdd7df15ef3 (diff)
downloadnova-cc2ae7526b35155687a5a1abbe4f2f80618c8f87.tar.gz
Remove max_size argument to images.fetch and fetch_to_raw
images.fetch was passed a max_size argument, but did not use it. images.fetch_to_raw used the max_size argument to check that the image being downloaded is not larger than the target instance's root disk. However, this check does not make sense in this context. fetch_to_raw is used to download directly to the image cache, which means when booting multiple instances on the same compute host it executes only once. However, the check obviously needs to happen against every instance, not just the first to use a particular image. Consequently every image backend duplicates this check, making the check in fetch_to_raw both confusing and redundant. There are a couple of callers outside the libvirt driver. These do not pass the max_size argument, and are therefore unaffected. Implements: blueprint libvirt-instance-storage Change-Id: I70a559f3dc9b59097ff6923920f4377cca00d1b2
Diffstat (limited to 'nova/virt/images.py')
-rw-r--r--nova/virt/images.py26
1 file changed, 4 insertions, 22 deletions
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 0eb644f17d..41614eb090 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -29,7 +29,7 @@ from oslo_utils import units
import nova.conf
from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _
from nova import image
from nova import utils
@@ -107,7 +107,7 @@ def _convert_image(source, dest, in_format, out_format, run_as_root):
raise exception.ImageUnacceptable(image_id=source, reason=msg)
-def fetch(context, image_href, path, max_size=0):
+def fetch(context, image_href, path):
with fileutils.remove_path_on_error(path):
IMAGE_API.download(context, image_href, dest_path=path)
@@ -116,9 +116,9 @@ def get_info(context, image_href):
return IMAGE_API.get(context, image_href)
-def fetch_to_raw(context, image_href, path, max_size=0):
+def fetch_to_raw(context, image_href, path):
path_tmp = "%s.part" % path
- fetch(context, image_href, path_tmp, max_size=max_size)
+ fetch(context, image_href, path_tmp)
with fileutils.remove_path_on_error(path_tmp):
data = qemu_img_info(path_tmp)
@@ -135,24 +135,6 @@ def fetch_to_raw(context, image_href, path, max_size=0):
reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
{'fmt': fmt, 'backing_file': backing_file}))
- # We can't generally shrink incoming images, so disallow
- # images > size of the flavor we're booting. Checking here avoids
- # an immediate DoS where we convert large qcow images to raw
- # (which may compress well but not be sparse).
- # TODO(p-draigbrady): loop through all flavor sizes, so that
- # we might continue here and not discard the download.
- # If we did that we'd have to do the higher level size checks
- # irrespective of whether the base image was prepared or not.
- disk_size = data.virtual_size
- if max_size and max_size < disk_size:
- LOG.error(_LE('%(base)s virtual size %(disk_size)s '
- 'larger than flavor root disk size %(size)s'),
- {'base': path,
- 'disk_size': disk_size,
- 'size': max_size})
- raise exception.FlavorDiskSmallerThanImage(
- flavor_size=max_size, image_size=disk_size)
-
if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw", image_href, fmt)