#
# Classes for building disk device xml
#
# Copyright 2006-2008, 2012-2014 Red Hat, Inc.
# Jeremy Katz <katzj@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
import os
import stat
import pwd
import subprocess
import logging
import re
import urlgrabber.progress as progress
from virtinst import diskbackend
from virtinst import util
from virtinst import VirtualDevice
from virtinst.xmlbuilder import XMLProperty
def _qemu_sanitize_drvtype(phystype, fmt, manual_format=False):
"""
Sanitize libvirt storage volume format to a valid qemu driver type
"""
raw_list = ["iso"]
if phystype == VirtualDisk.TYPE_BLOCK:
if not fmt:
return VirtualDisk.DRIVER_QEMU_RAW
if fmt and not manual_format:
return VirtualDisk.DRIVER_QEMU_RAW
if fmt in raw_list:
return VirtualDisk.DRIVER_QEMU_RAW
return fmt
def _name_uid(user):
"""
Return UID for string username
"""
pwdinfo = pwd.getpwnam(user)
return pwdinfo[2]
def _is_dir_searchable(uid, username, path):
"""
Check if passed directory is searchable by uid
"""
try:
statinfo = os.stat(path)
except OSError:
return False
if uid == statinfo.st_uid:
flag = stat.S_IXUSR
elif uid == statinfo.st_gid:
flag = stat.S_IXGRP
else:
flag = stat.S_IXOTH
if bool(statinfo.st_mode & flag):
return True
# Check POSIX ACL (since that is what we use to 'fix' access)
cmd = ["getfacl", path]
try:
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
except OSError:
logging.debug("Didn't find the getfacl command.")
return False
if proc.returncode != 0:
logging.debug("Cmd '%s' failed: %s", cmd, err)
return False
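    # getfacl prints named-user ACL entries as lines like "user:<name>:r-x";
    # the search below checks that this user's entry grants execute (search)
    # permission on the directory.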
return bool(re.search("user:%s:..x" % username, out))
def _distill_storage(conn, do_create, nomanaged,
path, vol_object, vol_install,
clone_path, backing_store,
*args):
"""
Validates and updates params when the backing storage is changed
"""
pool = None
path_is_pool = False
storage_capable = conn.check_support(conn.SUPPORT_CONN_STORAGE)
if vol_object:
pass
elif not storage_capable:
pass
elif path and not nomanaged:
path = os.path.abspath(path)
(vol_object, pool, path_is_pool) = diskbackend.manage_path(conn, path)
creator = None
backend = diskbackend.StorageBackend(conn, path, vol_object,
path_is_pool and pool or None)
if not do_create:
return backend, None
if backend.exists(auto_check=False) and path is not None:
if not clone_path:
return backend, None
if path and not (vol_install or pool or clone_path):
raise RuntimeError(_("Don't know how to create storage for "
"path '%s'. Use libvirt APIs to manage the parent directory "
"as a pool first.") % path)
if path or vol_install or pool or clone_path:
creator = diskbackend.StorageCreator(conn, path, pool,
vol_install, clone_path,
backing_store, *args)
return backend, creator
_TARGET_PROPS = ["file", "dev", "dir"]
class VirtualDisk(VirtualDevice):
virtual_device_type = VirtualDevice.VIRTUAL_DEV_DISK
DRIVER_FILE = "file"
DRIVER_PHY = "phy"
DRIVER_TAP = "tap"
DRIVER_QEMU = "qemu"
driver_names = [DRIVER_FILE, DRIVER_PHY, DRIVER_TAP, DRIVER_QEMU]
DRIVER_QEMU_RAW = "raw"
# No list here, since there are many other valid values
DRIVER_TAP_RAW = "aio"
DRIVER_TAP_QCOW = "qcow"
DRIVER_TAP_VMDK = "vmdk"
DRIVER_TAP_VDISK = "vdisk"
driver_types = [DRIVER_TAP_RAW, DRIVER_TAP_QCOW,
DRIVER_TAP_VMDK, DRIVER_TAP_VDISK]
CACHE_MODE_NONE = "none"
CACHE_MODE_WRITETHROUGH = "writethrough"
CACHE_MODE_WRITEBACK = "writeback"
CACHE_MODE_DIRECTSYNC = "directsync"
CACHE_MODE_UNSAFE = "unsafe"
cache_types = [CACHE_MODE_NONE, CACHE_MODE_WRITETHROUGH,
CACHE_MODE_WRITEBACK, CACHE_MODE_DIRECTSYNC, CACHE_MODE_UNSAFE]
DEVICE_DISK = "disk"
DEVICE_LUN = "lun"
DEVICE_CDROM = "cdrom"
DEVICE_FLOPPY = "floppy"
devices = [DEVICE_DISK, DEVICE_LUN, DEVICE_CDROM, DEVICE_FLOPPY]
TYPE_FILE = "file"
TYPE_BLOCK = "block"
TYPE_DIR = "dir"
types = [TYPE_FILE, TYPE_BLOCK, TYPE_DIR]
IO_MODE_NATIVE = "native"
IO_MODE_THREADS = "threads"
io_modes = [IO_MODE_NATIVE, IO_MODE_THREADS]
error_policies = ["ignore", "stop", "enospace", "report"]
@staticmethod
def disk_type_to_xen_driver_name(disk_type):
"""
        Convert a value of VirtualDisk.type to its associated Xen
<driver name=/> property
"""
if disk_type == VirtualDisk.TYPE_BLOCK:
return "phy"
elif disk_type == VirtualDisk.TYPE_FILE:
return "file"
return "file"
@staticmethod
def disk_type_to_target_prop(disk_type):
"""
        Convert a value of VirtualDisk.type to its associated XML
target property name
"""
if disk_type == VirtualDisk.TYPE_FILE:
return "file"
elif disk_type == VirtualDisk.TYPE_BLOCK:
return "dev"
elif disk_type == VirtualDisk.TYPE_DIR:
return "dir"
return "file"
@staticmethod
def path_exists(conn, path):
"""
Check if path exists. If we can't determine, return False
"""
if path is None:
return False
try:
(vol, pool, path_is_pool) = diskbackend.check_if_path_managed(
conn, path)
ignore = pool
if vol or path_is_pool:
return True
if not conn.is_remote():
return os.path.exists(path)
except:
pass
return False
@staticmethod
def check_path_search_for_user(conn, path, username):
"""
Check if the passed user has search permissions for all the
directories in the disk path.
@return: List of the directories the user cannot search, or empty list
@rtype : C{list}
"""
if path is None:
return []
if conn.is_remote():
return []
if username == "root":
return []
try:
uid = _name_uid(username)
except Exception, e:
logging.debug("Error looking up username: %s", str(e))
return []
fixlist = []
if os.path.isdir(path):
dirname = path
base = "-"
else:
dirname, base = os.path.split(path)
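        # Walk upwards through every parent directory; e.g. (illustrative) for
        # "/home/user/disks/f.img" this checks "/home/user/disks",
        # "/home/user", "/home", and "/".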
while base:
if not _is_dir_searchable(uid, username, dirname):
fixlist.append(dirname)
dirname, base = os.path.split(dirname)
return fixlist
@staticmethod
def check_path_search(conn, path):
# Only works for qemu and DAC
if conn.is_remote() or not conn.is_qemu_system():
return None, []
from virtcli import cliconfig
user = cliconfig.default_qemu_user
try:
for i in conn.caps.host.secmodels:
if i.model != "dac":
continue
label = (i.baselabels.get("kvm") or
i.baselabels.get("qemu"))
if not label:
continue
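                # The DAC baselabel typically looks like "+<uid>:+<gid>"
                # (e.g. "+107:+107"); resolve the uid portion to a username.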
pwuid = pwd.getpwuid(
int(label.split(":")[0].replace("+", "")))
if pwuid:
user = pwuid[0]
except:
logging.debug("Exception grabbing qemu DAC user", exc_info=True)
return None, []
return user, VirtualDisk.check_path_search_for_user(conn, path, user)
@staticmethod
def fix_path_search_for_user(conn, path, username):
"""
Try to fix any permission problems found by check_path_search_for_user
        @return: Dictionary of entries {broken path : error msg}
@rtype : C{dict}
"""
def fix_perms(dirname, useacl=True):
if useacl:
cmd = ["setfacl", "--modify", "user:%s:x" % username, dirname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
logging.debug("Ran command '%s'", cmd)
if out or err:
logging.debug("out=%s\nerr=%s", out, err)
if proc.returncode != 0:
raise ValueError(err)
else:
logging.debug("Setting +x on %s", dirname)
mode = os.stat(dirname).st_mode
newmode = mode | stat.S_IXOTH
os.chmod(dirname, newmode)
if os.stat(dirname).st_mode != newmode:
# Trying to change perms on vfat at least doesn't work
# but also doesn't seem to error. Try and detect that
raise ValueError(_("Permissions on '%s' did not stick") %
dirname)
fixlist = VirtualDisk.check_path_search_for_user(conn, path, username)
if not fixlist:
return []
fixlist.reverse()
errdict = {}
useacl = True
for dirname in fixlist:
try:
try:
fix_perms(dirname, useacl)
except:
# If acl fails, fall back to chmod and retry
if not useacl:
raise
useacl = False
logging.debug("setfacl failed, trying old fashioned way")
fix_perms(dirname, useacl)
except Exception, e:
errdict[dirname] = str(e)
return errdict
@staticmethod
def path_in_use_by(conn, path, shareable=False, read_only=False):
"""
Return a list of VM names that are using the passed path.
@param conn: virConnect to check VMs
@param path: Path to check for
@param shareable: Path we are checking is marked shareable, so
don't warn if it conflicts with another shareable source.
@param read_only: Path we are checking is marked read_only, so
don't warn if it conflicts with another read_only source.
"""
if not path:
return []
# Find all volumes that have 'path' somewhere in their backing chain
vols = []
volmap = dict((vol.backing_store, vol)
for vol in conn.fetch_all_vols() if vol.backing_store)
backpath = path
while backpath in volmap:
vol = volmap[backpath]
            if vol.target_path in vols:
break
backpath = vol.target_path
vols.append(backpath)
ret = []
vms = conn.fetch_all_guests()
for vm in vms:
if not read_only:
if path in [vm.os.kernel, vm.os.initrd, vm.os.dtb]:
ret.append(vm.name)
continue
for disk in vm.get_devices("disk"):
if disk.path in vols and vm.name not in ret:
# VM uses the path indirectly via backing store
ret.append(vm.name)
break
if disk.path != path:
continue
if shareable and disk.shareable:
continue
if read_only and disk.read_only:
continue
ret.append(vm.name)
break
return ret
@staticmethod
def stat_local_path(path):
"""
Return tuple (storage type, storage size) for the passed path on
the local machine. This is a best effort attempt.
@return: tuple of
            (True if regular file, False otherwise; defaults to True,
             max size of the storage; defaults to 0)
"""
try:
return util.stat_disk(path)
except:
return (True, 0)
@staticmethod
def lookup_vol_object(conn, name_tuple):
"""
Return a volume instance from a pool name, vol name tuple
"""
if not conn.check_support(conn.SUPPORT_CONN_STORAGE):
raise ValueError(_("Connection does not support storage lookup."))
try:
pool = conn.storagePoolLookupByName(name_tuple[0])
return pool.storageVolLookupByName(name_tuple[1])
except Exception, e:
            raise ValueError(_("Couldn't lookup volume object: %s") % str(e))
@staticmethod
def build_vol_install(*args, **kwargs):
return diskbackend.build_vol_install(*args, **kwargs)
@staticmethod
def num_to_target(num):
"""
Convert an index in range (1, 1024) to a disk /dev number
(like hda, hdb, hdaa, etc.)
"""
digits = []
for factor in range(0, 3):
amt = (num % (26 ** (factor + 1))) / (26 ** factor)
if amt == 0 and num >= (26 ** (factor + 1)):
amt = 26
num -= amt
digits.insert(0, amt)
seen_valid = False
gen_t = ""
for digit in digits:
if digit == 0:
if not seen_valid:
continue
digit = 1
seen_valid = True
gen_t += "%c" % (ord('a') + digit - 1)
return gen_t
@staticmethod
def target_to_num(tgt):
"""
Convert disk /dev number (like hda, hdb, hdaa, etc.) to an index
"""
num = 0
k = 0
if tgt[0] == 'x':
# This case is here for 'xvda'
tgt = tgt[1:]
for i, c in enumerate(reversed(tgt[2:])):
if i != 0:
k = 1
num += (ord(c) - ord('a') + k) * (26 ** i)
return num
_XML_PROP_ORDER = [
"type", "device",
"driver_name", "driver_type",
"driver_cache", "driver_io", "error_policy",
"_xmlpath", "target", "bus",
]
def __init__(self, *args, **kwargs):
VirtualDevice.__init__(self, *args, **kwargs)
self.__storage_backend = None
self._storage_creator = None
self.nomanaged = False
self.transient = False
#############################
# Public property-esque API #
#############################
def _get_path(self):
if self._storage_creator:
return self._storage_creator.path
return self._storage_backend.path
def _set_path(self, val):
if self._storage_creator:
raise ValueError("Can't change disk path if storage creation info "
"has been set.")
self._change_backend(val, None)
self._xmlpath = self.path
path = property(_get_path, _set_path)
def get_sparse(self):
if self._storage_creator:
return self._storage_creator.get_sparse()
return None
def get_vol_object(self):
return self._storage_backend.get_vol_object()
def get_vol_install(self):
if not self._storage_creator:
return None
return self._storage_creator.get_vol_install()
def get_size(self):
if self._storage_creator:
return self._storage_creator.get_size()
return self._storage_backend.get_size()
#############################
# Internal defaults helpers #
#############################
def _get_default_type(self):
if self._storage_creator:
return self._storage_creator.get_dev_type()
return self._storage_backend.get_dev_type()
def _get_default_driver_name(self):
if not self.path:
return None
if self.conn.is_qemu():
return self.DRIVER_QEMU
return None
def _get_default_driver_type(self):
"""
        Return the default driver type for the passed parameters.
Where possible, we want to force /driver/@type = "raw" if installing
a QEMU VM. Without telling QEMU to expect a raw file, the emulator
is forced to autodetect, which has security implications:
http://lists.gnu.org/archive/html/qemu-devel/2008-04/msg00675.html
"""
if self.driver_name != self.DRIVER_QEMU:
return None
if self._storage_creator:
drvtype = self._storage_creator.get_driver_type()
else:
drvtype = self._storage_backend.get_driver_type()
return _qemu_sanitize_drvtype(self.type, drvtype)
##################
# XML properties #
##################
def _make_source_xpath(self):
return "./source/@" + self.disk_type_to_target_prop(self.type)
_xmlpath = XMLProperty(name="disk path",
make_xpath_cb=_make_source_xpath,
clear_first=["./source/@" + target for target in
_TARGET_PROPS])
sourceStartupPolicy = XMLProperty("./source/@startupPolicy")
device = XMLProperty("./@device",
default_cb=lambda s: s.DEVICE_DISK)
type = XMLProperty("./@type", default_cb=_get_default_type)
driver_name = XMLProperty("./driver/@name",
default_cb=_get_default_driver_name)
driver_type = XMLProperty("./driver/@type",
default_cb=_get_default_driver_type)
bus = XMLProperty("./target/@bus")
target = XMLProperty("./target/@dev")
removable = XMLProperty("./target/@removable", is_onoff=True)
read_only = XMLProperty("./readonly", is_bool=True)
shareable = XMLProperty("./shareable", is_bool=True)
driver_cache = XMLProperty("./driver/@cache")
driver_io = XMLProperty("./driver/@io")
error_policy = XMLProperty("./driver/@error_policy")
serial = XMLProperty("./serial")
iotune_rbs = XMLProperty("./iotune/read_bytes_sec", is_int=True)
iotune_ris = XMLProperty("./iotune/read_iops_sec", is_int=True)
iotune_tbs = XMLProperty("./iotune/total_bytes_sec", is_int=True)
iotune_tis = XMLProperty("./iotune/total_iops_sec", is_int=True)
iotune_wbs = XMLProperty("./iotune/write_bytes_sec", is_int=True)
iotune_wis = XMLProperty("./iotune/write_iops_sec", is_int=True)
#################################
# Validation assistance methods #
#################################
def _get_storage_backend(self):
if self.__storage_backend is None:
self.__storage_backend = diskbackend.StorageBackend(self.conn,
self._xmlpath,
None, None)
return self.__storage_backend
def _set_storage_backend(self, val):
self.__storage_backend = val
_storage_backend = property(_get_storage_backend, _set_storage_backend)
def set_create_storage(self, size=None, sparse=True,
fmt=None, vol_install=None,
clone_path=None, backing_store=None,
fake=False):
"""
Function that sets storage creation parameters. If this isn't
called, we assume that no storage creation is taking place and
will error accordingly.
        @size is in gigabytes
        @fake: If true, make like we are creating storage but fail
            if we are ever actually asked to do so.
"""
def _validate_path(p):
if p is None:
return
try:
d = VirtualDisk(self.conn)
d.path = p
# If this disk isn't managed, make sure we only perform
# non-managed lookup.
if (self._storage_creator or
(self.path and self._storage_backend.exists())):
d.nomanaged = not self.__managed_storage()
d.set_create_storage(fake=True)
d.validate()
except Exception, e:
raise ValueError(_("Error validating path %s: %s") % (p, e))
path = self.path
# Validate clone_path
if clone_path is not None:
clone_path = os.path.abspath(clone_path)
if backing_store is not None:
backing_store = os.path.abspath(backing_store)
if not fake:
_validate_path(clone_path)
_validate_path(backing_store)
if fake and size is None:
size = .000001
ignore, creator = _distill_storage(
self.conn, True, self.nomanaged, path, None,
vol_install, clone_path, backing_store,
size, sparse, fmt)
self._storage_creator = creator
if self._storage_creator:
self._storage_creator.fake = bool(fake)
self._xmlpath = self.path
else:
if (vol_install or clone_path):
raise RuntimeError("Need storage creation but it "
"didn't happen.")
if fmt and self.driver_name == self.DRIVER_QEMU:
self.driver_type = fmt
def is_cdrom(self):
return self.device == self.DEVICE_CDROM
def is_floppy(self):
return self.device == self.DEVICE_FLOPPY
def is_disk(self):
return self.device == self.DEVICE_DISK
def can_be_empty(self):
return self.is_floppy() or self.is_cdrom()
def _change_backend(self, path, vol_object):
backend, ignore = _distill_storage(
self.conn, False, self.nomanaged,
path, vol_object, None, None, None)
self._storage_backend = backend
def sync_path_props(self):
"""
Fills in the values of type, driver_type, and driver_name for
the associated backing storage. This needs to be manually called
if changing an existing disk's media.
"""
self.type = self._get_default_type()
self.driver_name = self._get_default_driver_name()
self.driver_type = self._get_default_driver_type()
def __managed_storage(self):
"""
Return bool representing if managed storage parameters have
been explicitly specified or filled in
"""
if self._storage_creator:
return self._storage_creator.is_managed()
return self._storage_backend.is_managed()
def creating_storage(self):
"""
Return True if the user requested us to create a device
"""
return bool(self._storage_creator)
def validate(self):
"""
        Validate all the complex interactions between the various
disk parameters.
"""
# No storage specified for a removable device type (CDROM, floppy)
if self.path is None:
if not self.can_be_empty():
raise ValueError(_("Device type '%s' requires a path") %
self.device)
return True
storage_capable = self.conn.check_support(
self.conn.SUPPORT_CONN_STORAGE)
if self.conn.is_remote():
if not storage_capable:
raise ValueError(_("Connection doesn't support remote "
"storage."))
# The main distinctions from this point forward:
# - Are we doing storage API operations or local media checks?
# - Do we need to create the storage?
managed_storage = self.__managed_storage()
create_media = self.creating_storage()
# If not creating the storage, our job is easy
if not create_media:
if not self._storage_backend.exists():
raise ValueError(
_("Must specify storage creation parameters for "
"non-existent path '%s'.") % self.path)
# Make sure we have access to the local path
if not managed_storage:
if (os.path.isdir(self.path) and not self.is_floppy()):
raise ValueError(_("The path '%s' must be a file or a "
"device, not a directory") % self.path)
return True
self._storage_creator.validate(self.device, self.type)
# Applicable for managed or local storage
ret = self.is_size_conflict()
if ret[0]:
raise ValueError(ret[1])
elif ret[1]:
logging.warn(ret[1])
def setup(self, meter=None):
"""
Build storage (if required)
If storage doesn't exist (a non-existent file 'path', or 'vol_install'
was specified), we create it.
@param meter: Progress meter to report file creation on
        @type meter: instance of urlgrabber.BaseMeter
"""
if not meter:
meter = progress.BaseMeter()
if not self._storage_creator:
return
volobj = self._storage_creator.create(meter)
self._storage_creator = None
if volobj:
self._change_backend(None, volobj)
def set_defaults(self, guest):
if self.is_cdrom():
self.read_only = True
if (guest.os.is_xenpv() and
self.type == VirtualDisk.TYPE_FILE and
self.driver_name is None and
util.is_blktap_capable(self.conn)):
self.driver_name = VirtualDisk.DRIVER_TAP
if not self.conn.is_qemu():
return
if not self.is_disk():
return
if not self.type == self.TYPE_BLOCK:
return
# Enable cache=none and io=native for block devices. Would
# be nice if qemu did this for us but that time has long passed.
if not self.driver_cache:
self.driver_cache = self.CACHE_MODE_NONE
if not self.driver_io:
self.driver_io = self.IO_MODE_NATIVE
def is_size_conflict(self):
"""
        Reports whether the disk size conflicts with available space.
        Returns a two element tuple:
            1. first element is True if a fatal conflict occurs
            2. second element is a string description of the conflict, or None
        Non-fatal conflicts (sparse disk exceeds available space) will
        return (False, "description of collision")
"""
if not self._storage_creator:
return (False, None)
return self._storage_creator.is_size_conflict()
def is_conflict_disk(self, conn=None):
"""
        Check if the specified storage is in use by any other VMs on the
        passed connection.
@return: list of colliding VM names
@rtype: C{list}
"""
if not self.path:
return False
if not conn:
conn = self.conn
ret = self.path_in_use_by(conn, self.path,
shareable=self.shareable,
read_only=self.read_only)
return ret
def get_target_prefix(self, used_targets=None):
"""
Returns the suggested disk target prefix (hd, xvd, sd ...) for the
disk.
        @returns: (str prefix, int maximum number of targets) tuple
"""
        # The upper limits here aren't necessarily 1024, but let the HV
# error as appropriate.
def _return(prefix):
nummap = {
"vd": 1024,
"xvd": 1024,
"fd": 2,
"hd": 4,
"sd": 1024,
}
return prefix, nummap[prefix]
if self.bus == "virtio":
return _return("vd")
elif self.bus == "xen":
return _return("xvd")
elif self.bus == "fdc" or self.is_floppy():
return _return("fd")
elif self.bus == "ide":
return _return("hd")
elif self.bus or not used_targets:
# sata, scsi, usb, sd
return _return("sd")
# If guest already has some disks defined
preforder = ["vd", "xvd", "sd", "hd"]
for pref in preforder:
for target in used_targets:
if target.startswith(pref):
return _return(pref)
return _return("sd")
def generate_target(self, skip_targets, pref_ctrl=None):
"""
Generate target device ('hda', 'sdb', etc..) for disk, excluding
any targets in 'skip_targets'. If given the 'pref_ctrl'
parameter, it tries to select the target so that the disk is
mapped onto that controller.
Sets self.target, and returns the generated value.
@param skip_targets: list of targets to exclude
@type skip_targets: C{list}
@param pref_ctrl: preferred controller to connect the disk to
@type pref_ctrl: C{int}
@raise ValueError: can't determine target type, no targets available
@returns generated target
@rtype C{str}
"""
prefix, maxnode = self.get_target_prefix(skip_targets)
skip_targets = [t for t in skip_targets if t and t.startswith(prefix)]
skip_targets.sort()
def get_target():
first_found = None
ran = range(maxnode)
if pref_ctrl is not None:
# We assume narrow SCSI bus and libvirt assigning 7
# (1-7, 8-14, etc.) devices per controller
ran = range(pref_ctrl * 7, (pref_ctrl + 1) * 7)
for i in ran:
gen_t = prefix + self.num_to_target(i + 1)
if gen_t in skip_targets:
skip_targets.remove(gen_t)
continue
if not skip_targets:
return gen_t
elif not first_found:
first_found = gen_t
if first_found:
return first_found
ret = get_target()
if ret:
self.target = ret
return ret
if pref_ctrl is not None:
            # This basically means that we either chose a full
            # controller or didn't add any
            raise ValueError(_("Controller number %d for disk of type %s has "
                               "no empty slot to use") % (pref_ctrl, prefix))
else:
            raise ValueError(_("Only %s disks of type '%s' are supported")
                             % (maxnode, prefix))
VirtualDisk.register_type()
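# Rough usage sketch (illustrative only; assumes a virtinst connection
# object 'conn' obtained elsewhere, and a hypothetical image path):
#
#     disk = VirtualDisk(conn)
#     disk.path = "/var/lib/libvirt/images/demo.img"
#     disk.set_create_storage(size=8, fmt="qcow2")   # size is in gigabytes
#     disk.validate()
#     disk.setup()                                   # actually creates the storage
#     xml = disk.get_xml_config()                    # from the XMLBuilder base class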