summary refs log tree commit diff
path: root/nova/virt/libvirt
diff options
context:
space:
mode:
Diffstat (limited to 'nova/virt/libvirt')
-rw-r--r--nova/virt/libvirt/compat.py6
-rw-r--r--nova/virt/libvirt/driver.py505
-rw-r--r--nova/virt/libvirt/firewall.py19
-rw-r--r--nova/virt/libvirt/guest.py12
-rw-r--r--nova/virt/libvirt/host.py36
-rw-r--r--nova/virt/libvirt/imagebackend.py26
-rw-r--r--nova/virt/libvirt/imagecache.py31
-rw-r--r--nova/virt/libvirt/instancejobtracker.py21
-rw-r--r--nova/virt/libvirt/migration.py39
-rw-r--r--nova/virt/libvirt/storage/dmcrypt.py13
-rw-r--r--nova/virt/libvirt/storage/lvm.py14
-rw-r--r--nova/virt/libvirt/storage/rbd_utils.py28
-rw-r--r--nova/virt/libvirt/utils.py8
-rw-r--r--nova/virt/libvirt/vif.py36
-rw-r--r--nova/virt/libvirt/volume/iscsi.py3
-rw-r--r--nova/virt/libvirt/volume/mount.py29
-rw-r--r--nova/virt/libvirt/volume/net.py10
-rw-r--r--nova/virt/libvirt/volume/quobyte.py19
-rw-r--r--nova/virt/libvirt/volume/remotefs.py7
-rw-r--r--nova/virt/libvirt/volume/volume.py10
20 files changed, 396 insertions, 476 deletions
diff --git a/nova/virt/libvirt/compat.py b/nova/virt/libvirt/compat.py
index af2789ce4b..d5d78a9aa3 100644
--- a/nova/virt/libvirt/compat.py
+++ b/nova/virt/libvirt/compat.py
@@ -12,8 +12,6 @@
from oslo_log import log as logging
-from nova.i18n import _LW
-
LOG = logging.getLogger(__name__)
@@ -32,7 +30,7 @@ def get_domain_info(libvirt, host, virt_dom):
return virt_dom.info()
except libvirt.libvirtError as e:
if not host.has_min_version((1, 2, 11)) and is_race(e):
- LOG.warning(_LW('Race detected in libvirt.virDomain.info, '
- 'trying one more time'))
+ LOG.warning('Race detected in libvirt.virDomain.info, '
+ 'trying one more time')
return virt_dom.info()
raise
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index b5b49cfb9b..e0a4a0c88c 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -71,9 +71,6 @@ from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _
-from nova.i18n import _LE
-from nova.i18n import _LI
-from nova.i18n import _LW
from nova import image
from nova import keymgr
from nova.network import model as network_model
@@ -353,9 +350,9 @@ class LibvirtDriver(driver.ComputeDriver):
for mode_str in CONF.libvirt.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
- LOG.warning(_LW('Invalid cachemode %(cache_mode)s specified '
- 'for disk type %(disk_type)s.'),
- {'cache_mode': cache_mode, 'disk_type': disk_type})
+ LOG.warning('Invalid cachemode %(cache_mode)s specified '
+ 'for disk type %(disk_type)s.',
+ {'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
@@ -436,15 +433,15 @@ class LibvirtDriver(driver.ComputeDriver):
if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
hostarch not in (fields.Architecture.I686,
fields.Architecture.X86_64)):
- LOG.warning(_LW('The libvirt driver is not tested on '
- '%(type)s/%(arch)s by the OpenStack project and '
- 'thus its quality can not be ensured. For more '
- 'information, see: http://docs.openstack.org/'
- 'developer/nova/support-matrix.html'),
+ LOG.warning('The libvirt driver is not tested on '
+ '%(type)s/%(arch)s by the OpenStack project and '
+ 'thus its quality can not be ensured. For more '
+ 'information, see: http://docs.openstack.org/'
+ 'developer/nova/support-matrix.html',
{'type': CONF.libvirt.virt_type, 'arch': hostarch})
def _handle_conn_event(self, enabled, reason):
- LOG.info(_LI("Connection event '%(enabled)d' reason '%(reason)s'"),
+ LOG.info("Connection event '%(enabled)d' reason '%(reason)s'",
{'enabled': enabled, 'reason': reason})
self._set_host_enabled(enabled, reason)
@@ -462,11 +459,11 @@ class LibvirtDriver(driver.ComputeDriver):
if (CONF.libvirt.virt_type == 'lxc' and
not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
- LOG.warning(_LW("Running libvirt-lxc without user namespaces is "
- "dangerous. Containers spawned by Nova will be run "
- "as the host's root user. It is highly suggested "
- "that user namespaces be used in a public or "
- "multi-tenant environment."))
+ LOG.warning("Running libvirt-lxc without user namespaces is "
+ "dangerous. Containers spawned by Nova will be run "
+ "as the host's root user. It is highly suggested "
+ "that user namespaces be used in a public or "
+ "multi-tenant environment.")
# Stop libguestfs using KVM unless we're also configured
# to use this. This solves problem where people need to
@@ -499,18 +496,18 @@ class LibvirtDriver(driver.ComputeDriver):
# Give the cloud admin a heads up if we are intending to
# change the MIN_LIBVIRT_VERSION in the next release.
if not self._host.has_min_version(NEXT_MIN_LIBVIRT_VERSION):
- LOG.warning(_LW('Running Nova with a libvirt version less than '
- '%(version)s is deprecated. The required minimum '
- 'version of libvirt will be raised to %(version)s '
- 'in the next release.'),
+ LOG.warning('Running Nova with a libvirt version less than '
+ '%(version)s is deprecated. The required minimum '
+ 'version of libvirt will be raised to %(version)s '
+ 'in the next release.',
{'version': self._version_to_string(
NEXT_MIN_LIBVIRT_VERSION)})
if (CONF.libvirt.virt_type in ("qemu", "kvm") and
not self._host.has_min_version(hv_ver=NEXT_MIN_QEMU_VERSION)):
- LOG.warning(_LW('Running Nova with a QEMU version less than '
- '%(version)s is deprecated. The required minimum '
- 'version of QEMU will be raised to %(version)s '
- 'in the next release.'),
+ LOG.warning('Running Nova with a QEMU version less than '
+ '%(version)s is deprecated. The required minimum '
+ 'version of QEMU will be raised to %(version)s '
+ 'in the next release.',
{'version': self._version_to_string(
NEXT_MIN_QEMU_VERSION)})
@@ -585,16 +582,16 @@ class LibvirtDriver(driver.ComputeDriver):
if self._is_post_copy_available():
migration_flags |= libvirt.VIR_MIGRATE_POSTCOPY
else:
- LOG.info(_LI('The live_migration_permit_post_copy is set '
- 'to True, but it is not supported.'))
+ LOG.info('The live_migration_permit_post_copy is set '
+ 'to True, but it is not supported.')
return migration_flags
def _handle_live_migration_auto_converge(self, migration_flags):
if (self._is_post_copy_available() and
(migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0):
- LOG.info(_LI('The live_migration_permit_post_copy is set to '
- 'True and post copy live migration is available '
- 'so auto-converge will not be in use.'))
+ LOG.info('The live_migration_permit_post_copy is set to '
+ 'True and post copy live migration is available '
+ 'so auto-converge will not be in use.')
elif CONF.libvirt.live_migration_permit_auto_converge:
migration_flags |= libvirt.VIR_MIGRATE_AUTO_CONVERGE
return migration_flags
@@ -814,9 +811,8 @@ class LibvirtDriver(driver.ComputeDriver):
# the wait_for_destroy logic take over.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
- LOG.warning(_LW("Cannot destroy instance, operation time "
- "out"),
- instance=instance)
+ LOG.warning("Cannot destroy instance, operation time out",
+ instance=instance)
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
elif errcode == libvirt.VIR_ERR_SYSTEM_ERROR:
@@ -835,12 +831,12 @@ class LibvirtDriver(driver.ComputeDriver):
# steal time from the cloud host. ie 15 wallclock
# seconds may have passed, but the VM might have only
# have a few seconds of scheduled run time.
- LOG.warning(_LW('Error from libvirt during destroy. '
- 'Code=%(errcode)s Error=%(e)s; '
- 'attempt %(attempt)d of 3'),
- {'errcode': errcode, 'e': e,
- 'attempt': attempt},
- instance=instance)
+ LOG.warning('Error from libvirt during destroy. '
+ 'Code=%(errcode)s Error=%(e)s; '
+ 'attempt %(attempt)d of 3',
+ {'errcode': errcode, 'e': e,
+ 'attempt': attempt},
+ instance=instance)
with excutils.save_and_reraise_exception() as ctxt:
# Try up to 3 times before giving up.
if attempt < 3:
@@ -850,8 +846,8 @@ class LibvirtDriver(driver.ComputeDriver):
if not is_okay:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Error from libvirt during destroy. '
- 'Code=%(errcode)s Error=%(e)s'),
+ LOG.error('Error from libvirt during destroy. '
+ 'Code=%(errcode)s Error=%(e)s',
{'errcode': errcode, 'e': e},
instance=instance)
@@ -871,8 +867,7 @@ class LibvirtDriver(driver.ComputeDriver):
state = power_state.SHUTDOWN
if state == power_state.SHUTDOWN:
- LOG.info(_LI("Instance destroyed successfully."),
- instance=instance)
+ LOG.info("Instance destroyed successfully.", instance=instance)
raise loopingcall.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
@@ -881,8 +876,7 @@ class LibvirtDriver(driver.ComputeDriver):
# still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
- LOG.info(_LI("Instance may be started again."),
- instance=instance)
+ LOG.info("Instance may be started again.", instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
@@ -891,8 +885,7 @@ class LibvirtDriver(driver.ComputeDriver):
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
- LOG.info(_LI("Going to destroy instance again."),
- instance=instance)
+ LOG.info("Going to destroy instance again.", instance=instance)
self._destroy(instance)
else:
# NOTE(GuanQiang): teardown container to avoid resource leak
@@ -919,8 +912,8 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
ctxt.reraise = False
else:
- LOG.error(_LE('Error from libvirt during undefine. '
- 'Code=%(errcode)s Error=%(e)s'),
+ LOG.error('Error from libvirt during undefine. '
+ 'Code=%(errcode)s Error=%(e)s',
{'errcode': errcode, 'e': e},
instance=instance)
except exception.InstanceNotFound:
@@ -942,14 +935,14 @@ class LibvirtDriver(driver.ComputeDriver):
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
- LOG.warning(_LW("Instance may be still running, destroy "
- "it again."), instance=instance)
+ LOG.warning("Instance may be still running, destroy "
+ "it again.", instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
- LOG.exception(_LE('Error from libvirt during unfilter. '
- 'Code=%(errcode)s Error=%(e)s'),
+ LOG.exception(_('Error from libvirt during unfilter. '
+ 'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
reason = _("Error unfiltering instance.")
@@ -998,8 +991,8 @@ class LibvirtDriver(driver.ComputeDriver):
# or deleted
ctxt.reraise = False
LOG.warning(
- _LW("Ignoring Volume Error on vol %(vol_id)s "
- "during delete %(exc)s"),
+ "Ignoring Volume Error on vol %(vol_id)s "
+ "during delete %(exc)s",
{'vol_id': vol.get('volume_id'), 'exc': exc},
instance=instance)
@@ -1247,7 +1240,7 @@ class LibvirtDriver(driver.ComputeDriver):
context, instance)
instance.save()
except Exception:
- LOG.exception(_LE('Failed to attach volume at mountpoint: %s'),
+ LOG.exception(_('Failed to attach volume at mountpoint: %s'),
mountpoint, instance=instance)
with excutils.save_and_reraise_exception():
self._disconnect_volume(connection_info, disk_dev, instance)
@@ -1377,8 +1370,8 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(zhaoqin): If the instance does not exist, _lookup_by_name()
# will throw InstanceNotFound exception. Need to
# disconnect volume under this circumstance.
- LOG.warning(_LW("During detach_volume, instance disappeared."),
- instance=instance)
+ LOG.warning("During detach_volume, instance disappeared.",
+ instance=instance)
except exception.DeviceNotFound:
raise exception.DiskNotFound(location=disk_dev)
except libvirt.libvirtError as ex:
@@ -1388,8 +1381,8 @@ class LibvirtDriver(driver.ComputeDriver):
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
# NOTE(vish):
- LOG.warning(_LW("During detach_volume, instance disappeared."),
- instance=instance)
+ LOG.warning("During detach_volume, instance disappeared.",
+ instance=instance)
else:
raise
@@ -1409,8 +1402,8 @@ class LibvirtDriver(driver.ComputeDriver):
live = state in (power_state.RUNNING, power_state.PAUSED)
guest.attach_device(cfg, persistent=True, live=live)
except libvirt.libvirtError:
- LOG.error(_LE('attaching network adapter failed.'),
- instance=instance, exc_info=True)
+ LOG.error('attaching network adapter failed.',
+ instance=instance, exc_info=True)
self.vif_driver.unplug(instance, vif)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
@@ -1456,8 +1449,8 @@ class LibvirtDriver(driver.ComputeDriver):
if not interface:
mac = vif.get('address')
# The interface is gone so just log it as a warning.
- LOG.warning(_LW('Detaching interface %(mac)s failed because '
- 'the device is no longer found on the guest.'),
+ LOG.warning('Detaching interface %(mac)s failed because '
+ 'the device is no longer found on the guest.',
{'mac': mac}, instance=instance)
return
@@ -1471,15 +1464,14 @@ class LibvirtDriver(driver.ComputeDriver):
wait_for_detach()
except exception.DeviceNotFound:
# The interface is gone so just log it as a warning.
- LOG.warning(_LW('Detaching interface %(mac)s failed because '
- 'the device is no longer found on the guest.'),
+ LOG.warning('Detaching interface %(mac)s failed because '
+ 'the device is no longer found on the guest.',
{'mac': vif.get('address')}, instance=instance)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
- LOG.warning(_LW("During detach_interface, "
- "instance disappeared."),
- instance=instance)
+ LOG.warning("During detach_interface, instance disappeared.",
+ instance=instance)
else:
# NOTE(mriedem): When deleting an instance and using Neutron,
# we can be racing against Neutron deleting the port and
@@ -1494,14 +1486,14 @@ class LibvirtDriver(driver.ComputeDriver):
mac = vif.get('address')
interface = guest.get_interface_by_cfg(cfg)
if interface:
- LOG.error(_LE('detaching network adapter failed.'),
- instance=instance, exc_info=True)
+ LOG.error('detaching network adapter failed.',
+ instance=instance, exc_info=True)
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
# The interface is gone so just log it as a warning.
- LOG.warning(_LW('Detaching interface %(mac)s failed because '
- 'the device is no longer found on the guest.'),
+ LOG.warning('Detaching interface %(mac)s failed because '
+ 'the device is no longer found on the guest.',
{'mac': mac}, instance=instance)
def _create_snapshot_metadata(self, image_meta, instance,
@@ -1619,11 +1611,9 @@ class LibvirtDriver(driver.ComputeDriver):
instance, disk_path, image_type=source_type)
if live_snapshot:
- LOG.info(_LI("Beginning live snapshot process"),
- instance=instance)
+ LOG.info("Beginning live snapshot process", instance=instance)
else:
- LOG.info(_LI("Beginning cold snapshot process"),
- instance=instance)
+ LOG.info("Beginning cold snapshot process", instance=instance)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
@@ -1640,8 +1630,8 @@ class LibvirtDriver(driver.ComputeDriver):
except (NotImplementedError, exception.ImageUnacceptable,
exception.Forbidden) as e:
if type(e) != NotImplementedError:
- LOG.warning(_LW('Performing standard snapshot because direct '
- 'snapshot failed: %(error)s'), {'error': e})
+ LOG.warning('Performing standard snapshot because direct '
+ 'snapshot failed: %(error)s', {'error': e})
failed_snap = metadata.pop('location', None)
if failed_snap:
failed_snap = {'url': str(failed_snap)}
@@ -1676,7 +1666,7 @@ class LibvirtDriver(driver.ComputeDriver):
finally:
self._snapshot_domain(context, live_snapshot, virt_dom,
state, instance)
- LOG.info(_LI("Snapshot extracted, beginning image upload"),
+ LOG.info("Snapshot extracted, beginning image upload",
instance=instance)
# Upload that image to the image service
@@ -1689,7 +1679,7 @@ class LibvirtDriver(driver.ComputeDriver):
image_file)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_LE("Failed to snapshot image"))
+ LOG.exception(_("Failed to snapshot image"))
failed_snap = metadata.pop('location', None)
if failed_snap:
failed_snap = {'url': str(failed_snap)}
@@ -1697,7 +1687,7 @@ class LibvirtDriver(driver.ComputeDriver):
failed_snap, also_destroy_volume=True,
ignore_errors=True)
- LOG.info(_LI("Snapshot image upload complete"), instance=instance)
+ LOG.info("Snapshot image upload complete", instance=instance)
def _prepare_domain_for_snapshot(self, context, live_snapshot, state,
instance):
@@ -1830,7 +1820,7 @@ class LibvirtDriver(driver.ComputeDriver):
except exception.NovaException as err:
if self._requires_quiesce(image_meta):
raise
- LOG.info(_LI('Skipping quiescing instance: %(reason)s.'),
+ LOG.info('Skipping quiescing instance: %(reason)s.',
{'reason': err}, instance=instance)
try:
@@ -1881,8 +1871,8 @@ class LibvirtDriver(driver.ComputeDriver):
snapshot_id,
status)
except Exception:
- LOG.exception(_LE('Failed to send updated snapshot status '
- 'to volume service.'))
+ LOG.exception(_('Failed to send updated snapshot status '
+ 'to volume service.'))
def _volume_snapshot_create(self, context, instance, guest,
volume_id, new_file):
@@ -1993,23 +1983,23 @@ class LibvirtDriver(driver.ComputeDriver):
# If the image says that quiesce is required then we fail.
if self._requires_quiesce(image_meta):
raise
- LOG.exception(_LE('Unable to create quiesced VM snapshot, '
- 'attempting again with quiescing disabled.'),
+ LOG.exception(_('Unable to create quiesced VM snapshot, '
+ 'attempting again with quiescing disabled.'),
instance=instance)
except (exception.InstanceQuiesceNotSupported,
exception.QemuGuestAgentNotEnabled) as err:
# If the image says that quiesce is required then we need to fail.
if self._requires_quiesce(image_meta):
raise
- LOG.info(_LI('Skipping quiescing instance: %(reason)s.'),
+ LOG.info('Skipping quiescing instance: %(reason)s.',
{'reason': err}, instance=instance)
try:
guest.snapshot(snapshot, no_metadata=True, disk_only=True,
reuse_ext=True, quiesce=False)
except libvirt.libvirtError:
- LOG.exception(_LE('Unable to create VM snapshot, '
- 'failing volume_snapshot operation.'),
+ LOG.exception(_('Unable to create VM snapshot, '
+ 'failing volume_snapshot operation.'),
instance=instance)
raise
@@ -2059,9 +2049,9 @@ class LibvirtDriver(driver.ComputeDriver):
volume_id, create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_LE('Error occurred during '
- 'volume_snapshot_create, '
- 'sending error status to Cinder.'),
+ LOG.exception(_('Error occurred during '
+ 'volume_snapshot_create, '
+ 'sending error status to Cinder.'),
instance=instance)
self._volume_snapshot_update_status(
context, snapshot_id, 'error')
@@ -2252,9 +2242,9 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE
relative = rebase_base is not None
except AttributeError:
- LOG.warning(_LW(
+ LOG.warning(
"Relative blockrebase support was not detected. "
- "Continuing with old behaviour."))
+ "Continuing with old behaviour.")
relative = False
LOG.debug(
@@ -2326,9 +2316,9 @@ class LibvirtDriver(driver.ComputeDriver):
snapshot_id, delete_info=delete_info)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_LE('Error occurred during '
- 'volume_snapshot_delete, '
- 'sending error status to Cinder.'),
+ LOG.exception(_('Error occurred during '
+ 'volume_snapshot_delete, '
+ 'sending error status to Cinder.'),
instance=instance)
self._volume_snapshot_update_status(
context, snapshot_id, 'error_deleting')
@@ -2349,13 +2339,13 @@ class LibvirtDriver(driver.ComputeDriver):
soft_reboot_success = False
if soft_reboot_success:
- LOG.info(_LI("Instance soft rebooted successfully."),
+ LOG.info("Instance soft rebooted successfully.",
instance=instance)
return
else:
- LOG.warning(_LW("Failed to soft reboot instance. "
- "Trying hard reboot."),
- instance=instance)
+ LOG.warning("Failed to soft reboot instance. "
+ "Trying hard reboot.",
+ instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
@@ -2394,7 +2384,7 @@ class LibvirtDriver(driver.ComputeDriver):
if old_domid != new_domid:
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
- LOG.info(_LI("Instance shutdown successfully."),
+ LOG.info("Instance shutdown successfully.",
instance=instance)
self._create_domain(domain=guest._domain)
timer = loopingcall.FixedIntervalLoopingCall(
@@ -2402,8 +2392,8 @@ class LibvirtDriver(driver.ComputeDriver):
timer.start(interval=0.5).wait()
return True
else:
- LOG.info(_LI("Instance may have been rebooted during soft "
- "reboot, so return now."), instance=instance)
+ LOG.info("Instance may have been rebooted during soft "
+ "reboot, so return now.", instance=instance)
return True
greenthread.sleep(1)
return False
@@ -2470,7 +2460,7 @@ class LibvirtDriver(driver.ComputeDriver):
state = self.get_info(instance).state
if state == power_state.RUNNING:
- LOG.info(_LI("Instance rebooted successfully."),
+ LOG.info("Instance rebooted successfully.",
instance=instance)
raise loopingcall.LoopingCallDone()
@@ -2512,8 +2502,7 @@ class LibvirtDriver(driver.ComputeDriver):
state = guest.get_power_state(self._host)
if state in SHUTDOWN_STATES:
- LOG.info(_LI("Instance already shutdown."),
- instance=instance)
+ LOG.info("Instance already shutdown.", instance=instance)
return True
LOG.debug("Shutting down instance from state %s", state,
@@ -2527,8 +2516,8 @@ class LibvirtDriver(driver.ComputeDriver):
state = guest.get_power_state(self._host)
if state in SHUTDOWN_STATES:
- LOG.info(_LI("Instance shutdown successfully after %d "
- "seconds."), sec, instance=instance)
+ LOG.info("Instance shutdown successfully after %d seconds.",
+ sec, instance=instance)
return True
# Note(PhilD): We can't assume that the Guest was able to process
@@ -2556,7 +2545,7 @@ class LibvirtDriver(driver.ComputeDriver):
time.sleep(1)
- LOG.info(_LI("Instance failed to shutdown in %d seconds."),
+ LOG.info("Instance failed to shutdown in %d seconds.",
timeout, instance=instance)
return False
@@ -2587,9 +2576,9 @@ class LibvirtDriver(driver.ComputeDriver):
elif error_code == libvirt.VIR_ERR_OPERATION_INVALID:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
- LOG.exception(_LE('Error from libvirt while injecting an NMI to '
- '%(instance_uuid)s: '
- '[Error Code %(error_code)s] %(ex)s'),
+ LOG.exception(_('Error from libvirt while injecting an NMI to '
+ '%(instance_uuid)s: '
+ '[Error Code %(error_code)s] %(ex)s'),
{'instance_uuid': instance.uuid,
'error_code': error_code, 'ex': ex})
raise
@@ -2756,8 +2745,7 @@ class LibvirtDriver(driver.ComputeDriver):
state = self.get_info(instance).state
if state == power_state.RUNNING:
- LOG.info(_LI("Instance spawned successfully."),
- instance=instance)
+ LOG.info("Instance spawned successfully.", instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
@@ -2772,7 +2760,7 @@ class LibvirtDriver(driver.ComputeDriver):
return out
def _append_to_file(self, data, fpath):
- LOG.info(_LI('data: %(data)r, fpath: %(fpath)r'),
+ LOG.info('data: %(data)r, fpath: %(fpath)r',
{'data': data, 'fpath': fpath})
with open(fpath, 'a+') as fp:
fp.write(data)
@@ -2795,9 +2783,8 @@ class LibvirtDriver(driver.ComputeDriver):
path = console_log + "." + str(i)
i += 1
if remaining > 0:
- LOG.info(_LI('Truncated console log returned, '
- '%d bytes ignored'), remaining,
- instance=instance)
+ LOG.info('Truncated console log returned, '
+ '%d bytes ignored', remaining, instance=instance)
return log_data
def get_console_output(self, context, instance):
@@ -2818,8 +2805,8 @@ class LibvirtDriver(driver.ComputeDriver):
continue
if not os.path.exists(path):
- LOG.info(_LI('Instance is configured with a file console, '
- 'but the backing file is not (yet?) present'),
+ LOG.info('Instance is configured with a file console, '
+ 'but the backing file is not (yet?) present',
instance=instance)
return ""
@@ -2862,9 +2849,9 @@ class LibvirtDriver(driver.ComputeDriver):
def get_host_ip_addr(self):
ips = compute_utils.get_machine_ips()
if CONF.my_ip not in ips:
- LOG.warning(_LW('my_ip address (%(my_ip)s) was not found on '
- 'any of the interfaces: %(ifaces)s'),
- {'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)})
+ LOG.warning('my_ip address (%(my_ip)s) was not found on '
+ 'any of the interfaces: %(ifaces)s',
+ {'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)})
return CONF.my_ip
def get_vnc_console(self, context, instance):
@@ -2939,13 +2926,13 @@ class LibvirtDriver(driver.ComputeDriver):
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error on '%(path)s' while checking "
- "direct I/O: '%(ex)s'"),
+ LOG.error("Error on '%(path)s' while checking "
+ "direct I/O: '%(ex)s'",
{'path': dirpath, 'ex': e})
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error on '%(path)s' while checking direct I/O: "
- "'%(ex)s'"), {'path': dirpath, 'ex': e})
+ LOG.error("Error on '%(path)s' while checking direct I/O: "
+ "'%(ex)s'", {'path': dirpath, 'ex': e})
finally:
# ensure unlink(filepath) will actually remove the file by deleting
# the remaining link to it in close(fd)
@@ -3091,8 +3078,8 @@ class LibvirtDriver(driver.ComputeDriver):
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Error injecting data into image '
- '%(img_id)s (%(e)s)'),
+ LOG.error('Error injecting data into image '
+ '%(img_id)s (%(e)s)',
{'img_id': img_id, 'e': e},
instance=instance)
@@ -3116,7 +3103,7 @@ class LibvirtDriver(driver.ComputeDriver):
# ensure directories exist and are writable
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
- LOG.info(_LI('Creating image'), instance=instance)
+ LOG.info('Creating image', instance=instance)
inst_type = instance.get_flavor()
swap_mb = 0
@@ -3272,8 +3259,8 @@ class LibvirtDriver(driver.ComputeDriver):
self._inject_data(backend, instance, injection_info)
elif need_inject:
- LOG.warning(_LW('File injection into a boot from volume '
- 'instance is not supported'), instance=instance)
+ LOG.warning('File injection into a boot from volume '
+ 'instance is not supported', instance=instance)
def _create_configdrive(self, context, instance, injection_info,
rescue=False):
@@ -3284,7 +3271,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance.device_metadata = self._build_device_metadata(context,
instance)
if configdrive.required_by(instance):
- LOG.info(_LI('Using config drive'), instance=instance)
+ LOG.info('Using config drive', instance=instance)
name = 'disk.config'
if rescue:
@@ -3317,7 +3304,7 @@ class LibvirtDriver(driver.ComputeDriver):
# possible while we're still using cache() under the hood.
config_disk_local_path = os.path.join(
libvirt_utils.get_instance_path(instance), name)
- LOG.info(_LI('Creating config drive at %(path)s'),
+ LOG.info('Creating config drive at %(path)s',
{'path': config_disk_local_path},
instance=instance)
@@ -3325,9 +3312,8 @@ class LibvirtDriver(driver.ComputeDriver):
cdb.make_drive(config_disk_local_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Creating config drive failed '
- 'with error: %s'),
- e, instance=instance)
+ LOG.error('Creating config drive failed with '
+ 'error: %s', e, instance=instance)
try:
config_disk.import_file(
@@ -3336,8 +3322,8 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(mikal): if the config drive was imported into RBD,
# then we no longer need the local copy
if CONF.libvirt.images_type == 'rbd':
- LOG.info(_LI('Deleting local config drive %(path)s '
- 'because it was imported into RBD.'),
+ LOG.info('Deleting local config drive %(path)s '
+ 'because it was imported into RBD.',
{'path': config_disk_local_path},
instance=instance)
os.unlink(config_disk_local_path)
@@ -3398,8 +3384,8 @@ class LibvirtDriver(driver.ComputeDriver):
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
- LOG.warning(_LW("Instance disappeared while detaching "
- "a PCI device from it."))
+ LOG.warning("Instance disappeared while detaching "
+ "a PCI device from it.")
else:
raise
@@ -3409,7 +3395,7 @@ class LibvirtDriver(driver.ComputeDriver):
guest.attach_device(self._get_guest_pci_device(dev))
except libvirt.libvirtError:
- LOG.error(_LE('Attaching PCI devices %(dev)s to %(dom)s failed.'),
+ LOG.error('Attaching PCI devices %(dev)s to %(dom)s failed.',
{'dev': pci_devs, 'dom': guest.id})
raise
@@ -3511,12 +3497,12 @@ class LibvirtDriver(driver.ComputeDriver):
'status with: %s',
status_name[disable_service])
except exception.ComputeHostNotFound:
- LOG.warning(_LW('Cannot update service status on host "%s" '
- 'since it is not registered.'), CONF.host)
+ LOG.warning('Cannot update service status on host "%s" '
+ 'since it is not registered.', CONF.host)
except Exception:
- LOG.warning(_LW('Cannot update service status on host "%s" '
- 'due to an unexpected exception.'), CONF.host,
- exc_info=True)
+ LOG.warning('Cannot update service status on host "%s" '
+ 'due to an unexpected exception.', CONF.host,
+ exc_info=True)
if enabled:
mount.get_manager().host_up(self._host)
@@ -3870,7 +3856,7 @@ class LibvirtDriver(driver.ComputeDriver):
idmaps = []
if len(map_strings) > 5:
map_strings = map_strings[0:5]
- LOG.warning(_LW("Too many id maps, only included first five."))
+ LOG.warning("Too many id maps, only included first five.")
for map_string in map_strings:
try:
idmap = klass()
@@ -3880,7 +3866,7 @@ class LibvirtDriver(driver.ComputeDriver):
idmap.count = values[2]
idmaps.append(idmap)
except (ValueError, IndexError):
- LOG.warning(_LW("Invalid value for id mapping %s"), map_string)
+ LOG.warning("Invalid value for id mapping %s", map_string)
return idmaps
def _get_guest_idmaps(self):
@@ -4191,8 +4177,7 @@ class LibvirtDriver(driver.ComputeDriver):
# buggy in http://support.microsoft.com/kb/2687252
clk = vconfig.LibvirtConfigGuestClock()
if os_type == 'windows':
- LOG.info(_LI('Configuring timezone for windows instance to '
- 'localtime'))
+ LOG.info('Configuring timezone for windows instance to localtime')
clk.offset = 'localtime'
else:
clk.offset = 'utc'
@@ -4431,13 +4416,12 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt_perf_event_name = LIBVIRT_PERF_EVENT_PREFIX + event.upper()
if not hasattr(libvirt, libvirt_perf_event_name):
- LOG.warning(_LW("Libvirt doesn't support event type %s."),
- event)
+ LOG.warning("Libvirt doesn't support event type %s.", event)
return False
if (event in PERF_EVENTS_CPU_FLAG_MAPPING
and PERF_EVENTS_CPU_FLAG_MAPPING[event] not in cpu_features):
- LOG.warning(_LW("Host does not support event type %s."), event)
+ LOG.warning("Host does not support event type %s.", event)
return False
return True
@@ -4459,9 +4443,9 @@ class LibvirtDriver(driver.ComputeDriver):
if self._has_uefi_support():
global uefi_logged
if not uefi_logged:
- LOG.warning(_LW("uefi support is without some kind of "
- "functional testing and therefore "
- "considered experimental."))
+ LOG.warning("uefi support is without some kind of "
+ "functional testing and therefore "
+ "considered experimental.")
uefi_logged = True
guest.os_loader = DEFAULT_UEFI_LOADER_PATH[
caps.host.cpu.arch]
@@ -4888,10 +4872,10 @@ class LibvirtDriver(driver.ComputeDriver):
# to use the deprecated option "use_usb_tablet" or set a
# specific device to use
pointer_model = "usbtablet"
- LOG.warning(_LW('The option "use_usb_tablet" has been '
- 'deprecated for Newton in favor of the more '
- 'generic "pointer_model". Please update '
- 'nova.conf to address this change.'))
+ LOG.warning('The option "use_usb_tablet" has been '
+ 'deprecated for Newton in favor of the more '
+ 'generic "pointer_model". Please update '
+ 'nova.conf to address this change.')
if pointer_model == "usbtablet":
# We want a tablet if VNC is enabled, or SPICE is enabled and
@@ -4907,10 +4891,10 @@ class LibvirtDriver(driver.ComputeDriver):
# process of booting an instance if host is configured
# to use USB tablet without VNC or SPICE and SPICE
# agent disable.
- LOG.warning(_LW('USB tablet requested for guests by host '
- 'configuration. In order to accept this '
- 'request VNC should be enabled or SPICE '
- 'and SPICE agent disabled on host.'))
+ LOG.warning('USB tablet requested for guests by host '
+ 'configuration. In order to accept this '
+ 'request VNC should be enabled or SPICE '
+ 'and SPICE agent disabled on host.')
else:
raise exception.UnsupportedPointerModelRequested(
model="usbtablet")
@@ -4926,10 +4910,10 @@ class LibvirtDriver(driver.ComputeDriver):
# For backward compatibility We don't want to break
# process of booting an instance if virtual machine mode
# is not configured as HVM.
- LOG.warning(_LW('USB tablet requested for guests by host '
- 'configuration. In order to accept this '
- 'request the machine mode should be '
- 'configured as HVM.'))
+ LOG.warning('USB tablet requested for guests by host '
+ 'configuration. In order to accept this '
+ 'request the machine mode should be '
+ 'configured as HVM.')
else:
raise exception.UnsupportedPointerModelRequested(
model="usbtablet")
@@ -5083,8 +5067,8 @@ class LibvirtDriver(driver.ComputeDriver):
return guest
def _neutron_failed_callback(self, event_name, instance):
- LOG.error(_LE('Neutron Reported failure on event '
- '%(event)s for instance %(uuid)s'),
+ LOG.error('Neutron Reported failure on event '
+ '%(event)s for instance %(uuid)s',
{'event': event_name, 'uuid': instance.uuid},
instance=instance)
if CONF.vif_plugging_is_fatal:
@@ -5183,8 +5167,7 @@ class LibvirtDriver(driver.ComputeDriver):
raise exception.VirtualInterfaceCreateException()
except Exception:
# Any other error, be sure to clean up
- LOG.error(_LE('Failed to start libvirt guest'),
- instance=instance)
+ LOG.error('Failed to start libvirt guest', instance=instance)
with excutils.save_and_reraise_exception():
self._cleanup_failed_start(context, instance, network_info,
block_device_info, guest,
@@ -5204,8 +5187,8 @@ class LibvirtDriver(driver.ComputeDriver):
try:
total_pcpus = self._host.get_cpu_count()
except libvirt.libvirtError:
- LOG.warning(_LW("Cannot get the number of cpu, because this "
- "function is not implemented for this platform. "))
+ LOG.warning("Cannot get the number of cpu, because this "
+ "function is not implemented for this platform. ")
return 0
if not CONF.vcpu_pin_set:
@@ -5220,8 +5203,8 @@ class LibvirtDriver(driver.ComputeDriver):
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warning(
- _LW("Couldn't retrieve the online CPUs due to a Libvirt "
- "error: %(error)s with error code: %(error_code)s"),
+ "Couldn't retrieve the online CPUs due to a Libvirt "
+ "error: %(error)s with error code: %(error_code)s",
{'error': ex, 'error_code': error_code})
if online_pcpus:
if not (available_ids <= online_pcpus):
@@ -5473,9 +5456,9 @@ class LibvirtDriver(driver.ComputeDriver):
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
self._list_devices_supported = False
- LOG.warning(_LW("URI %(uri)s does not support "
- "listDevices: %(error)s"),
- {'uri': self._uri(), 'error': ex})
+ LOG.warning("URI %(uri)s does not support "
+ "listDevices: %(error)s",
+ {'uri': self._uri(), 'error': ex})
return jsonutils.dumps([])
else:
raise
@@ -5492,11 +5475,11 @@ class LibvirtDriver(driver.ComputeDriver):
for ver in BAD_LIBVIRT_NUMA_VERSIONS:
if self._host.has_version(ver):
if not getattr(self, '_bad_libvirt_numa_version_warn', False):
- LOG.warning(_LW('You are running with libvirt version %s '
- 'which is known to have broken NUMA support. '
- 'Consider patching or updating libvirt on '
- 'this host if you need NUMA support.'),
- self._version_to_string(ver))
+ LOG.warning('You are running with libvirt version %s '
+ 'which is known to have broken NUMA support. '
+ 'Consider patching or updating libvirt on '
+ 'this host if you need NUMA support.',
+ self._version_to_string(ver))
self._bad_libvirt_numa_version_warn = True
return False
@@ -5615,15 +5598,15 @@ class LibvirtDriver(driver.ComputeDriver):
return domain.blockStats(disk_id)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
- LOG.info(_LI('Getting block stats failed, device might have '
- 'been detached. Instance=%(instance_name)s '
- 'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
+ LOG.info('Getting block stats failed, device might have '
+ 'been detached. Instance=%(instance_name)s '
+ 'Disk=%(disk)s Code=%(errcode)s Error=%(e)s',
{'instance_name': instance.name, 'disk': disk_id,
'errcode': errcode, 'e': e},
instance=instance)
except exception.InstanceNotFound:
- LOG.info(_LI('Could not find domain in libvirt for instance %s. '
- 'Cannot get block stats for device'), instance.name,
+ LOG.info('Could not find domain in libvirt for instance %s. '
+ 'Cannot get block stats for device', instance.name,
instance=instance)
def get_console_pool_info(self, console_type):
@@ -6016,7 +5999,7 @@ class LibvirtDriver(driver.ComputeDriver):
if guest_cpu is None:
info = jsonutils.loads(host_cpu_str)
- LOG.info(_LI('Instance launched has CPU info: %s'), host_cpu_str)
+ LOG.info('Instance launched has CPU info: %s', host_cpu_str)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
@@ -6169,8 +6152,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
dom.abortJob()
except libvirt.libvirtError as e:
- LOG.error(_LE("Failed to cancel migration %s"),
- e, instance=instance)
+ LOG.error("Failed to cancel migration %s", e, instance=instance)
raise
def _verify_serial_console_is_disabled(self):
@@ -6277,8 +6259,7 @@ class LibvirtDriver(driver.ComputeDriver):
serial_console.release_port(host=hostname, port=port)
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Live Migration failure: %s"), e,
- instance=instance)
+ LOG.error("Live Migration failure: %s", e, instance=instance)
# If 'migrateToURI' fails we don't know what state the
# VM instances on each host are in. Possibilities include
@@ -6398,8 +6379,8 @@ class LibvirtDriver(driver.ComputeDriver):
size_gb = 2
disk_gb += size_gb
except OSError as e:
- LOG.warning(_LW("Unable to stat %(disk)s: %(ex)s"),
- {'disk': path, 'ex': e})
+ LOG.warning("Unable to stat %(disk)s: %(ex)s",
+ {'disk': path, 'ex': e})
# Ignore error since we don't want to break
# the migration monitoring thread operation
@@ -6479,7 +6460,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
guest.abort_job()
except libvirt.libvirtError as e:
- LOG.warning(_LW("Failed to abort migration %s"),
+ LOG.warning("Failed to abort migration %s",
e, instance=instance)
self._clear_empty_migration(instance)
raise
@@ -6526,19 +6507,19 @@ class LibvirtDriver(driver.ComputeDriver):
if (n % 60) == 0:
lg = LOG.info
- lg(_LI("Migration running for %(secs)d secs, "
- "memory %(remaining)d%% remaining; "
- "(bytes processed=%(processed_memory)d, "
- "remaining=%(remaining_memory)d, "
- "total=%(total_memory)d)"),
+ lg("Migration running for %(secs)d secs, "
+ "memory %(remaining)d%% remaining; "
+ "(bytes processed=%(processed_memory)d, "
+ "remaining=%(remaining_memory)d, "
+ "total=%(total_memory)d)",
{"secs": n / 2, "remaining": remaining,
"processed_memory": info.memory_processed,
"remaining_memory": info.memory_remaining,
"total_memory": info.memory_total}, instance=instance)
if info.data_remaining > progress_watermark:
- lg(_LI("Data remaining %(remaining)d bytes, "
- "low watermark %(watermark)d bytes "
- "%(last)d seconds ago"),
+ lg("Data remaining %(remaining)d bytes, "
+ "low watermark %(watermark)d bytes "
+ "%(last)d seconds ago",
{"remaining": info.data_remaining,
"watermark": progress_watermark,
"last": (now - progress_time)}, instance=instance)
@@ -6546,31 +6527,30 @@ class LibvirtDriver(driver.ComputeDriver):
n = n + 1
elif info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED:
# Migration is all done
- LOG.info(_LI("Migration operation has completed"),
+ LOG.info("Migration operation has completed",
instance=instance)
post_method(context, instance, dest, block_migration,
migrate_data)
break
elif info.type == libvirt.VIR_DOMAIN_JOB_FAILED:
# Migration did not succeed
- LOG.error(_LE("Migration operation has aborted"),
- instance=instance)
+ LOG.error("Migration operation has aborted", instance=instance)
libvirt_migrate.run_recover_tasks(self._host, guest, instance,
on_migration_failure)
recover_method(context, instance, dest, migrate_data)
break
elif info.type == libvirt.VIR_DOMAIN_JOB_CANCELLED:
# Migration was stopped by admin
- LOG.warning(_LW("Migration operation was cancelled"),
- instance=instance)
+ LOG.warning("Migration operation was cancelled",
+ instance=instance)
libvirt_migrate.run_recover_tasks(self._host, guest, instance,
on_migration_failure)
recover_method(context, instance, dest, migrate_data,
migration_status='cancelled')
break
else:
- LOG.warning(_LW("Unexpected migration job type: %d"),
- info.type, instance=instance)
+ LOG.warning("Unexpected migration job type: %d",
+ info.type, instance=instance)
time.sleep(0.5)
self._clear_empty_migration(instance)
@@ -6579,8 +6559,8 @@ class LibvirtDriver(driver.ComputeDriver):
try:
del self.active_migrations[instance.uuid]
except KeyError:
- LOG.warning(_LW("There are no records in active migrations "
- "for instance"), instance=instance)
+ LOG.warning("There are no records in active migrations "
+ "for instance", instance=instance)
def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration,
@@ -6641,8 +6621,8 @@ class LibvirtDriver(driver.ComputeDriver):
block_migration, migrate_data,
finish_event, disk_paths)
except Exception as ex:
- LOG.warning(_LW("Error monitoring migration: %(ex)s"),
- {"ex": ex}, instance=instance, exc_info=True)
+ LOG.warning("Error monitoring migration: %(ex)s",
+ {"ex": ex}, instance=instance, exc_info=True)
raise
finally:
LOG.debug("Live migration monitoring is all done",
@@ -6833,11 +6813,10 @@ class LibvirtDriver(driver.ComputeDriver):
if cnt == max_retry - 1:
raise
else:
- LOG.warning(_LW('plug_vifs() failed %(cnt)d. Retry up to '
- '%(max_retry)d.'),
- {'cnt': cnt,
- 'max_retry': max_retry},
- instance=instance)
+ LOG.warning('plug_vifs() failed %(cnt)d. Retry up to '
+ '%(max_retry)d.',
+ {'cnt': cnt, 'max_retry': max_retry},
+ instance=instance)
greenthread.sleep(1)
# Store vncserver_listen and latest disk device info
@@ -7156,13 +7135,13 @@ class LibvirtDriver(driver.ComputeDriver):
config = guest.get_config()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
- LOG.warning(_LW('Error from libvirt while getting description of '
- '%(instance_name)s: [Error Code %(error_code)s] '
- '%(ex)s'),
- {'instance_name': instance.name,
- 'error_code': error_code,
- 'ex': ex},
- instance=instance)
+ LOG.warning('Error from libvirt while getting description of '
+ '%(instance_name)s: [Error Code %(error_code)s] '
+ '%(ex)s',
+ {'instance_name': instance.name,
+ 'error_code': error_code,
+ 'ex': ex},
+ instance=instance)
raise exception.InstanceNotFound(instance_id=instance.uuid)
return self._get_instance_disk_info_from_config(config,
@@ -7225,36 +7204,35 @@ class LibvirtDriver(driver.ComputeDriver):
info['over_committed_disk_size'])
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
- LOG.warning(_LW(
+ LOG.warning(
'Error from libvirt while getting description of '
- '%(instance_name)s: [Error Code %(error_code)s] %(ex)s'
- ), {'instance_name': guest.name,
- 'error_code': error_code,
- 'ex': ex})
+ '%(instance_name)s: [Error Code %(error_code)s] %(ex)s',
+ {'instance_name': guest.name,
+ 'error_code': error_code,
+ 'ex': ex})
except OSError as e:
if e.errno in (errno.ENOENT, errno.ESTALE):
- LOG.warning(_LW('Periodic task is updating the host stat, '
- 'it is trying to get disk %(i_name)s, '
- 'but disk file was removed by concurrent '
- 'operations such as resize.'),
+ LOG.warning('Periodic task is updating the host stat, '
+ 'it is trying to get disk %(i_name)s, '
+ 'but disk file was removed by concurrent '
+ 'operations such as resize.',
{'i_name': guest.name})
elif e.errno == errno.EACCES:
- LOG.warning(_LW('Periodic task is updating the host stat, '
- 'it is trying to get disk %(i_name)s, '
- 'but access is denied. It is most likely '
- 'due to a VM that exists on the compute '
- 'node but is not managed by Nova.'),
- {'i_name': guest.name})
+ LOG.warning('Periodic task is updating the host stat, '
+ 'it is trying to get disk %(i_name)s, '
+ 'but access is denied. It is most likely '
+ 'due to a VM that exists on the compute '
+ 'node but is not managed by Nova.',
+ {'i_name': guest.name})
else:
raise
except exception.VolumeBDMPathNotFound as e:
- LOG.warning(_LW('Periodic task is updating the host stats, '
- 'it is trying to get disk info for %(i_name)s, '
- 'but the backing volume block device was removed '
- 'by concurrent operations such as resize. '
- 'Error: %(error)s'),
- {'i_name': guest.name,
- 'error': e})
+ LOG.warning('Periodic task is updating the host stats, '
+ 'it is trying to get disk info for %(i_name)s, '
+ 'but the backing volume block device was removed '
+ 'by concurrent operations such as resize. '
+ 'Error: %(error)s',
+ {'i_name': guest.name, 'error': e})
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return disk_over_committed_size
@@ -7434,7 +7412,7 @@ class LibvirtDriver(driver.ComputeDriver):
state = self.get_info(instance).state
if state == power_state.RUNNING:
- LOG.info(_LI("Instance running successfully."), instance=instance)
+ LOG.info("Instance running successfully.", instance=instance)
raise loopingcall.LoopingCallDone()
@staticmethod
@@ -7589,7 +7567,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
root_disk.rollback_to_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
except exception.SnapshotNotFound:
- LOG.warning(_LW("Failed to rollback snapshot (%s)"),
+ LOG.warning("Failed to rollback snapshot (%s)",
libvirt_utils.RESIZE_SNAPSHOT_NAME)
finally:
root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME,
@@ -7910,24 +7888,23 @@ class LibvirtDriver(driver.ComputeDriver):
if not remaining_path and os.path.exists(target_del):
self.job_tracker.terminate_jobs(instance)
- LOG.info(_LI('Deleting instance files %s'), target_del,
+ LOG.info('Deleting instance files %s', target_del,
instance=instance)
remaining_path = target_del
try:
shutil.rmtree(target_del)
except OSError as e:
- LOG.error(_LE('Failed to cleanup directory %(target)s: '
- '%(e)s'), {'target': target_del, 'e': e},
- instance=instance)
+ LOG.error('Failed to cleanup directory %(target)s: %(e)s',
+ {'target': target_del, 'e': e}, instance=instance)
# It is possible that the delete failed, if so don't mark the instance
# as cleaned.
if remaining_path and os.path.exists(remaining_path):
- LOG.info(_LI('Deletion of %s failed'), remaining_path,
+ LOG.info('Deletion of %s failed', remaining_path,
instance=instance)
return False
- LOG.info(_LI('Deletion of %s complete'), target_del, instance=instance)
+ LOG.info('Deletion of %s complete', target_del, instance=instance)
return True
@property
@@ -7952,8 +7929,8 @@ class LibvirtDriver(driver.ComputeDriver):
for bdm in block_device_mapping:
if bdm.device_name is not None:
LOG.warning(
- _LW("Ignoring supplied device name: %(device_name)s. "
- "Libvirt can't honour user-supplied dev names"),
+ "Ignoring supplied device name: %(device_name)s. "
+ "Libvirt can't honour user-supplied dev names",
{'device_name': bdm.device_name}, instance=instance)
bdm.device_name = None
block_device_info = driver.get_block_device_info(instance,
@@ -7974,7 +7951,7 @@ class LibvirtDriver(driver.ComputeDriver):
suggested_dev_name = block_device_obj.device_name
if suggested_dev_name is not None:
LOG.warning(
- _LW('Ignoring supplied device name: %(suggested_dev)s'),
+ 'Ignoring supplied device name: %(suggested_dev)s',
{'suggested_dev': suggested_dev_name}, instance=instance)
# NOTE(ndipanov): get_info_from_bdm will generate the new device name
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 8157e0aa03..13b3ad1f25 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -24,8 +24,6 @@ from oslo_utils import excutils
from oslo_utils import importutils
import nova.conf
-from nova.i18n import _LI
-from nova.i18n import _LW
import nova.virt.firewall as base_firewall
from nova.virt import netutils
@@ -55,8 +53,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
try:
libvirt = importutils.import_module('libvirt')
except ImportError:
- LOG.warning(_LW("Libvirt module could not be loaded. "
- "NWFilterFirewall will not work correctly."))
+ LOG.warning("Libvirt module could not be loaded. "
+ "NWFilterFirewall will not work correctly.")
self._host = host
self.static_filters_configured = False
@@ -109,10 +107,10 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
def setup_basic_filtering(self, instance, network_info):
"""Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
- LOG.info(_LI('Called setup_basic_filtering in nwfilter'),
+ LOG.info('Called setup_basic_filtering in nwfilter',
instance=instance)
- LOG.info(_LI('Ensuring static filters'), instance=instance)
+ LOG.info('Ensuring static filters', instance=instance)
self._ensure_static_filters()
nodhcp_base_filter = self.get_base_filter_list(instance, False)
@@ -281,9 +279,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# This happens when the instance filter is still in use
# (ie. when the instance has not terminated properly)
- LOG.info(_LI('Failed to undefine network filter '
- '%(name)s. Try %(cnt)d of '
- '%(max_retry)d.'),
+ LOG.info('Failed to undefine network filter '
+ '%(name)s. Try %(cnt)d of %(max_retry)d.',
{'name': instance_filter_name,
'cnt': cnt + 1,
'max_retry': max_retry},
@@ -349,8 +346,8 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
self.iptables.apply()
self.nwfilter.unfilter_instance(instance, network_info)
else:
- LOG.info(_LI('Attempted to unfilter instance which is not '
- 'filtered'), instance=instance)
+ LOG.info('Attempted to unfilter instance which is not filtered',
+ instance=instance)
def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists."""
diff --git a/nova/virt/libvirt/guest.py b/nova/virt/libvirt/guest.py
index 87f1639adc..bafbee2e60 100644
--- a/nova/virt/libvirt/guest.py
+++ b/nova/virt/libvirt/guest.py
@@ -40,8 +40,6 @@ import six
from nova.compute import power_state
from nova import exception
from nova.i18n import _
-from nova.i18n import _LE
-from nova.i18n import _LW
from nova import utils
from nova.virt import hardware
from nova.virt.libvirt import compat
@@ -127,7 +125,7 @@ class Guest(object):
guest = host.write_instance_config(xml)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Error defining a guest with XML: %s'),
+ LOG.error('Error defining a guest with XML: %s',
encodeutils.safe_decode(xml))
return guest
@@ -141,8 +139,8 @@ class Guest(object):
return self._domain.createWithFlags(flags)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Error launching a defined domain '
- 'with XML: %s'),
+ LOG.error('Error launching a defined domain '
+ 'with XML: %s',
self._encoded_xml, errors='ignore')
def poweroff(self):
@@ -177,7 +175,7 @@ class Guest(object):
LOG.debug('Failed to set time: agent not configured',
instance_uuid=self.uuid)
else:
- LOG.warning(_LW('Failed to set time: %(reason)s'),
+ LOG.warning('Failed to set time: %(reason)s',
{'reason': e}, instance_uuid=self.uuid)
except Exception as ex:
# The highest priority is not to let this method crash and thus
@@ -210,7 +208,7 @@ class Guest(object):
check_exit_code=[0, 1])
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Error enabling hairpin mode with XML: %s'),
+ LOG.error('Error enabling hairpin mode with XML: %s',
self._encoded_xml, errors='ignore')
def get_interfaces(self):
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index bc01f1533f..397f717f0c 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -48,9 +48,6 @@ import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
-from nova.i18n import _LE
-from nova.i18n import _LI
-from nova.i18n import _LW
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
@@ -149,7 +146,7 @@ class Host(object):
try:
handler()
except Exception:
- LOG.exception(_LE('Exception handling connection event'))
+ LOG.exception(_('Exception handling connection event'))
finally:
self._conn_event_handler_queue.task_done()
@@ -378,8 +375,8 @@ class Host(object):
self._event_lifecycle_callback,
self)
except Exception as e:
- LOG.warning(_LW("URI %(uri)s does not support events: %(error)s"),
- {'uri': self._uri, 'error': e})
+ LOG.warning("URI %(uri)s does not support events: %(error)s",
+ {'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
@@ -394,9 +391,9 @@ class Host(object):
LOG.debug("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s", e)
except libvirt.libvirtError as e:
- LOG.warning(_LW("URI %(uri)s does not support connection"
- " events: %(error)s"),
- {'uri': self._uri, 'error': e})
+ LOG.warning("URI %(uri)s does not support connection"
+ " events: %(error)s",
+ {'uri': self._uri, 'error': e})
return wrapped_conn
@@ -453,7 +450,7 @@ class Host(object):
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
- LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
+ LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=CONF.my_ip,
method='_connect',
reason=ex)
@@ -637,7 +634,7 @@ class Host(object):
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
- LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
+ LOG.info("Libvirt host capabilities %s", xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
# NOTE(mriedem): Don't attempt to get baseline CPU features
@@ -658,8 +655,8 @@ class Host(object):
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
- LOG.warning(_LW("URI %(uri)s does not support full set"
- " of host capabilities: %(error)s"),
+ LOG.warning("URI %(uri)s does not support full set"
+ " of host capabilities: %(error)s",
{'uri': self._uri, 'error': ex})
else:
raise
@@ -689,10 +686,9 @@ class Host(object):
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
- LOG.error(_LE('Hostname has changed from %(old)s '
- 'to %(new)s. A restart is required to take effect.'),
- {'old': self._hostname,
- 'new': hostname})
+ LOG.error('Hostname has changed from %(old)s '
+ 'to %(new)s. A restart is required to take effect.',
+ {'old': self._hostname, 'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
@@ -750,7 +746,7 @@ class Host(object):
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Error defining a secret with XML: %s'), xml)
+ LOG.error('Error defining a secret with XML: %s', xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
@@ -800,8 +796,8 @@ class Host(object):
# TODO(sahid): Use get_info...
dom_mem = int(guest._get_domain_info(self)[2])
except libvirt.libvirtError as e:
- LOG.warning(_LW("couldn't obtain the memory from domain:"
- " %(uuid)s, exception: %(ex)s"),
+ LOG.warning("couldn't obtain the memory from domain:"
+ " %(uuid)s, exception: %(ex)s",
{"uuid": guest.uuid, "ex": e})
continue
# skip dom0
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 54a1bed315..f5352dccd2 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -31,7 +31,6 @@ import six
import nova.conf
from nova import exception
from nova.i18n import _
-from nova.i18n import _LE, _LI, _LW
from nova import image
from nova import keymgr
from nova import utils
@@ -248,8 +247,8 @@ class Image(object):
can_fallocate = not err
self.__class__.can_fallocate = can_fallocate
if not can_fallocate:
- LOG.warning(_LW('Unable to preallocate image at path: '
- '%(path)s'), {'path': self.path})
+ LOG.warning('Unable to preallocate image at path: %(path)s',
+ {'path': self.path})
return can_fallocate
def verify_base_size(self, base, size, base_size=0):
@@ -274,11 +273,11 @@ class Image(object):
base_size = self.get_disk_size(base)
if size < base_size:
- msg = _LE('%(base)s virtual size %(base_size)s '
- 'larger than flavor root disk size %(size)s')
- LOG.error(msg, {'base': base,
- 'base_size': base_size,
- 'size': size})
+ LOG.error('%(base)s virtual size %(base_size)s '
+ 'larger than flavor root disk size %(size)s',
+ {'base': base,
+ 'base_size': base_size,
+ 'size': size})
raise exception.FlavorDiskSmallerThanImage(
flavor_size=size, image_size=base_size)
@@ -483,10 +482,9 @@ class Flat(Image):
data = images.qemu_img_info(self.path)
return data.file_format
except exception.InvalidDiskInfo as e:
- LOG.info(_LI('Failed to get image info from path %(path)s; '
- 'error: %(error)s'),
- {'path': self.path,
- 'error': e})
+ LOG.info('Failed to get image info from path %(path)s; '
+ 'error: %(error)s',
+ {'path': self.path, 'error': e})
return 'raw'
def _supports_encryption(self):
@@ -728,8 +726,8 @@ class Lvm(Image):
self.ephemeral_key_uuid).get_encoded()
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Failed to retrieve ephemeral encryption"
- " key"))
+ LOG.error("Failed to retrieve ephemeral "
+ "encryption key")
else:
raise exception.InternalError(
_("Instance disk to be encrypted but no context provided"))
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index 39542bc54f..e87317df29 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -32,9 +32,6 @@ from oslo_utils import encodeutils
import six
import nova.conf
-from nova.i18n import _LE
-from nova.i18n import _LI
-from nova.i18n import _LW
from nova import utils
from nova.virt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
@@ -197,10 +194,9 @@ class ImageCacheManager(imagecache.ImageCacheManager):
inuse_images.append(backing_path)
if backing_path in self.unexplained_images:
- LOG.warning(_LW('Instance %(instance)s is using a '
- 'backing file %(backing)s which '
- 'does not appear in the image '
- 'service'),
+ LOG.warning('Instance %(instance)s is using a '
+ 'backing file %(backing)s which '
+ 'does not appear in the image service',
{'instance': ent,
'backing': backing_file})
self.unexplained_images.remove(backing_path)
@@ -261,7 +257,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
if not exists or age < maxage:
return
- LOG.info(_LI('Removing base or swap file: %s'), base_file)
+ LOG.info('Removing base or swap file: %s', base_file)
try:
os.remove(base_file)
@@ -279,14 +275,13 @@ class ImageCacheManager(imagecache.ImageCacheManager):
if os.path.exists(signature):
os.remove(signature)
except OSError as e:
- LOG.error(_LE('Failed to remove %(base_file)s, '
- 'error was %(error)s'),
+ LOG.error('Failed to remove %(base_file)s, '
+ 'error was %(error)s',
{'base_file': base_file,
'error': e})
if age < maxage:
- LOG.info(_LI('Base or swap file too young to remove: %s'),
- base_file)
+ LOG.info('Base or swap file too young to remove: %s', base_file)
else:
_inner_remove_old_enough_file()
if remove_lock:
@@ -321,7 +316,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
def _mark_in_use(self, img_id, base_file):
"""Mark a single base image as in use."""
- LOG.info(_LI('image %(id)s at (%(base_file)s): checking'),
+ LOG.info('image %(id)s at (%(base_file)s): checking',
{'id': img_id, 'base_file': base_file})
if base_file in self.unexplained_images:
@@ -345,8 +340,8 @@ class ImageCacheManager(imagecache.ImageCacheManager):
error_images = self.used_swap_images - self.back_swap_images
for error_image in error_images:
- LOG.warning(_LW('%s swap image was used by instance'
- ' but no back files existing!'), error_image)
+ LOG.warning('%s swap image was used by instance'
+ ' but no back files existing!', error_image)
def _age_and_verify_cached_images(self, context, all_instances, base_dir):
LOG.debug('Verify base images')
@@ -368,16 +363,16 @@ class ImageCacheManager(imagecache.ImageCacheManager):
# Anything left is an unknown base image
for img in self.unexplained_images:
- LOG.warning(_LW('Unknown base file: %s'), img)
+ LOG.warning('Unknown base file: %s', img)
self.removable_base_files.append(img)
# Dump these lists
if self.active_base_files:
- LOG.info(_LI('Active base files: %s'),
+ LOG.info('Active base files: %s',
' '.join(self.active_base_files))
if self.removable_base_files:
- LOG.info(_LI('Removable base files: %s'),
+ LOG.info('Removable base files: %s',
' '.join(self.removable_base_files))
if self.remove_unused_base_images:
diff --git a/nova/virt/libvirt/instancejobtracker.py b/nova/virt/libvirt/instancejobtracker.py
index 011473cbcf..090e8bd954 100644
--- a/nova/virt/libvirt/instancejobtracker.py
+++ b/nova/virt/libvirt/instancejobtracker.py
@@ -20,9 +20,6 @@ import signal
from oslo_log import log as logging
-from nova.i18n import _LE
-from nova.i18n import _LW
-
LOG = logging.getLogger(__name__)
@@ -75,9 +72,9 @@ class InstanceJobTracker(object):
os.kill(pid, signal.SIGKILL)
except OSError as exc:
if exc.errno != errno.ESRCH:
- LOG.error(_LE('Failed to kill process %(pid)s '
- 'due to %(reason)s, while deleting the '
- 'instance.'), {'pid': pid, 'reason': exc},
+ LOG.error('Failed to kill process %(pid)s '
+ 'due to %(reason)s, while deleting the '
+ 'instance.', {'pid': pid, 'reason': exc},
instance=instance)
try:
@@ -85,14 +82,12 @@ class InstanceJobTracker(object):
os.kill(pid, 0)
except OSError as exc:
if exc.errno != errno.ESRCH:
- LOG.error(_LE('Unexpected error while checking process '
- '%(pid)s.'), {'pid': pid},
- instance=instance)
+ LOG.error('Unexpected error while checking process '
+ '%(pid)s.', {'pid': pid}, instance=instance)
else:
# The process is still around
- LOG.warning(_LW("Failed to kill a long running process "
- "%(pid)s related to the instance when "
- "deleting it."), {'pid': pid},
- instance=instance)
+ LOG.warning("Failed to kill a long running process "
+ "%(pid)s related to the instance when "
+ "deleting it.", {'pid': pid}, instance=instance)
self.remove_job(instance, pid)
diff --git a/nova/virt/libvirt/migration.py b/nova/virt/libvirt/migration.py
index d295ac9987..aa9823928d 100644
--- a/nova/virt/libvirt/migration.py
+++ b/nova/virt/libvirt/migration.py
@@ -24,8 +24,6 @@ from oslo_log import log as logging
from nova.compute import power_state
import nova.conf
-from nova.i18n import _LI
-from nova.i18n import _LW
LOG = logging.getLogger(__name__)
@@ -240,7 +238,7 @@ def find_job_type(guest, instance):
instance=instance)
return libvirt.VIR_DOMAIN_JOB_COMPLETED
else:
- LOG.info(_LI("Error %(ex)s, migration failed"),
+ LOG.info("Error %(ex)s, migration failed",
{"ex": ex}, instance=instance)
return libvirt.VIR_DOMAIN_JOB_FAILED
@@ -271,15 +269,14 @@ def should_abort(instance, now,
if (progress_timeout != 0 and
(now - progress_time) > progress_timeout):
- LOG.warning(_LW("Live migration stuck for %d sec"),
+ LOG.warning("Live migration stuck for %d sec",
(now - progress_time), instance=instance)
return True
if (completion_timeout != 0 and
elapsed > completion_timeout):
- LOG.warning(
- _LW("Live migration not completed after %d sec"),
- completion_timeout, instance=instance)
+ LOG.warning("Live migration not completed after %d sec",
+ completion_timeout, instance=instance)
return True
return False
@@ -359,8 +356,8 @@ def update_downtime(guest, instance,
instance=instance)
return olddowntime
- LOG.info(_LI("Increasing downtime to %(downtime)d ms "
- "after %(waittime)d sec elapsed time"),
+ LOG.info("Increasing downtime to %(downtime)d ms "
+ "after %(waittime)d sec elapsed time",
{"downtime": thisstep[1],
"waittime": thisstep[0]},
instance=instance)
@@ -368,8 +365,7 @@ def update_downtime(guest, instance,
try:
guest.migrate_configure_max_downtime(thisstep[1])
except libvirt.libvirtError as e:
- LOG.warning(_LW("Unable to increase max downtime to %(time)d"
- "ms: %(e)s"),
+ LOG.warning("Unable to increase max downtime to %(time)d ms: %(e)s",
{"time": thisstep[1], "e": e}, instance=instance)
return thisstep[1]
@@ -404,14 +400,13 @@ def trigger_postcopy_switch(guest, instance, migration):
try:
guest.migrate_start_postcopy()
except libvirt.libvirtError as e:
- LOG.warning(_LW("Failed to switch to post-copy live "
- "migration: %s"),
+ LOG.warning("Failed to switch to post-copy live migration: %s",
e, instance=instance)
else:
# NOTE(ltomas): Change the migration status to indicate that
# it is in post-copy active mode, i.e., the VM at
# destination is the active one
- LOG.info(_LI("Switching to post-copy migration mode"),
+ LOG.info("Switching to post-copy migration mode",
instance=instance)
migration.status = 'running (post-copy)'
migration.save()
@@ -443,8 +438,8 @@ def run_tasks(guest, instance, active_migrations, on_migration_failure,
task = tasks.popleft()
if task == 'force-complete':
if migration.status == 'running (post-copy)':
- LOG.warning(_LW("Live-migration %s already switched "
- "to post-copy mode."),
+ LOG.warning("Live-migration %s already switched "
+ "to post-copy mode.",
instance=instance)
elif is_post_copy_enabled:
trigger_postcopy_switch(guest, instance, migration)
@@ -453,11 +448,11 @@ def run_tasks(guest, instance, active_migrations, on_migration_failure,
guest.pause()
on_migration_failure.append("unpause")
except Exception as e:
- LOG.warning(_LW("Failed to pause instance during "
- "live-migration %s"),
+ LOG.warning("Failed to pause instance during "
+ "live-migration %s",
e, instance=instance)
else:
- LOG.warning(_LW("Unknown migration task '%(task)s'"),
+ LOG.warning("Unknown migration task '%(task)s'",
{"task": task}, instance=instance)
@@ -488,11 +483,11 @@ def run_recover_tasks(host, guest, instance, on_migration_failure):
if state == power_state.PAUSED:
guest.resume()
except Exception as e:
- LOG.warning(_LW("Failed to resume paused instance "
- "before live-migration rollback %s"),
+ LOG.warning("Failed to resume paused instance "
+ "before live-migration rollback %s",
e, instance=instance)
else:
- LOG.warning(_LW("Unknown migration task '%(task)s'"),
+ LOG.warning("Unknown migration task '%(task)s'",
{"task": task}, instance=instance)
diff --git a/nova/virt/libvirt/storage/dmcrypt.py b/nova/virt/libvirt/storage/dmcrypt.py
index 7b21de7ab3..541435e99f 100644
--- a/nova/virt/libvirt/storage/dmcrypt.py
+++ b/nova/virt/libvirt/storage/dmcrypt.py
@@ -20,7 +20,6 @@ from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
-from nova.i18n import _LE
from nova.virt.libvirt import utils
@@ -67,8 +66,8 @@ def create_volume(target, device, cipher, key_size, key):
utils.execute(*cmd, process_input=key, run_as_root=True)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Could not start encryption for disk %(device)s: "
- "%(exception)s"), {'device': device, 'exception': e})
+ LOG.error("Could not start encryption for disk %(device)s: "
+ "%(exception)s", {'device': device, 'exception': e})
def delete_volume(target):
@@ -87,10 +86,10 @@ def delete_volume(target):
LOG.debug("Ignoring exit code 4, volume already destroyed")
else:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Could not disconnect encrypted volume "
- "%(volume)s. If dm-crypt device is still active "
- "it will have to be destroyed manually for "
- "cleanup to succeed."), {'volume': target})
+ LOG.error("Could not disconnect encrypted volume "
+ "%(volume)s. If dm-crypt device is still active "
+ "it will have to be destroyed manually for "
+ "cleanup to succeed.", {'volume': target})
def list_volumes():
diff --git a/nova/virt/libvirt/storage/lvm.py b/nova/virt/libvirt/storage/lvm.py
index a6fc568985..ca8ffe1e7c 100644
--- a/nova/virt/libvirt/storage/lvm.py
+++ b/nova/virt/libvirt/storage/lvm.py
@@ -27,7 +27,6 @@ import six
import nova.conf
from nova import exception
from nova.i18n import _
-from nova.i18n import _LW
from nova.virt.libvirt import utils
CONF = nova.conf.CONF
@@ -62,11 +61,11 @@ def create_volume(vg, lv, size, sparse=False):
preallocated_space = 64 * units.Mi
check_size(vg, lv, preallocated_space)
if free_space < size:
- LOG.warning(_LW('Volume group %(vg)s will not be able'
- ' to hold sparse volume %(lv)s.'
- ' Virtual volume size is %(size)d bytes,'
- ' but free space on volume group is'
- ' only %(free_space)db.'),
+ LOG.warning('Volume group %(vg)s will not be able'
+ ' to hold sparse volume %(lv)s.'
+ ' Virtual volume size is %(size)d bytes,'
+ ' but free space on volume group is'
+ ' only %(free_space)db.',
{'vg': vg,
'free_space': free_space,
'size': size,
@@ -210,8 +209,7 @@ def clear_volume(path):
try:
volume_size = get_volume_size(path)
except exception.VolumeBDMPathNotFound:
- LOG.warning(_LW('ignoring missing logical volume %(path)s'),
- {'path': path})
+ LOG.warning('ignoring missing logical volume %(path)s', {'path': path})
return
if volume_clear_size != 0 and volume_clear_size < volume_size:
diff --git a/nova/virt/libvirt/storage/rbd_utils.py b/nova/virt/libvirt/storage/rbd_utils.py
index dc082aa3b5..974b5a9244 100644
--- a/nova/virt/libvirt/storage/rbd_utils.py
+++ b/nova/virt/libvirt/storage/rbd_utils.py
@@ -32,8 +32,6 @@ from oslo_utils import units
from nova import exception
from nova.i18n import _
-from nova.i18n import _LE
-from nova.i18n import _LW
from nova import utils
from nova.virt.libvirt import utils as libvirt_utils
@@ -78,7 +76,7 @@ class RBDVolumeProxy(object):
driver._disconnect_from_rados(client, ioctx)
except rbd.Error:
with excutils.save_and_reraise_exception():
- LOG.exception(_LE("error opening rbd image %s"), name)
+ LOG.exception(_("error opening rbd image %s"), name)
driver._disconnect_from_rados(client, ioctx)
self.driver = driver
@@ -306,13 +304,13 @@ class RBDDriver(object):
try:
RbdProxy().remove(client.ioctx, name)
except rbd.ImageNotFound:
- LOG.warning(_LW('image %(volume)s in pool %(pool)s can not be '
- 'found, failed to remove'),
+ LOG.warning('image %(volume)s in pool %(pool)s can not be '
+ 'found, failed to remove',
{'volume': name, 'pool': self.pool})
except rbd.ImageHasSnapshots:
- LOG.error(_LE('image %(volume)s in pool %(pool)s has '
- 'snapshots, failed to remove'),
- {'volume': name, 'pool': self.pool})
+ LOG.error('image %(volume)s in pool %(pool)s has '
+ 'snapshots, failed to remove',
+ {'volume': name, 'pool': self.pool})
def import_image(self, base, name):
"""Import RBD volume from image file.
@@ -342,9 +340,8 @@ class RBDDriver(object):
self.remove_snap(volume, libvirt_utils.RESIZE_SNAPSHOT_NAME,
ignore_errors=True)
except (rbd.ImageBusy, rbd.ImageHasSnapshots):
- LOG.warning(_LW('rbd remove %(volume)s in pool %(pool)s '
- 'failed'),
- {'volume': volume, 'pool': self.pool})
+ LOG.warning('rbd remove %(volume)s in pool %(pool)s failed',
+ {'volume': volume, 'pool': self.pool})
retryctx['retries'] -= 1
if retryctx['retries'] <= 0:
raise loopingcall.LoopingCallDone()
@@ -406,17 +403,16 @@ class RBDDriver(object):
if force:
vol.unprotect_snap(name)
elif not ignore_errors:
- LOG.warning(_LW('snapshot(%(name)s) on rbd '
- 'image(%(img)s) is protected, '
- 'skipping'),
+ LOG.warning('snapshot(%(name)s) on rbd '
+ 'image(%(img)s) is protected, skipping',
{'name': name, 'img': volume})
return
LOG.debug('removing snapshot(%(name)s) on rbd image(%(img)s)',
{'name': name, 'img': volume})
vol.remove_snap(name)
elif not ignore_errors:
- LOG.warning(_LW('no snapshot(%(name)s) found on rbd '
- 'image(%(img)s)'),
+ LOG.warning('no snapshot(%(name)s) found on rbd '
+ 'image(%(img)s)',
{'name': name, 'img': volume})
def rollback_to_snap(self, volume, name):
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index a29342f976..b9f2d24a08 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -27,8 +27,6 @@ from oslo_log import log as logging
import nova.conf
from nova.i18n import _
-from nova.i18n import _LI
-from nova.i18n import _LW
from nova.objects import fields as obj_fields
from nova import utils
from nova.virt.disk import api as disk
@@ -167,7 +165,7 @@ def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
else:
return "tap"
else:
- LOG.info(_LI("tap-ctl check: %s"), out)
+ LOG.info("tap-ctl check: %s", out)
except OSError as exc:
if exc.errno == errno.ENOENT:
LOG.debug("tap-ctl tool is not installed")
@@ -279,8 +277,8 @@ def update_mtime(path):
# the same base image and using shared storage, so log the exception
# but don't fail. Ideally we'd know if we were on shared storage and
# would re-raise the error if we are not on shared storage.
- LOG.warning(_LW("Failed to update mtime on path %(path)s. "
- "Error: %(error)s"),
+ LOG.warning("Failed to update mtime on path %(path)s. "
+ "Error: %(error)s",
{'path': path, "error": exc})
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index d7b60ff1f2..ac7b905a51 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -29,7 +29,6 @@ from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.i18n import _
-from nova.i18n import _LE
from nova.network import linux_net
from nova.network import model as network_model
from nova.network import os_vif_util
@@ -634,10 +633,8 @@ class LibvirtGenericVIFDriver(object):
fabric, network_model.VIF_TYPE_IB_HOSTDEV,
pci_slot, run_as_root=True)
except processutils.ProcessExecutionError:
- LOG.exception(
- _LE("Failed while plugging ib hostdev vif"),
- instance=instance
- )
+ LOG.exception(_("Failed while plugging ib hostdev vif"),
+ instance=instance)
def plug_802qbg(self, instance, vif):
pass
@@ -679,7 +676,7 @@ class LibvirtGenericVIFDriver(object):
utils.execute('mm-ctl', '--bind-port', port_id, dev,
run_as_root=True)
except processutils.ProcessExecutionError:
- LOG.exception(_LE("Failed while plugging vif"), instance=instance)
+ LOG.exception(_("Failed while plugging vif"), instance=instance)
def plug_iovisor(self, instance, vif):
"""Plug using PLUMgrid IO Visor Driver
@@ -700,7 +697,7 @@ class LibvirtGenericVIFDriver(object):
'pgtag2=%s' % net_id, 'pgtag1=%s' % tenant_id,
run_as_root=True)
except processutils.ProcessExecutionError:
- LOG.exception(_LE("Failed while plugging vif"), instance=instance)
+ LOG.exception(_("Failed while plugging vif"), instance=instance)
def plug_tap(self, instance, vif):
"""Plug a VIF_TYPE_TAP virtual interface."""
@@ -754,7 +751,7 @@ class LibvirtGenericVIFDriver(object):
linux_net.create_tap_dev(dev, multiqueue=multiqueue)
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
except processutils.ProcessExecutionError:
- LOG.exception(_LE("Failed while plugging vif"), instance=instance)
+ LOG.exception(_("Failed while plugging vif"), instance=instance)
def _plug_os_vif(self, instance, vif):
instance_info = os_vif_util.nova_to_osvif_instance(instance)
@@ -817,16 +814,14 @@ class LibvirtGenericVIFDriver(object):
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
v2_name)
except processutils.ProcessExecutionError:
- LOG.exception(_LE("Failed while unplugging vif"),
- instance=instance)
+ LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_ivs_ethernet(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
except processutils.ProcessExecutionError:
- LOG.exception(_LE("Failed while unplugging vif"),
- instance=instance)
+ LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_ivs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy (same as OVS)
@@ -844,8 +839,7 @@ class LibvirtGenericVIFDriver(object):
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
linux_net.delete_ivs_vif_port(v2_name)
except processutils.ProcessExecutionError:
- LOG.exception(_LE("Failed while unplugging vif"),
- instance=instance)
+ LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
@@ -864,7 +858,7 @@ class LibvirtGenericVIFDriver(object):
utils.execute('ebrctl', 'del-port', fabric, vnic_mac,
run_as_root=True)
except Exception:
- LOG.exception(_LE("Failed while unplugging ib hostdev vif"))
+ LOG.exception(_("Failed while unplugging ib hostdev vif"))
def unplug_802qbg(self, instance, vif):
pass
@@ -900,8 +894,7 @@ class LibvirtGenericVIFDriver(object):
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
- LOG.exception(_LE("Failed while unplugging vif"),
- instance=instance)
+ LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_tap(self, instance, vif):
"""Unplug a VIF_TYPE_TAP virtual interface."""
@@ -909,8 +902,7 @@ class LibvirtGenericVIFDriver(object):
try:
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
- LOG.exception(_LE("Failed while unplugging vif"),
- instance=instance)
+ LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_iovisor(self, instance, vif):
"""Unplug using PLUMgrid IO Visor Driver
@@ -926,8 +918,7 @@ class LibvirtGenericVIFDriver(object):
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
- LOG.exception(_LE("Failed while unplugging vif"),
- instance=instance)
+ LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_vhostuser(self, instance, vif):
pass
@@ -943,8 +934,7 @@ class LibvirtGenericVIFDriver(object):
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
- LOG.exception(
- _LE("Failed while unplugging vif"), instance=instance)
+ LOG.exception(_("Failed while unplugging vif"), instance=instance)
def _unplug_os_vif(self, instance, vif):
instance_info = os_vif_util.nova_to_osvif_instance(instance)
diff --git a/nova/virt/libvirt/volume/iscsi.py b/nova/virt/libvirt/volume/iscsi.py
index dfe72a2c36..44a7d80a5f 100644
--- a/nova/virt/libvirt/volume/iscsi.py
+++ b/nova/virt/libvirt/volume/iscsi.py
@@ -16,7 +16,6 @@ from os_brick.initiator import connector
from oslo_log import log as logging
import nova.conf
-from nova.i18n import _LW
from nova import utils
from nova.virt.libvirt.volume import volume as libvirt_volume
@@ -73,7 +72,7 @@ class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
try:
self.connector.disconnect_volume(connection_info['data'], None)
except os_brick_exception.VolumeDeviceNotFound as exc:
- LOG.warning(_LW('Ignoring VolumeDeviceNotFound: %s'), exc)
+ LOG.warning('Ignoring VolumeDeviceNotFound: %s', exc)
return
LOG.debug("Disconnected iSCSI Volume %s", disk_dev)
diff --git a/nova/virt/libvirt/volume/mount.py b/nova/virt/libvirt/volume/mount.py
index b761a33494..476e914932 100644
--- a/nova/virt/libvirt/volume/mount.py
+++ b/nova/virt/libvirt/volume/mount.py
@@ -23,7 +23,7 @@ import six
import nova.conf
from nova import exception
-from nova.i18n import _LE, _LW
+from nova.i18n import _
from nova import utils
CONF = nova.conf.CONF
@@ -111,8 +111,7 @@ class _HostMountStateManager(object):
"""
with self.cond:
if self.state is not None:
- LOG.warning(_LW("host_up called, but we think host is "
- "already up"))
+ LOG.warning("host_up called, but we think host is already up")
self._host_down()
# Wait until all operations using a previous state generation are
@@ -139,8 +138,7 @@ class _HostMountStateManager(object):
"""
with self.cond:
if self.state is None:
- LOG.warning(_LW("host_down called, but we don't think host "
- "is up"))
+ LOG.warning("host_down called, but we don't think host is up")
return
self._host_down()
@@ -313,10 +311,10 @@ class _HostMountState(object):
# We're not going to raise the exception because we're
# in the desired state anyway. However, this is still
# unusual so we'll log it.
- LOG.exception(_LE('Error mounting %(fstype)s export '
- '%(export)s on %(mountpoint)s. '
- 'Continuing because mountpount is '
- 'mounted despite this.'),
+ LOG.exception(_('Error mounting %(fstype)s export '
+ '%(export)s on %(mountpoint)s. '
+ 'Continuing because mountpount is '
+ 'mounted despite this.'),
{'fstype': fstype, 'export': export,
'mountpoint': mountpoint})
@@ -353,10 +351,9 @@ class _HostMountState(object):
try:
mount.remove_attachment(vol_name, instance.uuid)
except KeyError:
- LOG.warning(_LW("Request to remove attachment "
- "(%(vol_name)s, %(instance)s) from "
- "%(mountpoint)s, but we don't think it's in "
- "use."),
+ LOG.warning("Request to remove attachment "
+ "(%(vol_name)s, %(instance)s) from "
+ "%(mountpoint)s, but we don't think it's in use.",
{'vol_name': vol_name, 'instance': instance.uuid,
'mountpoint': mountpoint})
@@ -384,15 +381,15 @@ class _HostMountState(object):
utils.execute('umount', mountpoint, run_as_root=True,
attempts=3, delay_on_retry=True)
except processutils.ProcessExecutionError as ex:
- LOG.error(_LE("Couldn't unmount %(mountpoint)s: %(reason)s"),
+ LOG.error("Couldn't unmount %(mountpoint)s: %(reason)s",
{'mountpoint': mountpoint, 'reason': six.text_type(ex)})
if not os.path.ismount(mountpoint):
try:
utils.execute('rmdir', mountpoint)
except processutils.ProcessExecutionError as ex:
- LOG.error(_LE("Couldn't remove directory %(mountpoint)s: "
- "%(reason)s"),
+ LOG.error("Couldn't remove directory %(mountpoint)s: "
+ "%(reason)s",
{'mountpoint': mountpoint,
'reason': six.text_type(ex)})
return False
diff --git a/nova/virt/libvirt/volume/net.py b/nova/virt/libvirt/volume/net.py
index 94120ddc91..cec43ce93b 100644
--- a/nova/virt/libvirt/volume/net.py
+++ b/nova/virt/libvirt/volume/net.py
@@ -14,7 +14,7 @@ from oslo_log import log as logging
import nova.conf
from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _
from nova import utils
from nova.virt.libvirt.volume import volume as libvirt_volume
@@ -81,10 +81,10 @@ class LibvirtNetVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
# NOTE(mriedem): We'll have to be extra careful about this in case
# the reason we got here is due to an old volume connection created
# before we started preferring the Cinder settings in Ocata.
- LOG.warning(_LW('Falling back to Nova configuration values for '
- 'RBD authentication. Cinder should be configured '
- 'for auth with Ceph volumes. This fallback will '
- 'be dropped in the Nova 16.0.0 Pike release.'))
+ LOG.warning('Falling back to Nova configuration values for '
+ 'RBD authentication. Cinder should be configured '
+ 'for auth with Ceph volumes. This fallback will '
+ 'be dropped in the Nova 16.0.0 Pike release.')
# use the nova config values
conf.auth_username = CONF.libvirt.rbd_user
conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
diff --git a/nova/virt/libvirt/volume/quobyte.py b/nova/virt/libvirt/volume/quobyte.py
index 4dea6c121e..241fd839dc 100644
--- a/nova/virt/libvirt/volume/quobyte.py
+++ b/nova/virt/libvirt/volume/quobyte.py
@@ -24,8 +24,6 @@ import six
import nova.conf
from nova import exception as nova_exception
from nova.i18n import _
-from nova.i18n import _LE
-from nova.i18n import _LI
from nova import utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import fs
@@ -53,7 +51,7 @@ def mount_volume(volume, mnt_base, configfile=None):
mnt_base)
# Run mount command but do not fail on already mounted exit code
utils.execute(*command, check_exit_code=[0, 4])
- LOG.info(_LI('Mounted volume: %s'), volume)
+ LOG.info('Mounted volume: %s', volume)
def umount_volume(mnt_base):
@@ -62,10 +60,9 @@ def umount_volume(mnt_base):
utils.execute('umount.quobyte', mnt_base)
except processutils.ProcessExecutionError as exc:
if 'Device or resource busy' in six.text_type(exc):
- LOG.error(_LE("The Quobyte volume at %s is still in use."),
- mnt_base)
+ LOG.error("The Quobyte volume at %s is still in use.", mnt_base)
else:
- LOG.exception(_LE("Couldn't unmount the Quobyte Volume at %s"),
+ LOG.exception(_("Couldn't unmount the Quobyte Volume at %s"),
mnt_base)
@@ -81,8 +78,8 @@ def validate_volume(mnt_base):
raise nova_exception.InternalError(msg)
if not os.access(mnt_base, os.W_OK | os.X_OK):
- msg = (_LE("Volume is not writable. Please broaden the file"
- " permissions. Mount: %s") % mnt_base)
+ msg = _("Volume is not writable. Please broaden the file"
+ " permissions. Mount: %s") % mnt_base
raise nova_exception.InternalError(msg)
@@ -121,8 +118,8 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
except OSError as exc:
if exc.errno == errno.ENOTCONN:
mounted = False
- LOG.info(_LI('Fixing previous mount %s which was not'
- ' unmounted correctly.'), mount_path)
+ LOG.info('Fixing previous mount %s which was not'
+ ' unmounted correctly.', mount_path)
umount_volume(mount_path)
if not mounted:
@@ -143,7 +140,7 @@ class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
if libvirt_utils.is_mounted(mount_path, 'quobyte@' + quobyte_volume):
umount_volume(mount_path)
else:
- LOG.info(_LI("Trying to disconnected unmounted volume at %s"),
+ LOG.info("Trying to disconnected unmounted volume at %s",
mount_path)
def _normalize_export(self, export):
diff --git a/nova/virt/libvirt/volume/remotefs.py b/nova/virt/libvirt/volume/remotefs.py
index ef1e3bdf81..8c72e055db 100644
--- a/nova/virt/libvirt/volume/remotefs.py
+++ b/nova/virt/libvirt/volume/remotefs.py
@@ -24,7 +24,7 @@ from oslo_utils import importutils
import six
import nova.conf
-from nova.i18n import _LE, _LW
+from nova.i18n import _
from nova import utils
LOG = logging.getLogger(__name__)
@@ -52,7 +52,7 @@ def mount_share(mount_path, export_path,
utils.execute(*mount_cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if 'Device or resource busy' in six.text_type(exc):
- LOG.warning(_LW("%s is already mounted"), export_path)
+ LOG.warning("%s is already mounted", export_path)
else:
raise
@@ -70,8 +70,7 @@ def unmount_share(mount_path, export_path):
if 'target is busy' in six.text_type(exc):
LOG.debug("The share %s is still in use.", export_path)
else:
- LOG.exception(_LE("Couldn't unmount the share %s"),
- export_path)
+ LOG.exception(_("Couldn't unmount the share %s"), export_path)
class RemoteFilesystem(object):
diff --git a/nova/virt/libvirt/volume/volume.py b/nova/virt/libvirt/volume/volume.py
index 4577e1df07..ffaa7f3f0b 100644
--- a/nova/virt/libvirt/volume/volume.py
+++ b/nova/virt/libvirt/volume/volume.py
@@ -21,8 +21,6 @@ from oslo_log import log as logging
import nova.conf
from nova import exception
-from nova.i18n import _LE
-from nova.i18n import _LW
from nova import profiler
from nova.virt.libvirt import config as vconfig
import nova.virt.libvirt.driver
@@ -76,8 +74,8 @@ class LibvirtBaseVolumeDriver(object):
new_key = 'disk_' + k
setattr(conf, new_key, v)
else:
- LOG.warning(_LW('Unknown content in connection_info/'
- 'qos_specs: %s'), specs)
+ LOG.warning('Unknown content in connection_info/'
+ 'qos_specs: %s', specs)
# Extract access_mode control parameters
if 'access_mode' in data and data['access_mode']:
@@ -85,8 +83,8 @@ class LibvirtBaseVolumeDriver(object):
if access_mode in ('ro', 'rw'):
conf.readonly = access_mode == 'ro'
else:
- LOG.error(_LE('Unknown content in '
- 'connection_info/access_mode: %s'),
+ LOG.error('Unknown content in '
+ 'connection_info/access_mode: %s',
access_mode)
raise exception.InvalidVolumeAccessMode(
access_mode=access_mode)