author    Ngo Quoc Cuong <cuongnq@vn.fujitsu.com>  2017-05-22 16:00:08 +0700
committer Ngo Quoc Cuong <cuongnq@vn.fujitsu.com>  2017-07-18 09:03:39 +0000
commit    efae7d796a1750ce1aa34e42c1f5dd10cf22eace (patch)
tree      485726a2f037b755bb437b04097be45a8586a49c
parent    247f9b8120fb901ec420a59a500db0db9dad445a (diff)
download  nova-efae7d796a1750ce1aa34e42c1f5dd10cf22eace.tar.gz
Remove translation of log messages
The i18n team has decided not to translate the logs because it does not seem very useful; operators prefer to have them in English so that they can search for those strings on the internet.

Partially fixes nova/conductor, nova/console, nova/consoleauth, nova/db and nova/image; other paths will be fixed in subsequent commits.

Change-Id: I132f60cb4743f85c96ec5231d6f860cfb0815758
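To illustrate the pattern applied throughout this diff, here is a minimal, self-contained sketch using the standard logging module; the _LI stub below stands in for the nova.i18n lazy-translation marker being removed and is not Nova code.

    import logging

    LOG = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO)

    def _LI(msg):
        # Stand-in for the nova.i18n translation marker that this change removes.
        return msg

    # Before this change: log messages were wrapped in translation markers.
    LOG.info(_LI('nova-conductor connection established successfully'))

    # After this change: the plain string is passed straight to the logger;
    # interpolation arguments are still deferred to the logging call itself.
    LOG.info('nova-conductor connection established successfully')
    LOG.warning('No host-to-cell mapping found for selected host %(host)s. '
                'Setup is incomplete.', {'host': 'compute-1'})

Note that user-facing exception messages keep using _() (from nova.i18n import _), as the remaining imports in this diff show; only the _LE/_LI/_LW log markers are dropped.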
-rw-r--r--  nova/conductor/api.py  15
-rw-r--r--  nova/conductor/manager.py  42
-rw-r--r--  nova/console/manager.py  3
-rw-r--r--  nova/console/serial.py  9
-rw-r--r--  nova/console/websocketproxy.py  3
-rw-r--r--  nova/console/xvp.py  6
-rw-r--r--  nova/consoleauth/manager.py  11
-rw-r--r--  nova/db/api.py  3
-rw-r--r--  nova/db/sqlalchemy/api.py  50
-rw-r--r--  nova/db/sqlalchemy/api_migrations/migrate_repo/versions/030_require_cell_setup.py  9
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/216_havana.py  6
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py  6
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py  6
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/274_update_instances_project_id_index.py  6
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/277_add_fixed_ip_updated_index.py  6
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/295_add_virtual_interfaces_uuid_index.py  6
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/319_add_instances_deleted_created_at_index.py  6
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/347_add_updated_at_index.py  10
-rw-r--r--  nova/db/sqlalchemy/utils.py  6
-rw-r--r--  nova/image/download/__init__.py  6
-rw-r--r--  nova/image/glance.py  57
21 files changed, 121 insertions(+), 151 deletions(-)
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 6a55cfb7dd..3f8aeafd47 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -20,7 +20,6 @@ import oslo_messaging as messaging
from nova import baserpc
from nova.conductor import rpcapi
import nova.conf
-from nova.i18n import _LI, _LW
CONF = nova.conf.CONF
@@ -67,16 +66,16 @@ class API(object):
self.base_rpcapi.ping(context, '1.21 GigaWatts',
timeout=timeout)
if has_timedout:
- LOG.info(_LI('nova-conductor connection '
- 'established successfully'))
+ LOG.info('nova-conductor connection '
+ 'established successfully')
break
except messaging.MessagingTimeout:
has_timedout = True
- LOG.warning(_LW('Timed out waiting for nova-conductor. '
- 'Is it running? Or did this service start '
- 'before nova-conductor? '
- 'Reattempting establishment of '
- 'nova-conductor connection...'))
+ LOG.warning('Timed out waiting for nova-conductor. '
+ 'Is it running? Or did this service start '
+ 'before nova-conductor? '
+ 'Reattempting establishment of '
+ 'nova-conductor connection...')
class ComputeTaskAPI(object):
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 3f92a731ae..574ee09a47 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -37,7 +37,7 @@ from nova.conductor.tasks import migrate
from nova import context as nova_context
from nova.db import base
from nova import exception
-from nova.i18n import _, _LE, _LI, _LW
+from nova.i18n import _
from nova import image
from nova import manager
from nova import network
@@ -69,7 +69,7 @@ def targets_cell(fn):
im = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
- LOG.error(_LE('InstanceMapping not found, unable to target cell'),
+ LOG.error('InstanceMapping not found, unable to target cell',
instance=instance)
im = None
else:
@@ -230,7 +230,7 @@ class ComputeTaskManager(base.Base):
self.notifier = rpc.get_notifier('compute', CONF.host)
def reset(self):
- LOG.info(_LI('Reloading compute RPC API'))
+ LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -363,8 +363,7 @@ class ComputeTaskManager(base.Base):
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
except Exception:
- msg = _LE('Failed to deallocate networks')
- LOG.exception(msg, instance=instance)
+ LOG.exception('Failed to deallocate networks', instance=instance)
return
instance.system_metadata['network_allocated'] = 'False'
@@ -439,8 +438,8 @@ class ComputeTaskManager(base.Base):
migration.status = 'error'
migration.save()
except Exception as ex:
- LOG.error(_LE('Migration of instance %(instance_id)s to host'
- ' %(dest)s unexpectedly failed.'),
+ LOG.error('Migration of instance %(instance_id)s to host'
+ ' %(dest)s unexpectedly failed.',
{'instance_id': instance.uuid, 'dest': destination},
exc_info=True)
_set_vm_state(context, instance, ex, vm_states.ERROR,
@@ -702,18 +701,18 @@ class ComputeTaskManager(base.Base):
exception.UnsupportedPolicyException):
instance.task_state = None
instance.save()
- LOG.warning(_LW("No valid host found for unshelve instance"),
+ LOG.warning("No valid host found for unshelve instance",
instance=instance)
return
except Exception:
with excutils.save_and_reraise_exception():
instance.task_state = None
instance.save()
- LOG.error(_LE("Unshelve attempted but an error "
- "has occurred"), instance=instance)
+ LOG.error("Unshelve attempted but an error "
+ "has occurred", instance=instance)
else:
- LOG.error(_LE('Unshelve attempted but vm_state not SHELVED or '
- 'SHELVED_OFFLOADED'), instance=instance)
+ LOG.error('Unshelve attempted but vm_state not SHELVED or '
+ 'SHELVED_OFFLOADED', instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
@@ -763,7 +762,7 @@ class ComputeTaskManager(base.Base):
'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
- LOG.warning(_LW("No valid host found for rebuild"),
+ LOG.warning("No valid host found for rebuild",
instance=instance)
except exception.UnsupportedPolicyException as ex:
request_spec = request_spec.to_legacy_request_spec_dict()
@@ -772,9 +771,8 @@ class ComputeTaskManager(base.Base):
'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
- LOG.warning(_LW("Server with unsupported policy "
- "cannot be rebuilt"),
- instance=instance)
+ LOG.warning("Server with unsupported policy "
+ "cannot be rebuilt", instance=instance)
try:
migration = objects.Migration.get_by_instance_and_status(
@@ -864,9 +862,9 @@ class ComputeTaskManager(base.Base):
# Not yet setup for cellsv2. Instances will need to be written
# to the configured database. This will become a deployment
# error in Ocata.
- LOG.error(_LE('No cell mapping found for cell0 while '
- 'trying to record scheduling failure. '
- 'Setup is incomplete.'))
+ LOG.error('No cell mapping found for cell0 while '
+ 'trying to record scheduling failure. '
+ 'Setup is incomplete.')
return
build_requests = build_requests or []
@@ -919,7 +917,7 @@ class ComputeTaskManager(base.Base):
hosts = self._schedule_instances(context, request_specs[0],
instance_uuids)
except Exception as exc:
- LOG.exception(_LE('Failed to schedule instances'))
+ LOG.exception('Failed to schedule instances')
self._bury_in_cell0(context, request_specs[0], exc,
build_requests=build_requests)
return
@@ -941,8 +939,8 @@ class ComputeTaskManager(base.Base):
context, host['host'])
host_mapping_cache[host['host']] = host_mapping
except exception.HostMappingNotFound as exc:
- LOG.error(_LE('No host-to-cell mapping found for selected '
- 'host %(host)s. Setup is incomplete.'),
+ LOG.error('No host-to-cell mapping found for selected '
+ 'host %(host)s. Setup is incomplete.',
{'host': host['host']})
self._bury_in_cell0(context, request_spec, exc,
build_requests=[build_request],
diff --git a/nova/console/manager.py b/nova/console/manager.py
index 6308f31b09..6e6bc6ffdd 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -22,7 +22,6 @@ from nova.compute import rpcapi as compute_rpcapi
import nova.conf
from nova.console import xvp
from nova import exception
-from nova.i18n import _LI
from nova import manager
from nova import objects
from nova import utils
@@ -49,7 +48,7 @@ class ConsoleProxyManager(manager.Manager):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def reset(self):
- LOG.info(_LI('Reloading compute RPC API'))
+ LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
diff --git a/nova/console/serial.py b/nova/console/serial.py
index 28a3c7a0c9..9a4028e3dd 100644
--- a/nova/console/serial.py
+++ b/nova/console/serial.py
@@ -21,7 +21,6 @@ import six.moves
import nova.conf
from nova import exception
-from nova.i18n import _LW
from nova import utils
LOG = logging.getLogger(__name__)
@@ -71,10 +70,10 @@ def _get_port_range():
start, stop = map(int, config_range.split(':'))
if start >= stop:
default_port_range = nova.conf.serial_console.DEFAULT_PORT_RANGE
- LOG.warning(_LW("serial_console.port_range should be in the "
- "format <start>:<stop> and start < stop, "
- "Given value %(port_range)s is invalid. "
- "Taking the default port range %(default)s."),
+ LOG.warning("serial_console.port_range should be in the "
+ "format <start>:<stop> and start < stop, "
+ "Given value %(port_range)s is invalid. "
+ "Taking the default port range %(default)s.",
{'port_range': config_range,
'default': default_port_range})
start, stop = map(int, default_port_range.split(':'))
diff --git a/nova/console/websocketproxy.py b/nova/console/websocketproxy.py
index 2357a18968..245560ad24 100644
--- a/nova/console/websocketproxy.py
+++ b/nova/console/websocketproxy.py
@@ -31,7 +31,6 @@ from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova import exception
from nova.i18n import _
-from nova.i18n import _LW
LOG = logging.getLogger(__name__)
@@ -96,7 +95,7 @@ class NovaProxyRequestHandlerBase(object):
except Cookie.CookieError:
# NOTE(stgleb): Do not print out cookie content
# for security reasons.
- LOG.warning(_LW('Found malformed cookie'))
+ LOG.warning('Found malformed cookie')
else:
if 'token' in cookie:
token = cookie['token'].value
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index 3bbbd81107..10ff7acce9 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -26,7 +26,7 @@ from oslo_utils import excutils
import nova.conf
from nova import context
from nova import db
-from nova.i18n import _, _LE
+from nova.i18n import _
from nova import utils
@@ -100,7 +100,7 @@ class XVPConsoleProxy(object):
cfile.write(config)
except IOError:
with excutils.save_and_reraise_exception():
- LOG.exception(_LE("Failed to write configuration file"))
+ LOG.exception("Failed to write configuration file")
def _xvp_stop(self):
LOG.debug('Stopping xvp')
@@ -123,7 +123,7 @@ class XVPConsoleProxy(object):
'-c', CONF.xvp.console_xvp_conf,
'-l', CONF.xvp.console_xvp_log)
except processutils.ProcessExecutionError as err:
- LOG.error(_LE('Error starting xvp: %s'), err)
+ LOG.error('Error starting xvp: %s', err)
def _xvp_restart(self):
LOG.debug('Restarting xvp')
diff --git a/nova/consoleauth/manager.py b/nova/consoleauth/manager.py
index 2ed2e2787e..52d4529e54 100644
--- a/nova/consoleauth/manager.py
+++ b/nova/consoleauth/manager.py
@@ -27,7 +27,6 @@ from nova.cells import rpcapi as cells_rpcapi
from nova.compute import rpcapi as compute_rpcapi
import nova.conf
from nova import context as nova_context
-from nova.i18n import _LI
from nova import manager
from nova import objects
@@ -63,7 +62,7 @@ class ConsoleAuthManager(manager.Manager):
return self._mc_instance
def reset(self):
- LOG.info(_LI('Reloading compute RPC API'))
+ LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -102,8 +101,8 @@ class ConsoleAuthManager(manager.Manager):
self.mc_instance.set(instance_uuid.encode('UTF-8'),
jsonutils.dumps(tokens))
- LOG.info(_LI("Received Token: %(token)s, %(token_dict)s"),
- {'token': token, 'token_dict': token_dict})
+ LOG.info("Received Token: %(token)s, %(token_dict)s",
+ {'token': token, 'token_dict': token_dict})
def _validate_token(self, context, token):
instance_uuid = token['instance_uuid']
@@ -131,8 +130,8 @@ class ConsoleAuthManager(manager.Manager):
def check_token(self, context, token):
token_str = self.mc.get(token.encode('UTF-8'))
token_valid = (token_str is not None)
- LOG.info(_LI("Checking Token: %(token)s, %(token_valid)s"),
- {'token': token, 'token_valid': token_valid})
+ LOG.info("Checking Token: %(token)s, %(token_valid)s",
+ {'token': token, 'token_valid': token_valid})
if token_valid:
token = jsonutils.loads(token_str)
if self._validate_token(context, token):
diff --git a/nova/db/api.py b/nova/db/api.py
index e0e01321f7..275340dbad 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -32,7 +32,6 @@ from oslo_log import log as logging
from nova.cells import rpcapi as cells_rpcapi
import nova.conf
-from nova.i18n import _LE
CONF = nova.conf.CONF
@@ -1787,7 +1786,7 @@ def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed)
except Exception:
- LOG.exception(_LE("Failed to notify cells of bw_usage update"))
+ LOG.exception("Failed to notify cells of bw_usage update")
return rv
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 675099425b..afe1b80e40 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -65,7 +65,7 @@ import nova.conf
import nova.context
from nova.db.sqlalchemy import models
from nova import exception
-from nova.i18n import _, _LI, _LE, _LW
+from nova.i18n import _
from nova import safe_utils
profiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy')
@@ -905,7 +905,7 @@ def floating_ip_get(context, id):
if not result:
raise exception.FloatingIpNotFound(id=id)
except db_exc.DBError:
- LOG.warning(_LW("Invalid floating IP ID %s in request"), id)
+ LOG.warning("Invalid floating IP ID %s in request", id)
raise exception.InvalidID(id=id)
return result
@@ -1589,7 +1589,7 @@ def virtual_interface_create(context, values):
vif_ref.update(values)
vif_ref.save(context.session)
except db_exc.DBError:
- LOG.exception(_LE("VIF creation failed with a database error."))
+ LOG.exception("VIF creation failed with a database error.")
raise exception.VirtualInterfaceCreateException()
return vif_ref
@@ -1936,7 +1936,7 @@ def instance_get(context, instance_id, columns_to_join=None):
except db_exc.DBError:
# NOTE(sdague): catch all in case the db engine chokes on the
# id because it's too long of an int to store.
- LOG.warning(_LW("Invalid instance id %s in request"), instance_id)
+ LOG.warning("Invalid instance id %s in request", instance_id)
raise exception.InvalidID(id=instance_id)
@@ -3689,17 +3689,17 @@ def _refresh_quota_usages(quota_usage, until_refresh, in_use):
:param in_use: Actual quota usage for the resource.
"""
if quota_usage.in_use != in_use:
- LOG.info(_LI('quota_usages out of sync, updating. '
- 'project_id: %(project_id)s, '
- 'user_id: %(user_id)s, '
- 'resource: %(res)s, '
- 'tracked usage: %(tracked_use)s, '
- 'actual usage: %(in_use)s'),
- {'project_id': quota_usage.project_id,
- 'user_id': quota_usage.user_id,
- 'res': quota_usage.resource,
- 'tracked_use': quota_usage.in_use,
- 'in_use': in_use})
+ LOG.info('quota_usages out of sync, updating. '
+ 'project_id: %(project_id)s, '
+ 'user_id: %(user_id)s, '
+ 'resource: %(res)s, '
+ 'tracked usage: %(tracked_use)s, '
+ 'actual usage: %(in_use)s',
+ {'project_id': quota_usage.project_id,
+ 'user_id': quota_usage.user_id,
+ 'res': quota_usage.resource,
+ 'tracked_use': quota_usage.in_use,
+ 'in_use': in_use})
else:
LOG.debug('QuotaUsage has not changed, refresh is unnecessary for: %s',
dict(quota_usage))
@@ -3896,8 +3896,8 @@ def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
context.session.add(usage_ref)
if unders:
- LOG.warning(_LW("Change will make usage less than 0 for the following "
- "resources: %s"), unders)
+ LOG.warning("Change will make usage less than 0 for the following "
+ "resources: %s", unders)
if overs:
if project_quotas == user_quotas:
@@ -5598,9 +5598,9 @@ def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
rd_bytes < current_usage['curr_read_bytes'] or
wr_req < current_usage['curr_writes'] or
wr_bytes < current_usage['curr_write_bytes']):
- LOG.info(_LI("Volume(%s) has lower stats then what is in "
- "the database. Instance must have been rebooted "
- "or crashed. Updating totals."), id)
+ LOG.info("Volume(%s) has lower stats then what is in "
+ "the database. Instance must have been rebooted "
+ "or crashed. Updating totals.", id)
if not update_totals:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'])
@@ -5959,8 +5959,8 @@ def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
if attempt < max_retries - 1:
ctxt.reraise = False
else:
- LOG.warning(_LW("Add metadata failed for aggregate %(id)s "
- "after %(retries)s retries"),
+ LOG.warning("Add metadata failed for aggregate %(id)s "
+ "after %(retries)s retries",
{"id": aggregate_id, "retries": max_retries})
@@ -6380,7 +6380,7 @@ def _archive_if_instance_deleted(table, shadow_table, instances, conn,
result_delete = conn.execute(delete_statement)
return result_delete.rowcount
except db_exc.DBReferenceError as ex:
- LOG.warning(_LW('Failed to archive %(table)s: %(error)s'),
+ LOG.warning('Failed to archive %(table)s: %(error)s',
{'table': table.__tablename__,
'error': six.text_type(ex)})
return 0
@@ -6472,8 +6472,8 @@ def _archive_deleted_rows_for_table(tablename, max_rows):
# A foreign key constraint keeps us from deleting some of
# these rows until we clean up a dependent table. Just
# skip this table for now; we'll come back to it later.
- LOG.warning(_LW("IntegrityError detected when archiving table "
- "%(tablename)s: %(error)s"),
+ LOG.warning("IntegrityError detected when archiving table "
+ "%(tablename)s: %(error)s",
{'tablename': tablename, 'error': six.text_type(ex)})
if ((max_rows is None or rows_archived < max_rows)
diff --git a/nova/db/sqlalchemy/api_migrations/migrate_repo/versions/030_require_cell_setup.py b/nova/db/sqlalchemy/api_migrations/migrate_repo/versions/030_require_cell_setup.py
index e59b12433a..6e7670f55b 100644
--- a/nova/db/sqlalchemy/api_migrations/migrate_repo/versions/030_require_cell_setup.py
+++ b/nova/db/sqlalchemy/api_migrations/migrate_repo/versions/030_require_cell_setup.py
@@ -14,7 +14,7 @@ from oslo_log import log as logging
from sqlalchemy import MetaData, Table, func, select
from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _
from nova import objects
LOG = logging.getLogger(__name__)
@@ -54,7 +54,6 @@ def upgrade(migrate_engine):
host_mappings = Table('host_mappings', meta, autoload=True)
count = select([func.count()]).select_from(host_mappings).scalar()
if count == 0:
- msg = _LW('No host mappings were found, but are required for Ocata. '
- 'Please run nova-manage cell_v2 simple_cell_setup before '
- 'continuing.')
- LOG.warning(msg)
+ LOG.warning('No host mappings were found, but are required for Ocata. '
+ 'Please run nova-manage cell_v2 simple_cell_setup before '
+ 'continuing.')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py b/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py
index 317e3c3f93..efb29a40a1 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py
@@ -21,8 +21,6 @@ from sqlalchemy import ForeignKey, Index, Integer, MetaData, String, Table
from sqlalchemy import Text
from sqlalchemy.types import NullType
-from nova.i18n import _LE
-
LOG = logging.getLogger(__name__)
@@ -81,7 +79,7 @@ def _create_shadow_tables(migrate_engine):
shadow_table.create()
except Exception:
LOG.info(repr(shadow_table))
- LOG.exception(_LE('Exception while creating table.'))
+ LOG.exception('Exception while creating table.')
raise
@@ -1075,7 +1073,7 @@ def upgrade(migrate_engine):
table.create()
except Exception:
LOG.info(repr(table))
- LOG.exception(_LE('Exception while creating table.'))
+ LOG.exception('Exception while creating table.')
raise
# task log unique constraint
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py b/nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py
index d9484a7bfd..1d123869a9 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py
@@ -15,8 +15,6 @@
from oslo_log import log as logging
from sqlalchemy import Index, MetaData, Table
-from nova.i18n import _LI
-
LOG = logging.getLogger(__name__)
@@ -33,8 +31,8 @@ def upgrade(migrate_engine):
reservations = Table('reservations', meta, autoload=True)
if _get_deleted_expire_index(reservations):
- LOG.info(_LI('Skipped adding reservations_deleted_expire_idx '
- 'because an equivalent index already exists.'))
+ LOG.info('Skipped adding reservations_deleted_expire_idx '
+ 'because an equivalent index already exists.')
return
# Based on expire_reservations query
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py b/nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py
index d9484a7bfd..1d123869a9 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py
@@ -15,8 +15,6 @@
from oslo_log import log as logging
from sqlalchemy import Index, MetaData, Table
-from nova.i18n import _LI
-
LOG = logging.getLogger(__name__)
@@ -33,8 +31,8 @@ def upgrade(migrate_engine):
reservations = Table('reservations', meta, autoload=True)
if _get_deleted_expire_index(reservations):
- LOG.info(_LI('Skipped adding reservations_deleted_expire_idx '
- 'because an equivalent index already exists.'))
+ LOG.info('Skipped adding reservations_deleted_expire_idx '
+ 'because an equivalent index already exists.')
return
# Based on expire_reservations query
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/274_update_instances_project_id_index.py b/nova/db/sqlalchemy/migrate_repo/versions/274_update_instances_project_id_index.py
index 3b8ba9f154..012cf4556a 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/274_update_instances_project_id_index.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/274_update_instances_project_id_index.py
@@ -16,8 +16,6 @@
from oslo_log import log as logging
from sqlalchemy import MetaData, Table, Index
-from nova.i18n import _LI
-
LOG = logging.getLogger(__name__)
@@ -33,8 +31,8 @@ def upgrade(migrate_engine):
for index in instances.indexes:
if [c.name for c in index.columns] == ['project_id', 'deleted']:
- LOG.info(_LI('Skipped adding instances_project_id_deleted_idx '
- 'because an equivalent index already exists.'))
+ LOG.info('Skipped adding instances_project_id_deleted_idx '
+ 'because an equivalent index already exists.')
break
else:
index = Index('instances_project_id_deleted_idx',
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/277_add_fixed_ip_updated_index.py b/nova/db/sqlalchemy/migrate_repo/versions/277_add_fixed_ip_updated_index.py
index afba49cd58..bd01c99ac0 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/277_add_fixed_ip_updated_index.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/277_add_fixed_ip_updated_index.py
@@ -13,8 +13,6 @@
from oslo_log import log as logging
from sqlalchemy import Index, MetaData, Table
-from nova.i18n import _LI
-
LOG = logging.getLogger(__name__)
@@ -37,8 +35,8 @@ def _get_table_index(migrate_engine):
def upgrade(migrate_engine):
meta, table, index = _get_table_index(migrate_engine)
if index:
- LOG.info(_LI('Skipped adding %s because an equivalent index'
- ' already exists.'), INDEX_NAME)
+ LOG.info('Skipped adding %s because an equivalent index'
+ ' already exists.', INDEX_NAME)
return
columns = [getattr(table.c, col_name) for col_name in INDEX_COLUMNS]
index = Index(INDEX_NAME, *columns)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/295_add_virtual_interfaces_uuid_index.py b/nova/db/sqlalchemy/migrate_repo/versions/295_add_virtual_interfaces_uuid_index.py
index e3c9445ed2..2402721ceb 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/295_add_virtual_interfaces_uuid_index.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/295_add_virtual_interfaces_uuid_index.py
@@ -14,8 +14,6 @@
from oslo_log import log as logging
from sqlalchemy import MetaData, Table, Index
-from nova.i18n import _LI
-
LOG = logging.getLogger(__name__)
INDEX_COLUMNS = ['uuid']
@@ -37,8 +35,8 @@ def _get_table_index(migrate_engine):
def upgrade(migrate_engine):
meta, table, index = _get_table_index(migrate_engine)
if index:
- LOG.info(_LI('Skipped adding %s because an equivalent index'
- ' already exists.'), INDEX_NAME)
+ LOG.info('Skipped adding %s because an equivalent index'
+ ' already exists.', INDEX_NAME)
return
columns = [getattr(table.c, col_name) for col_name in INDEX_COLUMNS]
index = Index(INDEX_NAME, *columns)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/319_add_instances_deleted_created_at_index.py b/nova/db/sqlalchemy/migrate_repo/versions/319_add_instances_deleted_created_at_index.py
index 5457f70af0..766e6a37cf 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/319_add_instances_deleted_created_at_index.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/319_add_instances_deleted_created_at_index.py
@@ -16,8 +16,6 @@
from oslo_log import log as logging
from sqlalchemy import MetaData, Table, Index
-from nova.i18n import _LI
-
LOG = logging.getLogger(__name__)
INDEX_COLUMNS = ['deleted', 'created_at']
@@ -39,8 +37,8 @@ def _get_table_index(migrate_engine):
def upgrade(migrate_engine):
meta, table, index = _get_table_index(migrate_engine)
if index:
- LOG.info(_LI('Skipped adding %s because an equivalent index'
- ' already exists.'), INDEX_NAME)
+ LOG.info('Skipped adding %s because an equivalent index'
+ ' already exists.', INDEX_NAME)
return
columns = [getattr(table.c, col_name) for col_name in INDEX_COLUMNS]
index = Index(INDEX_NAME, *columns)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/347_add_updated_at_index.py b/nova/db/sqlalchemy/migrate_repo/versions/347_add_updated_at_index.py
index b87fcf6984..65d2ec62e0 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/347_add_updated_at_index.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/347_add_updated_at_index.py
@@ -14,8 +14,6 @@ from oslo_log import log as logging
from sqlalchemy import MetaData, Table, Index
from sqlalchemy.engine import reflection
-from nova.i18n import _LI
-
LOG = logging.getLogger(__name__)
INDEX_COLUMNS_1 = ['project_id']
@@ -42,16 +40,16 @@ def upgrade(migrate_engine):
meta.bind = migrate_engine
table = Table(TABLE_NAME, meta, autoload=True)
if _get_table_index(migrate_engine, TABLE_NAME, INDEX_COLUMNS_1):
- LOG.info(_LI('Skipped adding %s because an equivalent index'
- ' already exists.'), INDEX_NAME_1)
+ LOG.info('Skipped adding %s because an equivalent index'
+ ' already exists.', INDEX_NAME_1)
else:
columns = [getattr(table.c, col_name) for col_name in INDEX_COLUMNS_1]
index = Index(INDEX_NAME_1, *columns)
index.create(migrate_engine)
if _get_table_index(migrate_engine, TABLE_NAME, INDEX_COLUMNS_2):
- LOG.info(_LI('Skipped adding %s because an equivalent index'
- ' already exists.'), INDEX_NAME_2)
+ LOG.info('Skipped adding %s because an equivalent index'
+ ' already exists.', INDEX_NAME_2)
else:
columns = [getattr(table.c, col_name) for col_name in INDEX_COLUMNS_2]
index = Index(INDEX_NAME_2, *columns)
diff --git a/nova/db/sqlalchemy/utils.py b/nova/db/sqlalchemy/utils.py
index f408cb9bd8..85770d6258 100644
--- a/nova/db/sqlalchemy/utils.py
+++ b/nova/db/sqlalchemy/utils.py
@@ -23,7 +23,7 @@ from sqlalchemy.types import NullType
from nova.db.sqlalchemy import api as db
from nova import exception
-from nova.i18n import _, _LE
+from nova.i18n import _
LOG = logging.getLogger(__name__)
@@ -109,8 +109,8 @@ def create_shadow_table(migrate_engine, table_name=None, table=None,
# which raises unwrapped OperationalError, so we should catch it until
# oslo.db would wraps all such exceptions
LOG.info(repr(shadow_table))
- LOG.exception(_LE('Exception while creating table.'))
+ LOG.exception('Exception while creating table.')
raise exception.ShadowTableExists(name=shadow_table_name)
except Exception:
LOG.info(repr(shadow_table))
- LOG.exception(_LE('Exception while creating table.'))
+ LOG.exception('Exception while creating table.')
diff --git a/nova/image/download/__init__.py b/nova/image/download/__init__.py
index 81c7de485f..0b5d3453d7 100644
--- a/nova/image/download/__init__.py
+++ b/nova/image/download/__init__.py
@@ -17,8 +17,6 @@ from oslo_log import log as logging
import stevedore.driver
import stevedore.extension
-from nova.i18n import _LE
-
LOG = logging.getLogger(__name__)
@@ -36,8 +34,8 @@ def load_transfer_modules():
schemes_list = mgr.driver.get_schemes()
for scheme in schemes_list:
if scheme in module_dictionary:
- LOG.error(_LE('%(scheme)s is registered as a module twice. '
- '%(module_name)s is not being used.'),
+ LOG.error('%(scheme)s is registered as a module twice. '
+ '%(module_name)s is not being used.',
{'scheme': scheme,
'module_name': module_name})
else:
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 360f1bc8e6..00eb04121f 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -42,7 +42,6 @@ import six.moves.urllib.parse as urlparse
import nova.conf
from nova import exception
-from nova.i18n import _LE, _LI, _LW
import nova.image.download as image_xfers
from nova import objects
from nova.objects import fields
@@ -115,11 +114,10 @@ def get_api_servers():
if '//' not in api_server:
api_server = 'http://' + api_server
# NOTE(sdague): remove in O.
- LOG.warning(
- _LW("No protocol specified in for api_server '%s', "
- "please update [glance] api_servers with fully "
- "qualified url including scheme (http / https)"),
- api_server)
+ LOG.warning("No protocol specified in for api_server '%s', "
+ "please update [glance] api_servers with fully "
+ "qualified url including scheme (http / https)",
+ api_server)
api_servers.append(api_server)
random.shuffle(api_servers)
return itertools.cycle(api_servers)
@@ -177,9 +175,9 @@ class GlanceClientWrapper(object):
else:
extra = 'done trying'
- LOG.exception(_LE("Error contacting glance server "
- "'%(server)s' for '%(method)s', "
- "%(extra)s."),
+ LOG.exception("Error contacting glance server "
+ "'%(server)s' for '%(method)s', "
+ "%(extra)s.",
{'server': self.api_server,
'method': method, 'extra': extra})
if attempt == num_attempts:
@@ -208,8 +206,8 @@ class GlanceImageServiceV2(object):
try:
self._download_handlers[scheme] = mod.get_download_handler()
except Exception as ex:
- LOG.error(_LE('When loading the module %(module_str)s the '
- 'following error occurred: %(ex)s'),
+ LOG.error('When loading the module %(module_str)s the '
+ 'following error occurred: %(ex)s',
{'module_str': str(mod), 'ex': ex})
def show(self, context, image_id, include_locations=False,
@@ -255,8 +253,8 @@ class GlanceImageServiceV2(object):
except KeyError:
return None
except Exception:
- LOG.error(_LE("Failed to instantiate the download handler "
- "for %(scheme)s"), {'scheme': scheme})
+ LOG.error("Failed to instantiate the download handler "
+ "for %(scheme)s", {'scheme': scheme})
return
def detail(self, context, **kwargs):
@@ -286,11 +284,10 @@ class GlanceImageServiceV2(object):
if xfer_mod:
try:
xfer_mod.download(context, o, dst_path, loc_meta)
- LOG.info(_LI("Successfully transferred "
- "using %s"), o.scheme)
+ LOG.info("Successfully transferred using %s", o.scheme)
return
except Exception:
- LOG.exception(_LE("Download image error"))
+ LOG.exception("Download image error")
try:
image_chunks = self._client.call(context, 2, 'data', image_id)
@@ -323,8 +320,8 @@ class GlanceImageServiceV2(object):
)
except cursive_exception.SignatureVerificationError:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Image signature verification failed '
- 'for image: %s'), image_id)
+ LOG.error('Image signature verification failed '
+ 'for image: %s', image_id)
close_file = False
if data is None and dst_path:
@@ -340,13 +337,13 @@ class GlanceImageServiceV2(object):
verifier.update(chunk)
verifier.verify()
- LOG.info(_LI('Image signature verification succeeded '
- 'for image: %s'), image_id)
+ LOG.info('Image signature verification succeeded '
+ 'for image: %s', image_id)
except cryptography.exceptions.InvalidSignature:
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Image signature verification failed '
- 'for image: %s'), image_id)
+ LOG.error('Image signature verification failed '
+ 'for image: %s', image_id)
return image_chunks
else:
try:
@@ -356,16 +353,16 @@ class GlanceImageServiceV2(object):
data.write(chunk)
if verifier:
verifier.verify()
- LOG.info(_LI('Image signature verification succeeded '
- 'for image %s'), image_id)
+ LOG.info('Image signature verification succeeded '
+ 'for image %s', image_id)
except cryptography.exceptions.InvalidSignature:
data.truncate(0)
with excutils.save_and_reraise_exception():
- LOG.error(_LE('Image signature verification failed '
- 'for image: %s'), image_id)
+ LOG.error('Image signature verification failed '
+ 'for image: %s', image_id)
except Exception as ex:
with excutils.save_and_reraise_exception():
- LOG.error(_LE("Error writing to %(path)s: %(exception)s"),
+ LOG.error("Error writing to %(path)s: %(exception)s",
{'path': dst_path, 'exception': ex})
finally:
if close_file:
@@ -445,9 +442,9 @@ class GlanceImageServiceV2(object):
supported_disk_formats[0])
return supported_disk_formats[0]
- LOG.warning(_LW('Unable to determine disk_format schema from the '
- 'Image Service v2 API. Defaulting to '
- '%(preferred_disk_format)s.'),
+ LOG.warning('Unable to determine disk_format schema from the '
+ 'Image Service v2 API. Defaulting to '
+ '%(preferred_disk_format)s.',
{'preferred_disk_format': preferred_disk_formats[0]})
return preferred_disk_formats[0]